input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
= {'minx': xmin, 'miny': ymin, 'maxx':xmax, 'maxy':ymax}
wkt = template % r1
if srs_wkt is not None:srs=osr.SpatialReference(wkt=srs_wkt)
geom = ogr.CreateGeometryFromWkt(wkt,srs)
return geom
def ReprojectGeom(geom, src_srs, tgt_srs):
    ''' Reproject a geometry object in place.

        @type geom:     C{ogr.Geometry}
        @param geom:    OGR geometry object
        @type src_srs:  C{osr.SpatialReference}
        @param src_srs: OSR SpatialReference object (source projection)
        @type tgt_srs:  C{osr.SpatialReference}
        @param tgt_srs: OSR SpatialReference object (target projection)
        @rtype:         C{ogr.Geometry}
        @return:        The (mutated) input geometry

        @note: GDAL errors raised during the transform are surfaced as
               Python warnings instead of printed or raised.
    '''
    gdal.ErrorReset()
    # Silence GDAL's default error printing; we report via warnings below.
    gdal.PushErrorHandler('CPLQuietErrorHandler')
    geom.AssignSpatialReference(src_srs)
    geom.TransformTo(tgt_srs)
    # Capture any transform error message before ErrorReset() clears it.
    err = gdal.GetLastErrorMsg()
    if err: warnings.warn(err.replace('\n', ' '))
    gdal.PopErrorHandler()
    gdal.ErrorReset()
    return geom
def InvGeoTransform(gt_in):
    '''Invert a standard 3x2 set of GeoTransform coefficients.

       @param gt_in: Input geotransform (six doubles - unaltered).
       @return: Output geotransform (list of six doubles) on success,
                None if the transform is not invertible.
    '''
    # ******************************************************************************
    # * This code ported from GDALInvGeoTransform() in gdaltransformer.cpp
    # * as it isn't exposed in the python SWIG bindings until GDAL 1.7
    # * copyright & permission notices included below as per conditions.
    # ******************************************************************************
    # * $Id: gdaltransformer.cpp 15024 2008-07-24 19:25:06Z rouault $
    # *
    # * Project:  Mapinfo Image Warper
    # * Purpose:  Implementation of one or more GDALTrasformerFunc types, including
    # *           the GenImgProj (general image reprojector) transformer.
    # * Author:   <NAME>, <EMAIL>
    # *
    # ******************************************************************************
    # * Copyright (c) 2002, i3 - information integration and imaging
    # * <NAME>, CO
    # *
    # * Permission is hereby granted, free of charge, to any person obtaining a
    # * copy of this software and associated documentation files (the "Software"),
    # * to deal in the Software without restriction, including without limitation
    # * the rights to use, copy, modify, merge, publish, distribute, sublicense,
    # * and/or sell copies of the Software, and to permit persons to whom the
    # * Software is furnished to do so, subject to the following conditions:
    # *
    # * The above copyright notice and this permission notice shall be included
    # * in all copies or substantial portions of the Software.
    # *
    # * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
    # * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    # * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
    # * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    # * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
    # * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
    # * DEALINGS IN THE SOFTWARE.
    # ****************************************************************************

    # The implicit 3rd row of the affine matrix is [1 0 0].
    det = gt_in[1] * gt_in[5] - gt_in[2] * gt_in[4]
    if abs(det) < 0.000000000000001:
        return None  # singular: rotation/scale terms are not invertible

    inv_det = 1.0 / det

    # Adjoint divided by the determinant, laid out in geotransform order.
    return [
        (gt_in[2] * gt_in[3] - gt_in[0] * gt_in[5]) * inv_det,
        gt_in[5] * inv_det,
        -gt_in[2] * inv_det,
        (-gt_in[1] * gt_in[3] + gt_in[0] * gt_in[4]) * inv_det,
        -gt_in[4] * inv_det,
        gt_in[1] * inv_det,
    ]
def ApplyGeoTransform(inx, iny, gt):
    ''' Apply a geotransform to a coordinate pair.

        @param inx: Input x coordinate (double)
        @param iny: Input y coordinate (double)
        @param gt:  Input geotransform (six doubles)
        @return:    (outx, outy) transformed coordinates (two doubles)
    '''
    # Affine transform: origin + scale/rotation terms applied to (inx, iny).
    return (gt[0] + gt[1] * inx + gt[2] * iny,
            gt[3] + gt[4] * inx + gt[5] * iny)
def MapToPixel(mx, my, gt):
    ''' Convert map to pixel coordinates.

        @param mx: Input map x coordinate (double)
        @param my: Input map y coordinate (double)
        @param gt: Input geotransform (six doubles)
        @return: px,py Output coordinates (two floats)
        @note: 0,0 is UL corner of UL pixel, 0.5,0.5 is centre of UL pixel
    '''
    # Bug fix: the old guard was `gt[2]+gt[4]==0`, which wrongly took the
    # simple (no-rotation) path for rotated geotransforms where
    # gt[2] == -gt[4] != 0.  Both terms must individually be zero.
    if gt[2] == 0 and gt[4] == 0:
        # No rotation/shear: solve the two axes independently.
        px = (mx - gt[0]) / gt[1]
        py = (my - gt[3]) / gt[5]
    else:
        # General case: apply the inverted geotransform.
        px, py = ApplyGeoTransform(mx, my, InvGeoTransform(gt))
    return px, py
def PixelToMap(px, py, gt):
    ''' Convert pixel to map coordinates.

        @param px: Input pixel x coordinate (double)
        @param py: Input pixel y coordinate (double)
        @param gt: Input geotransform (six doubles)
        @return: mx,my Output coordinates (two doubles)
        @note: 0,0 is UL corner of UL pixel, 0.5,0.5 is centre of UL pixel
    '''
    # Pixel -> map is simply the forward geotransform.
    return ApplyGeoTransform(px, py, gt)
#========================================================================================================
#{VSIMEM utilities
#========================================================================================================
def read_vsimem(fn):
    '''Read GDAL vsimem files.

        @type fn: C{str}
        @param fn: Filename
        @return: File contents
    '''
    vsifile = gdal.VSIFOpenL(fn, 'r')
    try:
        # Seek to the end to discover the length, then rewind and read.
        gdal.VSIFSeekL(vsifile, 0, 2)
        vsileng = gdal.VSIFTellL(vsifile)
        gdal.VSIFSeekL(vsifile, 0, 0)
        return gdal.VSIFReadL(1, vsileng, vsifile)
    finally:
        # Bug fix: the handle was previously leaked (never closed).
        gdal.VSIFCloseL(vsifile)
def write_vsimem(fn, data):
    '''Write GDAL vsimem files.

        @type fn: C{str}
        @param fn: Filename
        @param data: Data to write
        @return: 0 on success or -1 on failure.
    '''
    handle = gdal.VSIFOpenL(fn, 'w')
    gdal.VSIFWriteL(data, 1, len(data), handle)
    # VSIFCloseL's return value doubles as the success/failure code.
    return gdal.VSIFCloseL(handle)
#========================================================================================================
#{VRT Utilities
#========================================================================================================
def CreateVRTCopy(ds):
    ''' Create a VRT copy of a gdal.Dataset object.

        @type ds:  C{gdal.Dataset}
        @param ds: Dataset object
        @rtype:    C{gdal.Dataset}
        @return:   VRT Dataset object, or None on failure
    '''
    try:
        vrtdrv = gdal.GetDriverByName('VRT')
        return vrtdrv.CreateCopy('', ds)
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # still propagate; any GDAL/attribute error yields None as before.
        return None
def CreateMosaicedVRT(files, bands, srcrects, dstrects, cols, rows, datatype, relativeToVRT=0):
    ''' Create a VRT XML string that mosaics datasets.

        For further info on VRT's, see the U{GDAL VRT Tutorial<http://www.gdal.org/gdal_vrttut.html>}

        @type files: C{[str,...,str]}
        @param files: List of files to mosaic
        @type bands: C{[int,...,int]}
        @param bands: List of band numbers (1 based). Eg. [1,2,3] will mosaic
            the first band from each file into the 1st band of the output VRT, etc.
        @type srcrects: C{[SrcRect,...,SrcRect]}
        @param srcrects: List of SrcRects, one per file, in image not map units. E.g [[0,0,512,512],...]
            will be output as <SrcRect xOff="0" yOff="0" xSize="512" ySize="512"/>.
            The SrcRect allows you to subset your input image.
        @type dstrects: C{[DstRect,...,DstRect]}
        @param dstrects: List of DstRects, One per file, in image not map units. E.g [[512,512,1024,1024],...]
            will be output as <DstRect xOff="512" yOff="512" xSize="1024" ySize="1024"/>
            The DstRect determines the spatial position of the input image in the mosaic.
        @type cols: C{int}
        @param cols: The number of columns in the output mosaic
        @type rows: C{int}
        @param rows: The number of rows in the output mosaic
        @type datatype: C{str}
        @param datatype: GDAL datatype name. Eg. Byte, Int32, UInt16
        @rtype: C{xml}
        @return: VRT XML string
    '''
    # Dead code removed: the body was wrapped in `try: ... except: raise`,
    # which is behaviorally identical to no try/except at all.
    vrt = []
    for i, band in enumerate(bands):
        vrt.append(' <VRTRasterBand dataType="%s" band="%s">' % (datatype, i + 1))
        for j, f in enumerate(files):
            vrt.append(' <SimpleSource>')
            vrt.append(' <SourceFilename relativeToVRT="%s">%s</SourceFilename>' % (relativeToVRT, f))
            # NOTE(review): SourceProperties sizes come from dstrects, not the
            # source file itself — presumably intentional; confirm against callers.
            vrt.append(' <SourceProperties RasterXSize="%s" RasterYSize="%s" DataType="%s"/>' % (dstrects[j][2], dstrects[j][3], datatype))
            vrt.append(' <SourceBand>%s</SourceBand>' % band)
            vrt.append(' <SrcRect xOff="%s" yOff="%s" xSize="%s" ySize="%s"/>' % (srcrects[j][0], srcrects[j][1], srcrects[j][2], srcrects[j][3]))
            vrt.append(' <DstRect xOff="%s" yOff="%s" xSize="%s" ySize="%s"/>' % (dstrects[j][0], dstrects[j][1], dstrects[j][2], dstrects[j][3]))
            vrt.append(' </SimpleSource>')
        vrt.append(' </VRTRasterBand>')
    return CreateCustomVRT('\n'.join(vrt), cols, rows)
def CreateSimpleVRT(bands, cols, rows, datatype, relativeToVRT=0):
    ''' Create a VRT XML string with a simple source from one or more datasets,
        each dataset will be output as a separate band.

        For further info on VRT's, see the U{GDAL VRT Tutorial<http://www.gdal.org/gdal_vrttut.html>}

        @type bands: C{[str,...,str]}
        @param bands: List of files. The first file becomes the first band and so forth.
        @type cols: C{int}
        @param cols: The number of columns in the output VRT
        @type rows: C{int}
        @param rows: The number of rows in the output VRT
        @type datatype: C{str}
        @param datatype: GDAL datatype name. Eg. Byte, Int32, UInt16
        @rtype: C{xml}
        @return: VRT XML string, or None on failure
    '''
    try:
        vrt = []
        for i, band in enumerate(bands):
            vrt.append(' <VRTRasterBand dataType="%s" band="%s">' % (datatype, i + 1))
            vrt.append(' <SimpleSource>')
            vrt.append(' <SourceFilename relativeToVRT="%s">%s</SourceFilename>' % (relativeToVRT, band))
            vrt.append(' <SourceBand>1</SourceBand>')
            vrt.append(' </SimpleSource>')
            vrt.append(' </VRTRasterBand>')
        return CreateCustomVRT('\n'.join(vrt), cols, rows)
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt propagate.
        return None
def CreateRawRasterVRT(bands,cols,rows,datatype,headeroffset=0,byteorder=None,relativeToVRT=0,nodata=None):
''' Create RawRaster VRT from one or more _single_ band files
For further info on VRT's, see the U{GDAL VRT Tutorial<http://www.gdal.org/gdal_vrttut.html>}
@type bands: C{[str,...,str]}
@param bands: List of files. The first file becomes the first band and so forth.
@type cols: C{int}
@param cols: The number of columns in the output VRT
@type rows: C{int}
@param rows: The number of rows in the output VRT
@type datatype: C{str}
@param datatype: GDAL datatype name. Eg. Byte, Int32, UInt16
@type headeroffset: C{int}
@param headeroffset: Number of bytes to skip at the start of the file
@type byteorder: C{str}
@param byteorder: Byte order of the file (MSB or LSB)
@rtype: C{xml}
@return: | |
# Copyright (C) 2013 <NAME>
# See Copyright Notice in rominfo.py
from rominfo import RomInfoParser
class GensisParser(RomInfoParser):
    """
    Parse a Sega Gensis image. Valid extensions are smd, gen, 32x, md, bin, iso, mdx.
    Sega Gensis header references and related source code:
    * http://www.zophar.net/fileuploads/2/10614uauyw/Genesis_ROM_Format.txt
    * http://en.wikibooks.org/wiki/Genesis_Programming
    * http://cgfm2.emuviews.com/txt/smdform.txt (SMD file format)
    * http://raphnet.net/electronique/genesis_cart/smd2bin/smd2bin.c (SMD file format)
    * loadrom.c of the Genesis Plus GX project:
      https://code.google.com/p/genplus-gx/source/browse/trunk/source/loadrom.c
    * rom.c of the Gens project:
      http://gens.cvs.sourceforge.net/viewvc/gens/Gens-MultiPlatform/linux/src/gens/util/file/rom.c?view=markup
    * md_slot.c of the MAME project:
      http://git.redump.net/mame/tree/src/mess/machine/md_slot.c
    """

    def getValidExtensions(self):
        """Return the list of file extensions this parser accepts."""
        return ["smd", "gen", "32x", "md", "bin", "iso", "mdx"]

    def parse(self, filename):
        """Read the ROM file and return a dict of header properties."""
        props = {}
        with open(filename, "rb") as f:
            data = bytearray(f.read())
            if len(data):
                props = self.parseBuffer(data)
        return props

    def isValidData(self, data):
        """
        Detect console name (one of two values, depending on the console's country
        of origin) or the presence of an SMD header.
        """
        if data[0x100 : 0x100 + 15] == b"SEGA MEGA DRIVE" or \
           data[0x100 : 0x100 + 12] == b"SEGA GENESIS":
            return True
        if self.hasSMDHeader(data) or self.isInterleaved(data):
            return True
        return False

    def parseBuffer(self, data):
        """Extract header fields from a (de-interleaved) ROM image."""
        props = {}

        # TODO: If extension is .mdx, decode image
        #data = [b ^ 0x40 for b in data[4 : -1]] # len(data) decreases by 5

        # Auto-detect SMD/MD interleaving and normalize to plain binary.
        if self.hasSMDHeader(data):
            data = data[0x200 : ]
            self.deinterleaveSMD(data)
        elif self.isInterleaved(data):
            self.deinterleaveMD(data)

        # 0100-010f - Console name, can be "SEGA MEGA DRIVE" or "SEGA GENESIS"
        #             depending on the console's country of origin.
        props["console"] = self._sanitize(data[0x100 : 0x100 + 16])

        # 0110-011f - Copyright notice, in most cases of this format: (C)T-XX 1988.JUL
        props["copyright"] = self._sanitize(data[0x110 : 0x110 + 16])

        # Publisher data is extracted from copyright notice
        props["publisher"] = self.getPublisher(props["copyright"])

        # 0120-014f - Domestic name, the name the game has in its country of origin
        props["foreign_title"] = self._sanitize(data[0x120 : 0x120 + 48])

        # 0150-017f - International name, the name the game has worldwide
        props["title"] = self._sanitize(data[0x150 : 0x150 + 48])

        # 0180-0181 - Type of product. Known values: GM = Game, AL = Education
        #             en.wikibooks.org uses AL, Genesis_ROM_Format.txt Uses Al, loadrom.c uses AI...
        # NOTE(review): formatting a bytearray slice with %s yields its repr on
        # Python 3 (e.g. "bytearray(b'AL')"); kept as-is to preserve output.
        props["classification"] = "Game" if data[0x180 : 0x180 + 2] == b"GM" else ("Education (%s)" % data[0x180 : 0x180 + 2])

        # 0183-018A - Product code (type was followed by a space)
        props["code"] = self._sanitize(data[0x183 : 0x183 + 8])

        # 018C-018D - Product version (code was followed by a hyphen "-")
        props["version"] = self._sanitize(data[0x18c : 0x18c + 2])

        # 018E-018F - Checksum (big-endian 16-bit)
        props["checksum"] = "%04X" % (data[0x18e] << 8 | data[0x18f])

        # 0190-019F - I/O device support
        props["device_codes"] = self._sanitize(data[0x190 : 0x190 + 16])
        props["devices"] = ", ".join([genesis_devices[d] for d in props["device_codes"]
                                      if d in genesis_devices])

        # 01C8-01EF - Memo
        props["memo"] = self._sanitize(data[0x1c8 : 0x1c8 + 40])

        # 01F0-01FF - Countries in which the product can be released. This field
        #             can contain up to three countries. According to
        #             http://www.squish.net/generator/manual.html, it may also be a
        #             single hex digit which represents a new-style country code.
        props["country_codes"] = self._sanitize(data[0x1f0 : 0x1f0 + 16])

        return props

    def deinterleaveSMD(self, data):
        """
        Super Magic Drive interleaved file-format (.SMD) is a non-straight-forward
        file-format. It has a 512-byte header and is interleaved in 16KB blocks,
        with even bytes at the beginning and odd bytes at the end.
        """
        for i in range(len(data) >> 14):
            block = data[i*0x4000 : (i + 1)*0x4000] # 0x4000 == 1 << 14
            # Even output positions come from the block's second half,
            # odd positions from its first half.
            data[i*0x4000 : (i + 1)*0x4000 : 2], data[i*0x4000 + 1 : (i + 1)*0x4000 : 2] = \
                block[0x2000 : ], block[ : 0x2000]

    def hasSMDHeader(self, data):
        """
        Returns true if the file was generated by a Super Magic Drive copier.
        Header format (512 bytes):
            Byte 00h : Size of file in 16K blocks.
            Byte 01h : 03h
            Byte 02h : Split file indicator (00h=single or last file, 40h=split file)
            Byte 08h : AAh
            Byte 09h : BBh
            Byte 0Ah : 06h
        Note that smd2bin.c and Gens don't check byte 01h, only bytes 08h-0Ah.
        """
        if len(data) < 512:
            return False
        # Bug fix: the slice was data[0x08:0x0a] (2 bytes) compared against a
        # 3-byte literal, so the signature could never match. Bytes 08h-0Ah
        # require a 3-byte slice.
        if data[0x08:0x0b] == b"\xAA\xBB\x06":
            return True
        # If the SMD header's binary data is corrupt or uniform zero, we still
        # want to detect the header, so attempt this heuristic (used in Genesis
        # Plus GX's): console text is not SEGA, size is multiple of 512, and
        # there's an odd number of 512 blocks.
        if data[0x100 : 0x100 + 4] != b"SEGA" and len(data) % 512 == 0 and (len(data) >> 9) % 2:
            return True
        # Finally, directly analyze the payload
        return self.isInterleaved(data[0x200 : ])

    def deinterleaveMD(self, data):
        """
        Multi Game Doctor file-format (.MD) is an interleaved, non-headered format.
        The interleaving it uses is equal to the SMD, but without the division in
        blocks. (Even bytes at the beginning of file, odd bytes at the end. Source
        correction: Genesis_ROM_Format.txt erroneously says "Even at the end, odd
        at the beginning.")
        """
        mid = len(data) >> 1
        data[::2], data[1::2] = data[mid : ], data[ : mid]

    def isInterleaved(self, data):
        """
        Test for interleaved (SMD or MD) data. Tests are from md_slot.c of the
        MAME project. The data parameter assumes that the SMD header has been
        stripped before being passed as an argument.
        """
        # Gens checks the odd-byte half for "EA"; MAME also checks the even-byte
        # half for "SG".
        # Bug fix: the slices were one byte short (e.g. data[0x80:0x81] is a
        # single byte compared to a 2-byte literal), so these tests could never
        # succeed. Two-byte slices are required.
        if data[0x80 : 0x82] == b"EA" and data[0x2080 : 0x2082] == b"SG":
            return True
        # Phelios USA redump, Target Earth, Klax (Namcot)
        if data[0x80 : 0x82] == b"SG" and data[0x2080 : 0x2082] == b" E":
            return True
        # For MD interleaving, instead of looking for odd bytes, just look for
        # more even bytes. We need two tests here for different console names.
        if data[0x80 : 0x80 + 4] in [b"EAMG", b"EAGN"]:
            return True
        # Test for edge cases. Tests are from md_slot.c of the MAME project.
        # Per their comments, code is taken directly from GoodGEN by Cowering.
        # Tests are for SMD interleaving which uses 16K blocks, so cases with
        # addresses < 0x2000 (8K) should also be valid tests for MD interleaving.
        # Bug fix: the signatures must be bytes literals — on Python 3 a
        # bytearray slice never compares equal to a str, so the table was dead.
        edge_cases = [
            (0x00f0, b"OL R-AEAL"),        # Jap Baseball 94
            (0x00f3, b"optrEtranet"),      # Devilish Mahjong Tower
            (0x0100, b"\x3C\x00\x00\x3C"), # Golden Axe 2 Beta
            (0x0090, b"OEARC "),           # Omega Race
            (0x6708, b" NTEBDKN"),         # Budokan Beta
            (0x02c0, b"so fCXP"),          # CDX-Pro 1.8 BIOS
            (0x0090, b"sio-Wyo "),         # Ishido (hacked)
            (0x0088, b"SS CAL "),          # Onslaught
            (0x3648, b"SG NEPIE"),         # Tram Terror Pirate
            (0x0007, b"\x1C\x0A\xB8\x0A"), # Breath of Fire 3 Chinese
            (0x1cbe, b"@TTI>"),            # Tetris Pirate
        ]
        return any(data[case[0] : case[0] + len(case[1])] == case[1] for case in edge_cases)

    def getPublisher(self, copyright_str):
        """
        Resolve a copyright string into a publisher name. It SHOULD be 4
        characters after a (C) symbol, but there are variations. When the
        company uses a number as a company code, the copyright usually has
        this format: '(C)T-XX 1988.JUL', where XX is the company code.
        """
        company = copyright_str[3:7]
        if "-" in company:
            company = company[company.rindex("-") + 1 : ]
        company = company.rstrip()
        return gensis_publishers.get(company, "")
# Register this parser with the framework's global parser registry on import.
RomInfoParser.registerParser(GensisParser())
# I/O device support codes (ROM header bytes 0x190-0x19F) mapped to
# human-readable peripheral names.
genesis_devices = {
    "J": "3B Joypad",
    "6": "6B Joypad",
    "K": "Keyboard",
    "P": "Printer",
    "B": "Control Ball",
    "F": "Floppy Drive",
    "L": "Activator",
    "4": "Team Player",
    "0": "MS Joypad",
    "R": "RS232C Serial",
    "T": "Tablet",
    "V": "Paddle",
    "C": "CD-ROM",
    "M": "Mega Mouse",
    "G": "Menacer",
}
gensis_publishers = {
"ACLD": "Ballistic",
"RSI": "Razorsoft",
"SEGA": "SEGA",
"TREC": "Treco",
"VRGN": "Virgin Games",
"WSTN": "Westone",
"10": "Takara",
"11": "Taito or Accolade",
"12": "Capcom",
"13": "Data East",
"14": "Namco or Tengen",
"15": "Sunsoft",
"16": "Bandai",
"17": "Dempa",
"18": "Technosoft",
"19": "Technosoft",
"20": "Asmik",
"22": "Micronet",
"23": "<NAME>",
"24": "American Sammy",
"29": "Kyugo",
"32": "Wolfteam",
"33": "Kaneko",
"35": "Toaplan",
"36": "Tecmo",
"40": "Toaplan",
"42": "UFL Company Limited",
"43": "Human",
"45": "Game Arts",
"47": "Sage's Creation",
"48": "Tengen",
"49": "Renovation or Telenet",
"50": "Electronic Arts",
"56": "Razorsoft",
"58": "Mentrix",
"60": "Victor Musical | |
from part1 import (
gamma_board,
gamma_busy_fields,
gamma_delete,
gamma_free_fields,
gamma_golden_move,
gamma_golden_possible,
gamma_move,
gamma_new,
)
"""
scenario: test_random_actions
uuid: 534113920
"""
"""
random actions, total chaos
"""
board = gamma_new(8, 7, 5, 8)
assert board is not None
assert gamma_move(board, 1, 1, 4) == 1
assert gamma_move(board, 1, 7, 1) == 1
assert gamma_move(board, 2, 0, 6) == 1
assert gamma_busy_fields(board, 2) == 1
assert gamma_move(board, 3, 0, 4) == 1
assert gamma_move(board, 4, 1, 0) == 1
assert gamma_move(board, 4, 4, 6) == 1
assert gamma_free_fields(board, 4) == 50
assert gamma_move(board, 5, 5, 6) == 1
assert gamma_move(board, 5, 7, 0) == 1
assert gamma_move(board, 1, 7, 3) == 1
assert gamma_move(board, 1, 0, 1) == 1
assert gamma_golden_possible(board, 1) == 1
assert gamma_move(board, 2, 0, 4) == 0
assert gamma_move(board, 3, 5, 5) == 1
assert gamma_move(board, 3, 3, 4) == 1
assert gamma_free_fields(board, 3) == 44
assert gamma_move(board, 4, 5, 4) == 1
assert gamma_move(board, 5, 6, 1) == 1
assert gamma_move(board, 5, 1, 5) == 1
assert gamma_move(board, 1, 5, 6) == 0
assert gamma_move(board, 1, 6, 5) == 1
assert gamma_free_fields(board, 1) == 40
assert gamma_move(board, 2, 6, 0) == 1
board730595885 = gamma_board(board)
assert board730595885 is not None
assert board730595885 == ("2...45..\n"
".5...31.\n"
"31.3.4..\n"
".......1\n"
"........\n"
"1.....51\n"
".4....25\n")
del board730595885
board730595885 = None
assert gamma_move(board, 3, 1, 5) == 0
assert gamma_move(board, 3, 6, 1) == 0
assert gamma_move(board, 4, 4, 7) == 0
assert gamma_move(board, 5, 0, 2) == 1
assert gamma_move(board, 5, 4, 2) == 1
assert gamma_free_fields(board, 5) == 37
assert gamma_move(board, 1, 1, 2) == 1
assert gamma_busy_fields(board, 1) == 6
assert gamma_free_fields(board, 1) == 36
assert gamma_move(board, 2, 1, 5) == 0
assert gamma_move(board, 2, 4, 0) == 1
assert gamma_golden_possible(board, 2) == 1
assert gamma_move(board, 3, 2, 5) == 1
assert gamma_move(board, 3, 3, 5) == 1
assert gamma_move(board, 4, 1, 3) == 1
assert gamma_golden_possible(board, 4) == 1
assert gamma_move(board, 5, 1, 4) == 0
assert gamma_move(board, 5, 2, 5) == 0
assert gamma_move(board, 1, 5, 4) == 0
assert gamma_move(board, 3, 5, 0) == 1
assert gamma_move(board, 3, 6, 3) == 1
assert gamma_golden_move(board, 3, 3, 1) == 0
assert gamma_move(board, 4, 3, 5) == 0
assert gamma_move(board, 4, 5, 4) == 0
assert gamma_move(board, 5, 0, 4) == 0
assert gamma_move(board, 1, 6, 5) == 0
assert gamma_move(board, 1, 7, 3) == 0
assert gamma_move(board, 2, 2, 5) == 0
assert gamma_move(board, 2, 4, 4) == 1
assert gamma_move(board, 3, 5, 4) == 0
assert gamma_move(board, 4, 2, 7) == 0
assert gamma_move(board, 4, 0, 0) == 1
assert gamma_busy_fields(board, 4) == 5
assert gamma_move(board, 5, 6, 1) == 0
assert gamma_move(board, 1, 4, 4) == 0
assert gamma_move(board, 1, 7, 4) == 1
assert gamma_move(board, 2, 0, 3) == 1
assert gamma_move(board, 3, 3, 3) == 1
assert gamma_move(board, 3, 7, 3) == 0
assert gamma_move(board, 4, 7, 5) == 1
assert gamma_move(board, 5, 1, 5) == 0
assert gamma_golden_move(board, 5, 5, 5) == 1
board768066899 = gamma_board(board)
assert board768066899 is not None
assert board768066899 == ("2...45..\n"
".533.514\n"
"31.324.1\n"
"24.3..31\n"
"51..5...\n"
"1.....51\n"
"44..2325\n")
del board768066899
board768066899 = None
assert gamma_move(board, 1, 3, 2) == 1
assert gamma_move(board, 1, 6, 3) == 0
assert gamma_busy_fields(board, 1) == 8
assert gamma_move(board, 2, 4, 0) == 0
assert gamma_move(board, 2, 3, 4) == 0
assert gamma_busy_fields(board, 2) == 5
assert gamma_golden_possible(board, 2) == 1
assert gamma_move(board, 3, 1, 4) == 0
assert gamma_move(board, 3, 3, 1) == 1
assert gamma_move(board, 4, 6, 1) == 0
assert gamma_move(board, 4, 0, 1) == 0
assert gamma_move(board, 5, 1, 4) == 0
assert gamma_move(board, 3, 1, 2) == 0
assert gamma_move(board, 4, 6, 4) == 1
assert gamma_move(board, 4, 5, 4) == 0
assert gamma_busy_fields(board, 4) == 7
assert gamma_free_fields(board, 5) == 21
assert gamma_move(board, 1, 6, 2) == 1
assert gamma_busy_fields(board, 1) == 9
assert gamma_move(board, 2, 2, 1) == 1
assert gamma_move(board, 3, 4, 2) == 0
assert gamma_free_fields(board, 3) == 19
assert gamma_move(board, 4, 0, 1) == 0
assert gamma_free_fields(board, 4) == 19
assert gamma_move(board, 5, 0, 3) == 0
board589089307 = gamma_board(board)
assert board589089307 is not None
assert board589089307 == ("2...45..\n"
".533.514\n"
"31.32441\n"
"24.3..31\n"
"51.15.1.\n"
"1.23..51\n"
"44..2325\n")
del board589089307
board589089307 = None
assert gamma_move(board, 1, 3, 2) == 0
assert gamma_move(board, 1, 7, 1) == 0
assert gamma_move(board, 2, 6, 7) == 0
assert gamma_move(board, 4, 0, 2) == 0
assert gamma_move(board, 4, 0, 3) == 0
assert gamma_move(board, 5, 1, 5) == 0
assert gamma_move(board, 1, 2, 2) == 1
assert gamma_golden_possible(board, 1) == 1
assert gamma_move(board, 2, 7, 1) == 0
assert gamma_move(board, 3, 2, 3) == 1
assert gamma_move(board, 4, 7, 0) == 0
assert gamma_move(board, 4, 2, 5) == 0
assert gamma_move(board, 5, 6, 2) == 0
assert gamma_golden_possible(board, 5) == 0
assert gamma_move(board, 1, 6, 2) == 0
assert gamma_golden_move(board, 1, 3, 6) == 0
assert gamma_move(board, 2, 0, 2) == 0
assert gamma_move(board, 3, 0, 6) == 0
assert gamma_move(board, 3, 0, 4) == 0
assert gamma_golden_move(board, 3, 6, 5) == 1
assert gamma_move(board, 4, 0, 2) == 0
assert gamma_move(board, 4, 3, 2) == 0
assert gamma_move(board, 5, 6, 7) == 0
assert gamma_move(board, 1, 0, 3) == 0
assert gamma_move(board, 1, 6, 4) == 0
assert gamma_busy_fields(board, 1) == 9
assert gamma_move(board, 2, 0, 3) == 0
assert gamma_move(board, 3, 4, 2) == 0
assert gamma_move(board, 3, 6, 1) == 0
assert gamma_move(board, 4, 1, 5) == 0
assert gamma_move(board, 4, 0, 3) == 0
assert gamma_move(board, 5, 5, 1) == 1
assert gamma_move(board, 1, 0, 2) == 0
assert gamma_move(board, 2, 6, 1) == 0
assert gamma_move(board, 2, 1, 4) == 0
assert gamma_move(board, 3, 6, 6) == 1
assert gamma_move(board, 4, 6, 2) == 0
assert gamma_move(board, 4, 5, 1) == 0
assert gamma_busy_fields(board, 4) == 7
assert gamma_move(board, 5, 6, 2) == 0
assert gamma_move(board, 5, 5, 5) == 0
assert gamma_free_fields(board, 5) == 15
assert gamma_move(board, 1, 0, 2) == 0
assert gamma_move(board, 1, 4, 5) == 1
assert gamma_move(board, 2, 5, 2) == 1
assert gamma_golden_possible(board, 2) == 1
assert gamma_move(board, 3, 5, 0) == 0
assert gamma_move(board, 4, 0, 2) == 0
assert gamma_busy_fields(board, 4) == 7
assert gamma_move(board, 5, 4, 2) == 0
assert gamma_move(board, 1, 4, 0) == 0
assert gamma_move(board, 1, 0, 3) == 0
assert gamma_move(board, 3, 6, 3) == 0
assert gamma_free_fields(board, 3) == 13
assert gamma_move(board, 4, 1, 1) == 1
assert gamma_free_fields(board, 4) == 12
assert gamma_move(board, 5, 2, 1) == 0
assert gamma_move(board, 1, 6, 3) == 0
assert gamma_move(board, 1, 3, 3) == 0
assert gamma_move(board, 2, 1, 3) == 0
assert gamma_golden_move(board, 2, 6, 5) == 1
assert gamma_move(board, 3, 7, 6) == 1
assert gamma_golden_move(board, 3, 6, 4) == 0
assert gamma_move(board, 4, 2, 5) == 0
assert gamma_move(board, 4, 1, 5) == 0
assert gamma_move(board, 5, 6, 5) == 0
assert gamma_move(board, 5, 7, 5) == 0
assert gamma_move(board, 1, 0, 0) == 0
assert gamma_move(board, 2, 3, 0) == 1
assert gamma_move(board, 3, 0, 4) == 0
assert gamma_free_fields(board, 3) == 10
assert gamma_move(board, 4, 2, 7) == 0
assert gamma_move(board, 4, 2, 1) == 0
assert gamma_move(board, 5, 5, 0) == 0
assert gamma_move(board, 1, 3, 4) == 0
assert gamma_move(board, 1, 3, 6) == 1
assert gamma_move(board, 2, 0, 0) == 0
assert gamma_move(board, 3, 7, 0) == 0
assert gamma_move(board, 4, 3, 4) == 0
assert gamma_move(board, 5, 4, 2) == 0
assert gamma_move(board, 5, 5, 6) == 0
assert gamma_move(board, 2, 2, 7) == 0
assert gamma_golden_move(board, 2, 2, 0) == 0
assert gamma_move(board, 3, 2, 1) == 0
assert gamma_golden_possible(board, 3) == 0
assert gamma_move(board, 4, 1, 4) == 0
assert gamma_move(board, 5, 6, 4) == 0
assert gamma_move(board, 5, 6, 6) == 0
assert gamma_move(board, 1, 1, 4) == 0
assert gamma_move(board, 1, 3, 2) == 0
assert gamma_move(board, 2, 1, 4) == 0
assert gamma_move(board, 3, 6, 2) | |
1887], [988, 2454], [1284, 3347], [663, 1067],
[3001, 1365], [3816, 4281], [3723, 494], [2051, 4633], [4669, 433], [2682, 4881], [485, 3333], [3939, 812],
[3098, 768], [3690, 4387], [3078, 4292], [2538, 45], [1307, 275], [1983, 147], [4551, 4662], [4076, 4929],
[1095, 1584], [912, 3754], [3724, 517], [1067, 4934], [1254, 4014], [1571, 2520], [4623, 3253], [4656, 2715],
[3397, 3255], [3865, 8], [3858, 1616], [2416, 3880], [306, 3725], [2003, 4527], [3205, 2855], [2874, 571],
[855, 837], [1230, 4049], [1521, 1629], [4015, 1527], [2652, 4949], [4287, 2636], [3113, 2920], [1159, 3921],
[1350, 2967], [708, 1998], [2014, 915], [4665, 588], [3216, 1392], [1750, 490], [2446, 3365], [2145, 1681],
[3309, 1151], [18, 338], [1205, 2979], [1137, 1048], [1882, 2288], [4083, 1822], [387, 4725], [1456, 270],
[2944, 2228], [214, 4369], [4106, 4717], [1623, 91], [110, 2644], [3109, 293], [3919, 154], [4435, 3341],
[1404, 1779], [78, 2045], [119, 1519], [2013, 969], [1772, 3524], [3503, 3407], [855, 3346], [1616, 3371],
[2031, 3523], [3748, 2901], [4583, 1162], [3235, 4978], [4202, 1826], [3809, 2030], [1574, 790], [4131, 4835],
[1449, 2409], [4450, 2529], [3240, 141], [647, 4672], [3104, 2372], [2286, 2599], [1506, 89], [3166, 1649],
[2070, 1654], [2499, 3971], [4952, 3096], [3137, 2420], [2103, 1793], [3574, 2], [2268, 1683], [2902, 1652],
[1904, 1910], [3473, 4724], [1881, 1378], [2676, 3632], [2233, 2128], [1902, 937], [3139, 1014], [4793, 1425],
[664, 1789], [2374, 4009], [3825, 834], [971, 2855], [2200, 1982], [3730, 1140], [48, 1267], [4491, 696],
[4011, 1744], [1863, 2263], [2928, 23], [181, 1457], [4500, 3100], [1273, 2123], [1910, 4284], [288, 3713],
[3471, 106], [1821, 989], [3975, 2245], [3667, 4805], [3100, 3467], [3850, 863], [3021, 4775], [4056, 4652],
[1720, 2576], [1400, 4680], [4341, 1277], [1373, 3777], [3922, 3197], [4844, 916], [2454, 1245], [2048, 3336],
[1570, 3445], [2294, 2354], [1392, 1033], [2630, 1312], [2097, 2996], [3859, 3396], [260, 4548], [4294, 2974],
[3174, 1347], [78, 1787], [3400, 546], [1263, 2284], [3441, 2057], [775, 3174], [1473, 70], [2632, 1386],
[2771, 1493], [1450, 4469], [2048, 1475], [3534, 294], [4133, 3250], [4193, 4877], [1959, 1903], [2179, 446],
[4312, 4003], [4018, 1045], [24, 3830], [1235, 4251], [2378, 3693], [3226, 1407], [341, 1810], [2045, 1501],
[2607, 3791], [2861, 3849], [1143, 4493], [400, 2803], [2225, 1654], [2249, 2650], [2856, 667], [4313, 2632],
[321, 4502], [1, 2327], [4088, 4034], [2443, 4666], [3694, 4877], [3561, 1445], [925, 2191], [3500, 3912],
[1219, 1316], [4720, 2765], [4936, 238], [3801, 480], [2571, 218], [2160, 1974], [1699, 3179], [862, 1320],
[3916, 176], [426, 1434], [429, 4833], [875, 4356], [1540, 3214], [1134, 1114], [747, 3266], [3602, 3835],
[2514, 2017], [2346, 866], [4487, 3038], [4396, 3523], [4006, 2875], [553, 1917], [4738, 1939], [2433, 297],
[1686, 1356], [4638, 1899], [3644, 3503], [3206, 4366], [3998, 2751], [487, 1511], [1899, 3992], [2085, 2088],
[3213, 579], [2765, 4336], [2984, 3454], [2162, 4985], [1155, 2], [706, 3919], [1420, 1177], [1957, 3051],
[1951, 3012], [4562, 622], [2776, 127], [3035, 3030], [4160, 1199], [2272, 3307], [2814, 3692], [881, 1916],
[1653, 3309], [3593, 1154], [56, 2545], [1636, 1820], [2900, 3517], [3702, 2734], [4452, 1212], [1477, 4815],
[3780, 2546], [2280, 3607], [3140, 1000], [1923, 4098], [1156, 3637], [3802, 1125], [3672, 454], [4616, 3977],
[1053, 4234], [4578, 3587], [3271, 1492], [1576, 3063], [3158, 2541], [1585, 3507], [1571, 3149], [2109, 4279],
[4416, 3872], [2898, 919], [2996, 419], [4896, 2905], [2752, 4922], [3445, 699], [1883, 71], [4357, 591],
[4711, 3178], [416, 3816], [2322, 724], [3119, 4137], [974, 3751], [621, 4989], [1573, 2788], [2586, 1953],
[1538, 2138], [3041, 3294], [4112, 846], [4510, 2242], [3082, 312], [3849, 2154], [2373, 304], [701, 3895],
[1792, 2899], [534, 2720], [3940, 472], [426, 4688], [814, 461], [960, 20], [1208, 169], [1551, 822],
[1500, 2595], [743, 3624], [4522, 329], [3701, 967], [43, 4433], [1680, 3804], [2666, 3956], [4242, 3045],
[4921, 3002], [4971, 1359], [3595, 2474], [3763, 4071], [4258, 1730], [2912, 24], [2126, 1827], [3952, 3889],
[3792, 3690], [2732, 2202], [1017, 972], [2281, 604], [2949, 3437], [4761, 2656], [888, 3973], [1799, 2642],
[2161, 4196], [1506, 1867], [2520, 4827], [2319, 4245], [2714, 4208], [4169, 2235], [684, 2750], [3052, 724],
[3693, 372], [946, 4024], [1252, 1458], [3189, 1103], [2211, 330], [3563, 4959], [2701, 526], [3710, 1436],
[4256, 2982], [2348, 4481], [2942, 3784], [4207, 4293], [3558, 1641], [3038, 2438], [1812, 4011], [3018, 4539],
[2509, 2968], [447, 718], [1498, 3671], [2205, 3485], [596, 1510], [226, 4885], [515, 3620], [478, 1493],
[4368, 1062], [2955, 691], [71, 1721], [2441, 647], [4753, 4776], [1605, 4683], [3538, 305], [3272, 3718],
[294, 2052], [2243, 689], [1171, 1654], [1323, 2138], [268, 3903], [4930, 3544], [4486, 1196], [4429, 762],
[3332, 1513], [2380, 2642], [4949, 3774], [4443, 2745], [2468, 2793], [1994, 4816], [2012, 3579], [4096, 4377],
[66, 589], [218, 904], [3574, 4230], [2631, 4654]]) # 供应地坐标
# Per-location stock counters, filled by data() below; index-aligned with the
# coordinates1 / coordinates2 point lists defined above.
# NOTE(review): presumably one unit of goods per location — confirm semantics.
coordinates1goods = []
coordinates2goods = []
def data():
for i in range(len(coordinates1)):
coordinates1goods.append(1) # 首先生成两个个禁忌表,用遗传求出tsp
for i in range(len(coordinates2)):
coordinates2goods.append(1)
data()
truck_coordinates = [[4292, 4798, 1]]
class Truck:
    """A delivery truck travelling between supply and demand sites.

    All state is public and mutated directly by the driver loop below:
    position, driven distance, current destination, load, and the logs used
    for plotting and route reporting.
    """

    def __init__(self, x, y):
        # Current position (starts at the depot coordinates handed in).
        self.x = x
        self.y = y
        # Previous position, used when drawing movement segments.
        self.lat_x = 0
        self.lat_y = 0
        # Index of the site the truck is currently heading to.
        self.goto = 0
        # Counts how many times this truck has been drawn.
        self.flag = 0
        # Total distance driven so far.
        self.drivedistance = 0.0
        # Kind of site currently targeted ("供应地" = supply site).
        self.goal = "供应地"
        # Number of deliveries completed.
        self.goods = 0
        # Human-readable status ("待命" = standing by).
        self.buff = "待命"
        # Ordered list of visited-stop labels (the route log).
        self.lujing = []
        # Sequence of positions for plotting, seeded with the start point.
        self.drawpath = [[x, y]]
        self.lastdrive = 0
        # Amount of goods currently on board.
        self.current_capacity = 0

    def __str__(self):
        # Keep the original (Chinese) report template byte-for-byte.
        template = '坐标: [{},{}],{}:{}当前运载的货量: {} 总共走了{}距离 正在前往{} 已经装运{}'
        return template.format(self.x, self.y, self.buff, self.goto,
                               self.current_capacity, self.drivedistance,
                               self.goal, self.goods)
def lenth(x1, y1, x2, y2):
    """Euclidean distance between two points; inputs are coerced to int.

    (Name misspelling kept: the rest of the module calls ``lenth``.)
    """
    dx = int(x1) - int(x2)
    dy = int(y1) - int(y2)
    return math.sqrt(dx * dx + dy * dy)
def paixudistmat(distmat):
    """Return the (site index, distance) pairs of *distmat* sorted by
    ascending distance.

    # Parameters
    distmat: mapping of site index -> distance.

    # Returns
    List of (index, distance) tuples, nearest first.
    """
    # sorted() already returns a list; the original element-by-element copy
    # into a fresh list was redundant.
    return sorted(distmat.items(), key=lambda item: item[1])
def sousuodistance(listone):  # input: a list of route stop labels; returns the total route length
    """Total length of a route given as a list of stop labels.

    Each label looks like "到达供应地3" ("arrived at supply site 3") or
    "到达需求地7" ("arrived at demand site 7"); the numeric suffix is the
    site index.  For every consecutive pair of stops the distance is looked
    up in the precomputed nearest-site tables and accumulated.
    """
    def soushuo(text, listone, i):
        # Distance from stop i to stop i+1; site indices are parsed from the
        # numeric suffix of each label with a regex.
        distance = 0
        op = int(re.findall("\d+", listone[i])[0])
        op2 = int(re.findall("\d+", listone[i + 1])[0])
        # print(re.findall("\d+",listone[i])[0])
        # print(op)
        # print(op2)
        if (text == "到达供应地"):
            # Current stop is a supply site: look up its distance table to
            # demand sites and find the entry for the next stop's index.
            listtwo = checklist2(op)
            # print(listtwo)
            for j in range(len(listtwo)):
                if (op2 == listtwo[j][0]):  # match next stop by its parsed index
                    distance = listtwo[j][1]
                    # print(distance)
            return distance
        if (text == "到达需求地"):
            # Current stop is a demand site: symmetric lookup via checklist1.
            listtwo = checklist1(op)
            # print(listtwo)
            for j in range(len(listtwo)):
                if (op2 == listtwo[j][0]):  # match next stop by its parsed index
                    distance = listtwo[j][1]
                    # print(distance)
            return distance
    distance = 0
    for i in range(len(listone) - 1):
        # The first 5 characters of a label are its kind ("到达供应地"/"到达需求地").
        if (listone[i][0:5] == "到达供应地"):
            distance += soushuo("到达供应地", listone, i)
        if (listone[i][0:5] == "到达需求地"):
            distance += soushuo("到达需求地", listone, i)
        # print(distance)
    return distance
def checklist1(checknum):
    """For site *checknum* of coordinates1, return (index, distance) pairs to
    every site of coordinates2, sorted nearest-first.

    NOTE(review): per the array comments, coordinates2 appears to hold the
    supply sites — confirm against the data definitions above.
    """
    origin = coordinates1[checknum]
    distances = {
        idx: lenth(origin[0], origin[1], point[0], point[1])
        for idx, point in enumerate(coordinates2)
    }
    return paixudistmat(distances)
# Precomputed lookup: one nearest-neighbour table per coordinates1 site.
totallist1 = [checklist1(i) for i in range(len(coordinates1))]
def checklist2(checknum):
    """For site *checknum* of coordinates2, return (index, distance) pairs to
    every site of coordinates1, sorted nearest-first.

    Mirror image of checklist1.
    """
    origin = coordinates2[checknum]
    distances = {
        idx: lenth(origin[0], origin[1], point[0], point[1])
        for idx, point in enumerate(coordinates1)
    }
    return paixudistmat(distances)
# Precomputed lookup: one nearest-neighbour table per coordinates2 site.
totallist2 = [checklist2(i) for i in range(len(coordinates2))]
def check(text, checknum, totallist1, totallist2):
    """Find the nearest still-available site of the requested kind.

    # Parameters
    text: "查找最近的供应地" (find nearest supply site) or
        "查找最近的需求地" (find nearest demand site).
    checknum: index of the current site in the corresponding table.
    totallist1 / totallist2: precomputed (site index, distance) tables,
        each sorted by ascending distance.

    # Returns
    (site_index, distance) of the nearest site whose availability flag is
    still positive; when none is available, falls back to
    (0, <first table entry>) as the original code did.  Returns None for an
    unrecognised *text* (original implicit behaviour).
    """
    if text == "查找最近的供应地":
        candidates = totallist1[checknum]
        availability = coordinates2goods
    elif text == "查找最近的需求地":
        candidates = totallist2[checknum]
        availability = coordinates1goods
    else:
        return None
    # BUG FIX: the original loop kept overwriting the result for EVERY
    # available entry, so it returned the farthest available site even
    # though the function is documented as finding the nearest one.  The
    # tables are sorted nearest-first, so the first available entry is the
    # correct answer.
    for site, dist in candidates:
        if availability[site] > 0:
            return (site, dist)
    # Nothing available: preserve the original fallback value.
    return (0, candidates[0])
# print(check("查找最近的需求地",0,totallist1,totallist2)) # 第一个参数是(查找) 第二个参数是 当前点序号 之后参数固定
def jisuan(num):
    """Return (index, distance) of the coordinates2 site closest to truck *num*.

    Ties resolve to the lowest index, matching the original strict-< scan.
    """
    truck = car_init[num]
    distances = [
        lenth(truck.x, truck.y, site[0], site[1]) for site in coordinates2
    ]
    nearest = min(range(len(distances)), key=distances.__getitem__)
    return (nearest, distances[nearest])
# Build one Truck per fleet entry, then dispatch each truck to its nearest
# supply site, claim that site, and log the trip.
car_init = []
for coords in truck_coordinates:
    car_init.append(Truck(coords[0], coords[1]))
for idx in range(len(truck_coordinates)):
    truck = car_init[idx]
    # jisuan() depends only on the truck position (not the goods table), so
    # one call yields both values the original computed with two calls.
    nearest, dist = jisuan(idx)
    truck.goto = nearest
    coordinates2goods[nearest] = 0  # mark this supply site as claimed
    truck.drivedistance = dist
    # Teleport the truck to the chosen supply site and record the move.
    truck.x = coordinates2[nearest][0]
    truck.y = coordinates2[nearest][1]
    truck.drawpath.append([truck.x, truck.y])
    truck.goal = "需求地"
    truck.current_capacity = 1
    truck.buff = "到达供应地"
    truck.lujing.append(str(truck.buff) + str(truck.goto))
    print(truck)
def transport():
def yusong2():
op = check("查找最近的供应地", car_init[i].goto, totallist1, totallist2)
car_init[i].lat_x = car_init[i].x
car_init[i].lat_y = car_init[i].y
car_init[i].x = coordinates2[op[0]][0]
car_init[i].y = coordinates2[op[0]][1]
# print(car_init[i].x,car_init[i].y)
car_init[i].goto = op[0]
car_init[i].buff = "到达供应地"
car_init[i].goal = "需求地"
car_init[i].lujing.append(str(car_init[i].buff) + str(car_init[i].goto))
car_init[i].drawpath.append([car_init[i].x, car_init[i].y]) | |
"Headlines_Cleanup",
"productType": None,
"deleteGrids": [
("Fcst", "Hazards", "SFC", "all", "all"),
],
},
### Null always distinct TK 4264
{
"name": "NullDistinct1",
"commentary": "Null Distinct from Non-Null",
"productType": "Phrase_Test_Local",
"createGrids": [
("Fcst", "Wind", "VECTOR", 0, 6, (2,"N"), "all"),
("Fcst", "WindGust", "SCALAR", 0, 6, 0, "all"),
("Fcst", "Wind", "VECTOR", 6, 12, (5,"N"), "all"),
("Fcst", "WindGust", "SCALAR", 6, 12, 0, "all"),
],
"checkStrings": [
"Light winds becoming north around 5 mph in the afternoon",
],
},
{
"name": "NullDistinct2",
"commentary": "Null Not Distinct from Non-Null",
"productType": "Phrase_Test_Local",
"createGrids": [
("Fcst", "Wind", "VECTOR", 0, 6, (2,"N"), "all"),
("Fcst", "WindGust", "SCALAR", 0, 6, 0, "all"),
("Fcst", "Wind", "VECTOR", 6, 12, (5,"N"), "all"),
("Fcst", "WindGust", "SCALAR", 6, 12, 0, "all"),
],
"checkStrings": [
"North winds up to 5 mph",
],
"fileChanges": [
("Phrase_Test_Local", "TextUtility", "add", null_distinct, "undo"),
],
},
### Null always distinct TK 4264
{
"name": "NullDistinct1",
"commentary": "Null Distinct from Non-Null",
"productType": "Phrase_Test_Local",
"createGrids": [
("Fcst", "Wind", "VECTOR", 0, 6, (2,"N"), "all"),
("Fcst", "WindGust", "SCALAR", 0, 6, 0, "all"),
("Fcst", "Wind", "VECTOR", 6, 12, (5,"N"), "all"),
("Fcst", "WindGust", "SCALAR", 6, 12, 0, "all"),
],
"checkStrings": [
"Light winds becoming north around 5 mph in the afternoon",
],
},
{
"name": "NullDistinct2",
"commentary": "Null Not Distinct from Non-Null",
"productType": "Phrase_Test_Local",
"createGrids": [
("Fcst", "Wind", "VECTOR", 0, 6, (2,"N"), "all"),
("Fcst", "WindGust", "SCALAR", 0, 6, 0, "all"),
("Fcst", "Wind", "VECTOR", 6, 12, (5,"N"), "all"),
("Fcst", "WindGust", "SCALAR", 6, 12, 0, "all"),
],
"checkStrings": [
"North winds up to 5 mph",
],
"fileChanges": [
("Phrase_Test_Local", "TextUtility", "add", null_distinct, "undo"),
],
},
### Null always distinct TK 4264
{
"name": "NullDistinct1",
"commentary": "Null Distinct from Non-Null",
"productType": "Phrase_Test_Local",
"createGrids": [
("Fcst", "Wind", "VECTOR", 0, 6, (2,"N"), "all"),
("Fcst", "WindGust", "SCALAR", 0, 6, 0, "all"),
("Fcst", "Wind", "VECTOR", 6, 12, (5,"N"), "all"),
("Fcst", "WindGust", "SCALAR", 6, 12, 0, "all"),
],
"checkStrings": [
"Light winds becoming north around 5 mph in the afternoon",
],
},
##### MaxMode and Mixed Wx (<NAME>) Tk 4450
{
"name": "MaxMode_and_MixedWx",
"commentary": "Using MaxMode for PoP with T plus K",
"productType": "Phrase_Test_Local",
"createGrids": [
("Fcst", "Sky", "SCALAR", 0, 6, 20, "all", 1),
("Fcst", "Sky", "SCALAR", 6, 12, 87, "all", 1),
("Fcst", "PoP", "SCALAR", 0, 6, 0, "all", 1),
("Fcst", "PoP", "SCALAR", 6, 9, 5, "all", 1),
("Fcst", "PoP", "SCALAR", 9, 12, 20, "all", 1),
("Fcst", "Wx", "WEATHER", 0, 6, "Areas:K:<NoInten>:<NoVis>:", "all"),
("Fcst", "Wx", "WEATHER", 6, 12,
"SChc:T:<NoInten>:<NoVis>:^Areas:K:<NoInten>:<NoVis>:", "all"),
],
"checkStrings": [
"Mostly sunny until late afternoon, then mostly cloudy with a 20 percent chance of thunderstorms late in the afternoon",
"Areas of smoke through the day",
],
"fileChanges": [
("Phrase_Test_Local", "TextUtility", "replace",
('"PoP": (5, "Max", None),', '"PoP": (5, "MaxMode", None),'), "undo"),
],
},
{
"name": "MaxMode_Only",
"commentary": "Using MaxMode for PoP with T",
"productType": "Phrase_Test_Local",
"createGrids": [
("Fcst", "Sky", "SCALAR", 0, 6, 20, "all", 1),
("Fcst", "Sky", "SCALAR", 6, 12, 60, "all", 1),
("Fcst", "PoP", "SCALAR", 0, 6, 0, "all", 1),
("Fcst", "PoP", "SCALAR", 6, 9, 5, "all", 1),
("Fcst", "PoP", "SCALAR", 9, 12, 20, "all", 1),
("Fcst", "Wx", "WEATHER", 0, 6, "NoWx", "all"),
("Fcst", "Wx", "WEATHER", 6, 12, "SChc:T:<NoInten>:<NoVis>:", "all"),
],
"checkStrings": [
"Mostly sunny until late afternoon, then a 20 percent chance of thunderstorms late in the afternoon",
],
"fileChanges": [
("Phrase_Test_Local", "TextUtility", "replace",
('"PoP": (5, "Max", None),', '"PoP": (5, "MaxMode", None),'), "undo"),
],
},
# TK 4676 PoP temporal coverage changed to zero
{
"name": "PoP_TemporalCoverage1",
"commentary": "tk4676 PoP temporal coverage changed to zero",
"productType": "Phrase_Test_Local",
"createGrids": [
("Fcst", "PoP", "SCALAR", -3, 1, 70, "all", 1),
("Fcst", "PoP", "SCALAR", 1, 12, 60, "all", 1),
("Fcst", "Sky", "SCALAR", 0, 12, 50, "all", 1),
("Fcst", "Wx", "WEATHER", 0, 12, "Lkly:T:<NoInten>:<NoVis>:", "all"),
],
"checkStrings": [
"Thunderstorms likely",
"Chance of thunderstorms 60 percent",
],
"fileChanges": [
("Phrase_Test_Local", "TextUtility", "replace",
('("PoP", self.binnedPercent, [3])', '("PoP", self.binnedPercent, [12])'),
"undo"),
("Phrase_Test_Local", "TextUtility", "add", tempCov, "undo"),
],
},
# TK 4676 PoP temporal coverage changed to zero
{
"name": "PoP_TemporalCoverage2",
"commentary": "tk4676 PoP temporal coverage changed to zero",
"productType": "Phrase_Test_Local",
"createGrids": [
("Fcst", "PoP", "SCALAR", -3, 1, 70, "all", 1),
("Fcst", "PoP", "SCALAR", 1, 12, 60, "all", 1),
("Fcst", "Sky", "SCALAR", 0, 12, 50, "all", 1),
("Fcst", "Wx", "WEATHER", 0, 12, "Lkly:T:<NoInten>:<NoVis>:", "all"),
],
"checkStrings": [
"Thunderstorms likely",
"Chance of thunderstorms 70 percent",
],
"fileChanges": [
("Phrase_Test_Local", "TextUtility", "replace",
('("PoP", self.binnedPercent, [3])', '("PoP", self.binnedPercent, [12])'),
"undo"),
],
},
### CWF Swell Phrase -- no secondary elements TK 4277
{
"name": "CWF_Swell1",
"commentary": "CWF Swell phrase: Neither null",
"productType": "CWF",
"createGrids": [
("Fcst", "Swell", "VECTOR", 0, 12, (7,"N"), "all"),
("Fcst", "Swell2", "VECTOR", 0, 12, (14,"E"), "all"),
],
"checkStrings": [
"Mixed swell north around 7 feet and east around 14 feet",
],
"comboFlag": 1,
"cmdLineVars": "{('Product Issuance', 'productIssuance'): 'Morning', ('Issued By', 'issuedBy'): None}",
"gridsStartTime": "6am Local",
"drtTime": "4am Local",
"fileChanges":[
("CWF_<site>_Overrides", "TextUtility", "add", CWFPeriod, "undo"),
],
},
{
"name": "CWF_Swell2",
"commentary": "CWF Swell phrase: Swell2 Null",
"productType": "CWF",
"createGrids": [
("Fcst", "Swell", "VECTOR", 0, 12, (7,"N"), "all"),
("Fcst", "Swell2", "VECTOR", 0, 12, (0,"E"), "all"),
],
"checkStrings": [
"North swell around 7 feet",
],
"comboFlag": 1,
"cmdLineVars": "{('Product Issuance', 'productIssuance'): 'Morning', ('Issued By', 'issuedBy'): None}",
"gridsStartTime": "6am Local",
"drtTime": "4am Local",
"fileChanges":[
("CWF_<site>_Overrides", "TextUtility", "add", CWFPeriod, "undo"),
],
},
{
"name": "CWF_Swell3",
"commentary": "CWF Swell phrase: Swell Null",
"productType": "CWF",
"createGrids": [
("Fcst", "Swell", "VECTOR", 0, 12, (0,"N"), "all"),
("Fcst", "Swell2", "VECTOR", 0, 12, (14,"E"), "all"),
],
"checkStrings": [
"Swell east around 14 feet",
],
"comboFlag": 1,
"cmdLineVars": "{('Product Issuance', 'productIssuance'): 'Morning', ('Issued By', 'issuedBy'): None}",
"gridsStartTime": "6am Local",
"drtTime": "4am Local",
"fileChanges":[
("CWF_<site>_Overrides", "TextUtility", "add", CWFPeriod, "undo"),
],
},
{
"name": "CWF_Swell4",
"commentary": "CWF Swell phrase: 2 sub-phrases, neither null",
"productType": "CWF",
"createGrids": [
("Fcst", "Swell", "VECTOR", 0, 6, (7,"N"), "all"),
("Fcst", "Swell2", "VECTOR", 0, 6, (12,"E"), "all"),
("Fcst", "Swell", "VECTOR", 6, 14, (14,"NW"), "all"),
("Fcst", "Swell2", "VECTOR", 6, 14, (7,"SE"), "all"),
],
"checkStrings": [
"Mixed swell north around 7 feet and east around 12 feet increasing to northwest around 14 feet and southeast around 7 feet in the afternoon",
],
"comboFlag": 1,
"cmdLineVars": "{('Product Issuance', 'productIssuance'): 'Morning', ('Issued By', 'issuedBy'): None}",
"gridsStartTime": "6am Local",
"drtTime": "4am Local",
"fileChanges":[
("CWF_<site>_Overrides", "TextUtility", "add", CWFPeriod, "undo"),
],
},
{
"name": "CWF_Swell5",
"commentary": "CWF Swell phrase: 2 sub-phrases, Swell2 null in both",
"productType": "CWF",
"createGrids": [
("Fcst", "Swell", "VECTOR", 0, 6, (7,"N"), "all"),
("Fcst", "Swell2", "VECTOR", 0, 6, (0,"E"), "all"),
("Fcst", "Swell", "VECTOR", 6, 12, (14,"NW"), "all"),
("Fcst", "Swell2", "VECTOR", 6, 12, (0,"SE"), "all"),
],
"checkStrings": [
"North swell around 7 feet increasing to northwest around 14 feet in the afternoon",
],
"comboFlag": 1,
"cmdLineVars": "{('Product Issuance', 'productIssuance'): 'Morning', ('Issued By', 'issuedBy'): None}",
"gridsStartTime": "6am Local",
"drtTime": "4am Local",
"fileChanges":[
("CWF_<site>_Overrides", "TextUtility", "add", CWFPeriod, "undo"),
],
},
{
"name": "CWF_Swell6",
"commentary": "CWF Swell phrase: 2 sub-phrases, Swell and Swell2 null in both",
"productType": "CWF",
"createGrids": [
("Fcst", "Swell", "VECTOR", 0, 6, (0,"N"), "all"),
("Fcst", "Swell2", "VECTOR", 0, 6, (0,"E"), "all"),
("Fcst", "Swell", "VECTOR", 6, 12, (0,"NW"), "all"),
("Fcst", "Swell2", "VECTOR", 6, 12, (0,"SE"), "all"),
],
"checkStrings": [
"",
],
"comboFlag": 1,
"cmdLineVars": "{('Product Issuance', 'productIssuance'): 'Morning', ('Issued By', 'issuedBy'): None}",
"gridsStartTime": "6am Local",
"drtTime": "4am Local",
"fileChanges":[
("CWF_<site>_Overrides", "TextUtility", "add", CWFPeriod, "undo"),
],
},
{
"name": "CWF_Swell7",
"commentary": "CWF Swell phrase: 2 sub-phrases, Swell null in both -- Combines",
"productType": "CWF",
"createGrids": [
("Fcst", "Swell", "VECTOR", 0, 6, (0,"N"), "all"),
("Fcst", "Swell2", "VECTOR", 0, 6, (14,"N"), "all"),
("Fcst", "Swell", "VECTOR", 6, 12, (0,"NW"), "all"),
("Fcst", "Swell2", "VECTOR", 6, 12, (7,"SE"), "all"),
],
"checkStrings": [
"Swell southeast 8 to 13 feet",
],
"comboFlag": 1,
"cmdLineVars": "{('Product Issuance', 'productIssuance'): 'Morning', ('Issued By', 'issuedBy'): None}",
"gridsStartTime": "6am Local",
"drtTime": "4am Local",
"fileChanges":[
("CWF_<site>_Overrides", "TextUtility", "add", CWFPeriod, "undo"),
],
},
{
"name": "CWF_Swell8",
"commentary": "CWF Swell phrase: 2 sub-phrases, Swell2 null in first",
"productType": | |
<filename>skdecide/builders/domain/dynamics.py
# Copyright (c) AIRBUS and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
import functools
from typing import Optional
from skdecide.core import (
D,
DiscreteDistribution,
Distribution,
EnvironmentOutcome,
SingleValueDistribution,
TransitionOutcome,
Value,
autocastable,
)
# Public API of this module: the dynamics mixin hierarchy, ordered from the
# most general (black-box environment) to the most constrained dynamics.
__all__ = [
    "Environment",
    "Simulation",
    "UncertainTransitions",
    "EnumerableTransitions",
    "DeterministicTransitions",
]
class Environment:
    """Inherit this class in a domain whose agents interact with it as a
    black-box environment.

    Examples of black-box environments include the real world and compiled
    ATARI games.

    !!! tip
        Environment domains are typically stateful: they must keep the
        current state or history in memory to compute next steps (done
        automatically by default through the #_memory attribute).
    """

    @autocastable
    def step(
        self, action: D.T_agent[D.T_concurrency[D.T_event]]
    ) -> EnvironmentOutcome[
        D.T_agent[D.T_observation],
        D.T_agent[Value[D.T_value]],
        D.T_agent[D.T_predicate],
        D.T_agent[D.T_info],
    ]:
        """Run one step of the environment's dynamics.

        Public boilerplate entry point: delegates to #Environment._step(),
        which stores the next state into the #_memory attribute and samples
        a matching observation.

        !!! tip
            When wrapping an existing external environment (e.g. compiled
            ATARI games) instead of implementing one fully in scikit-decide,
            overwrite #Environment.step() to call the external environment
            directly rather than relying on the #Environment._step() helper.

        !!! warning
            #Initializable.reset() must be called before the first call to
            #Environment.step() and whenever the end of an episode is
            reached, to reset the environment's state.

        # Parameters
        action: The action taken in the current memory (state or history) triggering the transition.

        # Returns
        The environment outcome of this step.
        """
        return self._step(action)

    def _step(
        self, action: D.T_agent[D.T_concurrency[D.T_event]]
    ) -> EnvironmentOutcome[
        D.T_agent[D.T_observation],
        D.T_agent[Value[D.T_value]],
        D.T_agent[D.T_predicate],
        D.T_agent[D.T_info],
    ]:
        """Run one step of the environment's dynamics.

        Internal boilerplate: calls #Environment._state_step() for the
        transition outcome, updates #_memory with the next state, and
        samples the corresponding observation.

        !!! tip
            When wrapping an existing external environment, overwrite
            #Environment._step() to call it directly rather than relying on
            the #Environment._state_step() helper.

        !!! warning
            #Initializable._reset() must be called before the first call to
            #Environment._step() and whenever the end of an episode is
            reached, to reset the environment's state.

        # Parameters
        action: The action taken in the current memory (state or history) triggering the transition.

        # Returns
        The environment outcome of this step.
        """
        outcome = self._state_step(action)
        state = outcome.state
        observation = self._get_observation_distribution(state, action).sample()
        maxlen = self._get_memory_maxlen()
        if maxlen == 1:
            self._memory = state
        elif maxlen > 1:
            self._memory.append(state)
        return EnvironmentOutcome(
            observation,
            outcome.value,
            outcome.termination,
            outcome.info,
        )

    def _state_step(
        self, action: D.T_agent[D.T_concurrency[D.T_event]]
    ) -> TransitionOutcome[
        D.T_state,
        D.T_agent[Value[D.T_value]],
        D.T_agent[D.T_predicate],
        D.T_agent[D.T_info],
    ]:
        """Compute one step of the transition's dynamics.

        Helper called by default from #Environment._step(); it works at the
        state level, whereas the latter works at the observation level.

        # Parameters
        action: The action taken in the current memory (state or history) triggering the transition.

        # Returns
        The transition outcome of this step.
        """
        raise NotImplementedError
class Simulation(Environment):
    """Inherit this class in a domain whose agents interact with it as a
    simulation.

    Unlike pure environment domains, simulation domains can additionally
    sample transitions from any given state, not just the current one.

    !!! tip
        Simulation domains are typically stateless: the state (or history)
        is usually passed as a parameter to their functions, so nothing
        needs to be stored. They only become stateful by default when used
        as environments (e.g. via #Initializable.reset() and
        #Environment.step()).
    """

    def _state_step(
        self, action: D.T_agent[D.T_concurrency[D.T_event]]
    ) -> TransitionOutcome[
        D.T_state,
        D.T_agent[Value[D.T_value]],
        D.T_agent[D.T_predicate],
        D.T_agent[D.T_info],
    ]:
        # Environment-style stepping reduces to sampling from the current memory.
        return self._state_sample(self._memory, action)

    @autocastable
    def set_memory(self, memory: D.T_memory[D.T_state]) -> None:
        """Set the internal memory attribute #_memory to the given one.

        Useful for choosing a specific "starting point" before rolling out
        with successive #Environment.step() calls.

        # Parameters
        memory: The memory to set internally.

        # Example
        ```python
        # Set simulation_domain memory to my_state (assuming Markovian domain)
        simulation_domain.set_memory(my_state)

        # Start a 100-steps rollout from here (applying my_action at every step)
        for _ in range(100):
            simulation_domain.step(my_action)
        ```
        """
        return self._set_memory(memory)

    def _set_memory(self, memory: D.T_memory[D.T_state]) -> None:
        """Set the internal memory attribute #_memory to the given one.

        Useful for choosing a specific "starting point" before rolling out
        with successive #Environment._step() calls.

        # Parameters
        memory: The memory to set internally.

        # Example
        ```python
        # Set simulation_domain memory to my_state (assuming Markovian domain)
        simulation_domain._set_memory(my_state)

        # Start a 100-steps rollout from here (applying my_action at every step)
        for _ in range(100):
            simulation_domain._step(my_action)
        ```
        """
        self._memory = memory

    @autocastable
    def sample(
        self,
        memory: D.T_memory[D.T_state],
        action: D.T_agent[D.T_concurrency[D.T_event]],
    ) -> EnvironmentOutcome[
        D.T_agent[D.T_observation],
        D.T_agent[Value[D.T_value]],
        D.T_agent[D.T_predicate],
        D.T_agent[D.T_info],
    ]:
        """Sample one transition of the simulator's dynamics.

        Public boilerplate entry point: delegates to #Simulation._sample(),
        which samples an observation corresponding to the sampled next
        state.

        !!! tip
            When wrapping an existing external simulator rather than
            implementing one fully in scikit-decide, overwrite
            #Simulation.sample() to call the external simulator directly
            instead of relying on the #Simulation._sample() helper.

        # Parameters
        memory: The source memory (state or history) of the transition.
        action: The action taken in the given memory (state or history) triggering the transition.

        # Returns
        The environment outcome of the sampled transition.
        """
        return self._sample(memory, action)

    def _sample(
        self,
        memory: D.T_memory[D.T_state],
        action: D.T_agent[D.T_concurrency[D.T_event]],
    ) -> EnvironmentOutcome[
        D.T_agent[D.T_observation],
        D.T_agent[Value[D.T_value]],
        D.T_agent[D.T_predicate],
        D.T_agent[D.T_info],
    ]:
        """Sample one transition of the simulator's dynamics.

        Internal boilerplate: calls #Simulation._state_sample() for the
        transition outcome, then samples an observation corresponding to
        the sampled next state.

        !!! tip
            When wrapping an existing external simulator, overwrite
            #Simulation._sample() to call it directly rather than relying
            on the #Simulation._state_sample() helper.

        # Parameters
        memory: The source memory (state or history) of the transition.
        action: The action taken in the given memory (state or history) triggering the transition.

        # Returns
        The environment outcome of the sampled transition.
        """
        outcome = self._state_sample(memory, action)
        observation = self._get_observation_distribution(
            outcome.state, action
        ).sample()
        return EnvironmentOutcome(
            observation,
            outcome.value,
            outcome.termination,
            outcome.info,
        )

    def _state_sample(
        self,
        memory: D.T_memory[D.T_state],
        action: D.T_agent[D.T_concurrency[D.T_event]],
    ) -> TransitionOutcome[
        D.T_state,
        D.T_agent[Value[D.T_value]],
        D.T_agent[D.T_predicate],
        D.T_agent[D.T_info],
    ]:
        """Compute one sample of the transition's dynamics.

        Helper called by default from #Simulation._sample(); it works at
        the state level, whereas the latter works at the observation level.

        # Parameters
        memory: The source memory (state or history) of the transition.
        action: The action taken in the given memory (state or history) triggering the transition.

        # Returns
        The transition outcome of the sampled transition.
        """
        raise NotImplementedError
class UncertainTransitions(Simulation):
"""A domain must inherit this class if its dynamics is uncertain and provided as a white-box model.
Compared to pure simulation domains, uncertain transition ones provide in addition the full probability distribution
of next states given a memory and action.
!!! tip
Uncertain transition domains are typically stateless: they do not need to store the current state or history in
memory since it is usually passed as parameter of their functions. By default, they only become stateful
whenever they are used as environments (e.g. via #Initializable.reset() and #Environment.step() functions).
"""
def _state_sample(
self,
memory: D.T_memory[D.T_state],
action: D.T_agent[D.T_concurrency[D.T_event]],
) -> TransitionOutcome[
D.T_state,
D.T_agent[Value[D.T_value]],
D.T_agent[D.T_predicate],
D.T_agent[D.T_info],
]:
next_state = self._get_next_state_distribution(memory, action).sample()
value = self._get_transition_value(memory, action, next_state)
# Termination could be inferred using get_next_state_distribution based on next_state,
# but would introduce multiple constraints on class definitions
termination = self._is_terminal(next_state)
return TransitionOutcome(next_state, value, termination, None)
@autocastable
def get_next_state_distribution(
self,
memory: D.T_memory[D.T_state],
action: D.T_agent[D.T_concurrency[D.T_event]],
) -> Distribution[D.T_state]:
"""Get the probability distribution of next state given a memory and action.
# Parameters
memory: The source memory (state or history) of the transition.
action: The | |
AVPNV_Grouped, 64),
526: ('MAC-Address-Mask-Pattern', AVPNV_OctetString, 64),
527: ('EUI64-Address', AVPNV_OctetString, 64),
528: ('EUI64-Address-Mask', AVPNV_Grouped, 64),
529: ('EUI64-Address-Mask-Pattern', AVPNV_OctetString, 64),
530: ('Port', AVPNV_Integer32, 64),
531: ('Port-Range', AVPNV_Grouped, 64),
532: ('Port-Start', AVPNV_Integer32, 64),
533: ('Port-End', AVPNV_Integer32, 64),
534: ('Use-Assigned-Address', AVP_0_534, 64),
535: ('Diffserv-Code-Point', AVP_0_535, 64),
536: ('Fragmentation-Flag', AVP_0_536, 64),
537: ('IP-Option', AVPNV_Grouped, 64),
538: ('IP-Option-Type', AVP_0_538, 64),
539: ('IP-Option-Value', AVPNV_OctetString, 64),
540: ('TCP-Option', AVPNV_Grouped, 64),
541: ('TCP-Option-Type', AVP_0_541, 64),
542: ('TCP-Option-Value', AVPNV_OctetString, 64),
543: ('TCP-Flags', AVPNV_Grouped, 64),
544: ('TCP-Flag-Type', AVPNV_Unsigned32, 64),
545: ('ICMP-Type', AVPNV_Grouped, 64),
546: ('ICMP-Type-Number', AVP_0_546, 64),
547: ('ICMP-Code', AVP_0_547, 64),
548: ('ETH-Option', AVPNV_Grouped, 64),
549: ('ETH-Proto-Type', AVPNV_Grouped, 64),
550: ('ETH-Ether-Type', AVPNV_OctetString, 64),
551: ('ETH-SAP', AVPNV_OctetString, 64),
552: ('VLAN-ID-Range', AVPNV_Grouped, 64),
553: ('S-VID-Start', AVPNV_Unsigned32, 64),
554: ('S-VID-End', AVPNV_Unsigned32, 64),
555: ('C-VID-Start', AVPNV_Unsigned32, 64),
556: ('C-VID-End', AVPNV_Unsigned32, 64),
557: ('User-Priority-Range', AVPNV_Grouped, 64),
558: ('Low-User-Priority', AVPNV_Unsigned32, 64),
559: ('High-User-Priority', AVPNV_Unsigned32, 64),
560: ('Time-Of-Day-Condition', AVPNV_Grouped, 64),
561: ('Time-Of-Day-Start', AVPNV_Unsigned32, 64),
562: ('Time-Of-Day-End', AVPNV_Unsigned32, 64),
563: ('Day-Of-Week-Mask', AVPNV_Unsigned32, 64),
564: ('Day-Of-Month-Mask', AVPNV_Unsigned32, 64),
565: ('Month-Of-Year-Mask', AVPNV_Unsigned32, 64),
566: ('Absolute-Start-Time', AVPNV_Time, 64),
567: ('Absolute-Start-Fractional-Seconds', AVPNV_Unsigned32, 64),
568: ('Absolute-End-Time', AVPNV_Time, 64),
569: ('Absolute-End-Fractional-Seconds', AVPNV_Unsigned32, 64),
570: ('Timezone-Flag', AVP_0_570, 64),
571: ('Timezone-Offset', AVPNV_Integer32, 64),
572: ('Treatment-Action', AVPNV_Grouped, 64),
573: ('QoS-Profile-Id', AVPNV_Unsigned32, 64),
574: ('QoS-Profile-Template', AVPNV_Grouped, 64),
575: ('QoS-Semantics', AVP_0_575, 64),
576: ('QoS-Parameters', AVPNV_Grouped, 64),
577: ('Excess-Treatment', AVPNV_Grouped, 64),
578: ('QoS-Capability', AVPNV_Grouped, 64),
618: ('ERP-RK-Request', AVPNV_Grouped, 64),
619: ('ERP-Realm', AVPNV_StrLenField, 64),
},
10415: {
13: ('3GPP-Charging-Characteristics', AVPV_StrLenField, 192),
318: ('3GPP-AAA-Server-Name', AVPV_StrLenField, 192),
500: ('Abort-Cause', AVP_10415_500, 192),
501: ('Access-Network-Charging-Address', AVPV_Address, 192),
502: ('Access-Network-Charging-Identifier', AVPV_Grouped, 192),
503: ('Access-Network-Charging-Identifier-Value', AVPV_OctetString, 192), # noqa: E501
504: ('AF-Application-Identifier', AVPV_OctetString, 192),
505: ('AF-Charging-Identifier', AVPV_OctetString, 192),
506: ('Authorization-Token', AVPV_OctetString, 192),
507: ('Flow-Description', AVPV_IPFilterRule, 192),
508: ('Flow-Grouping', AVPV_Grouped, 192),
509: ('Flow-Number', AVPV_Unsigned32, 192),
510: ('Flows', AVPV_Grouped, 192),
511: ('Flow-Status', AVP_10415_511, 192),
512: ('Flow-Usage', AVP_10415_512, 192),
513: ('Specific-Action', AVP_10415_513, 192),
515: ('Max-Requested-Bandwidth-DL', AVPV_Unsigned32, 192),
516: ('Max-Requested-Bandwidth-UL', AVPV_Unsigned32, 192),
517: ('Media-Component-Description', AVPV_Grouped, 192),
518: ('Media-Component-Number', AVPV_Unsigned32, 192),
519: ('Media-Sub-Component', AVPV_Grouped, 192),
520: ('Media-Type', AVP_10415_520, 192),
521: ('RR-Bandwidth', AVPV_Unsigned32, 192),
522: ('RS-Bandwidth', AVPV_Unsigned32, 192),
523: ('SIP-Forking-Indication', AVP_10415_523, 192),
525: ('Service-URN', AVPV_OctetString, 192),
526: ('Acceptable-Service-Info', AVPV_Grouped, 192),
527: ('Service-Info-Status', AVP_10415_527, 192),
528: ('MPS-Identifier', AVPV_OctetString, 128),
529: ('AF-Signalling-Protocol', AVP_10415_529, 128),
531: ('Sponsor-Identity', AVPV_StrLenField, 128),
532: ('Application-Service-Provider-Identity', AVPV_StrLenField, 128),
533: ('Rx-Request-Type', AVP_10415_533, 128),
534: ('Min-Requested-Bandwidth-DL', AVPV_Unsigned32, 128),
535: ('Min-Requested-Bandwidth-UL', AVPV_Unsigned32, 128),
536: ('Required-Access-Info', AVP_10415_536, 128),
537: ('IP-Domain-Id', AVPV_OctetString, 128),
538: ('GCS-Identifier', AVPV_OctetString, 128),
539: ('Sharing-Key-DL', AVPV_Unsigned32, 128),
540: ('Sharing-Key-UL', AVPV_Unsigned32, 128),
541: ('Retry-Interval', AVPV_Unsigned32, 128),
600: ('Visited-Network-Identifier', AVPV_OctetString, 192),
601: ('Public-Identity', AVPV_StrLenField, 192),
602: ('Server-Name', AVPV_StrLenField, 192),
603: ('Server-Capabilities', AVPV_Grouped, 192),
604: ('Mandatory-Capability', AVPV_Unsigned32, 192),
605: ('Optional-Capability', AVPV_Unsigned32, 192),
606: ('User-Data', AVPV_OctetString, 192),
607: ('SIP-Number-Auth-Items', AVPV_Unsigned32, 192),
608: ('SIP-Authentication-Scheme', AVPV_StrLenField, 192),
609: ('SIP-Authenticate', AVPV_OctetString, 192),
610: ('SIP-Authorization', AVPV_OctetString, 192),
611: ('SIP-Authentication-Context', AVPV_OctetString, 192),
612: ('SIP-Auth-Data-Item', AVPV_Grouped, 192),
613: ('SIP-Item-Number', AVPV_Unsigned32, 192),
614: ('Server-Assignment-Type', AVP_10415_614, 192),
615: ('Deregistration-Reason', AVPV_Grouped, 192),
616: ('Reason-Code', AVP_10415_616, 192),
617: ('Reason-Info', AVPV_StrLenField, 192),
618: ('Charging-Information', AVPV_Grouped, 192),
619: ('Primary-Event-Charging-Function-Name', AVPV_StrLenField, 192),
620: ('Secondary-Event-Charging-Function-Name', AVPV_StrLenField, 192),
621: ('Primary-Charging-Collection-Function-Name', AVPV_StrLenField, 192), # noqa: E501
622: ('Secondary-Charging-Collection-Function-Name', AVPV_StrLenField, 192), # noqa: E501
623: ('User-Authorization-Type', AVP_10415_623, 192),
624: ('User-Data-Already-Available', AVP_10415_624, 192),
625: ('Confidentiality-Key', AVPV_OctetString, 192),
626: ('Integrity-Key', AVPV_OctetString, 192),
628: ('Supported-Features', AVPV_Grouped, 128),
629: ('Feature-List-ID', AVPV_Unsigned32, 128),
630: ('Feature-List', AVP_10415_630, 128),
631: ('Supported-Applications', AVPV_Grouped, 128),
632: ('Associated-Identities', AVPV_Grouped, 128),
633: ('Originating-Request', AVP_10415_633, 192),
634: ('Wildcarded-Public-Identity', AVPV_StrLenField, 128),
635: ('SIP-Digest-Authenticate', AVPV_Grouped, 128),
636: ('Wildcarded-IMPU', AVPV_StrLenField, 128),
637: ('UAR-Flags', AVPV_Unsigned32, 128),
638: ('Loose-Route-Indication', AVP_10415_638, 128),
639: ('SCSCF-Restoration-Info', AVPV_Grouped, 128),
640: ('Path', AVPV_OctetString, 128),
641: ('Contact', AVPV_OctetString, 128),
642: ('Subscription-Info', AVPV_Grouped, 128),
643: ('Call-ID-SIP-Header', AVPV_OctetString, 128),
644: ('From-SIP-Header', AVPV_OctetString, 128),
645: ('To-SIP-Header', AVPV_OctetString, 128),
646: ('Record-Route', AVPV_OctetString, 128),
647: ('Associated-Registered-Identities', AVPV_Grouped, 128),
648: ('Multiple-Registration-Indication', AVP_10415_648, 128),
649: ('Restoration-Info', AVPV_Grouped, 128),
650: ('Session-Priority', AVP_10415_650, 128),
651: ('Identity-with-Emergency-Registration', AVPV_Grouped, 128),
652: ('Priviledged-Sender-Indication', AVP_10415_652, 128),
653: ('LIA-Flags', AVPV_Unsigned32, 128),
654: ('Initial-CSeq-Sequence-Number', AVPV_Unsigned32, 128),
655: ('SAR-Flags', AVPV_Unsigned32, 128),
700: ('User-Identity', AVPV_Grouped, 192),
701: ('MSISDN', AVP_10415_701, 192),
702: ('User-Data', AVPV_OctetString, 192),
703: ('Data-Reference', AVP_10415_703, 192),
704: ('Service-Indication', AVPV_OctetString, 192),
705: ('Subs-Req-Type', AVP_10415_705, 192),
706: ('Requested-Domain', AVP_10415_706, 192),
707: ('Current-Location', AVP_10415_707, 192),
708: ('Identity-Set', AVP_10415_708, 128),
709: ('Expiry-Time', AVPV_Time, 128),
710: ('Send-Data-Indication', AVP_10415_710, 128),
711: ('DSAI-Tag', AVPV_OctetString, 192),
712: ('One-Time-Notification', AVP_10415_712, 128),
713: ('Requested-Nodes', AVPV_Unsigned32, 128),
714: ('Serving-Node-Indication', AVP_10415_714, 128),
715: ('Repository-Data-ID', AVPV_Grouped, 128),
716: ('Sequence-Number', AVPV_Unsigned32, 128),
717: ('Pre-paging-Supported', AVP_10415_717, 128),
718: ('Local-Time-Zone-Indication', AVP_10415_718, 128),
719: ('UDR-Flags', AVPV_Unsigned32, 128),
720: ('Call-Reference-Info', AVPV_Grouped, 128),
721: ('Call-Reference-Number', AVPV_OctetString, 128),
722: ('AS-Number', AVPV_OctetString, 128),
823: ('Event-Type', AVPV_Grouped, 192),
824: ('SIP-Method', AVPV_StrLenField, 192),
825: ('Event', AVPV_StrLenField, 192),
826: ('Content-Type', AVPV_StrLenField, 192),
827: ('Content-Length', AVPV_Unsigned32, 192),
828: ('Content-Disposition', AVPV_StrLenField, 192),
829: ('Role-Of-Node', AVP_10415_829, 192),
830: ('Session-Id', AVPV_StrLenField, 192),
831: ('Calling-Party-Address', AVPV_StrLenField, 192),
832: ('Called-Party-Address', AVPV_StrLenField, 192),
833: ('Time-Stamps', AVPV_Grouped, 192),
834: ('SIP-Request-Timestamp', AVPV_Time, 192),
835: ('SIP-Response-Timestamp', AVPV_Time, 192),
836: ('Application-Server', AVPV_StrLenField, 192),
837: ('Application-provided-called-party-address', AVPV_StrLenField, 192), # noqa: E501
838: ('Inter-Operator-Identifier', AVPV_Grouped, 192),
839: ('Originating-IOI', AVPV_StrLenField, 192),
840: ('Terminating-IOI', AVPV_StrLenField, 192),
841: ('IMS-Charging-Identifier', AVPV_StrLenField, 192),
842: ('SDP-Session-Description', AVPV_StrLenField, 192),
843: ('SDP-Media-Component', AVPV_Grouped, 192),
844: ('SDP-Media-Name', AVPV_StrLenField, 192),
845: ('SDP-Media-Description', AVPV_StrLenField, 192),
846: ('CG-Address', AVPV_Address, 192),
847: ('GGSN-Address', AVPV_Address, 192),
848: ('Served-Party-IP-Address', AVPV_Address, 192),
849: ('Authorised-QoS', AVPV_StrLenField, 192),
850: ('Application-Server-Information', AVPV_Grouped, 192),
851: ('Trunk-Group-Id', AVPV_Grouped, 192),
852: ('Incoming-Trunk-Group-Id', AVPV_StrLenField, 192),
853: ('Outgoing-Trunk-Group-Id', AVPV_StrLenField, 192),
854: ('Bearer-Service', AVPV_OctetString, 192),
855: ('Service-Id', AVPV_StrLenField, 192),
856: ('Associated-URI', AVPV_StrLenField, 192),
857: ('Charged-Party', AVPV_StrLenField, 192),
858: ('PoC-Controlling-Address', AVPV_StrLenField, 192),
859: ('PoC-Group-Name', AVPV_StrLenField, 192),
861: ('Cause-Code', AVPV_Integer32, 192),
862: ('Node-Functionality', AVP_10415_862, 192),
864: ('Originator', AVP_10415_864, 192),
865: ('PS-Furnish-Charging-Information', AVPV_Grouped, 192),
866: ('PS-Free-Format-Data', AVPV_OctetString, 192),
867: ('PS-Append-Free-Format-Data', AVP_10415_867, 192),
868: ('Time-Quota-Threshold', AVPV_Unsigned32, 192),
869: ('Volume-Quota-Threshold', AVPV_Unsigned32, 192),
870: ('Trigger-Type', AVP_10415_870, 192),
871: ('Quota-Holding-Time', AVPV_Unsigned32, 192),
872: ('Reporting-Reason', AVP_10415_872, 192),
873: ('Service-Information', AVPV_Grouped, 192),
874: ('PS-Information', AVPV_Grouped, 192),
876: ('IMS-Information', AVPV_Grouped, 192),
877: ('MMS-Information', AVPV_Grouped, 192),
878: ('LCS-Information', AVPV_Grouped, 192),
879: ('PoC-Information', AVPV_Grouped, 192),
880: ('MBMS-Information', AVPV_Grouped, 192),
881: ('Quota-Consumption-Time', AVPV_Unsigned32, 192),
882: ('Media-Initiator-Flag', AVP_10415_882, 192),
883: ('PoC-Server-Role', AVP_10415_883, 192),
884: ('PoC-Session-Type', AVP_10415_884, 192),
885: ('Number-Of-Participants', AVPV_Unsigned32, 192),
887: ('Participants-Involved', AVPV_StrLenField, 192),
888: ('Expires', AVPV_Unsigned32, 192),
889: ('Message-Body', AVPV_Grouped, 192),
897: ('Address-Data', AVPV_StrLenField, 192),
898: ('Address-Domain', AVPV_Grouped, 192),
899: ('Address-Type', AVP_10415_899, 192),
900: ('TMGI', AVPV_OctetString, 192),
901: ('Required-MBMS-Bearer-Capabilities', AVPV_StrLenField, 192),
902: ('MBMS-StartStop-Indication', AVP_10415_902, 192),
903: ('MBMS-Service-Area', AVPV_OctetString, 192),
904: ('MBMS-Session-Duration', AVPV_OctetString, 192),
905: ('Alternative-APN', AVPV_StrLenField, 192),
906: ('MBMS-Service-Type', AVP_10415_906, 192),
907: ('MBMS-2G-3G-Indicator', AVP_10415_907, 192),
909: ('RAI', AVPV_StrLenField, 192),
910: ('Additional-MBMS-Trace-Info', AVPV_OctetString, 192),
911: ('MBMS-Time-To-Data-Transfer', AVPV_OctetString, 192),
920: ('MBMS-Flow-Identifier', AVPV_OctetString, 192),
921: ('CN-IP-Multicast-Distribution', AVP_10415_921, 192),
922: ('MBMS-HC-Indicator', AVP_10415_922, 192),
1000: ('Bearer-Usage', AVP_10415_1000, 192),
1001: ('Charging-Rule-Install', AVPV_Grouped, 192),
1002: ('Charging-Rule-Remove', AVPV_Grouped, 192),
1003: ('Charging-Rule-Definition', AVPV_Grouped, 192),
1004: ('Charging-Rule-Base-Name', AVPV_StrLenField, 192),
1005: ('Charging-Rule-Name', AVPV_OctetString, 192),
1006: ('Event-Trigger', AVP_10415_1006, 192),
1007: ('Metering-Method', AVP_10415_1007, 192),
1008: ('Offline', AVP_10415_1008, 192),
1009: ('Online', AVP_10415_1009, 192),
1010: ('Precedence', AVPV_Unsigned32, 192),
1011: ('Reporting-Level', AVP_10415_1011, 192),
1012: ('TFT-Filter', AVPV_IPFilterRule, 192),
1013: ('TFT-Packet-Filter-Information', AVPV_Grouped, 192),
1014: ('ToS-Traffic-Class', AVPV_OctetString, 192),
1015: ('PDP-Session-Operation', AVP_10415_1015, 192),
1018: ('Charging-Rule-Report', AVPV_Grouped, 192),
1019: ('PCC-Rule-Status', AVP_10415_1019, 192),
1020: ('Bearer-Identifier', AVPV_OctetString, 192),
1021: ('Bearer-Operation', AVP_10415_1021, 192),
1022: ('Access-Network-Charging-Identifier-Gx', AVPV_Grouped, 192),
1023: ('Bearer-Control-Mode', AVP_10415_1023, 192),
1024: ('Network-Request-Support', AVP_10415_1024, 192),
1025: ('Guaranteed-Bitrate-DL', AVPV_Unsigned32, 192),
1026: ('Guaranteed-Bitrate-UL', AVPV_Unsigned32, 192),
1027: ('IP-CAN-Type', AVP_10415_1027, 192),
1028: ('QoS-Class-Identifier', AVP_10415_1028, 192),
1032: ('RAT-Type', AVP_10415_1032, 128),
1033: ('Event-Report-Indication', AVPV_Grouped, 128),
1034: ('Allocation-Retention-Priority', AVPV_Grouped, 128),
1035: ('CoA-IP-Address', AVPV_Address, 128),
1036: ('Tunnel-Header-Filter', AVPV_IPFilterRule, 128),
1037: ('Tunnel-Header-Length', AVPV_Unsigned32, 128),
1038: ('Tunnel-Information', AVPV_Grouped, 128),
1039: ('CoA-Information', AVPV_Grouped, 128),
1040: ('APN-Aggregate-Max-Bitrate-DL', AVPV_Unsigned32, 128),
1041: ('APN-Aggregate-Max-Bitrate-UL', AVPV_Unsigned32, 128),
1042: ('Revalidation-Time', AVPV_Time, 192),
1043: ('Rule-Activation-Time', AVPV_Time, 192),
1044: ('Rule-Deactivation-Time', AVPV_Time, 192),
1045: ('Session-Release-Cause', AVP_10415_1045, 192),
1046: ('Priority-Level', AVPV_Unsigned32, 128),
1047: ('Pre-emption-Capability', AVP_10415_1047, 128),
1048: ('Pre-emption-Vulnerability', AVP_10415_1048, 128),
1049: ('Default-EPS-Bearer-QoS', AVPV_Grouped, 128),
1050: ('AN-GW-Address', AVPV_Address, 128),
1056: ('Security-Parameter-Index', AVPV_OctetString, 128),
1057: ('Flow-Label', AVPV_OctetString, 128),
1058: ('Flow-Information', AVPV_Grouped, 128),
1059: ('Packet-Filter-Content', AVPV_IPFilterRule, 128),
1060: ('Packet-Filter-Identifier', AVPV_OctetString, 128),
1061: ('Packet-Filter-Information', AVPV_Grouped, 128),
1062: ('Packet-Filter-Operation', AVP_10415_1062, 128),
1063: ('Resource-Allocation-Notification', AVP_10415_1063, 128),
1065: ('PDN-Connection-ID', AVPV_OctetString, 128),
1066: ('Monitoring-Key', AVPV_OctetString, 128),
1067: ('Usage-Monitoring-Information', AVPV_Grouped, 128),
1068: ('Usage-Monitoring-Level', AVP_10415_1068, 128),
1069: ('Usage-Monitoring-Report', AVP_10415_1069, 128),
1070: ('Usage-Monitoring-Support', AVP_10415_1070, 128),
1071: ('CSG-Information-Reporting', AVP_10415_1071, 128),
1072: ('Packet-Filter-Usage', AVP_10415_1072, | |
parsable .uc file
so we explicitly check the exit status
"""
return exit_status == 0
def getHelp(self):
"""Method that points to documentation"""
help_str =\
"""
USEARCH is hosted at:
http://www.drive5.com/usearch/
The following papers should be cited if this resource is used:
Edgar,RC, Haas,BJ, Clemente,JC, Quince,C, Knight,R (2011) UCHIME
improves sensitivity and speed of chimera detection, Bioinformatics
"""
return help_str
# Start Usearch61 convenience functions
def usearch61_ref_cluster(seq_path,
refseqs_fp,
percent_id=0.97,
rev=False,
save_intermediate_files=True,
minlen=64,
output_dir='.',
remove_usearch_logs=False,
verbose=False,
wordlength=8,
usearch_fast_cluster=False,
usearch61_sort_method='abundance',
otu_prefix="denovo",
usearch61_maxrejects=32,
usearch61_maxaccepts=1,
sizeorder=False,
suppress_new_clusters=False,
threads=1.0,
HALT_EXEC=False
):
""" Returns dictionary of cluster IDs:seq IDs
Overall function for reference-based clustering with usearch61
seq_path: fasta filepath to be clustered with usearch61
refseqs_fp: reference fasta filepath, used to cluster sequences against.
percent_id: percentage id to cluster at
rev: enable reverse strand matching for clustering
save_intermediate_files: Saves intermediate files created during clustering
minlen: minimum sequence length
output_dir: directory to output log, OTU mapping, and intermediate files
remove_usearch_logs: Saves usearch log files
verbose: print current processing step to stdout
wordlength: word length to use for clustering
usearch_fast_cluster: Use usearch61 fast cluster option, not as memory
efficient as the default cluster_smallmem option, requires sorting by
length, and does not allow reverse strand matching.
usearch61_sort_method: Sort sequences by abundance or length by using
functionality provided by usearch61, or do not sort by using None option.
otu_prefix: label to place in front of OTU IDs, used to prevent duplicate
IDs from appearing with reference based OTU picking.
usearch61_maxrejects: Number of rejects allowed by usearch61
usearch61_maxaccepts: Number of accepts allowed by usearch61
sizeorder: used for clustering based upon abundance of seeds (only applies
when doing open reference de novo clustering)
suppress_new_clusters: If True, will allow de novo clustering on top of
reference clusters.
threads: Specify number of threads used per core per CPU
HALT_EXEC: application controller option to halt execution.
Description of analysis workflows
---------------------------------
closed-reference approach:
dereplicate sequences first, do reference based clustering,
merge clusters/failures and dereplicated data,
write OTU mapping and failures file.
open-reference approach:
dereplicate sequences first, do reference based clustering, parse failures,
sort failures fasta according to chosen method, cluster failures, merge
reference clustering results/de novo results/dereplicated data, write
OTU mapping file.
Dereplication should save processing time for large datasets.
"""
files_to_remove = []
# Need absolute paths to avoid potential problems with app controller
if output_dir:
output_dir = join(abspath(output_dir), '')
seq_path = abspath(seq_path)
try:
if verbose:
print "Presorting sequences according to abundance..."
intermediate_fasta, dereplicated_uc, app_result =\
sort_by_abundance_usearch61(seq_path, output_dir, rev,
minlen, remove_usearch_logs, HALT_EXEC,
output_fna_filepath=join(
output_dir,
'abundance_sorted.fna'),
output_uc_filepath=join(
output_dir,
'abundance_sorted.uc'),
threads=threads)
if not save_intermediate_files:
files_to_remove.append(intermediate_fasta)
files_to_remove.append(dereplicated_uc)
if verbose:
print "Performing reference based clustering..."
clusters_fp, app_result = usearch61_cluster_ref(intermediate_fasta,
refseqs_fp, percent_id, rev, minlen, output_dir,
remove_usearch_logs, wordlength, usearch61_maxrejects,
usearch61_maxaccepts, HALT_EXEC,
output_uc_filepath=join(
output_dir,
'ref_clustered.uc'),
threads=threads)
if not save_intermediate_files:
files_to_remove.append(clusters_fp)
clusters, failures =\
parse_usearch61_clusters(open(clusters_fp, "U"), otu_prefix="",
ref_clustered=True)
dereplicated_clusters =\
parse_dereplicated_uc(open(dereplicated_uc, "U"))
clusters = merge_clusters_dereplicated_seqs(clusters,
dereplicated_clusters)
failures = merge_failures_dereplicated_seqs(failures,
dereplicated_clusters)
if not suppress_new_clusters and failures:
if verbose:
print "Parsing out sequences that failed to cluster..."
failures_fasta = parse_usearch61_failures(seq_path, set(failures),
output_fasta_fp=join(output_dir, "failures_parsed.fna"))
if not save_intermediate_files:
files_to_remove.append(failures_fasta)
denovo_clusters = usearch61_denovo_cluster(failures_fasta,
percent_id, rev, save_intermediate_files, minlen, output_dir,
remove_usearch_logs, verbose, wordlength, usearch_fast_cluster,
usearch61_sort_method, otu_prefix, usearch61_maxrejects,
usearch61_maxaccepts, sizeorder, threads, HALT_EXEC)
failures = []
# Merge ref and denovo clusters
clusters.update(denovo_clusters)
except ApplicationError:
raise ApplicationError('Error running usearch61. Possible causes are '
'unsupported version (current supported version is usearch '
'v6.1.544) is installed or improperly formatted input file was '
'provided')
except ApplicationNotFoundError:
remove_files(files_to_remove)
raise ApplicationNotFoundError('usearch61 not found, is it properly '
'installed?')
if not save_intermediate_files:
remove_files(files_to_remove)
return clusters, failures
def usearch61_denovo_cluster(seq_path,
percent_id=0.97,
rev=False,
save_intermediate_files=True,
minlen=64,
output_dir='.',
remove_usearch_logs=False,
verbose=False,
wordlength=8,
usearch_fast_cluster=False,
usearch61_sort_method='abundance',
otu_prefix="denovo",
usearch61_maxrejects=32,
usearch61_maxaccepts=1,
sizeorder=False,
threads=1.0,
HALT_EXEC=False,
file_prefix="denovo_"
):
""" Returns dictionary of cluster IDs:seq IDs
Overall function for de novo clustering with usearch61
seq_path: fasta filepath to be clustered with usearch61
percent_id: percentage id to cluster at
rev: enable reverse strand matching for clustering
save_intermediate_files: Saves intermediate files created during clustering
minlen: minimum sequence length
output_dir: directory to output log, OTU mapping, and intermediate files
remove_usearch_logs: Saves usearch log files
verbose: print current processing step to stdout
wordlength: word length to use for clustering
usearch_fast_cluster: Use usearch61 fast cluster option, not as memory
efficient as the default cluster_smallmem option, requires sorting by
length, and does not allow reverse strand matching.
usearch61_sort_method: Sort sequences by abundance or length by using
functionality provided by usearch61, or do not sort by using None option.
otu_prefix: label to place in front of OTU IDs, used to prevent duplicate
IDs from appearing with reference based OTU picking.
usearch61_maxrejects: Number of rejects allowed by usearch61
usearch61_maxaccepts: Number of accepts allowed by usearch61
sizeorder: used for clustering based upon abundance of seeds
threads: Specify number of threads used per core per CPU
HALT_EXEC: application controller option to halt execution.
"""
files_to_remove = []
# Need absolute paths to avoid potential problems with app controller
if output_dir:
output_dir = abspath(output_dir) + '/'
seq_path = abspath(seq_path)
try:
if verbose and usearch61_sort_method is not None and\
not usearch_fast_cluster:
print "Sorting sequences according to %s..." % usearch61_sort_method
# fast sorting option automatically performs length sorting
if usearch61_sort_method == 'abundance' and not usearch_fast_cluster:
intermediate_fasta, dereplicated_uc, app_result =\
sort_by_abundance_usearch61(seq_path, output_dir, rev,
minlen, remove_usearch_logs, HALT_EXEC,
output_fna_filepath=join(
output_dir,
file_prefix + 'abundance_sorted.fna'),
output_uc_filepath=join(output_dir,
file_prefix + 'abundance_sorted.uc'), threads=threads)
if not save_intermediate_files:
files_to_remove.append(intermediate_fasta)
files_to_remove.append(dereplicated_uc)
elif usearch61_sort_method == 'length' and not usearch_fast_cluster:
intermediate_fasta, app_result =\
sort_by_length_usearch61(seq_path, output_dir, minlen,
remove_usearch_logs, HALT_EXEC,
output_fna_filepath=join(output_dir,
file_prefix + 'length_sorted.fna'))
if not save_intermediate_files:
files_to_remove.append(intermediate_fasta)
else:
intermediate_fasta = seq_path
if verbose:
print "Clustering sequences de novo..."
if usearch_fast_cluster:
clusters_fp, app_result = usearch61_fast_cluster(
intermediate_fasta,
percent_id, minlen, output_dir, remove_usearch_logs, wordlength,
usearch61_maxrejects, usearch61_maxaccepts, HALT_EXEC,
output_uc_filepath=join(
output_dir,
file_prefix + 'fast_clustered.uc'), threads=threads)
if not save_intermediate_files:
files_to_remove.append(clusters_fp)
else:
clusters_fp, app_result =\
usearch61_smallmem_cluster(intermediate_fasta, percent_id,
minlen, rev, output_dir, remove_usearch_logs, wordlength,
usearch61_maxrejects, usearch61_maxaccepts, sizeorder, HALT_EXEC,
output_uc_filepath=join(output_dir,
file_prefix + 'smallmem_clustered.uc'))
if not save_intermediate_files:
files_to_remove.append(clusters_fp)
except ApplicationError:
raise ApplicationError('Error running usearch61. Possible causes are '
'unsupported version (current supported version is usearch ' +
'v6.1.544) is installed or improperly formatted input file was ' +
'provided')
except ApplicationNotFoundError:
remove_files(files_to_remove)
raise ApplicationNotFoundError('usearch61 not found, is it properly ' +
'installed?')
if usearch61_sort_method == 'abundance' and not usearch_fast_cluster:
de_novo_clusters, failures =\
parse_usearch61_clusters(open(clusters_fp, "U"), otu_prefix)
dereplicated_clusters =\
parse_dereplicated_uc(open(dereplicated_uc, "U"))
clusters = merge_clusters_dereplicated_seqs(de_novo_clusters,
dereplicated_clusters)
else:
clusters, failures =\
parse_usearch61_clusters(open(clusters_fp, "U"), otu_prefix)
if not save_intermediate_files:
remove_files(files_to_remove)
return clusters
# Start fasta sorting functions
def sort_by_abundance_usearch61(seq_path,
                                output_dir='.',
                                rev=False,
                                minlen=64,
                                remove_usearch_logs=False,
                                HALT_EXEC=False,
                                output_fna_filepath=None,
                                output_uc_filepath=None,
                                log_name="abundance_sorted.log",
                                threads=1.0):
    """ Dereplicate/abundance-sort a fasta file via a usearch61 call.

    seq_path: fasta filepath to be clustered with usearch61
    output_dir: directory to output log, OTU mapping, and intermediate files
    rev: enable reverse strand matching for clustering/sorting
    minlen: minimum sequence length
    remove_usearch_logs: Saves usearch log files
    HALT_EXEC: application controller option to halt execution
    output_fna_filepath: path to write sorted fasta filepath
    output_uc_filepath: path to write usearch61 generated .uc file
    log_name: filepath to write usearch61 generated log file
    threads: Specify number of threads used per core per CPU
    """
    # Fall back to temp files when explicit output paths were not supplied.
    if not output_fna_filepath:
        _, output_fna_filepath = mkstemp(prefix='abundance_sorted',
                                         suffix='.fna')
    if not output_uc_filepath:
        _, output_uc_filepath = mkstemp(prefix='abundance_sorted',
                                        suffix='.uc')
    sort_params = {
        '--derep_fulllength': seq_path,
        '--output': output_fna_filepath,
        '--uc': output_uc_filepath,
        '--minseqlength': minlen,
        '--sizeout': True,
        '--threads': threads
    }
    if rev:
        sort_params['--strand'] = 'both'
    if not remove_usearch_logs:
        sort_params['--log'] = join(output_dir, log_name)
    sorter = Usearch61(sort_params, WorkingDir=output_dir, HALT_EXEC=HALT_EXEC)
    return output_fna_filepath, output_uc_filepath, sorter()
def sort_by_length_usearch61(seq_path,
                             output_dir=".",
                             minlen=64,
                             remove_usearch_logs=False,
                             HALT_EXEC=False,
                             output_fna_filepath=None,
                             log_name="length_sorted.log"):
    """ Length-sort a fasta file via a usearch61 call.

    seq_path: fasta filepath to be clustered with usearch61
    output_dir: directory to output log, OTU mapping, and intermediate files
    minlen: minimum sequence length
    remove_usearch_logs: Saves usearch log files
    HALT_EXEC: application controller option to halt execution
    output_fna_filepath: path to write sorted fasta filepath
    log_name: filepath to write usearch61 generated log file
    """
    # Fall back to a temp file when no explicit output path was supplied.
    if not output_fna_filepath:
        _, output_fna_filepath = mkstemp(prefix='length_sorted', suffix='.fna')
    sort_params = {
        '--sortbylength': seq_path,
        '--output': output_fna_filepath,
        '--minseqlength': minlen
    }
    if not remove_usearch_logs:
        sort_params['--log'] = join(output_dir, log_name)
    sorter = Usearch61(sort_params, WorkingDir=output_dir, HALT_EXEC=HALT_EXEC)
    return output_fna_filepath, sorter()
# End fasta sorting functions
# Start | |
# util/_py_collections.py
# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
from __future__ import annotations
from itertools import filterfalse
from typing import AbstractSet
from typing import Any
from typing import cast
from typing import Dict
from typing import Iterable
from typing import Iterator
from typing import List
from typing import Mapping
from typing import NoReturn
from typing import Optional
from typing import Set
from typing import Tuple
from typing import TypeVar
from typing import Union
# Generic type variables for the containers below; bound=Any keeps them
# permissive while still linking argument and return types.
_T = TypeVar("_T", bound=Any)
_KT = TypeVar("_KT", bound=Any)  # mapping key type
_VT = TypeVar("_VT", bound=Any)  # mapping value type
class ImmutableContainer:
    """Mixin that rejects every mutation attempt with ``TypeError``.

    Item assignment, item deletion, and attribute assignment are all routed
    through :meth:`_immutable`; subclasses reuse it to block further methods.
    """

    __slots__ = ()

    def _immutable(self, *args: Any, **kwargs: Any) -> NoReturn:
        # Single choke point for all disabled operations.
        message = "%s object is immutable" % self.__class__.__name__
        raise TypeError(message)

    def __setattr__(self, key: str, value: Any) -> NoReturn:
        self._immutable()

    def __setitem__(self, key: Any, value: Any) -> NoReturn:
        self._immutable()

    def __delitem__(self, key: Any) -> NoReturn:
        self._immutable()
class ImmutableDictBase(ImmutableContainer, Dict[_KT, _VT]):
    """``dict`` subclass with every mutating method disabled.

    Each override simply defers to ``ImmutableContainer._immutable`` and
    therefore raises ``TypeError``; read access is inherited unchanged.
    """

    def clear(self) -> NoReturn:
        self._immutable()

    def pop(self, key: Any, default: Optional[Any] = None) -> NoReturn:
        self._immutable()

    def popitem(self) -> NoReturn:
        self._immutable()

    def setdefault(self, key: Any, default: Optional[Any] = None) -> NoReturn:
        self._immutable()

    def update(self, *args: Any, **kwargs: Any) -> NoReturn:
        self._immutable()
class immutabledict(ImmutableDictBase[_KT, _VT]):
    """Immutable dictionary.

    All population happens inside ``__new__`` through the plain ``dict``
    machinery, which bypasses the mutators disabled on the subclass; the
    union/merge helpers build new instances the same way.
    """

    def __new__(cls, *args):
        instance = dict.__new__(cls)
        dict.__init__(instance, *args)
        return instance

    def __init__(self, *args: Union[Mapping[_KT, _VT], Tuple[_KT, _VT]]):
        # Intentionally a no-op: the data was already loaded in __new__.
        pass

    def __reduce__(self):
        # Pickle round-trip: rebuild from a plain-dict snapshot.
        return immutabledict, (dict(self),)

    def union(
        self, __d: Optional[Mapping[_KT, _VT]] = None
    ) -> immutabledict[_KT, _VT]:
        """Return a copy merged with *__d*; returns self when *__d* is empty."""
        if not __d:
            return self
        merged = dict.__new__(self.__class__)
        dict.__init__(merged, self)
        dict.update(merged, __d)
        return merged

    def _union_w_kw(
        self, __d: Optional[Mapping[_KT, _VT]] = None, **kw: _VT
    ) -> immutabledict[_KT, _VT]:
        """Like :meth:`union`, additionally merging keyword arguments."""
        # not sure if C version works correctly w/ this yet
        if not __d and not kw:
            return self
        merged = dict.__new__(self.__class__)
        dict.__init__(merged, self)
        if __d:
            dict.update(merged, __d)
        dict.update(merged, kw)  # type: ignore
        return merged

    def merge_with(
        self, *dicts: Optional[Mapping[_KT, _VT]]
    ) -> immutabledict[_KT, _VT]:
        """Merge any number of optional mappings; self if all are falsy."""
        merged = None
        for mapping in dicts:
            if not mapping:
                continue
            if merged is None:
                # Lazily start the copy only once a non-empty mapping shows up.
                merged = dict.__new__(self.__class__)
                dict.__init__(merged, self)
            dict.update(merged, mapping)
        return self if merged is None else merged

    def __repr__(self) -> str:
        return "immutabledict(%s)" % dict.__repr__(self)
# Second element type for set operations that may mix element types (union &c.).
_S = TypeVar("_S", bound=Any)
class OrderedSet(Set[_T]):
__slots__ = ("_list",)
_list: List[_T]
def __init__(self, d=None):
if d is not None:
self._list = unique_list(d)
super().update(self._list)
else:
self._list = []
    def __reduce__(self):
        # Pickle via the ordered backing list; note the class is pinned to
        # OrderedSet, so subclass instances unpickle as plain OrderedSet.
        return (OrderedSet, (self._list,))
def add(self, element: _T) -> None:
if element not in self:
self._list.append(element)
super().add(element)
def remove(self, element: _T) -> None:
super().remove(element)
self._list.remove(element)
def insert(self, pos: int, element: _T) -> None:
if element not in self:
self._list.insert(pos, element)
super().add(element)
def discard(self, element: _T) -> None:
if element in self:
self._list.remove(element)
super().remove(element)
def clear(self) -> None:
super().clear()
self._list = []
def __getitem__(self, key: int) -> _T:
return self._list[key]
def __iter__(self) -> Iterator[_T]:
return iter(self._list)
def __add__(self, other: Iterator[_T]) -> "OrderedSet[_T]":
return self.union(other)
def __repr__(self) -> str:
return "%s(%r)" % (self.__class__.__name__, self._list)
__str__ = __repr__
def update(self, *iterables: Iterable[_T]) -> None:
for iterable in iterables:
for e in iterable:
if e not in self:
self._list.append(e)
super().add(e)
def __ior__(self, other: AbstractSet[_S]) -> "OrderedSet[Union[_T, _S]]":
self.update(other) # type: ignore
return self # type: ignore
def union(self, *other: Iterable[_S]) -> "OrderedSet[Union[_T, _S]]":
result: "OrderedSet[Union[_T, _S]]" = self.__class__(self) # type: ignore # noqa E501
for o in other:
result.update(o)
return result
def __or__(self, other: AbstractSet[_S]) -> "OrderedSet[Union[_T, _S]]":
return self.union(other)
def intersection(self, *other: Iterable[Any]) -> "OrderedSet[_T]":
other_set: Set[Any] = set()
other_set.update(*other)
return self.__class__(a for a in self if a in other_set)
def __and__(self, other: AbstractSet[object]) -> "OrderedSet[_T]":
return self.intersection(other)
def symmetric_difference(self, other: Iterable[_T]) -> "OrderedSet[_T]":
other_set = other if isinstance(other, set) else set(other)
result = self.__class__(a for a in self if a not in other_set)
result.update(a for a in other if a not in self)
return result
def __xor__(self, other: AbstractSet[_S]) -> "OrderedSet[Union[_T, _S]]":
return cast("OrderedSet[Union[_T, _S]]", self).symmetric_difference(
other
)
def difference(self, *other: Iterable[Any]) -> "OrderedSet[_T]":
other_set = super().difference(*other)
return self.__class__(a for a in self._list if a in other_set)
def __sub__(self, other: AbstractSet[Optional[_T]]) -> "OrderedSet[_T]":
return self.difference(other)
def intersection_update(self, *other: Iterable[Any]) -> None:
super().intersection_update(*other)
self._list = [a for a in self._list if a in self]
def __iand__(self, other: AbstractSet[object]) -> "OrderedSet[_T]":
self.intersection_update(other)
return self
def symmetric_difference_update(self, other: Iterable[Any]) -> None:
super().symmetric_difference_update(other)
self._list = [a for a in self._list if a in self]
self._list += [a for a in other if a in self]
def __ixor__(self, other: AbstractSet[_S]) -> "OrderedSet[Union[_T, _S]]":
self.symmetric_difference_update(other)
return cast("OrderedSet[Union[_T, _S]]", self)
def difference_update(self, *other: Iterable[Any]) -> None:
super().difference_update(*other)
self._list = [a for a in self._list if a in self]
def __isub__(self, other: AbstractSet[Optional[_T]]) -> "OrderedSet[_T]":
self.difference_update(other)
return self
class IdentitySet:
    """A set that considers only object id() for uniqueness.

    This strategy has edge cases for builtin types- it's possible to have
    two 'foo' strings in one of these sets, for example. Use sparingly.

    Internally backed by a dict mapping ``id(obj) -> obj`` so members are
    kept alive and iteration yields the original objects.
    """

    def __init__(self, iterable=None):
        self._members = dict()
        if iterable:
            self.update(iterable)

    def add(self, value):
        self._members[id(value)] = value

    def __contains__(self, value):
        return id(value) in self._members

    def remove(self, value):
        # Raises KeyError if the object (by identity) is absent.
        del self._members[id(value)]

    def discard(self, value):
        try:
            self.remove(value)
        except KeyError:
            pass

    def pop(self):
        try:
            pair = self._members.popitem()
            return pair[1]
        except KeyError:
            raise KeyError("pop from an empty set")

    def clear(self):
        self._members.clear()

    def __cmp__(self, other):
        raise TypeError("cannot compare sets using cmp()")

    def __eq__(self, other):
        # Only comparable with other IdentitySets; never equal to plain sets.
        if isinstance(other, IdentitySet):
            return self._members == other._members
        else:
            return False

    def __ne__(self, other):
        if isinstance(other, IdentitySet):
            return self._members != other._members
        else:
            return True

    def issubset(self, iterable):
        if isinstance(iterable, self.__class__):
            other = iterable
        else:
            other = self.__class__(iterable)
        if len(self) > len(other):
            return False
        # Any member of self missing from other disproves the subset relation.
        for m in filterfalse(
            other._members.__contains__, iter(self._members.keys())
        ):
            return False
        return True

    def __le__(self, other):
        if not isinstance(other, IdentitySet):
            return NotImplemented
        return self.issubset(other)

    def __lt__(self, other):
        if not isinstance(other, IdentitySet):
            return NotImplemented
        return len(self) < len(other) and self.issubset(other)

    def issuperset(self, iterable):
        if isinstance(iterable, self.__class__):
            other = iterable
        else:
            other = self.__class__(iterable)
        if len(self) < len(other):
            return False
        for m in filterfalse(
            self._members.__contains__, iter(other._members.keys())
        ):
            return False
        return True

    def __ge__(self, other):
        if not isinstance(other, IdentitySet):
            return NotImplemented
        return self.issuperset(other)

    def __gt__(self, other):
        if not isinstance(other, IdentitySet):
            return NotImplemented
        return len(self) > len(other) and self.issuperset(other)

    def union(self, iterable):
        result = self.__class__()
        members = self._members
        result._members.update(members)
        result._members.update((id(obj), obj) for obj in iterable)
        return result

    def __or__(self, other):
        if not isinstance(other, IdentitySet):
            return NotImplemented
        return self.union(other)

    def update(self, iterable):
        self._members.update((id(obj), obj) for obj in iterable)

    def __ior__(self, other):
        if not isinstance(other, IdentitySet):
            return NotImplemented
        self.update(other)
        return self

    def difference(self, iterable):
        # __new__ skips __init__; _members is assigned directly below.
        result = self.__new__(self.__class__)
        if isinstance(iterable, self.__class__):
            other = iterable._members
        else:
            other = {id(obj) for obj in iterable}
        result._members = {
            k: v for k, v in self._members.items() if k not in other
        }
        return result

    def __sub__(self, other):
        if not isinstance(other, IdentitySet):
            return NotImplemented
        return self.difference(other)

    def difference_update(self, iterable):
        self._members = self.difference(iterable)._members

    def __isub__(self, other):
        if not isinstance(other, IdentitySet):
            return NotImplemented
        self.difference_update(other)
        return self

    def intersection(self, iterable):
        result = self.__new__(self.__class__)
        if isinstance(iterable, self.__class__):
            other = iterable._members
        else:
            other = {id(obj) for obj in iterable}
        result._members = {
            k: v for k, v in self._members.items() if k in other
        }
        return result

    def __and__(self, other):
        if not isinstance(other, IdentitySet):
            return NotImplemented
        return self.intersection(other)

    def intersection_update(self, iterable):
        self._members = self.intersection(iterable)._members

    def __iand__(self, other):
        if not isinstance(other, IdentitySet):
            return NotImplemented
        self.intersection_update(other)
        return self

    def symmetric_difference(self, iterable):
        result = self.__new__(self.__class__)
        if isinstance(iterable, self.__class__):
            other = iterable._members
        else:
            other = {id(obj): obj for obj in iterable}
        result._members = {
            k: v for k, v in self._members.items() if k not in other
        }
        result._members.update(
            (k, v) for k, v in other.items() if k not in self._members
        )
        return result

    def __xor__(self, other):
        if not isinstance(other, IdentitySet):
            return NotImplemented
        return self.symmetric_difference(other)

    def symmetric_difference_update(self, iterable):
        self._members = self.symmetric_difference(iterable)._members

    def __ixor__(self, other):
        if not isinstance(other, IdentitySet):
            return NotImplemented
        # Bug fix: this previously called symmetric_difference() and
        # discarded the result, leaving self unchanged; use the in-place
        # variant (consistent with __isub__ / __iand__).
        self.symmetric_difference_update(other)
        return self

    def copy(self):
        result = self.__new__(self.__class__)
        result._members = self._members.copy()
        return result

    __copy__ = copy

    def __len__(self):
        return len(self._members)

    def __iter__(self):
        return iter(self._members.values())

    def __hash__(self):
        raise TypeError("set objects are unhashable")

    def __repr__(self):
        return "%s(%r)" % (type(self).__name__, list(self._members.values()))
def unique_list(seq, hashfunc=None):
seen = set()
seen_add = seen.add
if not hashfunc:
return [x for x in seq if x not in seen and not seen_add(x)]
else:
return [
x
for x in | |
import itertools
import logging
import json
import operator
import time
from collections import defaultdict
from typing import List, Tuple, Dict
from mip import Model, minimize, CONTINUOUS, xsum, OptimizationStatus
from rapidstream.BE.Utilities import isPairSLRCrossing
from rapidstream.BE.Device.U250 import idx_of_left_side_slice_of_laguna_column
from autobridge.Device.DeviceManager import DeviceU250
from autobridge.Opt.Slot import Slot
# Singleton device description used for all Slot construction in this module.
U250_inst = DeviceU250()
# Reverse lookup: SLICE X coordinate of a laguna column's left-side slice
# -> index of that laguna column.
slice_to_laguna = {idx_of_left_side_slice_of_laguna_column[i] : i \
    for i in range(len(idx_of_left_side_slice_of_laguna_column))}
class SLLChannel:
    """
    each SLLChannel consists of 24 SLL wires
    The bottom/top of those SLL wires are the same with reference to SLICE coordinates
    each channel will correspond to 8 laguna sites, each with 6 RX registers
    if an anchor is upward, it must be placed on the RX at the top side
    otherwise it must be placed on the RX at the bottom side
    """

    def __init__(self, bottom_coor_y, i_th_column: int):
        # SLICE X coordinate of the left-side slice of this laguna column.
        self.bottom_coor_x = idx_of_left_side_slice_of_laguna_column[i_th_column]
        self.bottom_coor_y = bottom_coor_y
        # The SLL runs straight up: same X, 60 SLICE rows higher at the top.
        self.top_coor_x = self.bottom_coor_x
        self.top_coor_y = bottom_coor_y + 60
        # Usable wires per channel; derated from the physical 24 (20/24).
        self.capacity = 20
        # Y bounds of the 120-row slot containing the channel bottom.
        self.bottom_slot_y_min = int(bottom_coor_y / 120) * 120
        self.bottom_slot_y_max = self.bottom_slot_y_min + 119
        self._initRXList(i_th_column, bottom_coor_y)

    def __hash__(self):
        # A channel is identified by its bottom endpoint coordinates.
        return hash((self.bottom_coor_x, self.bottom_coor_y))

    def __str__(self):
        return self.getString()

    def getString(self):
        # Human-readable "bottom <-> top" endpoint description.
        return f'X{self.bottom_coor_x}Y{self.bottom_coor_y} <-> X{self.top_coor_x}Y{self.top_coor_y}'

    def _initRXList(self, i_th_column, bottom_coor_y):
        """
        get the laguna RX registers associated with this channel
        """
        # Each laguna column index maps to two LAGUNA site columns (2i, 2i+1)
        # and each SLICE row to two laguna rows (see _get_nearest_laguna_y),
        # giving 4 sites per end of the channel.
        bottom_laguna_sites = [
            f'LAGUNA_X{x}Y{y}' for x in (i_th_column*2, i_th_column*2+1) \
                for y in self._get_nearest_laguna_y(bottom_coor_y) ]
        top_laguna_sites = [
            f'LAGUNA_X{x}Y{y}' for x in (i_th_column*2, i_th_column*2+1) \
                for y in self._get_nearest_laguna_y(bottom_coor_y + 60) ]
        # each laguna site has 6 RX registers
        self.bottom_laguna_RX = [f'{site}/RX_REG{i}' for i in range(6) for site in bottom_laguna_sites]
        self.top_laguna_RX = [f'{site}/RX_REG{i}' for i in range(6) for site in top_laguna_sites]

    def _get_nearest_laguna_y(self, slice_y):
        """
        convert from SLICE coordinate to laguna coordinate
        """
        # Only the three SLR-boundary bands are valid; within a band the
        # SLICE rows map 1:2 onto laguna rows.
        if 180 <= slice_y <= 299:
            laguna_y = (slice_y - 180) * 2 + 120
        elif 420 <= slice_y <= 539:
            laguna_y = (slice_y - 420) * 2 + 360
        elif 660 <= slice_y <= 779:
            laguna_y = (slice_y - 660) * 2 + 600
        else:
            assert False
        # A SLICE row spans two adjacent laguna rows.
        return (laguna_y, laguna_y+1)

    def getCostForAnchor(self, list_of_cell_property_dict: List[Dict], anchor_direction: str) -> float:
        """
        the cost for placing an anchor on this channel

        Cost = average weighted distance + imbalance penalty + hold
        penalty (details below).
        """
        SLR_crossing_penalty = 10
        SLL_length = 60
        # Wires ending on LUTs are penalized in proportion to LUT depth.
        lut_penalty = lambda num_lut_on_path : 1 + 0.3 * num_lut_on_path
        def getDistFromCells(list_of_cell_property_dict: List[Dict]) -> List[int]:
            """
            Distance between the RX of the SLL and the end cell.
            If the connection goes up, the dist is between the end cells and the top_coor
            Else the dist is between the end cells and the bottom_coor
            """
            dists = []
            for cell_property_dict in list_of_cell_property_dict:
                loc = cell_property_dict["normalized_coordinate"]
                x, y = loc[0], loc[1]
                # determine if the cell is at the top side or bottom side
                is_cell_at_bottom = self.bottom_slot_y_min <= y <= self.bottom_slot_y_max
                if anchor_direction == 'DOWN':
                    if is_cell_at_bottom:
                        # Cell on the same side as the RX: plain Manhattan distance.
                        orig_dist = abs(x - self.bottom_coor_x) + abs(y - self.bottom_coor_y)
                    else:
                        # if a connection goes down, the end cell at the top will connect to
                        # the input of the SLL at the top, then travel through SLL to the RX at the bottom
                        orig_dist = SLR_crossing_penalty + SLL_length + abs(x - self.top_coor_x) + abs(y - self.top_coor_y)
                elif anchor_direction == 'UP':
                    if is_cell_at_bottom:
                        # if a connection goes up, the end cell at the bottom will connect to
                        # the input of the SLL at the bottom, then travel through SLL to the RX at the top
                        orig_dist = SLR_crossing_penalty + SLL_length + abs(x - self.bottom_coor_x) + abs(y - self.bottom_coor_y)
                    else:
                        orig_dist = abs(x - self.top_coor_x) + abs(y - self.top_coor_y)
                else:
                    assert False
                # penaltize wires to LUTs
                dists.append(orig_dist * lut_penalty(cell_property_dict["num_lut_on_path"]))
            return dists
        dists = getDistFromCells(list_of_cell_property_dict)
        # avg wire length
        dist_score = sum(dists) / len(dists)
        # penalize spread between the longest and shortest connection
        unbalance_penalty = max(dists) - min(dists)
        # prevent extremely short wires
        hold_penalty = max(0, 10 - min(dists))
        return dist_score + unbalance_penalty + hold_penalty

    def placeAnchor(self, anchor_dir):
        """
        mark an RX register as occupied by popping it out
        The top side consumes its RX list from the end (pop()), the bottom
        side from the front (pop(0)), so the two directions drain opposite
        ends of the register pools.
        Note that each SLL is associate with two RX and two TX registers
        so that it can be used in both directions. But only one of them could be used.
        """
        if anchor_dir == 'UP':
            return self.top_laguna_RX.pop()
        elif anchor_dir == 'DOWN':
            return self.bottom_laguna_RX.pop(0)
        else:
            assert False, anchor_dir
def _get_anchor_2_sll_dir(hub, slot1_name, slot2_name, anchor_connections: Dict[str, List[Dict[str, str]]]) -> Dict[str, str]:
    """
    each anchor will use one SLL connection.
    get which direction will the SLL will be used, upward or downward
    """
    first = Slot(U250_inst, slot1_name)
    second = Slot(U250_inst, slot2_name)
    # The slot with the larger down-left Y sits above the SLR boundary.
    upper = first if first.down_left_y > second.down_left_y else second
    upper_rtl = upper.getRTLModuleName()

    # Downward-facing IO of the upper slot crosses this boundary.
    boundary_io = hub['PathPlanningWire'][upper_rtl]['DOWN']

    # Consistency check: every slot IO except ap_clk must be assigned to
    # exactly one direction group; any leftover must be a top-level port.
    all_io = hub['SlotIO'][upper_rtl]
    grouped_io = list(itertools.chain.from_iterable(hub['PathPlanningWire'][upper_rtl].values()))
    if len(all_io) != len(grouped_io) + 1:  # +1 because of ap_clk
        leftover = set(io[-1] for io in all_io) - set(io[-1] for io in grouped_io)
        # the only difference should be top-level IOs
        assert all('_axi_' in d or 'clk' in d or 'interrupt' == d or 'ap_rst_n' == d for d in leftover), leftover

    # An output of the upper slot travels DOWN the SLL; an input travels UP.
    def sll_dir_of(in_or_out):
        return 'DOWN' if in_or_out == 'output' else 'UP'

    wire_to_dir = {io[-1]: sll_dir_of(io[0]) for io in boundary_io}
    # Map each anchor back to its HLS variable to look up the direction.
    return {
        anchor: wire_to_dir[anchor.split('_q0_reg')[0]]
        for anchor in anchor_connections
    }
def getSLLChannelToAnchorCost(
        sll_channel_list: List[SLLChannel],
        anchor_connections: Dict[str, List[Dict]],
        anchor_to_sll_dir: Dict[str, str]):
    """
    We need to assign a score if an anchor is placed in a bin
    To prevent hold violation, we neglect the length of the SLL. Thus the distance will be
    (1) the source cell to the input of the SLL
    (2) the output of the SLL to the destination cells
    return: SLL channel -> anchor -> score
    """
    # Forward map: channel -> anchor -> cost.
    sll_to_anchor_to_cost = {
        channel: {
            anchor: channel.getCostForAnchor(cell_props, anchor_to_sll_dir[anchor])
            for anchor, cell_props in anchor_connections.items()
        }
        for channel in sll_channel_list
    }
    # Transposed map: anchor -> channel -> cost.
    anchor_to_sll_to_cost = defaultdict(dict)
    for channel, per_anchor in sll_to_anchor_to_cost.items():
        for anchor, cost in per_anchor.items():
            anchor_to_sll_to_cost[anchor][channel] = cost
    saveAnchorToSLLToCost(anchor_to_sll_to_cost)
    return sll_to_anchor_to_cost, anchor_to_sll_to_cost
def getSLLChannels(slot1_name: str, slot2_name: str) -> List[SLLChannel]:
    """
    get all SLL channels between a slot pair
    each channel should have an input coor, an output coor, and 24 RX names
    first get the X coor of the 4 columns
    """
    slot1 = Slot(U250_inst, slot1_name)
    slot2 = Slot(U250_inst, slot2_name)
    # Two laguna columns per slot column.
    i_th_column_range = range(slot1.down_left_x * 2, (slot1.up_right_x + 1) * 2)
    # Map the pair's bottom Y (slot units) to the SLICE Y range of the SLR
    # boundary the pair straddles.
    base_to_y_range = {
        2: range(180, 240),
        6: range(420, 480),
        10: range(660, 720),
    }
    pair_down_left_y = min(slot1.down_left_y, slot2.down_left_y)
    assert pair_down_left_y in base_to_y_range
    sll_bottom_y_range = base_to_y_range[pair_down_left_y]
    sll_channels = [SLLChannel(y, i) for y in sll_bottom_y_range for i in i_th_column_range]
    logging.info(f'SLL channel num: {len(sll_channels)}')
    logging.info(f'Total SLL channel capacity: {len(sll_channels) * sll_channels[0].capacity }')
    return sll_channels
def placeAnchorToSLLChannel(anchor_to_sll_to_cost, pair_name) -> Dict[str, SLLChannel]:
"""
run ILP to map anchor to channels
"""
start_time = time.perf_counter()
get_time_stamp = lambda : time.perf_counter() - start_time
m = Model()
anchor_to_sll_to_var = {}
for anchor, sll_to_cost in anchor_to_sll_to_cost.items():
sll_to_var = {sll : m.add_var(var_type=CONTINUOUS, lb=0, ub=1) for sll in sll_to_cost.keys()}
anchor_to_sll_to_var[anchor] = sll_to_var
sll_to_anchor_to_var = defaultdict(dict)
for anchor, sll_to_var in anchor_to_sll_to_var.items():
for sll, var in sll_to_var.items():
sll_to_anchor_to_var[sll][anchor] = var
# each anchor is placed once
for anchor, sll_to_var in anchor_to_sll_to_var.items():
m += xsum(var for var in sll_to_var.values()) == 1
# limit on sll capacity, currently set to 20/24
for sll, anchor_to_var in sll_to_anchor_to_var.items():
m += xsum(var for var in anchor_to_var.values()) <= sll.capacity
# objective
var_and_cost = []
for anchor, sll_to_cost in anchor_to_sll_to_cost.items():
sll_to_var = anchor_to_sll_to_var[anchor]
for sll in sll_to_cost.keys():
var_and_cost.append((sll_to_var[sll], sll_to_cost[sll]))
m.objective = minimize(xsum(var * cost for var, cost in var_and_cost))
status = m.optimize()
if anchor_to_sll_to_var:
assert status == OptimizationStatus.OPTIMAL or status == OptimizationStatus.FEASIBLE, f'failed in ILP placement for {pair_name}'
anchor_to_sll = {}
for anchor, sll_to_var in anchor_to_sll_to_var.items():
for sll, var in sll_to_var.items():
var_value = round(var.x)
assert abs(var.x - var_value) < 0.000001, var.x
if | |
and total acc time as functions of LCL cells and PM meshes for each
charge assignment order sequence.
Parameters
----------
data_df : pandas.DataFrame, Optional
Timing study data. If `None` it will look for previously saved data, otherwise it will run
:meth:`sarkas.processes.PreProcess.timing_study_calculation` to calculate the data. Default is `None`.
"""
from scipy.interpolate import griddata
fig_path = self.pppm_plots_dir
if not data_df:
try:
data_df = pd.read_csv(
join(self.io.preprocessing_dir, f"TimingStudy_data_{self.io.job_id}.csv"), index_col=False
)
except FileNotFoundError:
print(f"I could not find the data from the timing study. Running the timing study now.")
self.timing_study_calculation()
else:
data_df = self.dataframe.copy(deep=True)
# Plot the results
for _, cao in enumerate(self.pm_caos):
mask = self.dataframe["pppm_cao_x"] == cao
df = data_df[mask][
[
"M_x",
"pp_cells",
"force error [measured]",
"pp_acc_time [ns]",
"pm_acc_time [ns]",
"tot_acc_time [ns]",
]
]
# 2D-arrays from DataFrame
x1 = linspace(df["M_x"].min(), df["M_x"].max(), len(df["M_x"].unique()))
y1 = linspace(df["pp_cells"].min(), df["pp_cells"].max(), len(df["pp_cells"].unique()))
m_mesh, c_mesh = meshgrid(x1, y1)
# Interpolate unstructured D-dimensional data.
tot_time_map = griddata((df["M_x"], df["pp_cells"]), df["tot_acc_time [ns]"], (m_mesh, c_mesh))
force_error_map = griddata((df["M_x"], df["pp_cells"]), df["force error [measured]"], (m_mesh, c_mesh))
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(18, 9))
if force_error_map.min() == 0.0:
minv = 1e-120
else:
minv = force_error_map.min()
maxt = force_error_map.max()
nlvl = 12
lvls = logspace(log10(minv), log10(maxt), nlvl)
luxmap = get_cmap("viridis", nlvl)
luxnorm = LogNorm(vmin=minv, vmax=maxt)
CS = ax1.contourf(m_mesh, c_mesh, force_error_map, levels=lvls, cmap=luxmap, norm=luxnorm)
clb = fig.colorbar(ScalarMappable(norm=luxnorm, cmap=luxmap), ax=ax1)
clb.set_label(r"Force Error [$Q^2/ a_{\rm ws}^2$] " + f"@ cao = {cao}", rotation=270, va="bottom")
CS2 = ax1.contour(CS, colors="w")
ax1.clabel(CS2, fmt="%1.0e", colors="w")
if cao == self.potential.pppm_cao[0]:
input_Nc = int(self.potential.box_lengths[0] / self.potential.rc)
ax1.scatter(self.potential.pppm_mesh[0], input_Nc, s=200, c="k")
ax1.set_xlabel("Mesh size")
ax1.set_ylabel(r"LCL Cells")
ax1.set_title(f"Force Error Map @ cao = {cao}")
# Timing Plot
maxt = tot_time_map.max()
mint = tot_time_map.min()
# nlvl = 13
lvls = logspace(log10(mint), log10(maxt), nlvl)
luxmap = get_cmap("viridis", nlvl)
luxnorm = LogNorm(vmin=minv, vmax=maxt)
CS = ax2.contourf(m_mesh, c_mesh, tot_time_map, levels=lvls, cmap=luxmap)
CS2 = ax2.contour(CS, colors="w", levels=lvls)
ax2.clabel(CS2, fmt="%.2e", colors="w")
# fig.colorbar(, ax = ax2)
clb = fig.colorbar(ScalarMappable(norm=luxnorm, cmap=luxmap), ax=ax2)
clb.set_label("CPU Time [ns]", rotation=270, va="bottom")
if cao == self.potential.pppm_cao[0]:
input_Nc = int(self.potential.box_lengths[0] / self.potential.rc)
ax2.scatter(self.potential.pppm_mesh[0], input_Nc, s=200, c="k")
ax2.set_xlabel("Mesh size")
ax2.set_title(f"Timing Map @ cao = {cao}")
fig.savefig(join(fig_path, f"ForceErrorMap_v_Timing_cao_{cao}_{self.io.job_id}.png"))
def postproc_estimates(self):
    """Print time/size estimates for each requested post-processing observable.

    An observable is requested when the corresponding attribute (rdf, ssf,
    dsf, ccf, vm) has been set on this instance.
    """
    # POST- PROCESSING
    self.io.postprocess_info(self, write_to_file=True, observable="header")
    # Bug fix: the "vm" branch previously called self.ccf.setup() instead
    # of self.vm.setup() (copy-paste error). Driving all observables from
    # one table makes that class of mistake impossible.
    for observable in ("rdf", "ssf", "dsf", "ccf", "vm"):
        if hasattr(self, observable):
            getattr(self, observable).setup(self.parameters)
            self.io.postprocess_info(self, write_to_file=True, observable=observable)
def pppm_approximation(self):
    """
    Calculate the force error for a PPPM simulation using analytical approximations.\n
    Plot the force error in the parameter space.
    """
    self.pppm_plots_dir = join(self.io.preprocessing_dir, "PPPM_Plots")
    if not exists(self.pppm_plots_dir):
        mkdir(self.pppm_plots_dir)
    # Analytic force-error approximation from Dharuman et al. J Chem Phys 2017.
    total_force_error, pp_force_error, pm_force_error, rcuts, alphas = self.analytical_approx_pppm()
    # Express the chosen input parameters in reduced (a_ws) units.
    chosen_alpha = self.potential.pppm_alpha_ewald * self.parameters.a_ws
    chosen_rcut = self.potential.rc / self.parameters.a_ws
    # Color map of the error surface, then a line plot at the chosen point.
    self.make_pppm_color_map(rcuts, alphas, chosen_alpha, chosen_rcut, total_force_error)
    self.make_pppm_line_plot(rcuts, alphas, chosen_alpha, chosen_rcut, total_force_error)
    print(f"\nFigures can be found in {self.pppm_plots_dir}")
def remove_preproc_dumps(self):
    """Delete the energy files and phase dumps created by the estimation runs."""
    # Energy files first, then every dump file in each phase directory.
    os_remove(self.io.eq_energy_filename)
    os_remove(self.io.prod_energy_filename)
    for dump_dir in (self.io.eq_dump_dir, self.io.prod_dump_dir):
        for fname in listdir(dump_dir):
            os_remove(join(dump_dir, fname))
    # The magnetization phase only exists for magnetized runs that use
    # electrostatic equilibration.
    if self.parameters.magnetized and self.parameters.electrostatic_equilibration:
        os_remove(self.io.mag_energy_filename)
        for fname in listdir(self.io.mag_dump_dir):
            os_remove(join(self.io.mag_dump_dir, fname))
def run(
    self,
    loops: int = 10,
    timing: bool = True,
    timing_study: bool = False,
    pppm_estimate: bool = False,
    postprocessing: bool = False,
    remove: bool = False,
):
    """
    Estimate the time of the simulation and best parameters if wanted.

    Parameters
    ----------
    loops : int
        Number of loops over which to average the acceleration calculation. Default = 10.

    timing : bool
        Flag for estimating simulation times. Default =True.

    timing_study : bool
        Flag for estimating time for simulation parameters.

    pppm_estimate : bool
        Flag for showing the force error plots in case of pppm algorithm.

    postprocessing : bool
        Flag for calculating Post processing parameters.

    remove : bool
        Flag for removing energy files and dumps created during times estimation. Default = False.
    """
    # Close any figures left over from a previous run.
    plt.close("all")
    # Screening parameter: only Yukawa potentials carry one.
    if self.potential.type == "yukawa":
        self.kappa = self.potential.matrix[1, 0, 0]
    else:
        self.kappa = 0.0
    if timing:
        self.time_n_space_estimates(loops=loops)
        # Clean-up only makes sense for artifacts the estimation just made.
        if remove:
            self.remove_preproc_dumps()
    if pppm_estimate:
        self.pppm_approximation()
    if timing_study:
        self.timing_study_calculation()
        self.make_timing_plots()
        self.make_force_v_timing_plot()
        print(f"\nFigures can be found in {self.pppm_plots_dir}")
    if postprocessing:
        self.postproc_estimates()
def time_acceleration(self, loops: int = 11):
    """
    Run loops number of acceleration calculations for timing estimate.

    Parameters
    ----------
    loops: int
        Number of simulation steps to run. Default = 11.
    """

    def collect_samples(update_fn):
        # Time update_fn(self.particles) once per loop iteration.
        samples = zeros(loops)
        for it in range(loops):
            self.timer.start()
            update_fn(self.particles)
            samples[it] = self.timer.stop()
        return samples

    if self.potential.linked_list_on:
        self.pp_acc_time = collect_samples(self.potential.update_linked_list)
        # Calculate the mean excluding the first value because that time include numba compilation time
        pp_mean_time = self.timer.time_division(self.pp_acc_time[1:].mean())
        self.io.preprocess_timing("PP", pp_mean_time, loops)
    # PM acceleration
    if self.potential.pppm_on:
        self.pm_acc_time = collect_samples(self.potential.update_pm)
        pm_mean_time = self.timer.time_division(self.pm_acc_time[1:].mean())
        self.io.preprocess_timing("PM", pm_mean_time, loops)
    if self.potential.method == "fmm":
        self.fmm_acc_time = collect_samples(self.integrator.update_accelerations)
        # NOTE(review): unlike PP/PM, the FMM mean keeps the first sample —
        # confirm that is intentional (no compilation warm-up to drop).
        fmm_mean_time = self.timer.time_division(self.fmm_acc_time[:].mean())
        self.io.preprocess_timing("FMM", fmm_mean_time, loops)
def time_evolution_loop(self, loops: int = 11):
    """Run several loops of the equilibration and production phase to estimate the total time of the simulation.

    Parameters
    ----------
    loops: int
        Number of simulation steps to run. Default = 11.
    """
    if self.io.verbose:
        print(f"\nRunning {loops} equilibration and production steps to estimate simulation times\n")
    # Run few equilibration steps to estimate the equilibration time
    self.integrator.update = self.integrator.type_setup(self.integrator.equilibration_type)
    self.timer.start()
    self.evolve_loop("equilibration", self.integrator.thermalization, 0, loops, self.parameters.eq_dump_step)
    self.eq_mean_time = self.timer.stop() / loops
    # Print the average equilibration & production times
    self.io.preprocess_timing("Equilibration", self.timer.time_division(self.eq_mean_time), loops)
    # The magnetization phase (and hence mag_mean_time) only exists for
    # magnetized runs that use electrostatic equilibration.
    if self.parameters.magnetized and self.parameters.electrostatic_equilibration:
        self.integrator.update = self.integrator.type_setup(self.integrator.magnetization_type)
        self.timer.start()
        self.evolve_loop("magnetization", self.integrator.thermalization, 0, loops, self.parameters.mag_dump_step)
        self.mag_mean_time = self.timer.stop() / loops
        self.io.preprocess_timing("Magnetization", self.timer.time_division(self.mag_mean_time), loops)
    # Run few production steps to estimate the production time
    self.integrator.update = self.integrator.type_setup(self.integrator.production_type)
    self.timer.start()
    self.evolve_loop("production", False, 0, loops, self.parameters.prod_dump_step)
    self.prod_mean_time = self.timer.stop() / loops
    self.io.preprocess_timing("Production", self.timer.time_division(self.prod_mean_time), loops)
    # Extrapolate the per-step means to the full requested step counts.
    eq_prediction = self.eq_mean_time * self.parameters.equilibration_steps
    self.io.time_stamp("Equilibration", self.timer.time_division(eq_prediction))
    # Bug fix: mag_mean_time is only computed when electrostatic
    # equilibration is on; the old guard checked `magnetized` alone and
    # raised AttributeError for magnetized runs without it.
    if self.parameters.magnetized and self.parameters.electrostatic_equilibration:
        mag_prediction = self.mag_mean_time * self.parameters.magnetization_steps
        self.io.time_stamp("Magnetization", self.timer.time_division(mag_prediction))
        eq_prediction += mag_prediction
    prod_prediction = self.prod_mean_time * self.parameters.production_steps
    self.io.time_stamp("Production", self.timer.time_division(prod_prediction))
    tot_time = eq_prediction + prod_prediction
    self.io.time_stamp("Total Run", self.timer.time_division(tot_time))
def time_n_space_estimates(self, loops: int = 10):
    """Estimate simulation times and space

    Parameters
    ----------
    loops: int
        Number of simulation steps to run. Default = 10.
    """
    if loops:
        # +1 so the compilation-heavy first iteration can be dropped from
        # the averages computed downstream.
        loops += 1
        self.io.preprocess_timing("header", [0, 0, 0, 0, 0, 0], 0)
        if self.potential.pppm_on:
            green_time = self.timer.time_division(self.green_function_timer())
            self.io.preprocess_timing("GF", green_time, 0)
        self.time_acceleration(loops)
        self.time_evolution_loop(loops)
    # Estimate size of dump folder
    # Grab one file from the dump directory and get the size of it.
    if not listdir(self.io.eq_dump_dir):
        raise FileNotFoundError(
            "Could not estimate the size of the equilibration phase dumps"
            " because there are no dumps in the equilibration directory."
            "Re-run .time_n_space_estimate(loops) with loops > eq_dump_step"
        )
    if not listdir(self.io.prod_dump_dir):
        raise FileNotFoundError(
            "Could not estimate the size of the production phase dumps because"
            " there are no dumps in the production directory."
            "Re-run .time_n_space_estimate(loops) with loops > prod_dump_step"
        )
    eq_dump_size = os_stat(join(self.io.eq_dump_dir, listdir(self.io.eq_dump_dir)[0])).st_size
    eq_dump_fldr_size = eq_dump_size * (self.parameters.equilibration_steps / self.parameters.eq_dump_step)
    # Bug fix: the production dump size was previously sampled from the
    # *equilibration* dump directory; sample the production directory.
    prod_dump_size = os_stat(join(self.io.prod_dump_dir, listdir(self.io.prod_dump_dir)[0])).st_size
    prod_dump_fldr_size = prod_dump_size * (self.parameters.production_steps / self.parameters.prod_dump_step)
    # Prepare arguments to pass for print out
    sizes = array([[eq_dump_size, eq_dump_fldr_size], [prod_dump_size, prod_dump_fldr_size]])
    # Check for electrostatic equilibration
    if self.parameters.magnetized:
        if not listdir(self.io.mag_dump_dir):
            raise FileNotFoundError(
                "Could not estimate the size of the magnetization phase dumps because"
                " there are no dumps in the production directory."
                "Re-run .time_n_space_estimate(loops) with loops > mag_dump_step"
            )
        dump = self.parameters.mag_dump_step
        mag_dump_size = os_stat(join(self.io.mag_dump_dir, "checkpoint_" + str(dump) + ".npz")).st_size
        mag_dump_fldr_size = mag_dump_size * (self.parameters.magnetization_steps / self.parameters.mag_dump_step)
        sizes = array(
            [
                [eq_dump_size, eq_dump_fldr_size],
                [prod_dump_size, prod_dump_fldr_size],
                [mag_dump_size, mag_dump_fldr_size],
            ]
        )
    self.io.preprocess_sizing(sizes)
def timing_study_calculation(self):
"""Estimate the best number of mesh points and cutoff radius."""
self.pppm_plots_dir = join(self.io.preprocessing_dir, "PPPM_Plots")
if not exists(self.pppm_plots_dir):
mkdir(self.pppm_plots_dir)
print("\n\n{:=^70} \n".format(" Timing Study "))
self.input_rc = | |
<gh_stars>0
from datetime import datetime
from sqlalchemy import and_
from flask import jsonify, request, make_response, Request
from flask_security import current_user
from src import api, CustomerResource, AdminResource, OpenResource, db
from .models import Product, Rating, Description, SpecificDetails, CustomerQuestion, CustomerAnswer, Category,\
PinCode, Taxes, ProductStock, SimilarProductMapping, Shop, ProductStockPinCode, OfferImage
from .schemas import ProductSchema, RatingSchema, DescriptionSchema, SpecificDetailsSchema, CustomerQuestionSchema, CustomerAnswerSchema, \
CategorySchema, PinCodeSchema, TaxesSchema, SimilarProductMappingSchema, ProductStockSchema, ShopSchema, ProductStockPinCodeSchema,\
OfferImageSchema
class CategoryResource(OpenResource):
    """Retrieve, update and delete a single category, addressed by its slug."""

    model = Category
    schema = CategorySchema

    def get(self, slug):
        """Return one category (parent excluded to avoid recursion) or 404."""
        category = self.model.query.get(slug)
        if not category:
            # Fixed copy-paste message: this resource serves categories, not users.
            return make_response(jsonify({'error': 100, 'message': 'Category not found'}), 404)
        category_dump = self.schema(exclude=('parent',)).dump(category).data
        db.session.commit()
        return jsonify({'success': 200, 'data': category_dump})

    def put(self, slug):
        """Update an existing category from the JSON request body."""
        category = self.model.query.get(slug)
        if not category:
            return make_response(jsonify({'error': 100, 'message': 'Category not found'}), 404)
        # Bug fix: load() was called without the request payload, which raises
        # a TypeError at runtime. Feed it the JSON body and the session.
        category, errors = self.schema().load(request.json, instance=category, session=db.session)
        if errors:
            return make_response(jsonify({'error': 101, 'message': str(errors)}), 403)
        db.session.commit()
        return jsonify({'success': 200, 'message': 'category updated successfully', 'data': self.schema().dump(category).data})

    def delete(self, slug):
        """Delete one category by primary key."""
        # Bug fix: Query.delete(slug) bulk-deletes every row matched by the
        # query (the positional argument is synchronize_session, not a key).
        # Fetch the single row and delete it explicitly.
        category = self.model.query.get(slug)
        if not category:
            return make_response(jsonify({'error': 102, 'message': 'category deletion failed'}))
        db.session.delete(category)
        db.session.commit()
        return jsonify({'success': 200, 'message': 'category deleted successfully'})
class CategoryListResource(OpenResource):
    """List root categories and create new ones."""

    model = Category
    schema = CategorySchema

    def get(self):
        """Return a paginated list of top-level categories (no parent),
        filterable by any model column passed as a query-string argument."""
        categories = self.model.query.filter(Category.parent_id.is_(None))
        for key in request.args:
            if hasattr(self.model, key):
                values = request.args.getlist(key)
                categories = categories.filter(getattr(self.model,
                                                       key).in_(values))
        if 'page' not in request.args:
            resources = categories.paginate(1).items
        else:
            resources = categories.paginate(int(request.args['page'])).items
        return jsonify({'success': 200, 'data': self.schema().dump(resources, many=True).data})

    def post(self):
        """Create a category from the JSON request body."""
        categories, errors = self.schema().load(request.json, session=db.session)
        if errors:
            return make_response(jsonify({'error': 101, 'message': str(errors)}), 403)
        # Bug fix: the loaded instance was never attached to the session, so
        # the commit persisted nothing (ProductListResource.post does this
        # correctly).
        db.session.add(categories)
        db.session.commit()
        return jsonify({'success': 200, 'message': 'categories added successfully', 'data': self.schema().dump(categories).data})
# Register the category routes: collection endpoint and single-item endpoint
# (addressed by integer slug).
api.add_resource(CategoryListResource, '/categories/', endpoint='categories')
api.add_resource(CategoryResource, '/category/<int:slug>/', endpoint='category')
class ProductResource(OpenResource):
    """Retrieve, update and delete a single product, addressed by its slug."""

    model = Product
    schema = ProductSchema

    def get(self, slug):
        """Return one product or a 404 error payload."""
        product = self.model.query.get(slug)
        if not product:
            # Fixed copy-paste message: this resource serves products, not users.
            return make_response(jsonify({'error': 100,
                                          'message': 'Product not found'}), 404)
        product_dump = self.schema().dump(product).data
        db.session.commit()
        return jsonify({'success': 200, 'data': product_dump})

    def put(self, slug):
        """Update an existing product from the JSON request body."""
        product = self.model.query.get(slug)
        if not product:
            return make_response(jsonify({
                'error': 100, 'message': 'Product not found'
            }), 404)
        # Bug fix: load() was called without the request payload, which raises
        # a TypeError at runtime. Feed it the JSON body and the session.
        product, errors = self.schema().load(request.json, instance=product,
                                             session=db.session)
        if errors:
            return make_response(jsonify({
                'error': 101, 'message': str(errors)
            }), 403)
        db.session.commit()
        return jsonify({
            'success': 200, 'message': 'product updated successfully', 'data':
            self.schema().dump(product).data
        })

    def delete(self, slug):
        """Delete one product by primary key."""
        # Bug fix: Query.delete(slug) bulk-deletes every row matched by the
        # query (the positional argument is synchronize_session, not a key).
        # Fetch the single row and delete it explicitly.
        product = self.model.query.get(slug)
        if not product:
            return make_response(jsonify({
                'error': 102, 'message': 'product deletion failed'
            }))
        db.session.delete(product)
        db.session.commit()
        return jsonify({
            'success': 200, 'message': 'product deleted successfully'
        })
class ProductListResource(OpenResource):
    """List existing products and create new ones."""

    model = Product
    schema = ProductSchema

    def get(self):
        """Return a paginated product list, filterable by any model column
        supplied as a query-string argument."""
        query = self.model.query
        for arg_name in request.args:
            if not hasattr(self.model, arg_name):
                continue
            column = getattr(self.model, arg_name)
            query = query.filter(column.in_(request.args.getlist(arg_name)))
        if 'page' in request.args:
            page_items = query.paginate(int(request.args['page'])).items
        else:
            page_items = query.paginate().items
        return jsonify({'success': 200, 'data': self.schema().dump(page_items, many=True).data})

    def post(self):
        """Create a product from the JSON request body."""
        loaded, errors = self.schema().load(request.json, session=db.session)
        if errors:
            return make_response(jsonify({'error': 101, 'message': str(errors)}), 403)
        db.session.add(loaded)
        db.session.commit()
        return jsonify({'success': 200, 'message': 'products added successfully', 'data': self.schema().dump(loaded).data})
# Register the product routes: collection endpoint and single-item endpoint
# (addressed by integer slug).
api.add_resource(ProductListResource, '/products/', endpoint='products')
api.add_resource(ProductResource, '/product/<int:slug>/', endpoint='product')
class TaxesResource(OpenResource):
    """Retrieve, update and delete a single tax record, addressed by its slug."""

    model = Taxes
    schema = TaxesSchema

    def get(self, slug):
        """Return one tax record or a 404 error payload."""
        tax = self.model.query.get(slug)
        if not tax:
            return make_response(jsonify({'error': 100,
                                          'message': 'Tax not found'}), 404)
        tax_dump = self.schema().dump(tax).data
        db.session.commit()
        return jsonify({'success': 200, 'data': tax_dump})

    def put(self, slug):
        """Update an existing tax record from the JSON request body."""
        tax = self.model.query.get(slug)
        if not tax:
            return make_response(jsonify({
                'error': 100, 'message': 'Tax not found'
            }), 404)
        # Bug fix: load() was called without the request payload, which raises
        # a TypeError at runtime. Feed it the JSON body and the session.
        tax, errors = self.schema().load(request.json, instance=tax,
                                         session=db.session)
        if errors:
            return make_response(jsonify({
                'error': 101, 'message': str(errors)
            }), 403)
        db.session.commit()
        return jsonify({
            'success': 200, 'message': 'tax updated successfully', 'data':
            self.schema().dump(tax).data
        })

    def delete(self, slug):
        """Delete one tax record by primary key."""
        # Bug fix: Query.delete(slug) bulk-deletes every row matched by the
        # query (the positional argument is synchronize_session, not a key).
        # Fetch the single row and delete it explicitly.
        tax = self.model.query.get(slug)
        if not tax:
            return make_response(jsonify({
                'error': 102, 'message': 'tax deletion failed'
            }))
        db.session.delete(tax)
        db.session.commit()
        return jsonify({
            'success': 200, 'message': 'tax deleted successfully'
        })
class TaxesListResource(OpenResource):
    """List existing tax records and create new ones."""

    model = Taxes
    schema = TaxesSchema

    def get(self):
        """Return all taxes (optionally paginated), filterable by any model
        column supplied as a query-string argument."""
        taxes = self.model.query
        for key in request.args:
            if hasattr(self.model, key):
                values = request.args.getlist(key)
                taxes = taxes.filter(getattr(self.model,
                                             key).in_(values))
        if 'page' not in request.args:
            resources = taxes.all()
        else:
            resources = taxes.paginate(
                int(request.args['page'])).items
        # Bug fix: dump() returns a MarshalResult in marshmallow 2.x, which is
        # not JSON-serializable; return the .data payload (consistent with the
        # other resources in this module).
        return jsonify({'success': 200, 'data':
                        self.schema().dump(resources, many=True).data})

    def post(self):
        """Create a tax record from the JSON request body."""
        taxes, errors = self.schema().load(request.json,
                                           session=db.session)
        if errors:
            return make_response(jsonify({'error': 101,
                                          'message': str(errors)}), 403)
        # Bug fix: the loaded instance was never attached to the session, so
        # the commit persisted nothing (ProductListResource.post does this
        # correctly).
        db.session.add(taxes)
        db.session.commit()
        return jsonify({
            'success': 200, 'message': 'taxes added successfully',
            'data': self.schema().dump(taxes).data
        })
# Register the tax routes: collection endpoint and single-item endpoint
# (addressed by integer slug).
api.add_resource(TaxesListResource, '/taxes/', endpoint='taxes')
api.add_resource(TaxesResource, '/tax/<int:slug>/', endpoint='tax')
class RatingResource(OpenResource):
    """Retrieve, update and delete a single rating, addressed by its slug."""

    model = Rating
    schema = RatingSchema

    def get(self, slug):
        """Return one rating or a 404 error payload."""
        rating = self.model.query.get(slug)
        if not rating:
            return make_response(jsonify({'error': 100,
                                          'message': 'Rating not found'}), 404)
        rating_dump = self.schema().dump(rating).data
        db.session.commit()
        return jsonify({'success': 200, 'data': rating_dump})

    def put(self, slug):
        """Update an existing rating from the JSON request body."""
        rating = self.model.query.get(slug)
        if not rating:
            return make_response(jsonify({
                'error': 100, 'message': 'Rating not found'
            }), 404)
        # Bug fix: load() was called without the request payload, which raises
        # a TypeError at runtime. Feed it the JSON body and the session.
        rating, errors = self.schema().load(request.json, instance=rating,
                                            session=db.session)
        if errors:
            return make_response(jsonify({
                'error': 101, 'message': str(errors)
            }), 403)
        db.session.commit()
        return jsonify({
            'success': 200, 'message': 'rating updated successfully', 'data':
            self.schema().dump(rating).data
        })

    def delete(self, slug):
        """Delete one rating by primary key."""
        # Bug fix: Query.delete(slug) bulk-deletes every row matched by the
        # query (the positional argument is synchronize_session, not a key).
        # Fetch the single row and delete it explicitly.
        rating = self.model.query.get(slug)
        if not rating:
            return make_response(jsonify({
                'error': 102, 'message': 'rating deletion failed'
            }))
        db.session.delete(rating)
        db.session.commit()
        return jsonify({
            'success': 200, 'message': 'rating deleted successfully'
        })
class RatingListResource(OpenResource):
    """List existing ratings and create new ones."""

    model = Rating
    schema = RatingSchema

    def get(self):
        """Return all ratings (optionally paginated), filterable by any model
        column supplied as a query-string argument."""
        ratings = self.model.query
        for key in request.args:
            if hasattr(self.model, key):
                values = request.args.getlist(key)
                ratings = ratings.filter(getattr(self.model,
                                                 key).in_(values))
        if 'page' not in request.args:
            resources = ratings.all()
        else:
            resources = ratings.paginate(
                int(request.args['page'])).items
        # Bug fix: dump() returns a MarshalResult in marshmallow 2.x, which is
        # not JSON-serializable; return the .data payload (consistent with the
        # other resources in this module).
        return jsonify({'success': 200, 'data':
                        self.schema().dump(resources, many=True).data})

    def post(self):
        """Create a rating from the JSON request body."""
        ratings, errors = self.schema().load(request.json,
                                             session=db.session)
        if errors:
            return make_response(jsonify({'error': 101,
                                          'message': str(errors)}), 403)
        # Bug fix: the loaded instance was never attached to the session, so
        # the commit persisted nothing (ProductListResource.post does this
        # correctly).
        db.session.add(ratings)
        db.session.commit()
        return jsonify({
            'success': 200, 'message': 'ratings added successfully',
            'data': self.schema().dump(ratings).data
        })
# Register the rating routes: collection endpoint and single-item endpoint
# (addressed by integer slug).
api.add_resource(RatingListResource, '/ratings/', endpoint='ratings')
api.add_resource(RatingResource, '/rating/<int:slug>/', endpoint='rating')
class DescriptionResource(OpenResource):
    """Retrieve, update and delete a single description, addressed by its slug."""

    model = Description
    schema = DescriptionSchema

    def get(self, slug):
        """Return one description or a 404 error payload."""
        description = self.model.query.get(slug)
        if not description:
            return make_response(jsonify({'error': 100,
                                          'message': 'Description not found'}), 404)
        description_dump = self.schema().dump(description).data
        db.session.commit()
        return jsonify({'success': 200, 'data': description_dump})

    def put(self, slug):
        """Update an existing description from the JSON request body."""
        description = self.model.query.get(slug)
        if not description:
            return make_response(jsonify({
                'error': 100, 'message': 'Description not found'
            }), 404)
        # Bug fix: load() was called without the request payload, which raises
        # a TypeError at runtime. Feed it the JSON body and the session.
        description, errors = self.schema().load(request.json, instance=description,
                                                 session=db.session)
        if errors:
            return make_response(jsonify({
                'error': 101, 'message': str(errors)
            }), 403)
        db.session.commit()
        return jsonify({
            'success': 200, 'message': 'description updated successfully', 'data':
            self.schema().dump(description).data
        })

    def delete(self, slug):
        """Delete one description by primary key."""
        # Bug fix: Query.delete(slug) bulk-deletes every row matched by the
        # query (the positional argument is synchronize_session, not a key).
        # Fetch the single row and delete it explicitly.
        description = self.model.query.get(slug)
        if not description:
            return make_response(jsonify({
                'error': 102, 'message': 'description deletion failed'
            }))
        db.session.delete(description)
        db.session.commit()
        return jsonify({
            'success': 200, 'message': 'description deleted successfully'
        })
class DescriptionListResource(OpenResource):
    """List existing descriptions and create new ones."""

    model = Description
    schema = DescriptionSchema

    def get(self):
        """Return all descriptions (optionally paginated), filterable by any
        model column supplied as a query-string argument."""
        descriptions = self.model.query
        for key in request.args:
            if hasattr(self.model, key):
                values = request.args.getlist(key)
                descriptions = descriptions.filter(getattr(self.model,
                                                           key).in_(values))
        if 'page' not in request.args:
            resources = descriptions.all()
        else:
            resources = descriptions.paginate(
                int(request.args['page'])).items
        # Bug fix: dump() returns a MarshalResult in marshmallow 2.x, which is
        # not JSON-serializable; return the .data payload (consistent with the
        # other resources in this module).
        return jsonify({'success': 200, 'data':
                        self.schema().dump(resources, many=True).data})

    def post(self):
        """Create a description from the JSON request body."""
        descriptions, errors = self.schema().load(request.json,
                                                  session=db.session)
        if errors:
            return make_response(jsonify({'error': 101,
                                          'message': str(errors)}), 403)
        # Bug fix: the loaded instance was never attached to the session, so
        # the commit persisted nothing (ProductListResource.post does this
        # correctly).
        db.session.add(descriptions)
        db.session.commit()
        return jsonify({
            'success': 200, 'message': 'descriptions added successfully',
            'data': self.schema().dump(descriptions).data
        })
# Register the description routes: collection endpoint and single-item endpoint
# (addressed by integer slug).
api.add_resource(DescriptionListResource, '/descriptions/', endpoint='descriptions')
api.add_resource(DescriptionResource, '/description/<int:slug>/', endpoint='description')
class SpecificDetailsResource(OpenResource):
    """Retrieve, update and delete a single specific-detail record by slug."""

    model = SpecificDetails
    schema = SpecificDetailsSchema

    def get(self, slug):
        """Return one specific-detail record or a 404 error payload."""
        specific_detail = self.model.query.get(slug)
        if not specific_detail:
            return make_response(jsonify({'error': 100, 'message': 'SpecificDetail not found'}), 404)
        specific_detail_dump = self.schema().dump(specific_detail).data
        db.session.commit()
        return jsonify({'success': 200, 'data': specific_detail_dump})

    def put(self, slug):
        """Update an existing specific-detail record from the JSON body."""
        specificDetail = self.model.query.get(slug)
        if not specificDetail:
            return make_response(jsonify({
                'error': 100, 'message': 'SpecificDetail not found'
            }), 404)
        # Bug fix: load() was called without the request payload, which raises
        # a TypeError at runtime. Feed it the JSON body and the session.
        specificDetail, errors = self.schema().load(request.json, instance=specificDetail,
                                                    session=db.session)
        if errors:
            return make_response(jsonify({
                'error': 101, 'message': str(errors)
            }), 403)
        db.session.commit()
        return jsonify({
            'success': 200, 'message': 'specificDetail updated successfully', 'data':
            self.schema().dump(specificDetail).data
        })

    def delete(self, slug):
        """Delete one specific-detail record by primary key."""
        # Bug fix: Query.delete(slug) bulk-deletes every row matched by the
        # query (the positional argument is synchronize_session, not a key).
        # Fetch the single row and delete it explicitly.
        specificDetail = self.model.query.get(slug)
        if not specificDetail:
            return make_response(jsonify({
                'error': 102, 'message': 'specificDetail deletion failed'
            }))
        db.session.delete(specificDetail)
        db.session.commit()
        return jsonify({
            'success': 200, 'message': 'specificDetail deleted successfully'
        })
class SpecificDetailsListResource(OpenResource):
    """List existing specific-detail records and create new ones."""

    model = SpecificDetails
    schema = SpecificDetailsSchema

    def get(self):
        """Return all specific details (optionally paginated), filterable by
        any model column supplied as a query-string argument."""
        specificDetails = self.model.query
        for key in request.args:
            if hasattr(self.model, key):
                values = request.args.getlist(key)
                specificDetails = specificDetails.filter(getattr(self.model,
                                                                 key).in_(values))
        if 'page' not in request.args:
            resources = specificDetails.all()
        else:
            resources = specificDetails.paginate(
                int(request.args['page'])).items
        # Bug fix: dump() returns a MarshalResult in marshmallow 2.x, which is
        # not JSON-serializable; return the .data payload (consistent with the
        # other resources in this module).
        return jsonify({'success': 200, 'data':
                        self.schema().dump(resources, many=True).data})

    def post(self):
        """Create a specific-detail record from the JSON request body."""
        specificDetails, errors = self.schema().load(request.json,
                                                     session=db.session)
        if errors:
            return make_response(jsonify({'error': 101,
                                          'message': str(errors)}), 403)
        # Bug fix: the loaded instance was never attached to the session, so
        # the commit persisted nothing (ProductListResource.post does this
        # correctly).
        db.session.add(specificDetails)
        db.session.commit()
        return jsonify({
            'success': 200, 'message': 'specificDetails added successfully',
            'data': self.schema().dump(specificDetails).data
        })
# Register the specific-detail routes: collection endpoint and single-item
# endpoint (addressed by integer slug).
api.add_resource(SpecificDetailsListResource, '/specificDetails/', endpoint='specificDetails')
api.add_resource(SpecificDetailsResource, '/specificDetail/<int:slug>/', endpoint='specificDetail')
class CustomerQuestionResource(OpenResource):
    """Retrieve, update and delete a single customer question by slug."""

    model = CustomerQuestion
    schema = CustomerQuestionSchema

    def get(self, slug):
        """Return one customer question or a 404 error payload."""
        customerQuestion = self.model.query.get(slug)
        if not customerQuestion:
            return make_response(jsonify({'error': 100,
                                          'message': 'Customer question not found'}), 404)
        customerQuestion_dump = self.schema().dump(customerQuestion).data
        db.session.commit()
        return jsonify({'success': 200, 'data': customerQuestion_dump})

    def put(self, slug):
        """Update an existing customer question from the JSON request body."""
        customerQuestion = self.model.query.get(slug)
        if not customerQuestion:
            return make_response(jsonify({
                'error': 100, 'message': 'Customer question not found'
            }), 404)
        # Bug fix: load() was called without the request payload, which raises
        # a TypeError at runtime. Feed it the JSON body and the session.
        customerQuestion, errors = self.schema().load(request.json, instance=customerQuestion,
                                                      session=db.session)
        if errors:
            return make_response(jsonify({
                'error': 101, 'message': str(errors)
            }), 403)
        db.session.commit()
        return jsonify({
            'success': 200, 'message': 'customerQuestion updated successfully', 'data':
            self.schema().dump(customerQuestion).data
        })

    def delete(self, slug):
        """Delete one customer question by primary key."""
        # Bug fix: Query.delete(slug) bulk-deletes every row matched by the
        # query (the positional argument is synchronize_session, not a key).
        # Fetch the single row and delete it explicitly.
        customerQuestion = self.model.query.get(slug)
        if not customerQuestion:
            return make_response(jsonify({
                'error': 102, 'message': 'customerQuestion deletion failed'
            }))
        db.session.delete(customerQuestion)
        db.session.commit()
        return jsonify({
            'success': 200, 'message': 'customerQuestion deleted successfully'
        })
class CustomerQuestionListResource(OpenResource):
model = CustomerQuestion
schema = CustomerQuestionSchema
def get(self):
customerQuestions = self.model.query
for key in request.args:
if hasattr(self.model, key):
values = request.args.getlist(key)
customerQuestions = customerQuestions.filter(getattr(self.model,
key).in_(values))
if 'page' not in request.args:
resources = customerQuestions.all()
else:
resources = customerQuestions.paginate(
int(request.args['page'])).items
return jsonify({'success': 200, 'data':
self.schema().dump(resources, many=True)})
def | |
list of the bounds for the hyperparameters."""
return [
Integer(10, 500, name="n_estimators"),
Categorical(half_to_one_inc, name="max_samples"),
Categorical(half_to_one_inc, name="max_features"),
Categorical([True, False], name="bootstrap"),
Categorical([True, False], name="bootstrap_features"),
]
class ExtraTrees(BaseModel):
    """Extremely Randomized Trees."""

    acronym = "ET"
    fullname = "Extra-Trees"
    needs_scaling = False
    accepts_sparse = True
    supports_gpu = False
    goal = ["class", "reg"]

    @property
    def est_class(self):
        """Return the estimator's class."""
        return ExtraTreesClassifier if self.T.goal == "class" else ExtraTreesRegressor

    def get_parameters(self, x):
        """Return a dictionary of the model's hyperparameters."""
        params = super().get_parameters(x)
        # max_samples only applies when bootstrapping is enabled.
        if not self._get_param(params, "bootstrap"):
            params.pop("max_samples", None)
        return params

    def get_dimensions(self):
        """Return a list of the bounds for the hyperparameters."""
        # Split-quality measures differ between the classifier and regressor.
        if self.T.goal == "class":
            split_quality = ["gini", "entropy"]
        else:
            split_quality = ["squared_error", "absolute_error"]
        return [
            Integer(10, 500, name="n_estimators"),
            Categorical(split_quality, name="criterion"),
            Categorical([None, *range(1, 17)], name="max_depth"),
            Integer(2, 20, name="min_samples_split"),
            Integer(1, 20, name="min_samples_leaf"),
            Categorical(
                categories=["auto", "sqrt", "log2", *half_to_one_exc, None],
                name="max_features",
            ),
            Categorical([True, False], name="bootstrap"),
            Categorical([None, *half_to_one_exc], name="max_samples"),
            Real(0, 0.035, name="ccp_alpha"),
        ]
class RandomForest(BaseModel):
    """Random Forest."""
    acronym = "RF"
    fullname = "Random Forest"
    needs_scaling = False
    accepts_sparse = True
    supports_gpu = True
    goal = ["class", "reg"]
    @property
    def est_class(self):
        """Return the estimator's class.

        Delegates to the trainer's _get_gpu, which resolves to the cuML
        implementation when a GPU run was requested, else scikit-learn's.
        """
        if self.T.goal == "class":
            return self.T._get_gpu(RandomForestClassifier, "cuml.ensemble")
        else:
            return self.T._get_gpu(RandomForestRegressor, "cuml.ensemble")
    def get_parameters(self, x):
        """Return a dictionary of the model's hyperparameters."""
        params = super().get_parameters(x)
        # max_samples only applies when bootstrapping is enabled.
        if not self._get_param(params, "bootstrap"):
            params.pop("max_samples", None)
        return params
    def get_dimensions(self):
        """Return a list of the bounds for the hyperparameters."""
        if self.T.goal == "class":
            criterion = ["gini", "entropy"]
        elif self._gpu:
            # cuML regressors use different criterion identifiers than sklearn.
            criterion = ["mse", "poisson", "gamma", "inverse_gaussian"]
        else:
            criterion = ["squared_error", "absolute_error", "poisson"]
        dimensions = [
            Integer(10, 500, name="n_estimators"),
            Categorical(criterion, name="criterion"),
            Categorical([None, *range(1, 17)], name="max_depth"),
            Integer(2, 20, name="min_samples_split"),
            Integer(1, 20, name="min_samples_leaf"),
            Categorical(
                categories=["auto", "sqrt", "log2", *half_to_one_exc, None],
                name="max_features",
            ),
            Categorical([True, False], name="bootstrap"),
            Categorical([None, *half_to_one_exc], name="max_samples"),
        ]
        if self._gpu:
            # Adapt the search space in place for cuML: rename the criterion
            # parameter and drop the None options from max_depth, max_features
            # and max_samples (presumably cuML rejects None there — confirm
            # against the cuml.ensemble docs for the pinned version).
            dimensions[1].name = "split_criterion"
            dimensions[2].categories = list(range(1, 17))
            dimensions[5].categories = ["auto", "sqrt", "log2", *half_to_one_exc]
            dimensions[7].categories = half_to_one_exc
        else:
            # Cost-complexity pruning only exists in the sklearn implementation.
            dimensions.append(Real(0, 0.035, name="ccp_alpha"))
        return dimensions
class AdaBoost(BaseModel):
    """Adaptive Boosting (with decision tree as base estimator)."""

    acronym = "AdaB"
    fullname = "AdaBoost"
    needs_scaling = False
    accepts_sparse = True
    supports_gpu = False
    goal = ["class", "reg"]

    @property
    def est_class(self):
        """Return the estimator's class."""
        return AdaBoostClassifier if self.T.goal == "class" else AdaBoostRegressor

    def get_dimensions(self):
        """Return a list of the bounds for the hyperparameters."""
        dims = [
            Integer(50, 500, name="n_estimators"),
            Real(0.01, 10, "log-uniform", name="learning_rate"),
        ]
        # Classifiers tune the boosting algorithm; regressors tune the loss.
        if self.T.goal == "class":
            dims.append(Categorical(["SAMME.R", "SAMME"], name="algorithm"))
        else:
            dims.append(Categorical(["linear", "square", "exponential"], name="loss"))
        return dims
class GradientBoostingMachine(BaseModel):
    """Gradient Boosting Machine."""

    acronym = "GBM"
    fullname = "Gradient Boosting Machine"
    needs_scaling = False
    accepts_sparse = True
    supports_gpu = False
    goal = ["class", "reg"]

    @property
    def est_class(self):
        """Return the estimator's class."""
        if self.T.goal == "class":
            return GradientBoostingClassifier
        return GradientBoostingRegressor

    def get_parameters(self, x):
        """Return a dictionary of the model's hyperparameters."""
        params = super().get_parameters(x)
        # alpha is only used by the huber and quantile losses.
        if self._get_param(params, "loss") not in ("huber", "quantile"):
            params.pop("alpha", None)
        return params

    def get_dimensions(self):
        """Return a list of the bounds for the hyperparameters."""
        dims = []
        # The loss is only tunable for binary classification and regression;
        # multiclass classification only works with the deviance loss.
        if self.T.task.startswith("bin"):
            dims.append(Categorical(["deviance", "exponential"], name="loss"))
        elif self.T.task.startswith("reg"):
            reg_losses = ["squared_error", "absolute_error", "huber", "quantile"]
            dims.append(Categorical(reg_losses, name="loss"))
        dims += [
            Real(0.01, 1.0, "log-uniform", name="learning_rate"),
            Integer(10, 500, name="n_estimators"),
            Categorical(half_to_one_inc, name="subsample"),
            Categorical(["friedman_mse", "squared_error"], name="criterion"),
            Integer(2, 20, name="min_samples_split"),
            Integer(1, 20, name="min_samples_leaf"),
            Integer(1, 21, name="max_depth"),
            Categorical(
                categories=["auto", "sqrt", "log2", *half_to_one_exc, None],
                name="max_features",
            ),
            Real(0, 0.035, name="ccp_alpha"),
        ]
        if self.T.goal == "reg":
            dims.append(Categorical(half_to_one_exc, name="alpha"))
        return dims
class HistGBM(BaseModel):
    """Histogram-based Gradient Boosting Machine."""

    acronym = "hGBM"
    fullname = "HistGBM"
    needs_scaling = False
    accepts_sparse = False
    supports_gpu = False
    goal = ["class", "reg"]

    @property
    def est_class(self):
        """Return the estimator's class."""
        if self.T.goal == "class":
            return HistGradientBoostingClassifier
        return HistGradientBoostingRegressor

    def get_dimensions(self):
        """Return a list of the bounds for the hyperparameters."""
        # The loss is only tunable for regression tasks.
        if self.T.goal == "reg":
            dims = [
                Categorical(
                    ["squared_error", "absolute_error", "poisson"], name="loss"
                )
            ]
        else:
            dims = []
        return dims + [
            Real(0.01, 1.0, "log-uniform", name="learning_rate"),
            Integer(10, 500, name="max_iter"),
            Integer(10, 50, name="max_leaf_nodes"),
            Categorical([None, *range(1, 17)], name="max_depth"),
            Integer(10, 30, name="min_samples_leaf"),
            Categorical(zero_to_one_inc, name="l2_regularization"),
        ]
class XGBoost(BaseModel):
    """Extreme Gradient Boosting."""

    acronym = "XGB"
    fullname = "XGBoost"
    needs_scaling = True
    accepts_sparse = True
    supports_gpu = True
    goal = ["class", "reg"]

    @property
    def est_class(self):
        """Return the estimator's class (imported lazily)."""
        from xgboost import XGBClassifier, XGBRegressor

        if self.T.goal == "class":
            return XGBClassifier
        else:
            return XGBRegressor

    def get_estimator(self, **params):
        """Return the model's estimator with unpacked parameters."""
        if self.T.random_state is None:  # XGBoost can't handle random_state to be None
            # Bug fix: random.randint requires integer bounds; 1e5 is a float
            # (deprecated in Python 3.10, TypeError from 3.11).
            random_state = params.pop("random_state", random.randint(0, 100000))
        else:
            random_state = params.pop("random_state", self.T.random_state)
        return self.est_class(
            use_label_encoder=params.pop("use_label_encoder", False),
            n_jobs=params.pop("n_jobs", self.T.n_jobs),
            tree_method=params.pop("tree_method", "gpu_hist" if self.T.gpu else None),
            verbosity=params.pop("verbosity", 0),
            random_state=random_state,
            **params,
        )

    def custom_fit(self, est, train, validation=None, **params):
        """Fit the model using early stopping and update evals attr.

        Parameters: est (estimator to fit), train/validation ((X, y) tuples),
        **params (extra keyword arguments forwarded to est.fit).
        """
        from xgboost.callback import EarlyStopping

        n_estimators = est.get_params().get("n_estimators", 100)
        rounds = self._get_early_stopping_rounds(params, n_estimators)
        eval_set = params.pop("eval_set", [train, validation] if validation else None)
        callbacks = params.pop("callbacks", [])
        if rounds:  # Add early stopping callback
            callbacks.append(EarlyStopping(rounds, maximize=True))
        est.fit(
            X=train[0],
            y=train[1],
            eval_set=eval_set,
            # Bug fix: pop (not get) so a caller-supplied "verbose" isn't also
            # forwarded through **params, which would raise a duplicate-keyword
            # TypeError.
            verbose=params.pop("verbose", False),
            callbacks=callbacks,
            **params,
        )
        if validation:
            # Create evals attribute with train and validation scores
            metric_name = list(est.evals_result()["validation_0"])[0]
            self.evals = {
                "metric": metric_name,
                "train": est.evals_result()["validation_0"][metric_name],
                "test": est.evals_result()["validation_1"][metric_name],
            }
            # Iterations actually run vs the configured maximum.
            self._stopped = (len(self.evals["train"]), n_estimators)

    @staticmethod
    def get_dimensions():
        """Return a list of the bounds for the hyperparameters."""
        return [
            Integer(20, 500, name="n_estimators"),
            Real(0.01, 1.0, "log-uniform", name="learning_rate"),
            Integer(1, 20, name="max_depth"),
            Real(0, 1.0, name="gamma"),
            Integer(1, 10, name="min_child_weight"),
            Categorical(half_to_one_inc, name="subsample"),
            Categorical([0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0], name="colsample_bytree"),
            Categorical([0, 0.01, 0.1, 1, 10, 100], name="reg_alpha"),
            Categorical([0, 0.01, 0.1, 1, 10, 100], name="reg_lambda"),
        ]
class LightGBM(BaseModel):
    """Light Gradient Boosting Machine."""
    acronym = "LGB"
    fullname = "LightGBM"
    needs_scaling = True
    accepts_sparse = True
    supports_gpu = True
    goal = ["class", "reg"]
    @property
    def est_class(self):
        """Return the estimator's class.

        Imported lazily so lightgbm is only required when this model is used.
        """
        from lightgbm.sklearn import LGBMClassifier, LGBMRegressor
        if self.T.goal == "class":
            return LGBMClassifier
        else:
            return LGBMRegressor
    def get_estimator(self, **params):
        """Return the model's estimator with unpacked parameters.

        n_jobs, device and random_state default to the trainer's settings
        unless the caller overrides them via params.
        """
        return self.est_class(
            n_jobs=params.pop("n_jobs", self.T.n_jobs),
            device=params.pop("device", "gpu" if self.T.gpu else "cpu"),
            random_state=params.pop("random_state", self.T.random_state),
            **params,
        )
    def custom_fit(self, est, train, validation=None, **params):
        """Fit the model using early stopping and update evals attr.

        est: estimator to fit; train/validation: (X, y) tuples;
        **params: extra keyword arguments forwarded to est.fit.
        """
        from lightgbm.callback import early_stopping, log_evaluation
        n_estimators = est.get_params().get("n_estimators", 100)
        rounds = self._get_early_stopping_rounds(params, n_estimators)
        eval_set = params.pop("eval_set", [train, validation] if validation else None)
        # log_evaluation(0): period=0, i.e. suppress per-iteration evaluation logs.
        callbacks = params.pop("callbacks", [log_evaluation(0)])
        if rounds:  # Add early stopping callback
            # Positional args: stopping_rounds, first_metric_only=True,
            # verbose=False — NOTE(review): confirm against the pinned
            # lightgbm version's early_stopping signature.
            callbacks.append(early_stopping(rounds, True, False))
        est.fit(
            X=train[0],
            y=train[1],
            eval_set=eval_set,
            callbacks=callbacks,
            **params,
        )
        if validation:
            # Create evals attribute with train and validation scores
            metric_name = list(est.evals_result_["training"])[0]  # Get first key
            self.evals = {
                "metric": metric_name,
                "train": est.evals_result_["training"][metric_name],
                "test": est.evals_result_["valid_1"][metric_name],
            }
            # Iterations actually run vs the configured maximum.
            self._stopped = (len(self.evals["train"]), n_estimators)
    @staticmethod
    def get_dimensions():
        """Return a list of the bounds for the hyperparameters."""
        return [
            Integer(20, 500, name="n_estimators"),
            Real(0.01, 1.0, "log-uniform", name="learning_rate"),
            Categorical([-1, *range(1, 17)], name="max_depth"),
            Integer(20, 40, name="num_leaves"),
            Categorical([1e-4, 1e-3, 0.01, 0.1, 1, 10, 100], name="min_child_weight"),
            Integer(1, 30, name="min_child_samples"),
            Categorical(half_to_one_inc, name="subsample"),
            Categorical([0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0], name="colsample_bytree"),
            Categorical([0, 0.01, 0.1, 1, 10, 100], name="reg_alpha"),
            Categorical([0, 0.01, 0.1, 1, 10, 100], name="reg_lambda"),
        ]
class CatBoost(BaseModel):
    """Categorical Boosting Machine."""

    acronym = "CatB"
    fullname = "CatBoost"
    needs_scaling = True
    accepts_sparse = True
    supports_gpu = True
    goal = ["class", "reg"]

    @property
    def est_class(self):
        """Return the estimator's class (imported lazily)."""
        from catboost import CatBoostClassifier, CatBoostRegressor

        return CatBoostClassifier if self.T.goal == "class" else CatBoostRegressor

    def get_estimator(self, **params):
        """Return the model's estimator with unpacked parameters."""
        defaults = dict(
            bootstrap_type=params.pop("bootstrap_type", "Bernoulli"),  # For subsample
            train_dir=params.pop("train_dir", ""),
            allow_writing_files=params.pop("allow_writing_files", False),
            thread_count=params.pop("n_jobs", self.T.n_jobs),
            task_type=params.pop("task_type", "GPU" if self.T.gpu else "CPU"),
            verbose=params.pop("verbose", False),
            random_state=params.pop("random_state", self.T.random_state),
        )
        return self.est_class(**defaults, **params)

    def custom_fit(self, est, train, validation=None, **params):
        """Fit the model using early stopping and update evals attr."""
        total_rounds = est.get_params().get("n_estimators", 100)
        patience = self._get_early_stopping_rounds(params, total_rounds)
        est.fit(
            X=train[0],
            y=train[1],
            eval_set=params.pop("eval_set", validation),
            early_stopping_rounds=patience,
            **params,
        )
        if validation:
            # Record train/validation learning curves for the first metric key.
            metric = list(est.evals_result_["learn"])[0]
            self.evals = {
                "metric": metric,
                "train": est.evals_result_["learn"][metric],
                "test": est.evals_result_["validation"][metric],
            }
            self._stopped = (len(self.evals["train"]), total_rounds)

    @staticmethod
    def get_dimensions():
        """Return a list of the bounds for the hyperparameters."""
        return [
            Integer(20, 500, name="n_estimators"),
            Real(0.01, 1.0, "log-uniform", name="learning_rate"),
            Categorical([None, *range(1, 17)], name="max_depth"),
            Integer(1, 30, name="min_child_samples"),
            Categorical(half_to_one_inc, name="subsample"),
            Categorical([0, 0.01, 0.1, 1, 10, 100], name="reg_lambda"),
        ]
class LinearSVM(BaseModel):
"""Linear Support Vector Machine."""
acronym = "lSVM"
fullname = "Linear SVM"
needs_scaling = True
accepts_sparse = True
supports_gpu = True
goal = ["class", "reg"]
@property
def | |
<filename>ecmwfapi/custom_http/custom_http.py
#
# (C) Copyright 2012-2013 ECMWF.
#
# This software is licensed under the terms of the Apache Licence Version 2.0
# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
# In applying this licence, ECMWF does not waive the privileges and immunities
# granted to it by virtue of its status as an intergovernmental organisation nor
# does it submit to any jurisdiction.
#
# (C) Copyright 2017 <NAME>.
from .exceptions import CustomHttpError
import concurrent.futures
import httplib2
import socket
import sys
import time
# Python 2 backwards compatibility: queue module called Queue in python 2
if sys.version_info > (3, 0):
import queue
else:
import Queue as queue
def get_request(url, headers=None, timeout=30, disable_ssl_validation=False):
    """
    Retrieve contents of a page, passing any exceptions. Does not follow redirects.
    :param url: URL to retrieve
    :param headers: request headers
    :param timeout: timeout of request in seconds
    :param disable_ssl_validation: whether to disable SSL validation in httplib2
    :return: page data
    """
    try:
        client = httplib2.Http(
            timeout=timeout,
            disable_ssl_certificate_validation=disable_ssl_validation,
        )
        client.follow_redirects = False
        response, body = client.request(url, 'GET', '', headers=headers)
    except (httplib2.ServerNotFoundError, ConnectionResetError, ConnectionAbortedError,
            ConnectionRefusedError, ConnectionError) as e:
        # Network-level failures are wrapped in the library's own exception type.
        raise CustomHttpError("Could not retrieve URL %s. Additional info: %s" % (url, e))
    except socket.timeout:
        raise CustomHttpError("Request timed out after specified timeout period of %s seconds" % timeout)
    except Exception as e:
        raise CustomHttpError("Other unknown exception: %s" % e)
    return [response, body]
def post_request(url, data, headers=None, timeout=30, disable_ssl_validation=False):
    """
    Retrieve contents of a page with a POST request and one payload data object, passing any exceptions. Does not
    follow redirects.
    :param url: URL to retrieve
    :param data: Payload data object
    :param headers: request headers
    :param timeout: timeout of request in seconds
    :param disable_ssl_validation: whether to disable SSL validation in httplib2
    :return: page data
    """
    # Prepare headers even if there are none, as we're doing a post request.
    # Bug fix: copy the dict so the caller's headers object is not mutated by
    # the Content-type assignment below.
    headers = dict(headers) if headers else {}
    headers['Content-type'] = 'application/x-www-form-urlencoded'
    try:
        h = httplib2.Http(timeout=timeout, disable_ssl_certificate_validation=disable_ssl_validation)
        h.follow_redirects = False
        resp, content = h.request(url, 'POST', data, headers=headers)
    except (httplib2.ServerNotFoundError, ConnectionResetError, ConnectionAbortedError, ConnectionRefusedError,
            ConnectionError) as e:
        # Network-level failures are wrapped in the library's own exception type.
        raise CustomHttpError("Could not retrieve URL %s. Additional info: %s" % (url, e))
    except socket.timeout:
        raise CustomHttpError("Request timed out after specified timeout period of %s seconds" % timeout)
    except Exception as e:
        raise CustomHttpError("Other unknown exception: %s" % e)
    return [resp, content]
def delete_request(url, headers=None, timeout=30, disable_ssl_validation=False):
    """
    Retrieve contents of a page, passing any exceptions. Does not follow redirects.
    :param url: URL to retrieve
    :param headers: request headers
    :param timeout: timeout of request in seconds
    :param disable_ssl_validation: whether to disable SSL validation in httplib2
    :return: page data
    """
    try:
        client = httplib2.Http(
            timeout=timeout,
            disable_ssl_certificate_validation=disable_ssl_validation,
        )
        client.follow_redirects = False
        response, body = client.request(url, 'DELETE', '', headers=headers)
    except (httplib2.ServerNotFoundError, ConnectionResetError, ConnectionAbortedError,
            ConnectionRefusedError, ConnectionError) as e:
        # Network-level failures are wrapped in the library's own exception type.
        raise CustomHttpError("Could not retrieve URL %s. Additional info: %s" % (url, e))
    except socket.timeout:
        raise CustomHttpError("Request timed out after specified timeout period of %s seconds" % timeout)
    except Exception as e:
        raise CustomHttpError("Other unknown exception: %s" % e)
    return [response, body]
def robust_get_file(url, file_handle, block_size=1048576, timeout=20, disable_ssl_validation=False):
    """
    Download an object in a robust way using HTTP partial (Range) downloading
    :param url: URL to download
    :param file_handle: open pointer to file to store download in, data is appended
    :param block_size: size of individual download chunks during partial downloading
    :param timeout: timeout in seconds till individual block downloads are failed
    :param disable_ssl_validation: whether to disable SSL validation in httplib2
    :return: total content length of the downloaded object, in bytes
    :raises CustomHttpError: on invalid parameters or unrecoverable download errors
    """
    # Verify block size parameter
    if not isinstance(block_size, int):
        raise CustomHttpError("The block size should be an integer")
    elif block_size < 512:
        raise CustomHttpError("The block size should be at least 512 bytes")
    elif block_size > 268435456:
        raise CustomHttpError("The block size can not be more than 256 megabytes")
    # Verify timeout parameter
    if not isinstance(timeout, int):
        raise CustomHttpError("The timeout should be an integer")
    elif timeout < 1:
        raise CustomHttpError("The timeout should be at least 1 second")
    elif timeout > 86400:
        raise CustomHttpError("The timeout can not be more than 86400 seconds")
    # Define HTTP handler
    http_handle = httplib2.Http(timeout=timeout, disable_ssl_certificate_validation=disable_ssl_validation)
    # Retrieve header first in order to determine file size (up to 5 retries)
    connected = False
    connection_retries = 0
    headers = None
    while not connected:
        if connection_retries > 0:
            print("Failed to retrieve header information, retry %s of 5" % connection_retries)
        try:
            headers, _ = http_handle.request(url, 'HEAD', '', headers={})
            connected = True
        except httplib2.ServerNotFoundError as e:
            connection_retries += 1
            if connection_retries > 5:
                raise CustomHttpError("The IP address of %s could not be determined. Additional info: %s" % (url, e))
        except socket.timeout:
            connection_retries += 1
            if connection_retries > 5:
                raise CustomHttpError("The connection with %s timed out while retrieving header information" % url)
    try:
        content_length = int(headers['content-length'])
    except KeyError:
        raise CustomHttpError("Content length not set")
    # Download block by block; HTTP Range end offsets are inclusive.
    block_start = 0
    block_end = block_size
    # was `>`: when block_end == content_length the first Range request asked
    # one byte past the end of the object; `>=` clamps to the last valid offset
    # (consistent with the clamp inside the loop below)
    if block_end >= content_length:
        block_end = content_length - 1
    while content_length > block_start and block_end != block_start:
        file_handle.write(_get_block(http_handle, url, block_start, block_end))
        block_start = block_end + 1
        block_end += block_size
        if block_end >= content_length:
            block_end = content_length - 1
    return content_length
def robust_get_file_parallel(url, file_handle, block_size=1048576, timeout=20, disable_ssl_validation=False, threads=5):
    """
    Download an object in a robust way using HTTP partial downloading, and process multiple blocks in parallel
    :param url: URL to download
    :param file_handle: open pointer to file to store download in, data is appended
    :param block_size: size of individual download chunks during partial downloading
    :param timeout: timeout in seconds till individual block downloads are failed
    :param disable_ssl_validation: whether to disable SSL validation in httplib2
    :param threads: number of threads to download blocks
    :return: total content length of the downloaded object, in bytes
    :raises CustomHttpError: on invalid parameters or unrecoverable download errors
    """
    # Verify block size parameter
    if not isinstance(block_size, int):
        raise CustomHttpError("The block size should be an integer")
    elif block_size < 512:
        raise CustomHttpError("The block size should be at least 512 bytes")
    elif block_size > 268435456:
        raise CustomHttpError("The block size can not be more than 256 megabytes")
    # Verify timeout parameter
    if not isinstance(timeout, int):
        raise CustomHttpError("The timeout should be an integer")
    elif timeout < 1:
        raise CustomHttpError("The timeout should be at least 1 second")
    elif timeout > 86400:
        raise CustomHttpError("The timeout can not be more than 86400 seconds")
    # Define block result storage (shared with worker threads; dict ops are
    # atomic under the GIL) and the work queue
    result_blocks = {}
    work_queue = queue.Queue()
    # Launch worker threads; keep the futures so worker crashes can be detected
    # instead of silently swallowed (which previously hung the writer loop)
    thread_pool = concurrent.futures.ThreadPoolExecutor(max_workers=threads)
    workers = [thread_pool.submit(_thread_download, work_queue, result_blocks,
                                  url, timeout, disable_ssl_validation)
               for _ in range(threads)]
    # Define HTTP handler
    http_handle = httplib2.Http(timeout=timeout, disable_ssl_certificate_validation=disable_ssl_validation)
    # Retrieve header first in order to determine file size (up to 5 retries)
    connected = False
    connection_retries = 0
    headers = None
    while not connected:
        if connection_retries > 0:
            print("Failed to retrieve header information, retry %s of 5" % connection_retries)
        try:
            headers, _ = http_handle.request(url, 'HEAD', '', headers={})
            connected = True
        except httplib2.ServerNotFoundError as e:
            connection_retries += 1
            if connection_retries > 5:
                raise CustomHttpError("The IP address of %s could not be determined. Additional info: %s" % (url, e))
        except socket.timeout:
            connection_retries += 1
            if connection_retries > 5:
                raise CustomHttpError("The connection with %s timed out while retrieving header information" % url)
    try:
        content_length = int(headers['content-length'])
    except KeyError:
        raise CustomHttpError("Content length not set")
    # Enqueue one work item per block; Range end offsets are inclusive.
    block_start = 0
    block_end = block_size
    # was `>`: `>=` avoids requesting one byte past the end of the object
    # when block_end == content_length (consistent with the loop clamp)
    if block_end >= content_length:
        block_end = content_length - 1
    block_id = 0
    while content_length > block_start and block_end != block_start:
        work_queue.put([block_id, block_start, block_end])
        block_start = block_end + 1
        block_end += block_size
        if block_end >= content_length:
            block_end = content_length - 1
        block_id += 1
    # Insert poison pills in queue so each worker exits after the queue drains
    for _ in range(threads):
        work_queue.put(None)
    # Write all result blocks to the result file, strictly in order
    written_block_id = 0
    while written_block_id < block_id:
        try:
            file_handle.write(result_blocks.pop(written_block_id))
            written_block_id += 1
        except KeyError:
            # Next block not ready yet. If every worker has exited and the
            # block still never arrived, surface the failure instead of
            # spinning forever (the original busy-waited indefinitely).
            if all(w.done() for w in workers):
                thread_pool.shutdown(wait=False)
                for w in workers:
                    if w.exception() is not None:
                        raise w.exception()
                raise CustomHttpError("Download stalled: block %s never arrived" % written_block_id)
            time.sleep(0.1)
    # Release the worker threads (previously the executor was never shut down)
    thread_pool.shutdown(wait=True)
    return content_length
def _get_block(http_handle, url, block_start, block_end):
    """Fetch one inclusive byte range [block_start, block_end] of `url`.

    Retries up to 7 times on any error; raises CustomHttpError if every
    attempt fails. Returns the raw response body on success.
    """
    range_headers = {
        'Range': 'bytes=%s-%s' % (block_start, block_end)
    }
    for _attempt in range(7):
        try:
            _resp, body = http_handle.request(url, 'GET', '', range_headers)
            return body
        except Exception as e:
            print("Failed a block, retrying (%s)" % e)
    raise CustomHttpError("Downloading of block failed after 7 retries")
def _thread_download(work_queue, result_blocks, url, timeout, disable_ssl_validation):
"""
Download a block and save result in provided dictionary. Called by the thread pool.
:param work_queue: queue | |
<reponame>chuckie82/psgeom<filename>psgeom/camera.py<gh_stars>0
#!/usr/bin/env python
"""
Detector Geometries
===================
Briefly, geometries are organized hierarchically: there are `SensorElements`
that compose the leaves of a tree. These represent arbitrary objects that
actually measure e.g. photon intensities of scattered x-rays. An example of
such an element is an ASIC on a Pilatus, or a 2x1 on a CSPAD.
These elements are then "mounted" onto `CompoundCamera`s, which represent
physical units that translate and rotate together. For example, 8 CSPAD 2x1s
are mounted on a single "Quad", that moves as a collective unit. The elements
composing a `CompoundCamera` instance can be SensorElements or other
CompoundCameras.
A note about how the hierarchical geometry orientation is applied. Each node
in the graph contains a rotation and translation with respect to its parent.
The translation is applied *first*. So if T is a translation operator, and
R is a rotation,
x_final = Rx + p
In practice, both are performed using matrices.
Author: <NAME> <<EMAIL>>
June 11, 2015
"""
import re
import h5py
import warnings
import numpy as np
import scipy.ndimage.interpolation as interp
from psgeom import moveable
from psgeom import sensors
from psgeom import translate
from psgeom import basisgrid
from psgeom import metrology
_STRICT = False # global used for some testing purposes, ignore this
def arctan3(y, x):
    """
    Compute the inverse tangent. Like arctan2, but returns a value in [0,2pi).

    Parameters
    ----------
    y, x : float or np.ndarray
        Cartesian coordinates (arrays must be of broadcastable shape).

    Returns
    -------
    theta : float or np.ndarray
        The angle(s) in radians, mapped into [0, 2pi).
    """
    theta = np.arctan2(y, x)
    # isinstance (not `type(...) ==`) so ndarray subclasses also take the
    # vectorized branch
    if isinstance(theta, np.ndarray):
        theta[theta < 0.0] += 2 * np.pi
    else:
        if theta < 0.0:
            theta += 2 * np.pi
    return theta
class CompoundCamera(moveable.MoveableParent, moveable.MoveableObject):
    """
    The compound camera class contains its own local rotation and translation
    operations that provide a local frame for a set of children. The children
    can either be SensorElements or other CompoundCameras.
    """
    def __init__(self, type_name=None, id_num=0, parent=None,
                 rotation_angles=None,
                 translation=None):
        """
        Create a CompoundCamera.
        Parameters
        ----------
        type_name : str
            Give this detector a descriptive name. Often there might be
            two different instances of CompoundCamera with the same name,
            if they are identical units. E.g., "QUAD:V1".
        id_num : int
            The unit should have an index. This is not only a unique identifier
            but helps order elements within the camera tree, which can change
            the way someone wants to map pixel intensities (somewhere else in
            memory) onto the camera geometry.
        parent : CompoundCamera
            The parent frame, specified by an instance of CompoundCamera.
        rotation_angles : np.ndarray
            Three Cardan angles specifying the local frame rotation operator.
            Argument must be a one-D 3-vector. Defaults to zero rotation.
        translation : np.ndarray
            The xyz translation of the local frame. Argument must be a one-D
            3-vector. Defaults to zero translation.
        """
        # the old np.array(...) default arguments were created once at
        # function-definition time and shared by every instance (mutable
        # default argument bug); None sentinels give each instance its own
        # fresh array while keeping the call signature backward-compatible
        if rotation_angles is None:
            rotation_angles = np.array([0.0, 0.0, 0.0])
        if translation is None:
            translation = np.array([0.0, 0.0, 0.0])
        self._type_name = type_name
        self._id = id_num
        self.set_parent(parent)
        self._children = []
        self._rotation_angles = rotation_angles
        self._translation = translation
        return
    def add_child(self, child):
        """
        Add a child to the compound camera. This can either be a
        `SensorElement` or another `CompoundCamera`.
        Parameters
        ----------
        child : SensorElement or CompoundCamera
            The child object to add to this CompoundCamera node.
        Raises
        ------
        TypeError
            If `child` is not a SensorElement or CompoundCamera.
        NameError
            If `child`, or another child with the same name, is already
            registered with this node.
        """
        if not (isinstance(child, CompoundCamera) or \
                isinstance(child, sensors.SensorElement)):
            raise TypeError('`child` must be type: SensorElement or '
                            'CompoundCamera')
        # sibling names must be unique so tree lookups are unambiguous
        for c in self.children:
            if c.name == child.name:
                if c is child:
                    raise NameError('Child object already registered with parent!')
                else:
                    raise NameError('Child with name %s already registered with'
                                    ' this parent (%s) -- please change the ID'
                                    ' number to give this object a unique name '
                                    'and re-register it as a child object' % \
                                    (child.name, self.name))
        self.children.append(child)
        child._parent = self
        return
    def draw_tree(self):
        """
        Sketch the camera tree, with this node as the root (higher levels in
        the hierarchy will not be shown)
        """
        # print() as a function call (a parenthesized expression in python 2)
        # is valid in both python 2 and 3, unlike the former print statements
        print("--- " + str(self.name))
        def draw_child_tree(current, depth):
            for c in current.children:
                print(depth * " " + "|-- " + str(c.name))
                if hasattr(c, 'children'):
                    draw_child_tree(c, depth + 1)
        draw_child_tree(self, 1)
        return
    def _sort_tree(self):
        """
        Order the tree by the id_num of each tree node (recursively).
        """
        self._children = sorted(self._children, key=lambda x: x.id_num)
        for c in self.children:
            if hasattr(c, '_sort_tree'):
                c._sort_tree()
        return
    @property
    def id_num(self):
        # index of this unit within the camera tree
        return self._id
    @property
    def num_children(self):
        # number of direct children of this node
        return len(self._children)
    @property
    def children(self):
        # direct children: SensorElements and/or CompoundCameras
        return self._children
    @property
    def leaves(self):
        """
        All leaf (sensor) nodes of the tree rooted at this node, collected by
        depth-first traversal.
        """
        leaves = []
        def add_leaves(node):
            for c in node.children:
                if hasattr(c, 'children'):
                    add_leaves(c)
                else:
                    leaves.append(c)
        add_leaves(self)
        return leaves
    @property
    def num_pixels(self):
        # total pixel count summed over all children
        return np.sum([ c.num_pixels for c in self._children ])
    @property
    def xyz(self):
        # stacked xyz coordinate arrays of all children
        return np.array([ c.xyz for c in self._children ])
    def to_psana_file(self, filename, title='geometry'):
        """
        Write a geometry in psana format.
        Parameters
        ----------
        filename : str
            The path of the file on disk.
        Optional Parameters
        -------------------
        title : str
            Title of the geometry saved inside the file
        """
        translate.write_psana(self, filename, title)
        return
    @classmethod
    def from_psana_file(cls, filename):
        """
        Load a geometry in psana format.
        Parameters
        ----------
        filename : str
            The path of the file on disk.
        Returns
        -------
        root : CompoundCamera
            The CompoundCamera instance
        References
        ----------
        ..[1] https://confluence.slac.stanford.edu/display/PSDM/Detector+Geometry
        """
        ret = translate.load_psana(cls, filename)
        ret._sort_tree()
        return ret
class CompoundAreaCamera(CompoundCamera):
"""
A specific kind of CompoundCamera, one with sensor elements that are
planar rectangles. Most detectors should be CompoundAreaCameras.
"""
    def to_text_file(self, filename):
        """
        Write a geometry in raw text psf format.
        Parameters
        ----------
        filename : str
            The path of the file on disk.
        """
        # thin wrapper: serialization itself lives in the translate module
        translate.write_psf_text(self, filename)
        return
    @classmethod
    def from_text_file(cls, filename):
        """
        Load a geometry in raw-text psf format.
        Parameters
        ----------
        filename : str
            The path of the file on disk.
        Returns
        -------
        root : detector.CompoundCamera
            The CompoundCamera instance
        """
        # TODO: parsing the raw-text psf format (the inverse of to_text_file)
        # has not been written yet
        raise NotImplementedError()
def to_hdf5(self, filename):
"""
Save a geometry's xyz coordinates (self.xyz) in an HDF file.
Parameters
----------
filename : str
The path of the file on disk.
"""
f = h5py.File(filename, 'w')
f['xyz'] = self.xyz
f.close()
return
    def from_hdf5(self, filename):
        # TODO: loading xyz coordinates back from HDF5 is not implemented.
        # NOTE(review): probably intended to be a classmethod (cf.
        # from_psana_file) rather than an instance method -- confirm
        raise NotImplementedError()
def to_basisgrid(self):
"""
Convert this object to a BasisGrid object, which represents the camera
geometry as a set of vectors specifying the slow-scan and fast-scan
edges of a set of panels
Returns
-------
bg : basisgrid.BasisGrid
The basisgrid object.
"""
bg = basisgrid.BasisGrid()
for sensor in self.leaves:
if not isinstance(sensor, sensors.PixelArraySensor):
raise TypeError('basisgrid representation is only compatible '
'with detectors that are entirely comprised of '
'PixelArrayElements')
p, s, f = sensor.psf
bg.add_grid(p, s, f, sensor.shape)
return bg
    @classmethod
    def from_basisgrid(cls, bg, element_type=sensors.Mtrx):
        """
        Convert a BasisGrid object to a CompoundCamera.
        Parameters
        ----------
        bg : basisgrid.BasisGrid
            The basisgrid object to convert.
        element_type : sensors.PixelArraySensor
            The SensorElement type to populate the camera with.
        Returns
        -------
        cd : CompoundCamera
            The compound camera instance.
        """
        if not isinstance(bg, basisgrid.BasisGrid):
            raise TypeError('`bg` argument must be instance of BasisGrid,'
                            ' got: %s' % type(bg))
        # flat camera: one root frame with one sensor element per grid panel
        cd = cls(type_name='root_frame', id_num=0, parent=None)
        for g in range(bg.num_grids):
            p, s, f, shape = bg.get_grid(g)
            # pixel dimensions are the lengths of the slow/fast scan vectors
            pixel_shape = (np.linalg.norm(s),
                           np.linalg.norm(f))
            # to compute the rotation, find the
            us = s / pixel_shape[0] # unit vector
            uf = f / pixel_shape[1] # unit vector
            n = np.cross(us, uf) # tested for orthog. in next fxn
            # remember: in the matrix convention (Mikhail uses), +x is slow
            # and +y is fast
            ra = moveable._angles_from_rotated_frame(us, uf, n)
            # translation is just p
            tr = p
            # NOTE(review): `pas` is never used after construction -- this
            # relies on element_type registering itself with `cd` via the
            # parent= argument; confirm the sensor constructor does so
            pas = element_type(shape,
                               pixel_shape,
                               id_num=g,
                               parent=cd,
                               rotation_angles=ra,
                               translation=tr)
        return cd
def to_crystfel_file(self, filename, coffset=None):
"""
Write a geometry to disk in CrystFEL format. Note that some fields
will be written but left blank -- these are fields you probably should
fill in before performing any computations in CrystFEL, but are
information that we have no handle on (e.g. detector gain).
When coffset is not given, coffset is set to detector distance and
and clen is set to zero.
Thanks to <NAME> & <NAME> for assistance with this function.
Parameters
----------
filname : str
The name of file to write. Will end | |
import psycopg2
import os
from faker import Faker
from random import randint
from dotenv import load_dotenv
load_dotenv()  # pull database credentials from a local .env file
# Database connection settings, read from the environment
DATABASE = os.getenv('DATABASE')
DATABASE_USERNAME = os.getenv('DATABASE_USERNAME')
DATABASE_PASSWORD = os.getenv('DATABASE_PASSWORD')
# DATABASE_URL = os.environ.get('DATABASE_URL')
# NOTE(review): `<PASSWORD>` below is a scrubbed placeholder and is not valid
# Python -- presumably this should read `password=DATABASE_PASSWORD`; confirm
con = psycopg2.connect(database=DATABASE,user=DATABASE_USERNAME,
password=<PASSWORD>)
# con = psycopg2.connect(DATABASE_URL)
cur = con.cursor()
# Seeding parameters: counts of exams/subjects/questions/options to generate,
# and the starting primary-key id for question rows
num_exams = 10
num_subjects = 10
num_questions = 5
num_options = 4
initial_preg_id = 1
preguntas_mate = [
{
"texto_pregunta": "La expresión 2x + 3 = 7 es una",
"materia_id": 1,
"opciones": [
{
"texto_opcion":"inecuacion",
"es_correcta": False
},
{
"texto_opcion":"desigualdad",
"es_correcta": False
},
{
"texto_opcion":"ecuación",
"es_correcta": True
},
{
"texto_opcion":"identidad",
"es_correcta": False
}
]
},
{
"texto_pregunta": "Al resolver 3x + 2 = 4, ¿cuál es el valor x",
"materia_id": 1,
"opciones": [
{
"texto_opcion":"x = 2/3",
"es_correcta": True
},
{
"texto_opcion":"x = 3/6",
"es_correcta": False
},
{
"texto_opcion":"x = 3/2",
"es_correcta": False
},
{
"texto_opcion":"x = 6/3",
"es_correcta": False
}
]
},
{
"texto_pregunta": "Al resolver -2x + 6 >= 16, ¿cuál es el valor de x?",
"materia_id": 1,
"opciones": [
{
"texto_opcion":"x < -5",
"es_correcta": False
},
{
"texto_opcion":"x mayor o igual a 5",
"es_correcta": False
},
{
"texto_opcion":"x menor o igual a -5",
"es_correcta": True
},
{
"texto_opcion":"x > 5",
"es_correcta": False
}
]
},
{
"texto_pregunta": "¿Cuál de las siguientes funciones tienen un comportamiento creciente?",
"materia_id": 1,
"opciones": [
{
"texto_opcion":"f(x) = -3x",
"es_correcta": False
},
{
"texto_opcion":"f(x) = 3 elevado a -x",
"es_correcta": False
},
{
"texto_opcion":"3/x",
"es_correcta": False
},
{
"texto_opcion":"x al cubo",
"es_correcta": True
}
]
},
{
"texto_pregunta": "A cuánto equivale 45° en radianes",
"materia_id": 1,
"opciones": [
{
"texto_opcion":"1/4 pi",
"es_correcta": True
},
{
"texto_opcion":"2/3 pi",
"es_correcta": False
},
{
"texto_opcion":"3/4 pi",
"es_correcta": False
},
{
"texto_opcion":"4/3 pi",
"es_correcta": False
}
]
},
{
"texto_pregunta": "Selecciona la función que tiene un desplazamiento de fase de pi unidades a la derecha",
"materia_id": 1,
"opciones": [
{
"texto_opcion":"f(x) = sen(pi x)",
"es_correcta": False
},
{
"texto_opcion":"f(x) = sen( x + pi)",
"es_correcta": False
},
{
"texto_opcion":"f(x) = sen(x - pi)",
"es_correcta": True
},
{
"texto_opcion":"f(x) = pi sen(x)",
"es_correcta": False
}
]
},
{
"texto_pregunta": "Encuentra las coordenadas del punto medio entre los puntos P(0,2) y Q(4,6)",
"materia_id": 1,
"opciones": [
{
"texto_opcion":"(2,3)",
"es_correcta": False
},
{
"texto_opcion":"(2,4)",
"es_correcta": True
},
{
"texto_opcion":"(1,4)",
"es_correcta": False
},
{
"texto_opcion":"(1,5)",
"es_correcta": False
}
]
},
{
"texto_pregunta": "Un triángulo está conformado por los vértices P(-7,1), Q(9,3) y R(3,5), ¿cuál es la ecuación de la mediana que pasa por el vértice P?",
"materia_id": 1,
"opciones": [
{
"texto_opcion":"x -8y + 15 = 0",
"es_correcta": False
},
{
"texto_opcion":"3x + 13y - 34 = 0",
"es_correcta": True
},
{
"texto_opcion":"3x - y + 22 = 0",
"es_correcta": False
},
{
"texto_opcion":"2x -5y + 19 = 0",
"es_correcta": False
}
]
},
{
"texto_pregunta": "La ecuación de la parábola cuyo eje focal es el eje y, con el parámetro p= -5 y vértice en el origen es",
"materia_id": 1,
"opciones": [
{
"texto_opcion":"x^2 - 20x = 0",
"es_correcta": False
},
{
"texto_opcion":"y^2 - 20x = 0",
"es_correcta": False
},
{
"texto_opcion":"y^2 + 20x = 0",
"es_correcta": False
},
{
"texto_opcion":"x^2 + 20y = 0",
"es_correcta": True
}
]
},
{
"texto_pregunta": "Determina la expresión algebraica que cumplen las coordenadas de los puntos P(x,y), si la suma de sus distancias a los puntos F1(0,-2) y F2(0,2) es igual a 8.",
"materia_id": 1,
"opciones": [
{
"texto_opcion":"3x^2 + 4y^2 -48 = 0",
"es_correcta": False
},
{
"texto_opcion":"4x^2 + 3y^2 - 48 = 0",
"es_correcta": True
},
{
"texto_opcion":"16x^2 + 12y^2 - 19 = 0",
"es_correcta": False
},
{
"texto_opcion":"12x^2 + 16y^2- 19 = 0",
"es_correcta": False
}
]
},
{
"texto_pregunta": "La ecuación de la hipérbola centrada en el orgien con lado recto igual a 10 y vértice en V(0,-9) es",
"materia_id": 1,
"opciones": [
{
"texto_opcion":"9x^2-5y^2 = 405",
"es_correcta": False
},
{
"texto_opcion":"5y^2-9x^2 = 405",
"es_correcta": True
},
{
"texto_opcion":"9x^2 - 10y^2 = 90",
"es_correcta": False
},
{
"texto_opcion":"10x^2 - 9y^2 = 90",
"es_correcta": False
}
]
},
{
"texto_pregunta": "A partir de la siguiente ecuación de segundo grado determina el criterio utilizado para representar una elipse",
"materia_id": 1,
"opciones": [
{
"texto_opcion":"C^2 - 4AB < 0",
"es_correcta": False
},
{
"texto_opcion":"B^2 - 4AC > 0",
"es_correcta": False
},
{
"texto_opcion":"C^2 - 4AB > 0",
"es_correcta": False
},
{
"texto_opcion":"B^2 - 4AC < 0",
"es_correcta": True
}
]
},
{
"texto_pregunta": "La función f(x) = |x| es derivable en todo punto de su dominio, excepto, en",
"materia_id": 1,
"opciones": [
{
"texto_opcion":"-2",
"es_correcta": False
},
{
"texto_opcion":"-1",
"es_correcta": False
},
{
"texto_opcion":"0",
"es_correcta": True
},
{
"texto_opcion":"2",
"es_correcta": False
}
]
},
{
"texto_pregunta": "La pendiente de la tangente a la curva f(x)=e^3x en el punto P(0,1) es igual a",
"materia_id": 1,
"opciones": [
{
"texto_opcion":"0",
"es_correcta": False
},
{
"texto_opcion":"1",
"es_correcta": False
},
{
"texto_opcion":"2",
"es_correcta": False
},
{
"texto_opcion":"3",
"es_correcta": True
}
]
},
{
"texto_pregunta": "¿A cuántos grados equivalen 11/18 pi radianes",
"materia_id": 1,
"opciones": [
{
"texto_opcion":"220°",
"es_correcta": False
},
{
"texto_opcion":"110°",
"es_correcta": True
},
{
"texto_opcion":"169°",
"es_correcta": False
},
{
"texto_opcion":"198°",
"es_correcta": False
}
]
},
{
"texto_pregunta": "¿A cuántos grados equivalen 11/18 pi radianes?",
"materia_id": 1,
"opciones": [
{
"texto_opcion":"220°",
"es_correcta": False
},
{
"texto_opcion":"110°",
"es_correcta": True
},
{
"texto_opcion":"169°",
"es_correcta": False
},
{
"texto_opcion":"198°",
"es_correcta": False
}
]
},
{
"texto_pregunta": "¿Cuál es la ecuación de la asíntota vertical de la siguiente función: f(x) = 2log(x-3)?",
"materia_id": 1,
"opciones": [
{
"texto_opcion":"x = 3",
"es_correcta": True
},
{
"texto_opcion":"y = -3",
"es_correcta": False
},
{
"texto_opcion":"x = -3",
"es_correcta": False
},
{
"texto_opcion":"y = 3",
"es_correcta": False
}
]
},
{
"texto_pregunta": "Encuentra las coordenadas del punto S que divide por la mitad al segmento conformado por los puntos P(-1,2) y Q(-2,5)",
"materia_id": 1,
"opciones": [
{
"texto_opcion":"S(-0.5,3.5)",
"es_correcta": False
},
{
"texto_opcion":"S(-1.5,2.5)",
"es_correcta": False
},
{
"texto_opcion":"S(-2.5,3.5)",
"es_correcta": False
},
{
"texto_opcion":"S(-1.5, 3.5)",
"es_correcta": True
}
]
},
{
"texto_pregunta": "La pendiente de la recta 3x + 6y - 1 = 0",
"materia_id": 1,
"opciones": [
{
"texto_opcion":"-3",
"es_correcta": False
},
{
"texto_opcion":"-1/2",
"es_correcta": True
},
{
"texto_opcion":"1/2",
"es_correcta": False
},
{
"texto_opcion":"3",
"es_correcta": False
}
]
},
{
"texto_pregunta": "Determina la ecuación de la parábola con directriz x +3 = 0, vértice en el origen y eje focal que coincide con el eje x.",
"materia_id": 1,
"opciones": [
{
"texto_opcion":"-3",
"es_correcta": False
},
{
"texto_opcion":"-1/2",
"es_correcta": True
},
{
"texto_opcion":"1/2",
"es_correcta": False
},
{
"texto_opcion":"3",
"es_correcta": False
}
]
},
{
"texto_pregunta": "Dada la ecuación general de segundo grado con dos variables Ax^2 + Bxy + Cy^2 + Dx + Ey + F = 0, si B = 0, se tiene que",
"materia_id": 1,
"opciones": [
{
"texto_opcion":"la cónica pasa por el origen del marco de referencia",
"es_correcta": False
},
{
"texto_opcion":"los ejes de simetría de la cónica son paralelos a los ejes coordenados",
"es_correcta": True
},
{
"texto_opcion":"los ejes de simetría de la cónica son perpendiculares a los ejes coordenados",
"es_correcta": False
},
{
"texto_opcion":"la cónica está centrada en el origen del marco de referencia",
"es_correcta": False
}
]
},
{
"texto_pregunta": "Un estudiante debe obtener un promedio mínimo de 8 y calificaciones no menores a 7 en cada uno de los cuatro parciales. Si tiene las siguientes calificaciones en los primeros 3 parciales: 7.2, 8.5 y 7.9, ¿cuál es la mínima calificación que deberá obtener en el último parcial para aprobar el curso?",
"materia_id": 1,
"opciones": [
{
"texto_opcion":"8.1",
"es_correcta": False
},
{
"texto_opcion":"8.2",
"es_correcta": False
},
{
"texto_opcion":"8.4",
"es_correcta": True
},
{
"texto_opcion":"8.9",
"es_correcta": False
}
]
},
{
"texto_pregunta": "¿Cuántos conejos C y gallinas G hay en un corral si en su conjunto hacen un total de 61 cabezas y 196 patas?",
"materia_id": 1,
"opciones": [
{
"texto_opcion":"C = 47, G = 14",
"es_correcta": False
},
{
"texto_opcion":"C = 40, G = 21",
| |
' ' + row["idol"] + ' (CV.' + row["cv"] + ')')
name[langint] += '[' + lim + rarity_str[row["rarity"]] + ']' + row["name"] + ' ' + row["idol"] + ' (CV.' + row["cv"] + ')\n'
print('Actived ' + str(len(mlg_data[langint])) + ' cards.([FES]' + str(count[3]) + ', [SSR]' + str(count[2]) + ', [SR]' + str(count[1]) + ', [R]' + str(count[0]) + ')')
print('Loaded cards. (Japanese:' + str(len(mlg_all[0])) + ', Korea:' + str(len(mlg_all[1])) + ', China:' + str(len(mlg_all[2])) + ')')
emb = discord.Embed(title='Pickup Cards')
if not gacha_mode[0] == 'skip': emb.add_field(name='Japanese:' + pickup_name[0], value=name[0])
if not gacha_mode[1] == 'skip': emb.add_field(name='Korean:' + pickup_name[1], value=name[1])
if not gacha_mode[2] == 'skip': emb.add_field(name='Chinese:' + pickup_name[2], value=name[2])
emb.set_footer(text='Version JA:' + mlgver[0] + ', ASIA:' + mlgver[1])
if flag == 1: await msg.edit(content='All MLreload process completed successfully.', embed=emb)
print(strtimestamp() + 'All MLreload process completed successfully.')
print(strtimestamp() + '-----------------------------------------')
return
async def gacha_note(message,langint):
    """Show the invoking user a paginated embed of every SSR card they own.

    Reads the per-user ownership record from ./gacha/<lang><user_id>.txt --
    a string of '0'/'1' flags indexed by card id -- collects the owned cards
    from the global card list, and drives a reaction-based pager
    (prev / next / close) on the resulting embed.

    :param message: the discord.Message that triggered the command
    :param langint: language index into the module globals (langnamelist,
                    mlg_all, current_ver)
    """
    char_list = list()
    try:
        # ownership record: one '0'/'1' character per card id
        with open('./gacha/' + langnamelist[langint] + str(message.author.id) + '.txt', 'r') as f:
            listline = f.read()
            char_list = list(listline)
    except:
        # NOTE(review): bare except treats *any* failure (missing record,
        # I/O error, ...) as "no record"; consider narrowing to OSError
        import traceback
        traceback.print_exc()
        await message.channel.send(message.author.mention + _('所持SSRの記録がないか、エラーが発生しました。'))
        return
    text = ['']  # one string of card lines per embed page
    cards = []
    page = 0
    count = 0
    # gather owned cards; scanning rarity 0..3 while inserting at the front
    # leaves `cards` ordered by rarity, highest rarity first
    for n in range(4):
        for val in mlg_all[langint]:
            try:
                if char_list[val["id"]] == '1' and val["rarity"] == n:
                    cards.insert(0, val)
            except:
                # card id beyond the recorded flag string -> treated as not owned
                pass
    # paginate into pages of 10 cards each
    for val in cards:
        if count == 10:
            text.append('')
            page += 1
            count = 0
        text[page] += '\n[' + rarity_str[val["rarity"]] + ']' + val["name"] + ' ' + val["idol"]
        count += 1
    gacha_count = str()
    try:
        # per-game-version "dream star" currency balance for this user
        with open('./gacha_count/' + current_ver[langint] + '_' + str(message.author.id) + '.txt', 'r') as f:
            gacha_count = f.read()
    except:
        gacha_count = '0'
    fotter_text = _('ドリームスター所持数:') + gacha_count
    now = 1  # current page number, 1-based
    emb = discord.Embed(title=_('所持SSR一覧') + ' Page ' + str(now) + '/' + str(len(text)), description=text[now - 1])
    emb.set_author(name=message.author.name, icon_url=message.author.avatar_url)
    emb.set_footer(text=fotter_text)
    msg = await message.channel.send(_('見終わったら×で消してね!'), embed=emb)
    await msg.add_reaction('◀')
    await msg.add_reaction('▶')
    await msg.add_reaction('❌')
    # reaction-driven pager loop; `timeout` is a module-level global
    while True:
        try:
            target_reaction, user = await client.wait_for('reaction_add', timeout=timeout)
            if target_reaction.emoji == '◀' and user != msg.author:
                # previous page, wrapping around to the last page
                if not now == 1:
                    now -= 1
                else:
                    now = len(text)
                emb = discord.Embed(title=_('所持SSR一覧') + ' Page ' + str(now) + '/' + str(len(text)), description=text[now - 1])
                emb.set_author(name=message.author.name, icon_url=message.author.avatar_url)
                emb.set_footer(text=fotter_text)
                await msg.edit(embed=emb)
                await msg.remove_reaction(target_reaction.emoji, user)
            elif target_reaction.emoji == '▶' and user != msg.author:
                # next page, wrapping around to the first page
                if not now == len(text):
                    now += 1
                else:
                    now = 1
                emb = discord.Embed(title=_('所持SSR一覧') + ' Page ' + str(now) + '/' + str(len(text)), description=text[now - 1])
                emb.set_author(name=message.author.name, icon_url=message.author.avatar_url)
                emb.set_footer(text=fotter_text)
                await msg.edit(embed=emb)
                # NOTE(review): passes the Reaction object here but .emoji in
                # the branch above; discord.py accepts both -- confirm intended
                await msg.remove_reaction(target_reaction, user)
            elif target_reaction.emoji == '❌' and user != msg.author:
                # close: delete the embed message and stop paging
                await msg.delete()
                break
            else:
                pass
        except asyncio.TimeoutError:
            # no interaction within `timeout` seconds: swap the embed for a
            # timeout notice, then remove the message after 10 seconds
            await msg.edit(content=_('しばらく操作がなかったため、タイムアウトしました。'),embed=None)
            await asyncio.sleep(10)
            await msg.delete()
            break
async def mlg_touch(message,result,kind,vc,botmsg,langint):
fes_flag = 0
ssr_flag = 0
sr_flag = 0
author = message.author
if kind == 'ミリオンフェス' or kind == '百萬祭典' or kind == '밀리언 페스티벌':
for val in result:
if val["rarity"] == 3:
fes_flag = 1
pink_flag = random.randint(1, 20)
if pink_flag == 10:
img = 'https://i.imgur.com/fGpfCgB.gif'
elif pink_flag == 20:
img = 'https://i.imgur.com/jWTTZ0d.gif'
else:
img = 'https://i.imgur.com/0DxyVhm.gif'
break
elif val["rarity"] == 2:
ssr_flag = 1
elif val["rarity"] == 1:
sr_flag = 1
if not fes_flag == 1:
if ssr_flag == 1:
img = 'https://i.imgur.com/jWTTZ0d.gif'
elif sr_flag == 1 and not ssr_flag == 1:
img = 'https://i.imgur.com/vF7fDn3.gif'
else:
img = 'https://i.imgur.com/hEHa49X.gif'
else:
for val in result:
if val["rarity"] == 2:
ssr_flag = 1
break
if val["rarity"] == 1:
sr_flag = 1
if ssr_flag == 1:
img = 'https://i.imgur.com/jWTTZ0d.gif'
elif sr_flag == 1 and not ssr_flag == 1:
img = 'https://i.imgur.com/vF7fDn3.gif'
else:
img = 'https://i.imgur.com/hEHa49X.gif'
await asyncio.sleep(0.7)
waitemb = discord.Embed()
if fes_flag == 1 and pink_flag == 10: waitemb.set_image(url='https://i.imgur.com/ZC8JK9i.gif')
else: waitemb.set_image(url='https://i.imgur.com/da2w9YS.gif')
waitemb.set_footer(text=pickup_name[langint])
msg = await message.channel.send(message.author.mention, embed=waitemb)
await msg.add_reaction('👆')
try:
log = ''
count = 0
ssr_skip = []
ssr_count = []
while True:
target_reaction, user = await client.wait_for('reaction_add', timeout=timeout)
if user == author and target_reaction.emoji == '👆':
await msg.clear_reactions()
openemb = discord.Embed()
openemb.set_footer(text=kind)
openemb.set_image(url=img)
await msg.edit(embed=openemb)
if not vc == None:
await asyncio.sleep(0.4)
if fes_flag == 1 and not pink_flag == 20:
vc.play(discord.FFmpegPCMAudio('./resources/open_fes.mp3'))
else:
vc.play(discord.FFmpegPCMAudio('./resources/open.mp3'))
while vc.is_playing():
await asyncio.sleep(1)
else:
await asyncio.sleep(6)
break
while count < len(result):
result_10 = result[count]
if result_10["rarity"] == 3:
player_show = discord.FFmpegPCMAudio('./resources/fes.mp3')
await msg.clear_reactions()
elif result_10["rarity"] == 2:
player_show = discord.FFmpegPCMAudio('./resources/ssr.mp3')
await msg.clear_reactions()
elif result_10["rarity"] <= 1:
player_show = discord.FFmpegPCMAudio('./resources/normal.mp3')
desc = rarity_str[result_10["rarity"]] + ' ' + result_10["name"] + ' ' + result_10["idol"]
mlgnormalemb = discord.Embed(title=desc, description='(CV.' + result_10["cv"] + ')', colour=int(result_10["color"], 0))
footer_text = kind + ' ' + str((count + 1)) + '/' + str(len(result))
mlgnormalemb.set_author(name=author.name, icon_url=author.avatar_url)
mlgnormalemb.set_footer(text=footer_text)
mlgnormalemb.set_image(url=result_10["image"])
if not vc == None: vc.play(player_show)
#カード表示(SSRの場合特訓前)
await msg.edit(content=author.mention, embed=mlgnormalemb)
if result_10["rarity"] >= 2:
if not vc == None:
while vc.is_playing():
await asyncio.sleep(1)
vc.play(discord.FFmpegPCMAudio('./resources/ssr_talk.mp3'))
line = result_10["ssrText"].replace("ProP", author.name + "P")
mlgssremb = discord.Embed(title=desc, description='(CV.' + result_10["cv"] + ')', colour=int(result_10["color"], 0))
mlgssremb.set_footer(text=footer_text, icon_url=author.avatar_url)
mlgssremb.set_image(url=result_10["imageAwake"])
await asyncio.sleep(4.2)
await msg.edit(content=author.mention, embed=mlgssremb)
await asyncio.sleep(3)
await msg.edit(content=author.mention + ' ' + result_10["idol"] + '「' + line + '」', embed=mlgssremb)
await msg.add_reaction('👆')
await msg.add_reaction('⏭')
while True:
target_reaction2, user = await client.wait_for('reaction_add', timeout=timeout)
if target_reaction2.emoji == '👆' and user == author:
if not vc == None and vc.is_playing(): vc.stop()
count += 1
log += '[' + rarity_str[result_10["rarity"]] + ']' + result_10["name"] + ' ' + result_10["idol"] + '\n'
if count == len(result):
if not vc == None:
if not bgm_id == 0:
await botmsg.add_reaction('⏹')
await vc.disconnect()
await msg.clear_reactions()
await msg.delete()
gacha_count = str()
try:
with open('./gacha_count/' + current_ver[langint] + '_' + str(message.author.id) + '.txt', 'r') as f:
gacha_count = f.read()
except:
print(strtimestamp() + '[ERROR]Gacha count read FAILED.')
toLog = client.get_channel(log_id)
footer_text = kind
mlglogemb = discord.Embed(title=_('ガシャ結果'), description=log + '\n' + _('ドリームスター所持数:') + gacha_count)
mlglogemb.set_author(name=author.name, icon_url=author.avatar_url)
mlglogemb.set_footer(text=footer_text)
await toLog.send(embed=mlglogemb)
break
else:
await msg.remove_reaction(target_reaction2.emoji, user)
break
elif target_reaction2.emoji == '⏭' and user == author and len(result) == 10:
for n,box in enumerate(result):
if count > n:
continue
log += '[' + rarity_str[box["rarity"]] + ']' + box["name"] + ' ' + box["idol"] + '\n'
if box["rarity"] >= 2:
ssr_skip.append(box)
ssr_count.append(str(n+1))
if len(ssr_skip) > 0:
for n,result_ssr in enumerate(ssr_skip):
if result_ssr["rarity"] == 3:
player_show = discord.FFmpegPCMAudio('./resources/fes.mp3')
await msg.clear_reactions()
elif result_ssr["rarity"] == 2:
player_show = discord.FFmpegPCMAudio('./resources/ssr.mp3')
await msg.clear_reactions()
desc = rarity_str[result_ssr["rarity"]] + ' ' + result_ssr["name"] + ' ' + result_ssr["idol"]
mlgnormalemb = discord.Embed(title=desc, description='(CV.' + result_ssr["cv"] + ')', colour=int(result_ssr["color"], 0))
footer_text = kind + ' ' + str(ssr_count[n]) + '/' + str(len(result))
mlgnormalemb.set_author(name=author.name, icon_url=author.avatar_url)
mlgnormalemb.set_footer(text=footer_text)
mlgnormalemb.set_image(url=result_ssr["image"])
if not vc == None and vc.is_playing():
vc.stop()
vc.play(player_show)
await msg.edit(content=author.mention, embed=mlgnormalemb)
if not vc == None:
while vc.is_playing():
await asyncio.sleep(1)
vc.play(discord.FFmpegPCMAudio('./resources/ssr_talk.mp3'))
line = result_ssr["ssrText"].replace('ProP', author.name + 'P')
mlgssremb = discord.Embed(title=desc, description='(CV.' + result_ssr["cv"] + ')', colour=int(result_ssr["color"], 0))
mlgssremb.set_footer(text=footer_text, icon_url=author.avatar_url)
mlgssremb.set_image(url=result_ssr["imageAwake"])
await asyncio.sleep(4.2)
await msg.edit(content=author.mention, embed=mlgssremb)
await asyncio.sleep(3)
await msg.edit(content=author.mention + ' ' + result_ssr["idol"] + '「' + line + '」', embed=mlgssremb)
await msg.add_reaction('👆')
while True:
target_reaction2, user = await client.wait_for('reaction_add')
if target_reaction2.emoji == '👆' and user == author:
if not vc == None and vc.is_playing(): vc.stop()
count += 1
await msg.remove_reaction(target_reaction2.emoji, user)
break
if not vc == None:
if not bgm_id == 0:
await botmsg.add_reaction('⏹')
await vc.disconnect()
gacha_count = str()
try:
with open('./gacha_count/' + current_ver[langint] + '_' + str(message.author.id) + '.txt', 'r') as f:
gacha_count = f.read()
except:
print(strtimestamp() + '[ERROR]Gacha count read FAILED.')
count += 10
await msg.delete()
toLog = client.get_channel(log_id)
footer_text = kind
mlglogemb = discord.Embed(title=_('ガシャ結果'), description=log + '\n' + _('ドリームスター所持数:') + gacha_count)
mlglogemb.set_author(name=author.name, icon_url=author.avatar_url)
mlglogemb.set_footer(text=footer_text)
await toLog.send(embed=mlglogemb)
break
print(strtimestamp() + 'MLGacha complete. ' + author.name + '`s result\n' + log)
except TimeoutError:
await msg.delete()
if not vc == None:
await vc.disconnect()
if not bgm_id == 0:
await botmsg.add_reaction('⏹')
await message.channel.send(_('しばらく操作がなかったため、タイムアウトしました。'))
def voicecheck():
    """Return True if the bot currently has an active voice client, else False.

    The original version returned None (falsy, but not a bool) when the first
    voice-client slot was None, and swallowed every exception with a bare
    except. This keeps the same truthiness for callers while always returning
    a proper bool and only catching the expected failure modes.
    """
    try:
        # client.voice_clients is empty when not connected to any voice
        # channel; indexing then raises IndexError.
        return client.voice_clients[0] is not None
    except (IndexError, AttributeError):
        return False
def pickupcheck(langint):
global pickup_id
name = ''
if gacha_mode[langint] == 'party': name = '**```打ち上げガチャ3回目の仕様です。10枚目は期間限定SSRが確定で排出されます。```**\n'
elif gacha_mode[langint] == 'special' or gacha_mode[langint] == 'final': name = '**```以下のカードのみ排出されます。```**\n'
elif gacha_mode[langint] == 'fes': name = '**```ミリオンフェス開催中!!SSR排出率が通常の2倍!```**\n'
if gacha_mode[langint] == 'party' or gacha_mode[langint] == 'type':
print('Pickup idols')
idollist = []
for row in mlg_data[langint]:
idollist.append(row["idol"])
idollist_set = set(idollist)
for row in idollist_set:
| |
for source in self.__getattribute__(dtype).column_data:
# Each datatype might have multiple sources..
# .column_data is a dict in each datatypes DataFrameHandler object
df = self.__getattribute__(dtype).column_data[source]
if not all([item in df.columns for item in mandatory_keys]):
raise exceptions.MissingKeyInData(message=os.path.basename(source))
if any(df.columns.duplicated()):
print('duplicates in data from source {} \n duplicate columns {}'.format(source, df[df.columns.duplicated()]))
raise exceptions.MissingKeyInData(message=os.path.basename(source))
self.all_data = self.all_data.append(df,
ignore_index=True)
if not len(self.all_data):
print('No data available after "merge_all_data"!')
return False
# Save pkl-file for all_data_raw. Updated 20180525 by <NAME>
sld_object = core.SaveLoadDelete(self.export_directory)
sld_object.save_df(self.all_data, file_name='all_data_raw', force_save_txt=True, only_pkl=not save_to_txt)
# pickle.dump(self.all_data, open(self.export_directory + "/all_data_raw.pickle", "wb"))
# if save_to_txt:
# save_data_file(df=self.all_data,
# directory=self.export_directory,
# file_name='all_data.txt')
# Load data again. This way we can treet new and old
#"self.all_data" the same way
self.all_data = pd.DataFrame()
self.load_all_datatxt()
#==========================================================================
def load_datatypetxt(self, datatype, sep='\t', encoding='cp1252'):
"""
loads existing data files for the given datatype from export directory (from pickle if existing, otherwise from txt)
Created: 20180422 by <NAME>
Last modified: 20180422 by <NAME>
"""
# Column data file
try:
file_path = '{}/column_format_{}_data.pickle'.format(self.export_directory, datatype)
# pd_df = pickle.load(open(file_path, "rb"))
# self.add_df(pd_df, data_type) # here data_type is row or col
# TODO: should this really say self.column_data = ? It will then replace anything already in self.column_data with new content.
# self.column_data = pickle.load(open(file_path, "rb"))
self.__getattribute__(datatype).column_data = pickle.load(open(file_path, "rb"))
return True
except (OSError, IOError) as e:
return False
# try:
# file_path = '{}/column_format_{}_data.txt'.format(self.export_directory, datatype)
# self.column_data = load_data_file(file_path)
# except:
# return False
# # Raw data file
# file_path = '{}/raw_format_{}_data.txt'.format(self.export_directory, datatype)
# try:
# self.row_data = load_data_file(file_path)
# except (OSError, IOError) as e:
# return False
#
# return True
#==========================================================================
def load_all_datatxt(self, sep='\t', encoding='cp1252'):
"""
loads existing all_data file from export directory (from pickle if existing, otherwise from txt)
Created: 20180318 by <NAME>
Last modified: 20180525 by <NAME>
"""
def float_convert(x):
try:
return float(x)
except:
# print('float_convert')
return np.nan
def str_convert(x):
x = str(x)
if x == 'nan':
x = ''
return x
# print('self.all_data', len(self.all_data))
if len(self.all_data):
print('self.all_data length', len(self.all_data, 'continue to load all_data'))
# return False, False
else:
sld_object = core.SaveLoadDelete(self.export_directory) # 20180525 by <NAME>
try:
self.all_data = sld_object.load_df('all_data', load_txt=False) # 20180525 by <NAME>
# print()
# with open(self.export_directory + "/all_data.pkl", "rb") as fid:
# self.all_data = pickle.load(fid)
filetype = 'pickle'
print('all_data loaded from pickle')
except (FileNotFoundError, UnboundLocalError) as e:
# UnboundLocalError is for when df was not created in sld_object.load_df()
print('setting up all_data all_data_raw.pkl')
try:
self.all_data = sld_object.load_df('all_data_raw', load_txt=False) # 20180525 by <NAME>
# self.all_data = pickle.load(open(self.export_directory + "/all_data_raw.pickle", "rb"))
except (OSError, IOError) as e:
raise(OSError, IOError, 'Raw data pickle file does not exist! This is created during in "merge_all_data".')
# self.all_data = load_data_file(self.export_directory + '/all_data.txt')
# self.all_data = core.Load().load_txt(self.export_directory + '/all_data.txt', sep=sep, encoding=encoding, fill_nan=u'')
#TODO: better way to say which columns should be converted to float and int?
self.all_data['MONTH'] = self.all_data['SDATE'].apply(lambda x: int(x[5:7]))
self.all_data['YEAR'] = self.all_data['SDATE'].apply(lambda x: int(x[0:4]))
# try:
# self.all_data['MYEAR'] = self.all_data['MYEAR'].astype(int)
# except KeyError:
self.all_data['MYEAR'] = self.all_data['YEAR']
# self.all_data['YEAR'] = self.all_data['SDATE'].apply(lambda x: int(x[0:4])).astype(int)
# TODO: does not work with only datatypes that does not have column DEPH, example zoobenthos
self.all_data['DEPH'] = self.all_data['DEPH'].apply(lambda x: float(x) if x else np.nan)
self.all_data['POSITION'] = self.all_data.apply(lambda x: '{0:.2f}'.format(float_convert(x.LATIT_DD)) + '_' + '{0:.2f}'.format(float_convert(x.LONGI_DD)), axis = 1)
if 'STATN' not in self.all_data.columns:
self.all_data['STATN'] = self.all_data[self.wb_id_header]
statn = self.all_data.STATN.tolist()
pos = self.all_data.POSITION.tolist()
for i, x in enumerate(statn):
if x == "":
statn[i] = pos[i]
# set all station names to uppercase to limit number of synonyms
self.all_data['STATN'] = [s.upper() for s in statn]
if 'MNDEP' not in self.all_data.columns:
self.all_data['MNDEP'] = np.nan
self.all_data['MXDEP'] = np.nan
# MW: Add visit_id
# TODO: in all places where this is used change to use sample_id instead and remove this
self.all_data['visit_id_str'] = self.all_data[self.wb_id_header] + \
self.all_data['POSITION'] + \
self.all_data['SDATE'] + \
self.all_data['STIME']
for col in self.all_data.columns:
if col.startswith('Q_'):
par = col[2:]
self.all_data[par] = self.all_data[par].apply(float_convert)
self.all_data[col] = self.all_data[col].apply(str_convert)
# TODO: send info to user
elif col in ['DIN', 'CPHL_BTL', 'CPHL_SAT','WADEP', 'MNDEP', 'MXDEP']:
self.all_data[col] = self.all_data[col].apply(float_convert)
elif col in self.float_parameters:
self.all_data[col] = self.all_data[col].apply(float_convert)
elif self.wb_id_header == 'VISS_EU_CD' and col == self.wb_id_header:
self.all_data[col] = self.all_data[col].apply(lambda x: 'SE' + x if 'SE' not in x else x)
else:
pass
self.all_data['STIME'] = self.all_data['STIME'].apply(lambda x: x[:5])
# MW 20180716
# TODO: Speed up, problem here areaf ew data with day 00. Maybe find those and exclude and then do pd.to_datetime
try:
self.all_data['date'] = pd.to_datetime(self.all_data['SDATE'])
except ValueError:
remove_index = []
for row_index in self.all_data.index:
try:
pd.to_datetime(self.all_data.iloc[row_index].SDATE)
except ValueError:
#self.all_data.loc[row_index, 'SDATE'] = ''
remove_index.append(row_index)
sld_object = core.SaveLoadDelete(self.export_directory)
sld_object.save_df(self.all_data.iloc[remove_index], 'removed__before_saving_all_data')
self.all_data.drop(remove_index, inplace = True)
self.all_data['date'] = pd.to_datetime(self.all_data['SDATE'])
# MW: Add prioritized salinity
self._add_prioritized_parameter('SALT', 'SALT_BTL', 'SALT_CTD')
# MW: Add prioritized temperature
self._add_prioritized_parameter('TEMP', 'TEMP_BTL', 'TEMP_CTD')
# MW: Add prioritized oxygen
self._add_prioritized_parameter('DOXY', 'DOXY_BTL', 'DOXY_CTD')
if 'CPHL_BTL' in self.all_data.columns:
# MW: Add integrated chlorophyll from CHPL_BTL
self._add_integrated_calc(use_par='CPHL_BTL',
new_par='CPHL_INTEG_CALC',
depth_interval=[0, 10],
exclude_qf=[u'?',u'B',u'S'],
min_nr_values=2)
self._add_waterbody_area_info()
sld_object.save_df(self.all_data, file_name='all_data', force_save_txt=True, only_pkl=False) # 20180525 by <NAME>
filetype = 'txt'
print('all_data loaded from txt and new parameters added')
return True, filetype
#==========================================================================
def _add_prioritized_parameter(self, new_par, primary_par, secondary_par, exclude_qf=['B', 'S']):
"""
Created: 20180413 by <NAME>
Last modified: 20180419 by <NAME>
Adds the parameter <new_par_name> by combining the parameters in args.
The first parameter in args that is not have a quality flag listed in exclude_qf
will be prioritized.
Three columns are added to self.all_data:
<new_par_name>
Q_<new_par_name>
source_<new_par_name>
"""
t0 = time.time()
primary_par_qf = 'Q_' + primary_par
secondary_par_qf = 'Q_' + secondary_par
q_new_par = 'Q_'+new_par
source_new_par = 'source_'+new_par
if not all([True if item in self.all_data.columns else False \
for item in [primary_par, primary_par_qf, secondary_par, secondary_par_qf]]):
if all([True if item in self.all_data.columns else False \
for item in [primary_par, secondary_par]]):
print('both parameters {} and {} in data but no q_flags'.format(primary_par, secondary_par))
elif primary_par in self.all_data.columns and secondary_par not in self.all_data.columns:
self.all_data[new_par] = self.all_data[primary_par].copy()
self.all_data[source_new_par] = primary_par
return True
elif secondary_par in self.all_data.columns and primary_par not in self.all_data.columns:
self.all_data[new_par] = self.all_data[secondary_par].copy()
self.all_data[source_new_par] = secondary_par
return True
else:
return False
self.all_data[new_par] = np.nan
self.all_data[q_new_par] = ''
self.all_data[source_new_par] = ''
# Find where primary is valid
primary_valid = ~pd.isnull(self.all_data[primary_par]) & \
~self.all_data[primary_par_qf].isin(exclude_qf)
# Add where primary is valid
self.all_data.loc[primary_valid, new_par] = self.all_data.loc[primary_valid, primary_par]
self.all_data.loc[primary_valid, q_new_par] = self.all_data.loc[primary_valid, primary_par_qf]
self.all_data.loc[primary_valid, source_new_par] = primary_par
# Find where primary is not valid and secondary is
add_secondary_valid = ~pd.isnull(self.all_data[secondary_par]) & \
~self.all_data[secondary_par_qf].isin(exclude_qf) & \
~primary_valid
# Add where primary is not valid and secondary is
self.all_data.loc[add_secondary_valid, new_par] = self.all_data.loc[add_secondary_valid, secondary_par]
self.all_data.loc[add_secondary_valid, q_new_par] = self.all_data.loc[add_secondary_valid, secondary_par_qf]
self.all_data.loc[add_secondary_valid, source_new_par] = secondary_par
print('time for _add_prioritized_parameter {} is: {}'.format(new_par, time.time()-t0))
def _add_waterbody_area_info(self):
pass
# This is done in DataFrameHandler, but why not here?
#TODO:
# add if MS_CD, VISS_EU_CD; not in df.columns add them from vfk-kod kolumn
# wb_id_list = self.all_data[self.wb_id_header].tolist()
# # wd_id = self.mapping_objects['water_body'].get_waterdistrictcode_for_water_body(wb_id_list[0])
# if 'WATER_DISTRICT_CODE' not in self.all_data:
# new_list = []
# for wb_id in wb_id_list:
# wd_id = self.mapping_objects['water_body'].get_waterdistrictcode_for_water_body(wb_id)
# new_list.append(wd_id)
# self.all_data['WATER_DISTRICT_CODE'] = new_list
# if 'WATER_DISTRICT_NAME' not in self.all_data:
# new_list = []
# for wb_id in wb_id_list:
# wd_name = self.mapping_objects['water_body'].get_waterdistrictname_for_water_body(wb_id)
# new_list.append(wd_name)
# self.all_data['WATER_DISTRICT_NAME'] = new_list
# if 'WATER_TYPE_AREA' not in self.all_data:
# new_list = []
# for wb_id in wb_id_list:
# type_name = self.mapping_objects['water_body'].get_type_area_name_for_water_body(wb_id)
# new_list.append(type_name)
# self.all_data['WATER_TYPE_AREA'] = new_list
# if 'WATER_BODY_NAME' not in self.all_data:
# new_list = []
# for wb_id in wb_id_list:
# wb_name = self.mapping_objects['water_body'].get_name_for_water_body(wb_id)
# new_list.append(wb_name)
# self.all_data['WATER_BODY_NAME'] = new_list
#===========================================================================
def get_exclude_index_array(self, df):
"""
Created: 20180423 by <NAME>
Last modified: 20180423 by <NAME>
"""
exclude_list = []
for col in df.columns:
if 'Q_' in col:
exclude_list.append(col[2:])
exclude_list.append(col)
elif 'source' in col:
exclude_list.append(col)
elif 'DIN' in col:
exclude_list.append(col)
elif 'DEPH' in col:
exclude_list.append(col)
exclude_index_list = [True if par in exclude_list else False for par in df.columns]
return np.array(exclude_index_list)
#===========================================================================
def _add_integrated_calc(self,
use_par=None, | |
Shuts down the server
:param server_id: Id of the server to be shut down
:type: ``str``
:param method: Method of shutting down "SOFTWARE" or "HARDWARE"
:return: Instance of class ``Node``
:rtype: :class: `Node`
"""
shutdown_body = {
'action': 'POWER_OFF',
'method': method
}
response = self.connection.request(
action='servers/%s/status/action' % (server_id),
data=shutdown_body,
method='PUT',
)
return self._to_node(response.object)
"""
Image operations
"""
def ex_get_server_image(self, server_id):
"""
Gets server image
:param server_id: Id of the server
:type: ``str``
:return: Server image
:rtype: ``dict``
"""
response = self.connection.request(
action='servers/%s/image' % server_id,
method='GET'
)
return response.object
def ex_reinstall_server_image(self, server_id, image_id, password=None):
"""
Installs a new image on the server
:param server_id: Id of the server
:type: ``str``
:param image_id: Id of the image (Server Appliance)
:type: ``str``
:param password: <PASSWORD> for the server
:return: Instance of class ``Node``
:rtype: :class: `Node`
"""
body = {
'id': image_id,
}
if password is not None:
body['password'] = password
response = self.connection.request(
action='servers/%s/image' % server_id,
data=body,
method='PUT'
)
return self._to_node(response.object)
"""
Server IP operations
"""
def ex_list_server_ips(self, server_id):
"""
Gets all server IP objects
:param server_id: Id of the server
:type: ``str``
:return: List of server IP objects
:rtype: ``list`` of ``dict``
"""
response = self.connection.request(
action='servers/%s/ips' % server_id,
method='GET'
)
return response.object
def ex_get_server_ip(self, server_id, ip_id):
"""
Get a single server IP object
:param server_id: Id of the server
:type: ``str``
:param ip_id: ID of the IP address
:type: ``str``
:return: IP address object
:rtype: ``dict``
"""
response = self.connection.request(
action='servers/%s/ips/%s' % (server_id, ip_id),
method='GET'
)
return response.object
def ex_assign_server_ip(self, server_id, ip_type):
"""
Assigns a new IP address to the server
:param server_id: Id of the server
:type: ``str``
:param ip_type: Type of the IP address [IPV4,IPV6]
:type: ``str``
:return: ``Node`` instance
:rtype: ``Node``
"""
body = {
'type': ip_type
}
response = self.connection.request(
action='servers/%s/ips' % server_id,
data=body,
method='POST'
)
return self._to_node(response.object)
def ex_remove_server_ip(self, server_id, ip_id, keep_ip=None):
"""
Removes an IP address from the server
:param server_id: Id of the server
:type: ``str``
:param ip_id: ID of the IP address
:type: ``str``
:param keep_ip: Indicates whether IP address will be removed from
the Cloud Panel
:type: ``boolean``
:return: ``Node`` instance
:rtype: ``Node``
"""
body = {}
if keep_ip is not None:
body['keep_ip'] = keep_ip
response = self.connection.request(
action='servers/%s/ips/%s' % (server_id, ip_id),
data=body,
method='DELETE'
)
return self._to_node(response.object)
def ex_get_server_firewall_policies(self, server_id, ip_id):
"""
Gets a firewall policy of attached to the server's IP
:param server_id: Id of the server
:type: ``str``
:param ip_id: ID of the IP address
:type: ``str``
:return: IP address object
:rtype: ``dict``
"""
response = self.connection.request(
action='/servers/%s/ips/%s/firewall_policy' % (server_id, ip_id),
method='GET'
)
return response.object
def ex_add_server_firewall_policy(self, server_id, ip_id, firewall_id):
"""
Adds a firewall policy to the server's IP address
:param server_id: Id of the server
:type: ``str``
:param ip_id: ID of the IP address
:type: ``str``
:param firewall_id: ID of the firewall policy
:type: ``str``
:return: ``Node`` instance
:rtype: ``Node``
"""
body = {
'id': firewall_id
}
response = self.connection.request(
action='/servers/%s/ips/%s/firewall_policy' % (server_id, ip_id),
data=body,
method='POST'
)
return self._to_node(response.object)
"""
Firewall Policy operations
"""
def ex_create_firewall_policy(self, name, rules, description=None):
"""
Creates a firewall Policy.
:param name:
:param description:
:param rules:
:rtype: `dict`
:return: `dict` firewall policy
"""
body = {
'name': name
}
if description is not None:
body['description'] = description
if len(rules) == 0:
raise ValueError(
'At least one firewall rule is required.'
)
else:
body['rules'] = rules
response = self.connection.request(
action='firewall_policies',
data=body,
method='POST',
)
return response.object
def ex_list_firewall_policies(self):
""""
List firewall policies
:return: 'dict'
"""
response = self.connection.request(
action='firewall_policies',
method='GET'
)
return response.object
def ex_get_firewall_policy(self, fw_id):
"""
Gets firewall policy
:param fw_id: ID of the firewall policy
:return: 'dict'
"""
response = self.connection.request(
action='firewall_policy/%s' % fw_id,
method='GET'
)
return response.object
def ex_delete_firewall_policy(self, fw_id):
"""
Deletes firewall policy
:param fw_id: ID of the Firewall
:return: 'dict'
"""
response = self.connection.request(
action='firewall_policy/%s' % fw_id,
method='DELETE'
)
return response.object
"""
Shared storage operations
"""
def ex_list_shared_storages(self):
"""
List of shared storages
:return: 'dict'
"""
response = self.connection.request(
action='shared_storages',
method='GET'
)
return response.object
def ex_get_shared_storage(self, storage_id):
"""
Gets a shared storage
:return: 'dict'
"""
response = self.connection.request(
action='shared_storages/%s' % (storage_id),
method='GET'
)
return response.object
def ex_create_shared_storage(self, name, size, datacenter_id=None,
description=None):
"""
Creates a shared storage
:param name: Name of the storage
:param size: Size of the storage
:param datacenter_id: datacenter where storage should be created
:param description: description ot the storage
:return: 'dict'
"""
body = {
'name': name,
'size': size,
'datacenter_id': datacenter_id
}
if description is not None:
body['description'] = description
response = self.connection.request(
action='shared_storages',
data=body,
method='POST'
)
return response.object
def ex_delete_shared_storage(self, storage_id):
"""
Removes a shared storage
:param storage_id: Id of the shared storage
:type: ``str``
:return: Instnace of shared storage
:rtype: ``list`` of ``dict``
"""
response = self.connection.request(
action='shared_storages/%s' % storage_id,
method='DELETE'
)
return response.object
def ex_attach_server_to_shared_storage(self, storage_id,
server_id, rights):
"""
Attaches a single server to a shared storage
:param storage_id: Id of the shared storage
:param server_id: Id of the server to be attached to the shared storage
:param rights:
:return:
:rtype: 'dict'
"""
body = {
'severs': [
{
'id': server_id,
'rights': rights
}
]
}
response = self.connection.request(
action='shared_storages/%s/servers' % storage_id,
data=body,
method='POST'
)
return response.object
def ex_get_shared_storage_server(self, storage_id, server_id):
"""
Gets a shared storage's server
:param storage_id:
:param server_id:
:return:
"""
response = self.connection.request(
action='shared_storages/%s/servers/%s' % (storage_id, server_id),
)
return response.object
def ex_detach_server_from_shared_storage(self, storage_id,
server_id):
"""
Detaches a server from shared storage
:param storage_id: Id of the shared storage
:type: ``str``
:param server_id: Id of the server
:type: ``str``
:return: Instance of shared storage
:rtype: ``dict``
"""
response = self.connection.request(
action='shared_storages/%s/servers/%s' % (storage_id, server_id),
method='DELETE'
)
return response.object
"""
Load Balancers operations
"""
def ex_create_load_balancer(self, name, method, rules,
persistence=None,
persistence_time=None,
health_check_test=None,
health_check_interval=None,
health_check_path=None,
health_check_parser=None,
datacenter_id=None,
description=None):
"""
:param name: Name of the load balancer
:param method: Load balancer method
:param rules: Load balancer rules
:type rules: ``list`` of ``dict``
:param persistence: Indictes if persistance is set
:type persistence: ``boolean``
:param persistence_time: Persistance time
:type persistence_time: ``int``
:param health_check_test: Type of test
:type health_check_test:``str``
:param health_check_interval: Interval of the check
:param health_check_path: Path
:type health_check_path: ``str``
:param health_check_parser: Parser
:type health_check_parser:``str``
:param datacenter_id: Data center id
:type datacenter_id:``str``
:param description: Description of load balancer
:type description:``str``
:return: ``dict``
"""
body = {
'name': name,
'method': method,
}
body['rules'] = []
body['rules'] = rules
if persistence is not None:
body['persistence'] = persistence
if persistence_time is not None:
body['persistence_time'] = persistence_time
if health_check_test is not None:
body['health_check_test'] = health_check_test
if health_check_interval is not None:
body['health_check_interval'] = health_check_interval
if health_check_path is not None:
body['health_check_path'] = health_check_path
if health_check_parser is not None:
body['health_check_parser'] = health_check_parser
if datacenter_id is not None:
body['datacenter_id'] = datacenter_id
if description is not None:
body['description'] = description
response = self.connection.request(
action='load_balancers',
data=body,
method='POST'
)
return response.object
def ex_update_load_balancer(self, lb_id, name=None, description=None,
health_check_test=None,
health_check_interval=None,
persistence=None,
persistence_time=None,
method=None):
body = {}
if name is not None:
body['name'] = name
if description is not None:
body['description'] = description
if health_check_test is not None:
body['health_check_test'] = health_check_test
if health_check_interval is not None:
body['health_check_interval'] = health_check_interval
if persistence is not None:
body['persistence'] = persistence
if persistence_time is not None:
body['persistence_time'] = persistence_time
if method is not None:
body['method'] = method
response = self.connection.request(
action='load_balancers/%s' % lb_id,
data=body,
method='PUT'
)
return response.object
def ex_add_servers_to_load_balancer(self, lb_id, server_ips=[]):
"""
Adds server's IP address to load balancer
:param lb_id: Load balancer ID
:type: ``str``
:param server_ips: Array of server IP IDs
:type: ``list`` of ``str``
:return: Instance of load balancer
:rtype: ``dict``
"""
body = {
'server_ips': server_ips,
}
response = self.connection.request(
action='load_balancers/%s/server_ips' % lb_id,
data=body,
method='POST'
)
return response.object
def ex_remove_server_from_load_balancer(self, lb_id, server_ip):
"""
Removes server's IP from load balancer
:param lb_id: Load balancer ID
:type: ``str``
:param server_ip: ID of the server IP
:type: ``str``
:return: Instance of load balancer
:rtype: ``dict``
"""
response = self.connection.request(
action='/load_balancers/%s/server_ips/%s' % (lb_id, server_ip),
method='DELETE'
| |
'request': {
'$ref': (_DESCRIPTOR_PATH_PREFIX +
'ApiConfigTestPutRequest')
}
},
'MyService.entries_roundtrip': {
'request': {
'$ref': (_DESCRIPTOR_PATH_PREFIX +
'ApiConfigTestAllFields')
},
'response': {
'$ref': (_DESCRIPTOR_PATH_PREFIX +
'ApiConfigTestAllFields')
}
},
'MyService.items_put': {
'request': {
'$ref': (_DESCRIPTOR_PATH_PREFIX +
'ApiConfigTestItemsPutRequest')
}
},
'MyService.items_put_container': {
'request': {
'$ref': (_DESCRIPTOR_PATH_PREFIX +
'ApiConfigTestItemsPutRequestForContainer')
}
}
},
'schemas': {
_DESCRIPTOR_PATH_PREFIX + 'ApiConfigTestAllFields': {
'description': 'Contains all field types.',
'id': _DESCRIPTOR_PATH_PREFIX + 'ApiConfigTestAllFields',
'properties': {
'bool_value': {
'type': 'boolean'
},
'bytes_value': {
'type': 'string',
'format': 'byte'
},
'double_value': {
'format': 'double',
'type': 'number'
},
'enum_value': {
'type': 'string',
'enum': ['VAL1', 'VAL2']
},
'float_value': {
'format': 'float',
'type': 'number'
},
'int32_value': {
'format': 'int32',
'type': 'integer'
},
'int64_value': {
'format': 'int64',
'type': 'string'
},
'string_value': {
'type': 'string'
},
'uint32_value': {
'format': 'uint32',
'type': 'integer'
},
'uint64_value': {
'format': 'uint64',
'type': 'string'
},
'sint32_value': {
'format': 'int32',
'type': 'integer'
},
'sint64_value': {
'format': 'int64',
'type': 'string'
},
'message_field_value': {
'$ref': (_DESCRIPTOR_PATH_PREFIX +
'ApiConfigTestNested'),
'description': ('Message class to be used in a '
'message field.'),
},
'datetime_value': {
'format': 'date-time',
'type': 'string'
},
},
'type': 'object'
},
_DESCRIPTOR_PATH_PREFIX + 'ApiConfigTestEntryPublishRequest': {
'description': ('Message with two required params, '
'one in path, one in body.'),
'id': (_DESCRIPTOR_PATH_PREFIX +
'ApiConfigTestEntryPublishRequest'),
'properties': {
'entryId': {
'required': True,
'type': 'string'
},
'title': {
'required': True,
'type': 'string'
}
},
'type': 'object'
},
(_DESCRIPTOR_PATH_PREFIX +
'ApiConfigTestEntryPublishRequestForContainer'): {
'description': ('Message with two required params, '
'one in path, one in body.'),
'id': (_DESCRIPTOR_PATH_PREFIX +
'ApiConfigTestEntryPublishRequestForContainer'),
'properties': {
'title': {
'required': True,
'type': 'string'
}
},
'type': 'object'
},
_DESCRIPTOR_PATH_PREFIX + 'ApiConfigTestItemsPutRequest': {
'description': 'Message with path params and a body field.',
'id': (_DESCRIPTOR_PATH_PREFIX +
'ApiConfigTestItemsPutRequest'),
'properties': {
'body': {
'$ref': (_DESCRIPTOR_PATH_PREFIX +
'ApiConfigTestAllFields'),
'description': 'Contains all field types.'
},
'entryId': {
'required': True,
'type': 'string'
}
},
'type': 'object'
},
(_DESCRIPTOR_PATH_PREFIX +
'ApiConfigTestItemsPutRequestForContainer'): {
'description': 'Message with path params and a body field.',
'id': (_DESCRIPTOR_PATH_PREFIX +
'ApiConfigTestItemsPutRequestForContainer'),
'properties': {
'body': {
'$ref': (_DESCRIPTOR_PATH_PREFIX +
'ApiConfigTestAllFields'),
'description': 'Contains all field types.'
},
},
'type': 'object'
},
_DESCRIPTOR_PATH_PREFIX + 'ApiConfigTestNested': {
'description': 'Message class to be used in a message field.',
'id': _DESCRIPTOR_PATH_PREFIX + 'ApiConfigTestNested',
'properties': {
'int_value': {
'format': 'int64',
'type': 'string'
},
'string_value': {
'type': 'string'
}
},
'type': 'object'
},
_DESCRIPTOR_PATH_PREFIX + 'ApiConfigTestPutRequest': {
'description': 'Message with just a body field.',
'id': _DESCRIPTOR_PATH_PREFIX + 'ApiConfigTestPutRequest',
'properties': {
'body': {
'$ref': (_DESCRIPTOR_PATH_PREFIX +
'ApiConfigTestAllFields'),
'description': 'Contains all field types.'
}
},
'type': 'object'
},
'ProtorpcMessageTypesVoidMessage': {
'description': 'Empty message.',
'id': 'ProtorpcMessageTypesVoidMessage',
'properties': {},
'type': 'object'
}
}
}
expected_adapter = {
'bns': 'https://example.appspot.com/_ah/api',
'type': 'lily',
'deadline': 10.0}
test_util.AssertDictEqual(expected, api['methods'], self)
test_util.AssertDictEqual(expected_descriptor, api['descriptor'], self)
test_util.AssertDictEqual(expected_adapter, api['adapter'], self)
self.assertEqual('Describes MyService.', api['description'])
methods = api['descriptor']['methods']
self.assertTrue('MyService.entries_get' in methods)
self.assertTrue('MyService.entries_put' in methods)
self.assertTrue('MyService.entries_process' in methods)
self.assertTrue('MyService.entries_nested_collection_action' in methods)
def testEmptyRequestNonEmptyResponse(self):
    """A void request serializes as body 'empty'; a message response as autoTemplate."""
    class MyResponse(messages.Message):
        bool_value = messages.BooleanField(1)
        int32_value = messages.IntegerField(2)

    @api_config.api(name='root', version='v1', hostname='example.appspot.com')
    class MySimpleService(remote.Service):

        @api_config.method(message_types.VoidMessage, MyResponse,
                           name='entries.get')
        def entries_get(self, request):
            pass

    api = json.loads(self.generator.pretty_print_config_to_json(
        MySimpleService))
    method_config = api['methods']['root.entries.get']
    # Response first (mirrors the original assertion order), then request.
    test_util.AssertDictEqual(
        {'body': 'autoTemplate(backendResponse)', 'bodyName': 'resource'},
        method_config['response'], self)
    test_util.AssertDictEqual(
        {'body': 'empty'}, method_config['request'], self)
def testEmptyService(self):
    """A service exposing no remote methods yields a config with no 'methods' key."""
    @api_config.api('root', 'v1', hostname='example.appspot.com')
    class EmptyService(remote.Service):
        pass

    config = json.loads(self.generator.pretty_print_config_to_json(EmptyService))
    self.assertNotIn('methods', config)
def testOptionalProperties(self):
    """Verify that optional config properties show up if they're supposed to."""
    optional_props = (
        ('canonical_name', 'canonicalName', 'Test Canonical Name'),
        ('owner_domain', 'ownerDomain', 'google.com'),
        ('owner_name', 'ownerName', 'Google'),
        ('package_path', 'packagePath', 'cloud/platform'),
        ('title', 'title', 'My Root API'),
        ('documentation', 'documentation', 'http://link.to/docs'))
    # Every non-empty subset of the optional properties must round-trip
    # into the generated config under its camelCase name.
    for size in range(1, len(optional_props) + 1):
        for subset in itertools.combinations(optional_props, size):
            api_kwargs = {name: value for name, _, value in subset}

            @api_config.api('root', 'v1', **api_kwargs)
            class MyService(remote.Service):
                pass

            config = json.loads(
                self.generator.pretty_print_config_to_json(MyService))
            for _, json_name, value in subset:
                self.assertEqual(config[json_name], value)
    # Properties that were never set must be absent from the config.
    for _, json_name, _ in optional_props:

        @api_config.api('root2', 'v2')
        class EmptyService2(remote.Service):
            pass

        config = json.loads(self.generator.pretty_print_config_to_json(
            EmptyService2))
        self.assertNotIn(json_name, config)
def testAuth(self):
    """Verify that auth shows up in the config if it's supposed to.

    Empty ApiAuth fixtures (no fields set, or an empty blocked_regions
    list) must be omitted from the config; populated ones are rendered
    under camelCase keys.
    """
    empty_auth = api_config.ApiAuth()
    used_auth = api_config.ApiAuth(allow_cookie_auth=False)
    cookie_auth = api_config.ApiAuth(allow_cookie_auth=True)
    empty_blocked_regions = api_config.ApiAuth(blocked_regions=[])
    one_blocked = api_config.ApiAuth(blocked_regions=['us'])
    many_blocked = api_config.ApiAuth(blocked_regions=['CU', 'IR', 'KP', 'SD',
                                                       'SY', 'MM'])
    mixed = api_config.ApiAuth(allow_cookie_auth=True,
                               blocked_regions=['US', 'IR'])
    # Pairs of (auth fixture, expected 'auth' section); None means the
    # 'auth' key must be entirely absent from the generated config.
    for auth, expected_result in ((None, None),
                                  (empty_auth, None),
                                  (used_auth, {'allowCookieAuth': False}),
                                  (cookie_auth, {'allowCookieAuth': True}),
                                  (empty_blocked_regions, None),
                                  (one_blocked, {'blockedRegions': ['us']}),
                                  (many_blocked, {'blockedRegions':
                                                  ['CU', 'IR', 'KP', 'SD',
                                                   'SY', 'MM']}),
                                  (mixed, {'allowCookieAuth': True,
                                           'blockedRegions': ['US', 'IR']})):

        @api_config.api('root', 'v1', auth=auth)
        class EmptyService(remote.Service):
            pass

        api = json.loads(self.generator.pretty_print_config_to_json(EmptyService))
        if expected_result is None:
            self.assertNotIn('auth', api)
        else:
            self.assertEqual(api['auth'], expected_result)
def testFrontEndLimits(self):
    """Verify that frontendLimits info in the API is written to the config."""
    # Three rules: fully populated, partially populated (no 'daily'), and
    # completely empty — the empty one must be dropped from the output.
    rules = [
        api_config.ApiFrontEndLimitRule(match='foo', qps=234, user_qps=567,
                                        daily=8910, analytics_id='asdf'),
        api_config.ApiFrontEndLimitRule(match='bar', qps=0, user_qps=0,
                                        analytics_id='sdf1'),
        api_config.ApiFrontEndLimitRule()]
    frontend_limits = api_config.ApiFrontEndLimits(unregistered_user_qps=123,
                                                   unregistered_qps=456,
                                                   unregistered_daily=789,
                                                   rules=rules)

    @api_config.api('root', 'v1', frontend_limits=frontend_limits)
    class EmptyService(remote.Service):
        pass

    api = json.loads(self.generator.pretty_print_config_to_json(EmptyService))
    self.assertIn('frontendLimits', api)
    self.assertEqual(123, api['frontendLimits'].get('unregisteredUserQps'))
    self.assertEqual(456, api['frontendLimits'].get('unregisteredQps'))
    self.assertEqual(789, api['frontendLimits'].get('unregisteredDaily'))
    # Only the two non-empty rules survive serialization.
    self.assertEqual(2, len(api['frontendLimits'].get('rules')))
    self.assertEqual('foo', api['frontendLimits']['rules'][0]['match'])
    self.assertEqual(234, api['frontendLimits']['rules'][0]['qps'])
    self.assertEqual(567, api['frontendLimits']['rules'][0]['userQps'])
    self.assertEqual(8910, api['frontendLimits']['rules'][0]['daily'])
    self.assertEqual('asdf', api['frontendLimits']['rules'][0]['analyticsId'])
    self.assertEqual('bar', api['frontendLimits']['rules'][1]['match'])
    # qps/user_qps were explicitly 0 so they are emitted; 'daily' was unset.
    self.assertEqual(0, api['frontendLimits']['rules'][1]['qps'])
    self.assertEqual(0, api['frontendLimits']['rules'][1]['userQps'])
    self.assertNotIn('daily', api['frontendLimits']['rules'][1])
    self.assertEqual('sdf1', api['frontendLimits']['rules'][1]['analyticsId'])
def testAllCombinationsRepeatedRequiredDefault(self):
    """Required/defaulted/repeated/enum fields all surface as URL parameters.

    Exercises one message (and an equivalent ResourceContainer built from the
    same fields) on GET methods and checks the generated parameter metadata
    for both the message-based and container-based variants.
    """
    # TODO(kdeus): When the backwards compatibility for non-ResourceContainer
    # parameters requests is removed, this class and the
    # accompanying method should be removed.
    class AllCombinations(messages.Message):
        """Documentation for AllCombinations."""
        string = messages.StringField(1)
        string_required = messages.StringField(2, required=True)
        string_default_required = messages.StringField(3, required=True,
                                                       default='Foo')
        string_repeated = messages.StringField(4, repeated=True)
        enum_value = messages.EnumField(SimpleEnum, 5, default=SimpleEnum.VAL2)

    # ResourceContainer carrying the exact same field set as the message.
    all_combinations_container = resource_container.ResourceContainer(
        **{field.name: field for field in AllCombinations.all_fields()})

    @api_config.api('root', 'v1', hostname='example.appspot.com')
    class MySimpleService(remote.Service):

        @api_config.method(AllCombinations, message_types.VoidMessage,
                           path='foo', http_method='GET')
        def get(self, unused_request):
            return message_types.VoidMessage()

        @api_config.method(all_combinations_container, message_types.VoidMessage,
                           name='getContainer',
                           path='bar', http_method='GET')
        def get_container(self, unused_request):
            return message_types.VoidMessage()

    api = json.loads(self.generator.pretty_print_config_to_json(
        MySimpleService))
    # Expected config for the message-based method. Note that only the
    # required fields appear in parameterOrder, and the enum default is
    # rendered by name ('VAL2').
    get_config = {
        'httpMethod': 'GET',
        'path': 'foo',
        'request': {
            'body': 'empty',
            'parameterOrder': [
                'string_required',
                'string_default_required',
            ],
            'parameters': {
                'enum_value': {
                    'default': 'VAL2',
                    'type': 'string',
                    'enum': {
                        'VAL1': {
                            'backendValue': 'VAL1',
                        },
                        'VAL2': {
                            'backendValue': 'VAL2',
                        },
                    },
                },
                'string': {
                    'type': 'string',
                },
                'string_default_required': {
                    'default': 'Foo',
                    'required': True,
                    'type': 'string',
                },
                'string_repeated': {
                    'type': 'string',
                    'repeated': True,
                },
                'string_required': {
                    'required': True,
                    'type': 'string',
                },
            },
        },
        'response': {
            'body': 'empty',
        },
        'rosyMethod': 'MySimpleService.get',
        'scopes': ['https://www.googleapis.com/auth/userinfo.email'],
        'clientIds': [API_EXPLORER_CLIENT_ID],
        'authLevel': 'NONE',
        'useRequestUri': False,
    }
    # The container-based variant differs only in path and backend method.
    get_container_config = get_config.copy()
    get_container_config['path'] = 'bar'
    get_container_config['rosyMethod'] = 'MySimpleService.get_container'
    expected = {
        'root.get': get_config,
        'root.getContainer': get_container_config
    }
    test_util.AssertDictEqual(expected, api['methods'], self)
def testMultipleClassesSingleApi(self):
    """Test an API that's split into multiple classes.

    One @api decorator instance is shared via api_class() across three
    service classes; the combined config must carry both methods and one
    schema per referenced response message.
    """
    root_api = api_config.api('root', '1.5.6', hostname='example.appspot.com')

    # First class has a request that reads some arguments.
    class Response1(messages.Message):
        string_value = messages.StringField(1)

    @root_api.api_class(resource_name='request')
    class RequestService(remote.Service):

        @api_config.method(message_types.VoidMessage, Response1,
                           path='request_path', http_method='GET')
        def my_request(self, unused_request):
            pass

    # Second class, no methods.
    @root_api.api_class(resource_name='empty')
    class EmptyService(remote.Service):
        pass

    # Third class (& data), one method that returns a response.
    class Response2(messages.Message):
        bool_value = messages.BooleanField(1)
        int32_value = messages.IntegerField(2)

    @root_api.api_class(resource_name='simple')
    class MySimpleService(remote.Service):

        @api_config.method(message_types.VoidMessage, Response2,
                           name='entries.get', path='entries')
        def EntriesGet(self, request):
            pass

    # Make sure api info is the same for all classes and all the _ApiInfo
    # properties are accessible.
    for cls in (RequestService, EmptyService, MySimpleService):
        self.assertEqual(cls.api_info.name, 'root')
        self.assertEqual(cls.api_info.api_version, '1.5.6')
        self.assertEqual(cls.api_info.path_version, 'v1')
        self.assertEqual(cls.api_info.hostname, 'example.appspot.com')
        self.assertIsNone(cls.api_info.audiences)
        self.assertEqual(cls.api_info.allowed_client_ids,
                         [API_EXPLORER_CLIENT_ID])
        self.assertEqual(cls.api_info.scopes, [api_config.EMAIL_SCOPE])

    # Get the config for the combination of all 3.
    api = json.loads(self.generator.pretty_print_config_to_json(
        [RequestService, EmptyService, MySimpleService]))
    # Method keys are namespaced by resource_name; rosyMethod carries the
    # backing class and Python method names. The http_method defaults to
    # POST when not given (entries.get below).
    expected = {
        'root.request.my_request': {
            'httpMethod': 'GET',
            'path': 'request_path',
            'useRequestUri': False,
            'request': {'body': 'empty'},
            'response': {
                'body': 'autoTemplate(backendResponse)',
                'bodyName': 'resource'},
            'rosyMethod': 'RequestService.my_request',
            'clientIds': ['292824132082.apps.googleusercontent.com'],
            'scopes': ['https://www.googleapis.com/auth/userinfo.email'],
            'authLevel': 'NONE',
        },
        'root.simple.entries.get': {
            'httpMethod': 'POST',
            'path': 'entries',
            'useRequestUri': False,
            'request': {'body': 'empty'},
            'response': {
                'body': 'autoTemplate(backendResponse)',
                'bodyName': 'resource'},
            'rosyMethod': 'MySimpleService.EntriesGet',
            'clientIds': ['292824132082.apps.googleusercontent.com'],
            'scopes': ['https://www.googleapis.com/auth/userinfo.email'],
            'authLevel': 'NONE',
        },
    }
    test_util.AssertDictEqual(expected, api['methods'], self)
    # Schema ids/keys are the message names prefixed by
    # _DESCRIPTOR_PATH_PREFIX; int32 fields serialize as int64 strings.
    expected_descriptor = {
        'methods': {
            'MySimpleService.EntriesGet': {
                'response': {
                    '$ref': (_DESCRIPTOR_PATH_PREFIX +
                             'ApiConfigTestResponse2')
                }
            },
            'RequestService.my_request': {
                'response': {
                    '$ref': (_DESCRIPTOR_PATH_PREFIX +
                             'ApiConfigTestResponse1')
                }
            }
        },
        'schemas': {
            _DESCRIPTOR_PATH_PREFIX + 'ApiConfigTestResponse1': {
                'id': _DESCRIPTOR_PATH_PREFIX + 'ApiConfigTestResponse1',
                'properties': {
                    'string_value': {
                        'type': 'string'
                    }
                },
                'type': 'object'
            },
            _DESCRIPTOR_PATH_PREFIX + 'ApiConfigTestResponse2': {
                'id': _DESCRIPTOR_PATH_PREFIX + 'ApiConfigTestResponse2',
                'properties': {
                    'bool_value': {
                        'type': 'boolean'
                    },
                    'int32_value': {
                        'format': 'int64',
                        'type': 'string'
                    }
                },
                'type': 'object'
            }
        }
    }
    test_util.AssertDictEqual(expected_descriptor, api['descriptor'], self)
def testMultipleClassesDifferentDecoratorInstance(self):
"""Test that using different instances of @api fails."""
root_api1 = api_config.api('root', 'v1', hostname='example.appspot.com')
root_api2 = api_config.api('root', 'v1', hostname='example.appspot.com')
@root_api1.api_class()
class EmptyService1(remote.Service):
pass
@root_api2.api_class()
class EmptyService2(remote.Service):
pass
self.assertRaises(api_exceptions.ApiConfigurationError,
| |
# -o--
"""
MOSMusic.py (module)
Initial focus: Generate MIDI scale sequences.
PUBLIC ENUMS--
. Key
. MIDINote
. Direction
. ModeNames
PUBLIC DICTIONARIES--
. Modes
. ModesAdditional
. Ornaments
HELPER FUNCTIONS--
. subdivisionPerBeat()
. findScaleForMode()
GENERATOR FUNCTIONS--
. generateScaleSequence()
. generateOrnament()
. generateOrnamentReset()
PROTECTED FUNCTIONS--
. _discoverMIDINoteScaleIndexInMode()
. _translateOrnamentScaleToMIDI()
"""
#---------------------------------------------------------------------
# Copyright (C) <NAME> 2021. <EMAIL>
# Distributed under the Boost Software License, Version 1.0.
# (See ./LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)
#---------------------------------------------------------------------
# Module release version string.
version :str = "0.1" #RELEASE
#----------------------------------------- -o--
# Modules.
from enum import IntEnum
import random
from typing import Any, Dict, List, Union
#
import MOSLog
log = MOSLog.MOSLog(logTime=False, logDate=False)
#import MOSDump as dump
import MOSZ as z
#----------------------------------------- -o--
# Globals.
# Seconds per minute — used to convert BPM into seconds per subdivision.
SECONDS_PER_MINUTE :float = 60 #CONSTANT
# Semitones per octave in MIDI note numbering.
NOTES_PER_OCTAVE :int = 12 #CONSTANT
#----------------------------------------- -o--
# Enum and Dict types to capture well-known constants.
# -o-
class Key(IntEnum):
    """
    Key offset starting with C=0.
    Enum keys "Kx" where x is "s" for sharp, "f" for flat.

    NB Enharmonic spellings share integer values, so members defined later
       with a duplicate value become aliases of the first member with that
       value (standard enum behavior) — e.g. Df is an alias of Cs.
    """
    C = 0
    Cs = 1
    Cf = 11     # C-flat == B
    D = 2
    Ds = 3
    Df = 1      # alias of Cs
    E = 4
    Es = 5      # E-sharp == F
    Ef = 3      # alias of Ds
    F = 5       # NOTE(review): Es=5 is defined first, so Key.F.name == 'Es';
                # harmless if only values are used — confirm.
    Fs = 6
    Ff = 4      # F-flat == E (alias of E)
    G = 7
    Gs = 8
    Gf = 6      # alias of Fs
    A = 9
    As = 10
    Af = 8      # alias of Gs
    B = 11
    Bs = 0      # B-sharp == C (alias of C)
    Bf = 10     # alias of As
# -o-
class MIDINote(IntEnum):
    """
    Common key indices.
    MIDI note number of the C in each octave register (C4 = 60, which is
    middle C in the common MIDI convention). Consecutive members are
    NOTES_PER_OCTAVE (12) apart; C1..C8 also bound the range accepted by
    _discoverMIDINoteScaleIndexInMode().
    """
    C1 = 24
    C2 = 36
    C3 = 48
    C4 = 60
    C5 = 72
    C6 = 84
    C7 = 96
    C8 = 108
# -o-
class Direction(IntEnum):
    """
    Up, down, up/down, down/up.
    The sign encodes the initial direction of travel; magnitude 2 marks
    the ping-pong variants (see generateScaleSequence()).
    """
    UP = 1
    DOWN = -1
    UPDOWN = 2 #XXX
    DOWNUP = -2
#
class ModeNames(IntEnum):
    """
    Named scales/modes resolvable via findScaleForMode().
    Values 1-7 are the seven diatonic modes (keys of Modes); 10+ index
    the extra scales in ModesAdditional. The numeric gaps appear to
    separate the two groups — confirm no external meaning is attached.
    """
    ionian = 1
    dorian = 2
    phrygian = 3
    lydian = 4
    mixolydian = 5
    aeolian = 6
    locrian = 7
    major = 10
    harmonicMinor = 11
    minor = 12
    pentatonic = 15
# -o-
# See findScaleForMode().
#
# Interval tables for the diatonic modes: semitone offsets from the root
# covering one octave. The trailing comment marks ("v") the positions of
# the half-steps in each pattern. See findScaleForMode().
Modes :Dict[ModeNames,List] = {
    ModeNames.ionian        : [0, 2, 4, 5, 7, 9, 11],  # - - v - - - v
    ModeNames.dorian        : [0, 2, 3, 5, 7, 9, 10],  # - v - - - v -
    ModeNames.phrygian      : [0, 1, 3, 5, 7, 8, 10],  # v - - - v - -
    ModeNames.lydian        : [0, 2, 4, 6, 7, 9, 11],  # - - - v - - v
    ModeNames.mixolydian    : [0, 2, 4, 5, 7, 9, 10],  # - - v - - v -
    ModeNames.aeolian       : [0, 2, 3, 5, 7, 8, 10],  # - v - - v - -
    ModeNames.locrian       : [0, 1, 3, 5, 6, 8, 10],  # v - - v - - -
}
# Extra scales beyond the diatonic modes; major/minor reuse the ionian and
# aeolian interval lists (shared list objects, not copies).
ModesAdditional :Dict[ModeNames,List] = {
    ModeNames.major         : Modes[ModeNames.ionian],   # - - v - - - v
    ModeNames.harmonicMinor : [0, 2, 3, 5, 7, 8, 11],    # - v - - v -v
    ModeNames.minor         : Modes[ModeNames.aeolian],  # - v - - v - -
    # NOTE(review): [0,2,4,6,8,10] is six whole-tone steps (the whole-tone
    # scale); a major pentatonic would be [0,2,4,7,9] — confirm intent.
    ModeNames.pentatonic    : [0, 2, 4, 6, 8, 10],       # - - - - -
}
# Scale note offsets from current note in scale.
#
# * Offsets serve as indices into mode definitions allowing translation
# of ornaments into specific MIDI notes per starting pitch, key, and mode.
# * Zero(0) is current note, NOT ROOT. Offsets are relative to current note.
# * If a scale has N notes, then N represents the octave. (Eg: 6 is the octave for a pentatonic scale.)
# * Duration of notes within the ornament, its syncopation, interpretation, &c
# are implemented by the receiver of the ornament data. Sender and receiver are
# ASSUMED to have a shared sense of beats per minute (BPM).
# * Ornament and scale length mismatches may result in incorrect ornament translation.
#
# sixteenthLeadIn
#     One beat: Eighth rest + two(2) sixteenth rising adjacent leading tones to MIDINote (1 note).
#     Subdivision: Sixteenths.
#
# sixteenthTripletTurnaround
#     Two beats: Sixteenth triplet ornament beginning with <MIDINote> (6 notes), followed by eighth note denouement (2 notes).
#     Subdivision: Sixteenth triplets.
#
# sixteenthPop
# Two beats: Octave jump from <MIDINote> on sixteenth offbeats (3 notes), followed by eighth note "6, 5" (2 notes).
# Subdivision: Sixteenths.
#
#
# Dictionary value format: [ subdivision, [ ornamentScaleTones... ] ]
#
# Ornament definitions: name -> [ subdivision, [ ornamentScaleTones... ] ].
# The tones are scale-degree offsets relative to the current note (0) and
# are converted to concrete MIDI notes by _translateOrnamentScaleToMIDI().
Ornaments :Dict[str,List[Any]] = {
    "sixteenthLeadIn"            : [4, [ -2, -1, 0 ]],
    "sixteenthTripletTurnaround" : [6, [ 0, 1, 0, -1, -2, -3, 7, 6 ]],
    "sixteenthPop"               : [4, [ 0, 7, 7, 3, 5 ]],
}
# Used by generateOrnament*().
#
# Used by generateOrnament*().
# Per-ornament countdown counters; only the turnaround counter is actively
# decremented in generateOrnament(). Cleared by generateOrnamentReset().
ornamentState :Dict[str,int] = {
    "sixteenthLeadIn"            : 0,
    "sixteenthTripletTurnaround" : 0,
    "sixteenthPop"               : 0,
}
#----------------------------------------- -o--
# Helper functions.
# -o-
def subdivisionPerBeat(bpm:float, subdivision:float) -> float: #UNUSED
    """Return the length in seconds of one subdivision of a beat at *bpm*."""
    secondsPerBeat = SECONDS_PER_MINUTE / bpm
    return secondsPerBeat / subdivision
# -o-
def findScaleForMode(mode:ModeNames) -> Union[List[int],None]:
    """Look up the semitone-offset list for *mode*.

    Searches Modes first, then ModesAdditional; returns None when the
    mode is defined in neither table.
    """
    for table in (Modes, ModesAdditional):
        if mode in table:
            return table[mode]
    return None
#----------------------------------------- -o--
# Generators.
# RETURN Infinite sequences of... scale starting at key within an
# octave, run up or down for range octaves.
#
def generateScaleSequence( key :Key = Key.C,
                           mode :ModeNames = ModeNames.ionian,
                           octave :MIDINote = MIDINote.C4,
                           octaveRange :int = 2,
                           direction :Direction = Direction.UP,
                           scaleEndBoundByRoot :bool = True,
                         ) -> "Iterator[int]":
    """
    Yield an endless sequence of MIDI note numbers running the given scale.

    key                 -- root of the scale (semitone offset added to octave).
    mode                -- mode whose intervals come from findScaleForMode().
    octave              -- MIDI register the run is anchored at (must be a
                           MIDINote member: .value is read below).
    octaveRange         -- octaves covered; values outside 1..6 are reported
                           via log.critical.
    direction           -- UP/DOWN repeat one direction forever;
                           UPDOWN/DOWNUP ping-pong at the ends.
    scaleEndBoundByRoot -- if True, each octave's upper root is appended so
                           a run ends on the root. NOTE(review): for
                           multi-octave runs this duplicates the next
                           octave's root inside the list — confirm intended.

    NB The original annotation said ``-> int``; this is a generator of ints.
    NOTE(review): log.critical is presumed to abort — if it merely logs, an
    unknown mode would crash in the loop below on scaleForMode=None.
    """
    lowNote :int = -1
    scaleForMode :list = findScaleForMode(mode)
    listOfNotes :list = []
    #
    if (octaveRange < 1) or (octaveRange > 6): #XXX
        log.critical(f"{log.defName()}: octaveRange OUT OF BOUNDS ({octaveRange}).")
    if None is scaleForMode:
        log.critical(f"mode is named, but NOT DEFINED. ({mode})")
    lowNote = octave.value + key.value
    # Determine range, regardless of direction.
    #
    if direction in [Direction.DOWN, Direction.DOWNUP]:
        lowNote -= (octaveRange * NOTES_PER_OCTAVE)
    # Build the full ascending note list, one octave register at a time.
    for octa in range(lowNote, lowNote + (octaveRange * NOTES_PER_OCTAVE), NOTES_PER_OCTAVE):
        for pitch in scaleForMode:
            listOfNotes.append(octa + pitch)
        if scaleEndBoundByRoot:
            listOfNotes.append(octa + NOTES_PER_OCTAVE)
    # Iterate.
    #
    if direction in [Direction.DOWN, Direction.DOWNUP]:
        listOfNotes.reverse()
    index = iter(listOfNotes)
    while True:
        try:
            yield index.__next__()
        except StopIteration:
            if direction in [Direction.UP, Direction.DOWN]:
                # Simple repeat: restart from the beginning.
                index = iter(listOfNotes)
            else:
                # Ping-pong: reverse in place and skip the first element.
                listOfNotes.reverse()
                index = iter(listOfNotes)
                index.__next__()
                #NB Avoid duplication of first note, which caused StopIteration.
#ENDDEF -- generateScaleSequence
# -o-
def generateOrnament(fromMIDINote:int, key:Key, mode:ModeNames, bpm:float) -> Union[List[Any],None]:
    """
    Generate OSC arguments describing ornaments, with the form:
      [ <ornamentName> <BPM> <beatSubdivision> [<listOfOrnamentNoteMIDIOffsets...>] ]
    ASSUME This function is called on every beat, or with some organic
    regularity so output over time is roughly consistent with itself.
    Maintain module internal state to govern frequency and type of ornaments produced.
    Random filters to manage internal state are arbitrary, specific and experimental. YMMV.
    Call generateOrnamentReset() to reset ornament module internal state.
    Returns None on any beat where no ornament is produced.
    """
    ornamentChoice :str = None
    ornamentBlob :List[Any] = None
    oscArgs :List[Any] = []
    fourCount :int = 4  # Cooldown, in calls (beats), for the triplet turnaround.
    global ornamentState
    #
    # Count down an active turnaround cooldown; at its halfway point (2 of 4)
    # optionally (35%) force a "sixteenthPop" instead of a random pick.
    if ornamentState["sixteenthTripletTurnaround"] > 0: # Check existing state.
        ornamentState["sixteenthTripletTurnaround"] -= 1
        if ornamentState["sixteenthTripletTurnaround"] == 2:
            if z.percentTrue(35):
                ornamentChoice = "sixteenthPop"
    if not ornamentChoice:
        if z.percentTrue(70): return None # Frequency to bypass ornaments.
        ornamentChoice = random.choice(list(Ornaments.keys()))
    #
    # Per-ornament gating rules.
    if "sixteenthLeadIn" == ornamentChoice:
        pass  # Always allowed.
    elif "sixteenthPop" == ornamentChoice:
        # Suppress random pops during the turnaround cooldown, except at
        # the halfway beat (the forced case above).
        if ornamentState["sixteenthTripletTurnaround"] > 0 \
                and ornamentState["sixteenthTripletTurnaround"] != 2:
            return None
    elif "sixteenthTripletTurnaround" == ornamentChoice:
        # Generate no more often than once every fourCount.
        # Optionally generate "sixteenthPop" at half-way (above).
        #
        if ornamentState["sixteenthTripletTurnaround"] > 0:
            return None
        ornamentState["sixteenthTripletTurnaround"] = fourCount
    else:
        log.error(f"UNRECOGNIZED ornament choice. ({ornamentChoice})")
        return None
    #
    # Translate the scale-relative ornament into concrete MIDI notes.
    ornamentBlob = _translateOrnamentScaleToMIDI(ornamentChoice, fromMIDINote, key, mode)
    if not ornamentBlob: return None
    oscArgs = [ornamentChoice, bpm, ornamentBlob[0]]  # name, BPM, subdivision...
    oscArgs += ornamentBlob[1]                        # ...then the MIDI notes.
    return oscArgs
# -o-
def generateOrnamentReset() -> None:
    """Rebind ornamentState with every counter zeroed."""
    global ornamentState
    ornamentState = {name: 0 for name in ornamentState}
#----------------------------------------- -o--
# Protected functions.
# -o-
def _discoverMIDINoteScaleIndexInMode(midiNote:int, key:Key, mode:ModeNames) -> Union[int,None]:
    """
    RETURN Scale tone of midiNote in mode, or None if it cannot be found.
    NB Range of scale tone depends upon the mode (generally 0-7). Zero(0) is root.
       Scale tone is also the index into scale array.

    FIX: removed a leftover debug statement (log.debug("hi3")) that fired on
    every not-found lookup; also dropped dead pre-initializations.
    """
    scaleForMode :List = findScaleForMode(mode)

    # Validate inputs: midiNote must lie within the supported registers and
    # the mode must be defined in one of the scale tables.
    if (midiNote < MIDINote.C1) or (midiNote > MIDINote.C8):
        log.error(f"midiNote is OUT OF RANGE. ({midiNote})")
        return None
    if None is scaleForMode:
        log.error(f"mode is UNRECOGNIZED. ({mode})")
        return None

    # Find the highest octave register at or below midiNote...
    listOfRegisters = list(MIDINote)
    register = listOfRegisters.pop(0)
    for reg in listOfRegisters:
        if midiNote < reg:
            break
        register = reg
    # ...then shift it up to the scale root by adding the key offset.
    register += key

    # Match midiNote against each scale degree above that root.
    for scaleIndex, offset in enumerate(scaleForMode):
        if midiNote == (register + offset):
            return scaleIndex
    return None
# -o-
def _translateOrnamentScaleToMIDI(ornamentName:str, fromMIDINote:int, key:Key, mode:ModeNames) -> Union[List[Any],None]:
| |
<reponame>SuperMap/iClientPython
from unittest import TestCase, mock
from iclientpy.portal import Portal, MapShareSettingBuilder, DataShareSettingBuilder
from iclientpy.rest.api.model import GetMapsResult, ViewerMap, MethodResult, MyDatasMethodResult, DataItem, Status, \
DataItemType, MyDataUploadProcess, Layer, GetGroupsResult, PermissionType, DataPermissionType, EntityType, \
GetMyDatasResult, UserInfo, RoleEntity
from io import FileIO
from pandas import DataFrame
class MockiPortalAPIFactory:
    """Stand-in for iclientpy.portal.iPortalAPIFactory that only records the base URL."""

    def __init__(self, url, *args, **kwargs):
        # Accept (and ignore) whatever extra arguments the real factory takes.
        self._base_url = url
@mock.patch("iclientpy.portal.iPortalAPIFactory", MockiPortalAPIFactory)
class PortalTestCase(TestCase):
def test_search_map(self):
    """search_map() returns the content list of the GetMapsResult."""
    empty_result = GetMapsResult()
    empty_result.content = []
    service_mock = mock.MagicMock()
    service_mock.get_maps = mock.MagicMock(return_value=empty_result)
    portal = Portal('http://localhost:8090/iportal', 'admin', 'Supermap123')
    portal._portal = mock.MagicMock()
    portal._portal.maps_service = mock.MagicMock(return_value=service_mock)
    self.assertEqual(portal.search_map(), [])
def test_get_map(self):
    """get_map() returns the ViewerMap delivered by the maps service."""
    viewer_map = ViewerMap()
    service_mock = mock.MagicMock()
    service_mock.get_map = mock.MagicMock(return_value=viewer_map)
    portal = Portal('http://localhost:8090/iportal', 'admin', 'Supermap123')
    portal._portal = mock.MagicMock()
    portal._portal.maps_service = mock.MagicMock(return_value=service_mock)
    self.assertEqual(portal.get_map('map_id'), viewer_map)
def test_upload_data(self):
    """upload_data() polls status/progress until the item is OK, then returns its id."""
    portal = Portal('http://localhost:8090/iportal', 'admin', 'Supermap123')
    portal._portal = mock.MagicMock()
    method_result = MyDatasMethodResult()
    method_result.childID = 'data_id'
    datas_mock = mock.MagicMock()
    datas_mock.post_datas = mock.MagicMock(return_value=method_result)
    datas_mock.upload_data = mock.MagicMock(return_value=method_result)
    portal._portal.datas_service = mock.MagicMock(return_value=datas_mock)
    # First two polls report CREATED, the third reports OK.
    pending_one = DataItem()
    pending_one.status = Status.CREATED
    pending_two = DataItem()
    pending_two.status = Status.CREATED
    finished = DataItem()
    finished.status = Status.OK
    portal.get_data = mock.MagicMock(side_effect=[pending_one, pending_two, finished])
    portal.get_data_upload_progress = mock.MagicMock(
        side_effect=[(0, 100), (50, 100), (100, 100)])
    progress_callback = mock.MagicMock()
    stream = mock.MagicMock()
    stream.__class__ = FileIO
    uploaded_id = portal.upload_data('test.json', stream, DataItemType.JSON,
                                     progress_callback)
    self.assertEqual(uploaded_id, 'data_id')
def test_upload_dataframe_as_json(self):
    """A DataFrame is serialized via to_json() and handed to upload_data()."""
    frame = mock.MagicMock()
    frame.__class__ = DataFrame
    frame.to_json = mock.MagicMock(return_value='testtesttest')
    portal = Portal('http://localhost:8090/iportal', 'admin', 'Supermap123')
    portal._portal = mock.MagicMock()
    portal.upload_data = mock.MagicMock(return_value='data_id')
    self.assertEqual(portal.upload_dataframe_as_json('data', frame), 'data_id')
def test_get_datas(self):
    """search_data() returns the content list of GetMyDatasResult."""
    empty_result = GetMyDatasResult()
    empty_result.content = []
    datas_mock = mock.MagicMock()
    datas_mock.get_datas = mock.MagicMock(return_value=empty_result)
    portal = Portal('http://localhost:8090/iportal', 'admin', 'Supermap123')
    portal._portal = mock.MagicMock()
    portal._portal.datas_service = mock.MagicMock(return_value=datas_mock)
    self.assertEqual(portal.search_data(), [])
def test_get_data(self):
    """get_data() returns the DataItem supplied by the datas service."""
    item = DataItem()
    datas_mock = mock.MagicMock()
    datas_mock.get_data = mock.MagicMock(return_value=item)
    portal = Portal('http://localhost:8090/iportal', 'admin', 'Supermap123')
    portal._portal = mock.MagicMock()
    portal._portal.datas_service = mock.MagicMock(return_value=datas_mock)
    self.assertEqual(portal.get_data('data_id'), item)
def test_get_data_upload_progress(self):
    """Upload progress is reported as a (read, total) tuple."""
    upload_process = MyDataUploadProcess()
    upload_process.read = 10
    upload_process.total = 100
    datas_mock = mock.MagicMock()
    datas_mock.get_upload_process = mock.MagicMock(return_value=upload_process)
    portal = Portal('http://localhost:8090/iportal', 'admin', 'Supermap123')
    portal._portal = mock.MagicMock()
    portal._portal.datas_service = mock.MagicMock(return_value=datas_mock)
    self.assertEqual(portal.get_data_upload_progress('data_id'), (10, 100))
def test_create_map(self):
    """create_map() returns the new resource id from post_maps."""
    method_result = MethodResult()
    method_result.newResourceID = 'map_id'
    maps_mock = mock.MagicMock()
    maps_mock.post_maps = mock.MagicMock(return_value=method_result)
    portal = Portal('http://localhost:8090/iportal', 'admin', 'Supermap123')
    portal._portal = mock.MagicMock()
    portal._portal.maps_service = mock.MagicMock(return_value=maps_mock)
    created_id = portal.create_map([Layer()], 3857, 'map', (0, 0),
                                   (-180, -90, 180, 90))
    self.assertEqual(created_id, 'map_id')
def test_prepare_geojson_layer(self):
    """The layer URL is derived from the portal base url and the data id."""
    portal = Portal('http://localhost:8090/iportal', 'admin', 'Supermap123')
    layer = portal.prepare_geojson_layer('data_id', 'layer')
    self.assertEqual(layer.url,
                     'http://localhost:8090/iportal/datas/data_id/content.json')
    self.assertEqual(layer.title, 'layer')
def test_search_groups(self):
    """search_group() returns the content list of GetGroupsResult."""
    empty_result = GetGroupsResult()
    empty_result.content = []
    groups_mock = mock.MagicMock()
    groups_mock.get_groups = mock.MagicMock(return_value=empty_result)
    portal = Portal('http://localhost:8090/iportal', 'admin', 'Supermap123')
    portal._portal = mock.MagicMock()
    portal._portal.groups_service = mock.MagicMock(return_value=groups_mock)
    self.assertEqual(portal.search_group(), [])
def test_get_data_sharesetting(self):
    """get_data_sharesetting() forwards the service response unchanged."""
    datas_mock = mock.MagicMock()
    datas_mock.get_data_sharesetting = mock.MagicMock(return_value=[])
    portal = Portal('http://localhost:8090/iportal', 'admin', 'Supermap123')
    portal._portal = mock.MagicMock()
    portal._portal.datas_service = mock.MagicMock(return_value=datas_mock)
    self.assertEqual(portal.get_data_sharesetting('data_id'), [])
def test_config_data_sharesetting(self):
    """A successful MethodResult lets config_data_sharesetting() return quietly."""
    ok = MethodResult()
    ok.succeed = True
    datas_mock = mock.MagicMock()
    datas_mock.put_data_sharesetting = mock.MagicMock(return_value=ok)
    portal = Portal('http://localhost:8090/iportal', 'admin', 'Supermap123')
    portal._portal = mock.MagicMock()
    portal._portal.datas_service = mock.MagicMock(return_value=datas_mock)
    portal.config_data_sharesetting('data_id', [])
    datas_mock.put_data_sharesetting.assert_called_once_with('data_id', [])
def test_config_data_sharesetting_exception(self):
    """A failed MethodResult makes config_data_sharesetting() raise."""
    failed = MethodResult()
    failed.succeed = False
    datas_mock = mock.MagicMock()
    datas_mock.put_data_sharesetting = mock.MagicMock(return_value=failed)
    portal = Portal('http://localhost:8090/iportal', 'admin', 'Supermap123')
    portal._portal = mock.MagicMock()
    portal._portal.datas_service = mock.MagicMock(return_value=datas_mock)
    with self.assertRaises(Exception):
        portal.config_data_sharesetting('data_id', [])
    # The service call itself must still have happened exactly once.
    datas_mock.put_data_sharesetting.assert_called_once_with('data_id', [])
def test_get_map_sharesetting(self):
    """get_map_sharesetting() forwards the service response unchanged."""
    maps_mock = mock.MagicMock()
    maps_mock.get_map_sharesetting = mock.MagicMock(return_value=[])
    portal = Portal('http://localhost:8090/iportal', 'admin', 'Supermap123')
    portal._portal = mock.MagicMock()
    portal._portal.maps_service = mock.MagicMock(return_value=maps_mock)
    self.assertEqual(portal.get_map_sharesetting('map_id'), [])
def test_config_map_sharesetting(self):
    """A successful MethodResult lets config_map_sharesetting() return quietly."""
    ok = MethodResult()
    ok.succeed = True
    maps_mock = mock.MagicMock()
    maps_mock.put_map_sharesetting = mock.MagicMock(return_value=ok)
    portal = Portal('http://localhost:8090/iportal', 'admin', 'Supermap123')
    portal._portal = mock.MagicMock()
    portal._portal.maps_service = mock.MagicMock(return_value=maps_mock)
    portal.config_map_sharesetting('map_id', [])
    maps_mock.put_map_sharesetting.assert_called_once_with('map_id', [])
def test_config_map_sharesetting_exception(self):
    """A failed MethodResult makes config_map_sharesetting() raise."""
    failed = MethodResult()
    failed.succeed = False
    maps_mock = mock.MagicMock()
    maps_mock.put_map_sharesetting = mock.MagicMock(return_value=failed)
    portal = Portal('http://localhost:8090/iportal', 'admin', 'Supermap123')
    portal._portal = mock.MagicMock()
    portal._portal.maps_service = mock.MagicMock(return_value=maps_mock)
    with self.assertRaises(Exception):
        portal.config_map_sharesetting('map_id', [])
    # The service call itself must still have happened exactly once.
    maps_mock.put_map_sharesetting.assert_called_once_with('map_id', [])
def test_delete_map(self):
    """delete_map() delegates to delete_maps with a single-element list."""
    ok = MethodResult()
    ok.succeed = True
    maps_mock = mock.MagicMock()
    maps_mock.delete_maps = mock.MagicMock(return_value=ok)
    portal = Portal('http://localhost:8090/iportal', 'admin', 'Supermap123')
    portal._portal = mock.MagicMock()
    portal._portal.maps_service = mock.MagicMock(return_value=maps_mock)
    portal.delete_map('map_id')
    maps_mock.delete_maps.assert_called_once_with(['map_id'])
def test_delete_maps(self):
    """delete_maps() passes the id list straight through to the service."""
    ok = MethodResult()
    ok.succeed = True
    maps_mock = mock.MagicMock()
    maps_mock.delete_maps = mock.MagicMock(return_value=ok)
    portal = Portal('http://localhost:8090/iportal', 'admin', 'Supermap123')
    portal._portal = mock.MagicMock()
    portal._portal.maps_service = mock.MagicMock(return_value=maps_mock)
    portal.delete_maps(['map_id'])
    maps_mock.delete_maps.assert_called_once_with(['map_id'])
def test_delete_data(self):
    """delete_data() calls the datas service exactly once with the id."""
    ok = MethodResult()
    ok.succeed = True
    datas_mock = mock.MagicMock()
    datas_mock.delete_data = mock.MagicMock(return_value=ok)
    portal = Portal('http://localhost:8090/iportal', 'admin', 'Supermap123')
    portal._portal = mock.MagicMock()
    portal._portal.datas_service = mock.MagicMock(return_value=datas_mock)
    portal.delete_data('data_id')
    datas_mock.delete_data.assert_called_once_with('data_id')
def test_delete_datas(self):
    """delete_datas() deletes each id individually, in order."""
    ok = MethodResult()
    ok.succeed = True
    datas_mock = mock.MagicMock()
    datas_mock.delete_data = mock.MagicMock(return_value=ok)
    portal = Portal('http://localhost:8090/iportal', 'admin', 'Supermap123')
    portal._portal = mock.MagicMock()
    portal._portal.datas_service = mock.MagicMock(return_value=datas_mock)
    portal.delete_datas(['data_id', 'data_id2'])
    self.assertEqual(datas_mock.delete_data.call_count, 2)
    self.assertEqual(datas_mock.delete_data.call_args_list,
                     [mock.call('data_id'), mock.call('data_id2')])
def test_getusers(self):
    """get_users() proxies security_management.get_users()."""
    expected_users = []
    mgmt_mock = mock.MagicMock()
    mgmt_mock.get_users = mock.MagicMock(return_value=expected_users)
    portal = Portal('http://localhost:8090/iportal', 'admin', 'Supermap123')
    portal._portal = mock.MagicMock()
    portal._portal.security_management = mock.MagicMock(return_value=mgmt_mock)
    self.assertEqual(portal.get_users(), expected_users)
def test_getuser(self):
    """Fetch a single user through the security management API.

    NOTE(review): despite the name, this mocks ``get_users`` and calls
    ``portal.get_users()``, duplicating test_getusers — it looks like a
    copy-paste slip that should mock ``get_user`` and call
    ``portal.get_user(...)`` instead. Confirm against Portal's API.
    """
    portal = Portal('http://localhost:8090/iportal', 'admin', 'Supermap123')
    portal._portal = mock.MagicMock()
    managment = mock.MagicMock()
    portal._portal.security_management = mock.MagicMock(return_value=managment)
    user = UserInfo()
    managment.get_users = mock.MagicMock(return_value=user)
    result = portal.get_users()
    self.assertEqual(result, user)
def test_createuser(self):
    """create_user() posts the new account when the result succeeds."""
    ok = MethodResult()
    ok.succeed = True
    mgmt_mock = mock.MagicMock()
    mgmt_mock.post_users = mock.MagicMock(return_value=ok)
    portal = Portal('http://localhost:8090/iportal', 'admin', 'Supermap123')
    portal._portal = mock.MagicMock()
    portal._portal.security_management = mock.MagicMock(return_value=mgmt_mock)
    portal.create_user('test', 'test')
    mgmt_mock.post_users.assert_called_once()
def test_updateuser(self):
    """update_user() issues put_user when the result succeeds."""
    ok = MethodResult()
    ok.succeed = True
    mgmt_mock = mock.MagicMock()
    mgmt_mock.put_user = mock.MagicMock(return_value=ok)
    portal = Portal('http://localhost:8090/iportal', 'admin', 'Supermap123')
    portal._portal = mock.MagicMock()
    portal._portal.security_management = mock.MagicMock(return_value=mgmt_mock)
    portal.update_user('test', 'test')
    mgmt_mock.put_user.assert_called_once()
def test_deleteusers(self):
portal = Portal('http://localhost:8090/iportal', 'admin', 'Supermap123')
portal._portal = mock.MagicMock()
managment = mock.MagicMock()
portal._portal.security_management = mock.MagicMock(return_value=managment)
re = MethodResult()
re.succeed = True
managment.put_users = mock.MagicMock(return_value=re)
portal.delete_users(['test'])
managment.put_users.assert_called_once()
def test_deleteuser(self):
portal = Portal('http://localhost:8090/iportal', 'admin', 'Supermap123')
portal._portal = mock.MagicMock()
managment = mock.MagicMock()
portal._portal.security_management = mock.MagicMock(return_value=managment)
re = MethodResult()
re.succeed = True
managment.delete_user = mock.MagicMock(return_value=re)
portal.delete_user('test')
managment.delete_user.assert_called_once()
def test_createuser_exception(self):
portal = Portal('http://localhost:8090/iportal', 'admin', 'Supermap123')
portal._portal = mock.MagicMock()
managment = mock.MagicMock()
portal._portal.security_management = mock.MagicMock(return_value=managment)
re = MethodResult()
managment.post_users = mock.MagicMock(return_value=re)
with self.assertRaises(Exception):
portal.create_user('test', 'test')
managment.post_users.assert_called_once()
def test_updateuser_exception(self):
portal = Portal('http://localhost:8090/iportal', 'admin', 'Supermap123')
portal._portal = mock.MagicMock()
managment = mock.MagicMock()
portal._portal.security_management = mock.MagicMock(return_value=managment)
re = MethodResult()
managment.put_user = mock.MagicMock(return_value=re)
with self.assertRaises(Exception):
portal.update_user('test', 'test')
managment.put_user.assert_called_once()
def test_deleteusers_exception(self):
portal = Portal('http://localhost:8090/iportal', 'admin', 'Supermap123')
portal._portal = mock.MagicMock()
managment = mock.MagicMock()
portal._portal.security_management = mock.MagicMock(return_value=managment)
re = MethodResult()
managment.put_users = mock.MagicMock(return_value=re)
with self.assertRaises(Exception):
portal.delete_users(['test'])
managment.put_users.assert_called_once()
def test_deleteuser_exception(self):
portal = Portal('http://localhost:8090/iportal', 'admin', 'Supermap123')
portal._portal = mock.MagicMock()
managment = mock.MagicMock()
portal._portal.security_management = mock.MagicMock(return_value=managment)
re = MethodResult()
managment.delete_user = mock.MagicMock(return_value=re)
with self.assertRaises(Exception):
portal.delete_user('test')
managment.delete_user.assert_called_once()
def test_getroles(self):
portal = Portal('http://localhost:8090/iportal', 'admin', 'Supermap123')
portal._portal = mock.MagicMock()
managment = mock.MagicMock()
portal._portal.security_management = mock.MagicMock(return_value=managment)
roles = []
managment.get_roles = mock.MagicMock(return_value=roles)
result = portal.get_roles()
self.assertEqual(result, roles)
def test_getrole(self):
portal = Portal('http://localhost:8090/iportal', 'admin', 'Supermap123')
portal._portal = mock.MagicMock()
managment = mock.MagicMock()
portal._portal.security_management = mock.MagicMock(return_value=managment)
role = RoleEntity()
managment.get_role = mock.MagicMock(return_value=role)
result = portal.get_role('test')
self.assertEqual(result, role)
def test_createrole(self):
portal = Portal('http://localhost:8090/iportal', 'admin', 'Supermap123')
portal._portal = mock.MagicMock()
managment = mock.MagicMock()
portal._portal.security_management = mock.MagicMock(return_value=managment)
re = MethodResult()
re.succeed = True
managment.post_roles = mock.MagicMock(return_value=re)
portal.create_role('test')
managment.post_roles.assert_called_once()
def test_updaterole(self):
portal = Portal('http://localhost:8090/iportal', 'admin', 'Supermap123')
portal._portal = mock.MagicMock()
managment = mock.MagicMock()
portal._portal.security_management = mock.MagicMock(return_value=managment)
re = MethodResult()
re.succeed = True
managment.put_role = mock.MagicMock(return_value=re)
portal.update_role('test', description='test')
managment.put_role.assert_called_once()
def test_deleterole(self):
portal = Portal('http://localhost:8090/iportal', 'admin', 'Supermap123')
portal._portal = mock.MagicMock()
managment = mock.MagicMock()
portal._portal.security_management = mock.MagicMock(return_value=managment)
re = MethodResult()
re.succeed = True
managment.delete_role = mock.MagicMock(return_value=re)
portal.delete_role('test')
managment.delete_role.assert_called_once()
def test_deleteroles(self):
portal = Portal('http://localhost:8090/iportal', 'admin', 'Supermap123')
portal._portal = mock.MagicMock()
managment = mock.MagicMock()
portal._portal.security_management = mock.MagicMock(return_value=managment)
re = MethodResult()
re.succeed = True
managment.put_roles = mock.MagicMock(return_value=re)
portal.delete_roles(['test'])
managment.put_roles.assert_called_once()
def test_createrole_exception(self):
portal = Portal('http://localhost:8090/iportal', 'admin', 'Supermap123')
portal._portal = mock.MagicMock()
managment = mock.MagicMock()
portal._portal.security_management = mock.MagicMock(return_value=managment)
re = MethodResult()
managment.post_roles = mock.MagicMock(return_value=re)
with self.assertRaises(Exception):
portal.create_role('test')
managment.post_roles.assert_called_once()
def test_updaterole_exception(self):
portal = Portal('http://localhost:8090/iportal', 'admin', 'Supermap123')
portal._portal = mock.MagicMock()
managment = mock.MagicMock()
portal._portal.security_management = mock.MagicMock(return_value=managment)
re = MethodResult()
managment.put_role = mock.MagicMock(return_value=re)
with self.assertRaises(Exception):
portal.update_role('test', description='test')
managment.put_role.assert_called_once()
def test_deleterole_exception(self):
portal = Portal('http://localhost:8090/iportal', 'admin', 'Supermap123')
portal._portal = mock.MagicMock()
managment = mock.MagicMock()
portal._portal.security_management = mock.MagicMock(return_value=managment)
re = MethodResult()
managment.delete_role = mock.MagicMock(return_value=re)
with self.assertRaises(Exception):
portal.delete_role('test')
managment.delete_role.assert_called_once()
def test_deleteroles_exception(self):
portal = Portal('http://localhost:8090/iportal', 'admin', 'Supermap123')
portal._portal = mock.MagicMock()
managment = mock.MagicMock()
portal._portal.security_management = mock.MagicMock(return_value=managment)
re = MethodResult()
managment.put_roles = mock.MagicMock(return_value=re)
with self.assertRaises(Exception):
portal.delete_roles(['test'])
managment.put_roles.assert_called_once()
class MapShareSettingBuilderTestCase(TestCase):
    """MapShareSettingBuilder should record one entry per share_* call, in order."""
    def test_builder(self):
        result = (MapShareSettingBuilder()
                  .share_to_user("user", PermissionType.READ)
                  .share_to_department('department_id', PermissionType.READ)
                  .share_to_group(123, PermissionType.READWRITE)
                  .share_to_everyone(PermissionType.READ)
                  .share_to_users(['user1', 'user2'], PermissionType.READWRITE)
                  .build())
        # Each row: (identifying attribute, its value, permission value, entity type).
        expected = [
            ('entityName', 'user', PermissionType.READ.value, EntityType.USER),
            ('entityId', 'department_id', PermissionType.READ.value, EntityType.DEPARTMENT),
            ('entityId', 123, PermissionType.READWRITE.value, EntityType.IPORTALGROUP),
            ('entityName', 'GUEST', PermissionType.READ.value, EntityType.USER),
            ('entityName', 'user1', PermissionType.READWRITE.value, EntityType.USER),
            ('entityName', 'user2', PermissionType.READWRITE.value, EntityType.USER),
        ]
        for index, (id_attr, identity, permission, entity_type) in enumerate(expected):
            self.assertEqual(getattr(result[index], id_attr), identity)
            self.assertEqual(result[index].permissionType, permission)
            self.assertEqual(result[index].entityType, entity_type)
class DataShareSettingBuilderTestCase(TestCase):
def test_builder(self):
result = DataShareSettingBuilder().share_to_user('user', DataPermissionType.DOWNLOAD).share_to_everyone(
DataPermissionType.DOWNLOAD).share_to_group('group_id', DataPermissionType.DOWNLOAD).share_to_users(
['user1', 'user2'], DataPermissionType.DELETE).build()
self.assertEqual(result[0].entityName, 'user')
self.assertEqual(result[0].dataPermissionType, DataPermissionType.DOWNLOAD.value)
self.assertEqual(result[0].entityType, EntityType.USER)
self.assertEqual(result[1].entityName, 'GUEST')
| |
10
IGMPv3.fixup(pkt,invalid_ttl = set_ttl)
else:
IGMPv3.fixup(pkt)
sendp(pkt, iface=iface)
if delay != 0:
time.sleep(delay)
def send_igmp_join_recvQuery(self, groups, rec_queryCount = None, src_list = ['1.2.3.4'], ip_pkt = None, iface = 'veth0', delay = 2):
self.onos_ssm_table_load(groups, src_list)
igmp = IGMPv3(type = IGMP_TYPE_V3_MEMBERSHIP_REPORT, max_resp_code=30,
gaddr=self.IP_DST)
for g in groups:
gr = IGMPv3gr(rtype=IGMP_V3_GR_TYPE_INCLUDE, mcaddr=g)
gr.sources = src_list
gr.sources = src_list
igmp.grps.append(gr)
if ip_pkt is None:
ip_pkt = self.igmp_eth/self.igmp_ip
pkt = ip_pkt/igmp
IGMPv3.fixup(pkt)
if rec_queryCount == None:
log_test.info('Sending IGMP join for group %s and waiting for one query packet and printing the packet' %groups)
resp = srp1(pkt, iface=iface)
else:
log_test.info('Sending IGMP join for group %s and waiting for periodic query packets and printing one packet' %groups)
resp = srp1(pkt, iface=iface)
resp[0].summary()
log_test.info('Sent IGMP join for group %s and received a query packet and printing packet' %groups)
if delay != 0:
time.sleep(delay)
def send_igmp_leave(self, groups, src_list = ['1.2.3.4'], ip_pkt = None, iface = 'veth0', delay = 2):
log_test.info('entering into igmp leave function')
igmp = IGMPv3(type = IGMP_TYPE_V3_MEMBERSHIP_REPORT, max_resp_code=30,
gaddr=self.IP_DST)
for g in groups:
gr = IGMPv3gr(rtype=IGMP_V3_GR_TYPE_EXCLUDE, mcaddr=g)
gr.sources = src_list
igmp.grps.append(gr)
if ip_pkt is None:
ip_pkt = self.igmp_eth/self.igmp_ip
pkt = ip_pkt/igmp
IGMPv3.fixup(pkt)
sendp(pkt, iface = iface)
if delay != 0:
time.sleep(delay)
def send_igmp_leave_listening_group_specific_query(self, groups, src_list = ['1.2.3.4'], ip_pkt = None, iface = 'veth0', delay = 2):
igmp = IGMPv3(type = IGMP_TYPE_V3_MEMBERSHIP_REPORT, max_resp_code=30,
gaddr=self.IP_DST)
for g in groups:
gr = IGMPv3gr(rtype=IGMP_V3_GR_TYPE_EXCLUDE, mcaddr=g)
gr.sources = src_list
igmp.grps.append(gr)
if ip_pkt is None:
ip_pkt = self.igmp_eth/self.igmp_ip
pkt = ip_pkt/igmp
IGMPv3.fixup(pkt)
log_test.info('Sending IGMP leave for group %s and waiting for one group specific query packet and printing the packet' %groups)
resp = srp1(pkt, iface=iface)
resp[0].summary()
log_test.info('Sent IGMP leave for group %s and received a group specific query packet and printing packet' %groups)
if delay != 0:
time.sleep(delay)
    @deferred(timeout=MCAST_TRAFFIC_TIMEOUT+390)
    def test_netCondition_with_delay_between_igmp_join_and_data_recv(self):
        """Join the IGMP groups first, hold the multicast data for a random
        10-300 s delay, then verify the earlier join still receives it."""
        self.setUp_igmp()
        randomDelay = randint(10,300)
        groups = ['192.168.127.12', '172.16.31.10']
        self.onos_ssm_table_load(groups)
        df = defer.Deferred()
        igmpState = IGMPTestState(groups = groups, df = df)
        igmpStateRecv = IGMPTestState(groups = groups, df = df)
        igmpStateList = (igmpState, igmpStateRecv)
        mcastTraffic = McastTraffic(groups, iface= 'veth2', cb = self.send_mcast_cb, arg = igmpState)
        self.df = df
        self.mcastTraffic = mcastTraffic
        self.recv_socket = L3PacketSocket(iface = 'veth0', type = ETH_P_IP)
        def mcast_traffic_delay_start():
            # Fired by the Timer below: start the multicast sender only after the delay.
            mcastTraffic.start()
        def igmp_srp_task(stateList):
            # Re-schedules itself until the sender stops, then tears down and
            # verifies that the join actually received traffic.
            igmpSendState, igmpRecvState = stateList
            if not mcastTraffic.isRecvStopped():
                result = self.igmp_recv(igmpRecvState)
                reactor.callLater(0, igmp_srp_task, stateList)
            else:
                self.mcastTraffic.stop()
                self.recv_socket.close()
                self.igmp_verify_join(stateList)
                self.df.callback(0)
        self.send_igmp_join(groups)
        log_test.info('Holding multicast data for a period of random delay = {} secs'.format(randomDelay))
        t = Timer(randomDelay, mcast_traffic_delay_start)
        t.start()
        # Watchdog and receive loop are scheduled relative to the random delay.
        self.test_timer = reactor.callLater(randomDelay+30, self.mcast_traffic_timer)
        reactor.callLater(randomDelay+10, igmp_srp_task, igmpStateList)
        return df
    @deferred(timeout=MCAST_TRAFFIC_TIMEOUT+390)
    def test_netCondition_with_delay_between_data_recv_and_igmp_join(self):
        """Start the multicast data first, hold the IGMP join for a random
        10-300 s delay, then verify the late join still receives traffic."""
        self.setUp_igmp()
        randomDelay = randint(10,300)
        groups = ['192.168.127.12', '172.16.31.10']
        self.onos_ssm_table_load(groups)
        df = defer.Deferred()
        igmpState = IGMPTestState(groups = groups, df = df)
        igmpStateRecv = IGMPTestState(groups = groups, df = df)
        igmpStateList = (igmpState, igmpStateRecv)
        mcastTraffic = McastTraffic(groups, iface= 'veth2', cb = self.send_mcast_cb, arg = igmpState)
        self.df = df
        self.mcastTraffic = mcastTraffic
        self.recv_socket = L3PacketSocket(iface = 'veth0', type = ETH_P_IP)
        def mcast_join_delay_start():
            # Fired by the Timer below: issue the join only after the delay.
            log_test.info('Holding channel join for a period of random delay = {} secs'.format(randomDelay))
            self.send_igmp_join(groups)
        def igmp_srp_task(stateList):
            # Re-schedules itself until the sender stops, then tears down and
            # verifies that the join actually received traffic.
            igmpSendState, igmpRecvState = stateList
            if not mcastTraffic.isRecvStopped():
                result = self.igmp_recv(igmpRecvState)
                reactor.callLater(0, igmp_srp_task, stateList)
            else:
                self.mcastTraffic.stop()
                self.recv_socket.close()
                self.igmp_verify_join(stateList)
                self.df.callback(0)
        mcastTraffic.start()
        t = Timer(randomDelay, mcast_join_delay_start)
        t.start()
        # Watchdog and receive loop are scheduled relative to the random delay.
        self.test_timer = reactor.callLater(randomDelay+30, self.mcast_traffic_timer)
        reactor.callLater(randomDelay+10, igmp_srp_task, igmpStateList)
        return df
    @deferred(timeout=MCAST_TRAFFIC_TIMEOUT+340)
    def test_netCondition_with_delay_between_igmp_leave_and_data(self):
        """Join two groups with traffic flowing, hold the leave for a random
        10-300 s delay, then verify the left group receives no more data."""
        self.setUp_igmp()
        randomDelay = randint(10,300)
        groups = ['192.168.3.11', '192.168.127.12']
        leave_groups = ['192.168.3.11']
        self.onos_ssm_table_load(groups)
        df = defer.Deferred()
        igmpState = IGMPTestState(groups = groups, df = df)
        igmpStateRecv = IGMPTestState(groups = groups, df = df)
        igmpStateList = (igmpState, igmpStateRecv)
        mcastTraffic = McastTraffic(groups, iface= 'veth2', cb = self.send_mcast_cb,
                                    arg = igmpState)
        self.df = df
        self.mcastTraffic = mcastTraffic
        self.recv_socket = L3PacketSocket(iface = 'veth0', type = ETH_P_IP)
        def mcast_leave_delay_start():
            # Fired by the Timer below: leave the group after the delay, then
            # assert the join interface stays quiet (status 1 == no traffic seen).
            self.send_igmp_leave(leave_groups, delay = 3)
            join_state = IGMPTestState(groups = leave_groups)
            status = self.igmp_not_recv_task(self.V_INF1,leave_groups, join_state)
            log_test.info('Verified status for igmp recv task %s'%status)
            assert status == 1 , 'EXPECTED RESULT'
            self.df.callback(0)
        mcastTraffic.start()
        self.send_igmp_join(groups)
        log_test.info('Holding multicast leave packet for a period of random delay = {} secs'.format(randomDelay))
        t = Timer(randomDelay+10, mcast_leave_delay_start)
        t.start()
        return df
def igmp_not_recv_task(self, intf, groups, join_state):
log_test.info('Entering igmp not recv task loop')
recv_socket = L2Socket(iface = intf, type = ETH_P_IP)
group_map = {}
for g in groups:
group_map[g] = [0,0]
log_test.info('Verifying join interface should not receive any multicast data')
self.NEGATIVE_TRAFFIC_STATUS = 1
def igmp_recv_cb(pkt):
log_test.info('Multicast packet %s received for left groups %s' %(pkt[IP].dst, groups))
self.NEGATIVE_TRAFFIC_STATUS = 2
sniff(prn = igmp_recv_cb, count = 1, lfilter = lambda p: IP in p and p[IP].dst in groups,
timeout = 3, opened_socket = recv_socket)
recv_socket.close()
return self.NEGATIVE_TRAFFIC_STATUS
    ## Sample test case: all of the scenarios below were added based on this one.
    @deferred(TEST_TIMEOUT_DELAY+50)
    def test_netCondition_in_eap_tls_with_delay_between_positive_IdReq_and_tlsHelloReq(self):
        """Run EAP-TLS but delay the TLS Hello request by a random 10-300 s
        after the Identity Request/Response exchange; the handshake should
        still complete (positive case)."""
        self.setUp_tls()
        randomDelay = randint(10,300)
        df = defer.Deferred()
        tls = TLSAuthTest()
        def eap_tls_eapTlsHelloReq_pkt_delay():
            # Fired by the Timer below: resume the handshake after the delay.
            tls._eapTlsHelloReq()
            tls._eapTlsCertReq()
            tls._eapTlsChangeCipherSpec()
            tls._eapTlsFinished()
            df.callback(0)
        def eap_tls_verify(df):
            # Drive the handshake up to the Identity Request, then arm the delay.
            tls._eapSetup()
            tls.tlsEventTable.EVT_EAP_SETUP
            tls._eapStart()
            tls.tlsEventTable.EVT_EAP_START
            tls._eapIdReq()
            tls.tlsEventTable.EVT_EAP_ID_REQ
            log_test.info('Holding tlsHelloReq packet for a period of random delay = {} secs'.format(randomDelay))
            t = Timer(randomDelay, eap_tls_eapTlsHelloReq_pkt_delay)
            t.start()
        reactor.callLater(0, eap_tls_verify, df)
        return df
    @deferred(TEST_TIMEOUT_DELAY+50)
    def test_netCondition_in_eap_tls_with_delay_between_IdReq_and_tlsHelloReq(self):
        """Run EAP-TLS with a random 10-300 s delay inserted between the
        Identity Request phase and the TLS Hello request."""
        self.setUp_tls()
        randomDelay = randint(10,300)
        df = defer.Deferred()
        tls = TLSAuthTest()
        def eap_tls_eapTlsHelloReq_pkt_delay():
            # Fired by the Timer below: resume the handshake after the delay.
            log_test.info('Holding tlsHelloReq packet for a period of random delay = {} secs'.format(randomDelay))
            tls._eapTlsHelloReq()
            tls._eapTlsCertReq()
            tls._eapTlsChangeCipherSpec()
            tls._eapTlsFinished()
            df.callback(0)
        def eap_tls_verify(df):
            # Drive the handshake up to the Identity Request, then arm the delay.
            tls._eapSetup()
            tls.tlsEventTable.EVT_EAP_SETUP
            tls._eapStart()
            tls.tlsEventTable.EVT_EAP_START
            tls._eapIdReq()
            tls.tlsEventTable.EVT_EAP_ID_REQ
            t = Timer(randomDelay, eap_tls_eapTlsHelloReq_pkt_delay)
            t.start()
        reactor.callLater(0, eap_tls_verify, df)
        return df
    @deferred(TEST_TIMEOUT_DELAY+100)
    def test_netCondition_in_eap_tls_with_delay_between_tlsHelloReq_and_eapTlsCertReq(self):
        """Run EAP-TLS with a random 10-300 s delay inserted between the TLS
        Hello request and the client certificate request step."""
        self.setUp_tls()
        randomDelay = randint(10,300)
        df = defer.Deferred()
        tls = TLSAuthTest()
        def eap_tls_eapTlsCertReq_pkt_delay():
            # Fired by the Timer below: resume the handshake after the delay.
            log_test.info('Holding eapTlsCertReq packet for a period of random delay = {} secs'.format(randomDelay))
            tls._eapTlsCertReq_delay()
            tls._eapTlsChangeCipherSpec()
            tls._eapTlsFinished()
            df.callback(0)
        def eap_tls_verify(df):
            # Drive the handshake through the Hello exchange, wait until the
            # server-hello-done is seen, then arm the delayed cert request.
            tls._eapSetup()
            tls.tlsEventTable.EVT_EAP_SETUP
            tls._eapStart()
            tls.tlsEventTable.EVT_EAP_START
            tls._eapIdReq()
            tls.tlsEventTable.EVT_EAP_ID_REQ
            tls._eapTlsHelloReq()
            while tls.server_hello_done_received == False:
                r = tls.eapol_scapy_recv(cb = tls.eapol_server_hello_cb,
                                         lfilter =
                                         lambda pkt: EAP in pkt and pkt[EAP].type == EAP_TYPE_TLS and \
                                             pkt[EAP].code == EAP.REQUEST)
                if len(r) == 0:
                    tls.tlsFail()
            t = Timer(randomDelay, eap_tls_eapTlsCertReq_pkt_delay)
            t.start()
        reactor.callLater(0, eap_tls_verify, df)
        return df
    @deferred(TEST_TIMEOUT_DELAY+50)
    def test_netCondition_in_eap_tls_with_delay_between_TlsCertReq_and_TlsChangeCipherSpec(self):
        """Run EAP-TLS with a random 10-300 s delay inserted between the client
        certificate request and the ChangeCipherSpec step."""
        self.setUp_tls()
        randomDelay = randint(10,300)
        df = defer.Deferred()
        tls = TLSAuthTest()
        def eap_tls_TlsChangeCipherSpec_pkt_delay():
            # Fired by the Timer below: finish the handshake after the delay.
            log_test.info('Holding TlsChangeCipherSpec packet for a period of random delay = {} secs'.format(randomDelay))
            tls._eapTlsChangeCipherSpec()
            tls._eapTlsFinished()
            df.callback(0)
        def eap_tls_verify(df):
            # Drive the handshake through the certificate request, then arm the delay.
            tls._eapSetup()
            tls.tlsEventTable.EVT_EAP_SETUP
            tls._eapStart()
            tls.tlsEventTable.EVT_EAP_START
            tls._eapIdReq()
            tls.tlsEventTable.EVT_EAP_ID_REQ
            tls._eapTlsHelloReq()
            tls._eapTlsCertReq()
            t = Timer(randomDelay, eap_tls_TlsChangeCipherSpec_pkt_delay)
            t.start()
        reactor.callLater(0, eap_tls_verify, df)
        return df
    @deferred(TEST_TIMEOUT_DELAY+50)
    def test_netCondition_in_eap_tls_with_no_cert_and_delay_between_IdReq_and_HelloReq(self):
        """Run EAP-TLS with an empty client certificate and a random 10-300 s
        delay before the Hello request; authentication must fail."""
        self.setUp_tls()
        randomDelay = randint(10,300)
        df = defer.Deferred()
        def tls_no_cert_cb():
            # Invoked by TLSAuthTest when authentication fails, as expected here.
            log_test.info('TLS authentication failed with no certificate')
        tls = TLSAuthTest(fail_cb = tls_no_cert_cb, client_cert = '')
        def eap_tls_eapTlsHelloReq_pkt_delay():
            # Fired by the Timer below: resume and confirm the expected failure.
            log_test.info('Holding HelloReq packet with no cert for a period of random delay = {} secs'.format(randomDelay))
            tls._eapTlsHelloReq()
            tls._eapTlsCertReq()
            assert_equal(tls.failTest, True)
            tls._eapTlsChangeCipherSpec()
            tls._eapTlsFinished()
            df.callback(0)
        def eap_tls_no_cert(df):
            # Drive the handshake up to the Identity Request, then arm the delay.
            tls._eapSetup()
            tls.tlsEventTable.EVT_EAP_SETUP
            tls._eapStart()
            tls.tlsEventTable.EVT_EAP_START
            tls._eapIdReq()
            tls.tlsEventTable.EVT_EAP_ID_REQ
            t = Timer(randomDelay, eap_tls_eapTlsHelloReq_pkt_delay)
            t.start()
        reactor.callLater(0, eap_tls_no_cert, df)
        return df
    @deferred(TEST_TIMEOUT_DELAY+100)
    def test_netCondition_in_eap_tls_with_delay_and_no_cert_between_tlsHelloReq_and_eapTlsCertReq(self):
        """Run EAP-TLS with an empty client certificate and a random 10-300 s
        delay before the certificate request; authentication must fail."""
        self.setUp_tls()
        randomDelay = randint(10,300)
        df = defer.Deferred()
        def tls_no_cert_cb():
            # Invoked by TLSAuthTest when authentication fails, as expected here.
            log_test.info('TLS authentication failed with no certificate')
        tls = TLSAuthTest(fail_cb = tls_no_cert_cb, client_cert = '')
        def eap_tls_eapTlsHelloReq_pkt_delay():
            # Fired by the Timer below: resume and confirm the expected failure.
            log_test.info('Holding eapTlsCertReq packet with no cert for a period of random delay = {} secs'.format(randomDelay))
            tls._eapTlsCertReq_delay()
            assert_equal(tls.failTest, True)
            tls._eapTlsChangeCipherSpec()
            assert_equal(tls.failTest, True)
            tls._eapTlsFinished()
            df.callback(0)
        def eap_tls_no_cert(df):
            # Drive the handshake through the Hello exchange, wait until the
            # server-hello-done is seen, then arm the delayed cert request.
            tls._eapSetup()
            tls.tlsEventTable.EVT_EAP_SETUP
            tls._eapStart()
            tls.tlsEventTable.EVT_EAP_START
            tls._eapIdReq()
            tls.tlsEventTable.EVT_EAP_ID_REQ
            tls._eapTlsHelloReq()
            while tls.server_hello_done_received == False:
                r = tls.eapol_scapy_recv(cb = tls.eapol_server_hello_cb,
                                         lfilter =
                                         lambda pkt: EAP in pkt and pkt[EAP].type == EAP_TYPE_TLS and \
                                             pkt[EAP].code == EAP.REQUEST)
                if len(r) == 0:
                    tls.tlsFail()
            t = Timer(randomDelay, eap_tls_eapTlsHelloReq_pkt_delay)
            t.start()
        reactor.callLater(0, eap_tls_no_cert, df)
        return df
    @deferred(TEST_TIMEOUT_DELAY+50)
    def test_netCondition_in_eap_tls_with_delay_and_no_cert_between_TlsCertReq_and_TlsChangeCipherSpec(self):
        """Run EAP-TLS with an empty client certificate and a random 10-300 s
        delay before ChangeCipherSpec; authentication must fail."""
        self.setUp_tls()
        randomDelay = randint(10,300)
        df = defer.Deferred()
        def tls_no_cert_cb():
            # Invoked by TLSAuthTest when authentication fails, as expected here.
            log_test.info('TLS authentication failed with no certificate')
        tls = TLSAuthTest(fail_cb = tls_no_cert_cb, client_cert = '')
        def eap_tls_TlsChangeCipherSpec_pkt_delay():
            # Fired by the Timer below: finish and confirm the expected failure.
            tls._eapTlsChangeCipherSpec()
            assert_equal(tls.failTest, True)
            tls._eapTlsFinished()
            df.callback(0)
        def eap_tls_no_cert(df):
            # Drive the handshake through the certificate request, then arm the
            # delay. NOTE(review): unlike the sibling tests, there is no
            # tls.tlsEventTable.EVT_EAP_START access after _eapStart() here —
            # possibly an accidental omission; confirm intent.
            tls._eapSetup()
            tls.tlsEventTable.EVT_EAP_SETUP
            tls._eapStart()
            tls._eapIdReq()
            tls.tlsEventTable.EVT_EAP_ID_REQ
            tls._eapTlsHelloReq()
            tls._eapTlsCertReq()
            log_test.info('Holding TlsChangeCipherSpec packet with no cert for a period of random delay = {} secs'.format(randomDelay))
            t = Timer(randomDelay, eap_tls_TlsChangeCipherSpec_pkt_delay)
            t.start()
        reactor.callLater(0, eap_tls_no_cert, df)
        return df
    @deferred(TEST_TIMEOUT_DELAY+50)
    def test_netCondition_in_eap_tls_with_invalid_cert_and_delay_between_IdReq_and_HelloReq(self):
        """Run EAP-TLS with an invalid client certificate and a random 10-300 s
        delay before the Hello request; authentication must fail."""
        self.setUp_tls()
        randomDelay = randint(10,300)
        df = defer.Deferred()
        def tls_invalid_cert_cb():
            # Invoked by TLSAuthTest when authentication fails, as expected here.
            log_test.info('TLS authentication failed with invalid certificate')
        tls = TLSAuthTest(fail_cb = tls_invalid_cert_cb, client_cert = self.CLIENT_CERT_INVALID)
        def eap_tls_eapTlsHelloReq_pkt_delay():
            # Fired by the Timer below: resume and confirm the expected failure.
            tls._eapTlsHelloReq()
            tls._eapTlsCertReq()
            assert_equal(tls.failTest, True)
            tls._eapTlsChangeCipherSpec()
            tls._eapTlsFinished()
            df.callback(0)
        def eap_tls_invalid_cert(df):
            # Drive the handshake up to the Identity Request, then arm the delay.
            tls._eapSetup()
            tls.tlsEventTable.EVT_EAP_SETUP
            tls._eapStart()
            tls.tlsEventTable.EVT_EAP_START
            tls._eapIdReq()
            tls.tlsEventTable.EVT_EAP_ID_REQ
            log_test.info('Holding HelloReq packet with invalid cert for a period of random delay = {} secs'.format(randomDelay))
            t = Timer(randomDelay, eap_tls_eapTlsHelloReq_pkt_delay)
            t.start()
        reactor.callLater(0, eap_tls_invalid_cert, df)
        return df
@deferred(TEST_TIMEOUT_DELAY+100)
def test_netCondition_in_eap_tls_with_invalid_cert_and_delay_between_tlsHelloReq_and_eapTlsCertReq(self):
self.setUp_tls()
randomDelay = randint(10,300)
df = defer.Deferred()
def tls_invalid_cert_cb():
log_test.info('TLS authentication failed with invalid certificate')
tls = TLSAuthTest(fail_cb = tls_invalid_cert_cb, client_cert = self.CLIENT_CERT_INVALID)
def eap_tls_eapTlsHelloReq_pkt_delay():
log_test.info('Holding eapTlsCertReq packet with invalid cert for a | |
<gh_stars>1-10
SAMPLE_PROXY_LIST = [
{
'ip': '192.168.127.12',
'port': '53281',
'country_code': 'BR',
'country': 'Brazil',
'type': 'https'
}, {
'ip': '172.16.58.3',
'port': '8080',
'country_code': 'TH',
'country': 'Thailand',
'type': 'http'
}, {
'ip': '172.16.31.10',
'port': '8080',
'country_code': 'TH',
'country': 'Thailand',
'type': 'http'
}, {
'ip': '192.168.127.12',
'port': '42413',
'country_code': 'US',
'country': 'United States',
'type': 'https'
}, {
'ip': '172.16.17.32',
'port': '42891',
'country_code': 'IQ',
'country': 'Iraq',
'type': 'socks4'
}, {
'ip': '192.168.127.12',
'port': '44550',
'country_code': 'BD',
'country': 'Bangladesh',
'type': 'socks4'
}, {
'ip': '192.168.127.12',
'port': '8080',
'country_code': 'BD',
'country': 'Bangladesh',
'type': 'http'
}, {
'ip': '172.16.17.32',
'port': '31596',
'country_code': 'MX',
'country': 'Mexico',
'type': 'socks4'
}, {
'ip': '172.16.31.10',
'port': '8080',
'country_code': 'TH',
'country': 'Thailand',
'type': 'http'
}, {
'ip': '192.168.127.12',
'port': '8080',
'country_code': 'ID',
'country': 'Indonesia',
'type': 'http'
}, {
'ip': '172.16.58.3',
'port': '4145',
'country_code': 'ID',
'country': 'Indonesia',
'type': 'socks4'
}, {
'ip': '172.16.17.32',
'port': '8080',
'country_code': 'BR',
'country': 'Brazil',
'type': 'http'
}, {
'ip': '192.168.3.11',
'port': '59311',
'country_code': 'IN',
'country': 'India',
'type': 'socks4'
}, {
'ip': '172.16.58.3',
'port': '44550',
'country_code': 'IN',
'country': 'India',
'type': 'socks4'
}, {
'ip': '192.168.3.11',
'port': '59611',
'country_code': 'TH',
'country': 'Thailand',
'type': 'https'
}, {
'ip': '192.168.3.11',
'port': '8080',
'country_code': 'IR',
'country': 'Iran',
'type': 'http'
}, {
'ip': '172.16.58.3',
'port': '49089',
'country_code': 'ID',
'country': 'Indonesia',
'type': 'https'
}, {
'ip': '192.168.127.12',
'port': '8080',
'country_code': 'NG',
'country': 'Nigeria',
'type': 'http'
}, {
'ip': '172.16.17.32',
'port': '33164',
'country_code': 'RU',
'country': 'Russian Federation',
'type': 'socks4'
}, {
'ip': '172.16.58.3',
'port': '9090',
'country_code': 'TR',
'country': 'Turkey',
'type': 'http'
}, {
'ip': '192.168.3.11',
'port': '8291',
'country_code': 'SK',
'country': 'Slovakia',
'type': 'socks4'
}, {
'ip': '172.16.31.10',
'port': '8888',
'country_code': 'RS',
'country': 'Serbia',
'type': 'http'
}, {
'ip': '172.16.17.32',
'port': '57396',
'country_code': 'TH',
'country': 'Thailand',
'type': 'https'
}, {
'ip': '172.16.17.32',
'port': '41258',
'country_code': 'RU',
'country': 'Russian Federation',
'type': 'https'
}, {
'ip': '192.168.3.11',
'port': '8080',
'country_code': 'ID',
'country': 'Indonesia',
'type': 'http'
}, {
'ip': '192.168.3.11',
'port': '8080',
'country_code': 'BD',
'country': 'Bangladesh',
'type': 'http'
}, {
'ip': '172.16.31.10',
'port': '45120',
'country_code': 'RS',
'country': 'Serbia',
'type': 'socks4'
}, {
'ip': '172.16.58.3',
'port': '6667',
'country_code': 'IN',
'country': 'India',
'type': 'socks4'
}, {
'ip': '192.168.127.12',
'port': '30507',
'country_code': 'IN',
'country': 'India',
'type': 'socks4'
}, {
'ip': '192.168.127.12',
'port': '4145',
'country_code': 'IN',
'country': 'India',
'type': 'socks4'
}, {
'ip': '172.16.17.32',
'port': '6666',
'country_code': 'IN',
'country': 'India',
'type': 'https'
}, {
'ip': '172.16.58.3',
'port': '8080',
'country_code': 'IN',
'country': 'India',
'type': 'http'
}, {
'ip': '172.16.17.32',
'port': '36331',
'country_code': 'BD',
'country': 'Bangladesh',
'type': 'socks4'
}, {
'ip': '192.168.127.12',
'port': '44861',
'country_code': 'RU',
'country': 'Russian Federation',
'type': 'https'
}, {
'ip': '172.16.58.3',
'port': '54047',
'country_code': 'BD',
'country': 'Bangladesh',
'type': 'socks4'
}, {
'ip': '172.16.17.32',
'port': '4153',
'country_code': 'IN',
'country': 'India',
'type': 'socks4'
}, {
'ip': '172.16.58.3',
'port': '8080',
'country_code': 'ID',
'country': 'Indonesia',
'type': 'http'
}, {
'ip': '172.16.17.32',
'port': '51327',
'country_code': '',
'country': 'Unknown',
'type': 'socks4'
}, {
'ip': '172.16.17.32',
'port': '50916',
'country_code': '',
'country': 'Unknown',
'type': 'socks4'
}, {
'ip': '172.16.58.3',
'port': '59813',
'country_code': 'ID',
'country': 'Indonesia',
'type': 'https'
}, {
'ip': '192.168.3.11',
'port': '8080',
'country_code': 'NG',
'country': 'Nigeria',
'type': 'http'
}, {
'ip': '192.168.3.11',
'port': '83',
'country_code': 'IN',
'country': 'India',
'type': 'http'
}, {
'ip': '172.16.58.3',
'port': '59086',
'country_code': 'KZ',
'country': 'Kazakhstan',
'type': 'https'
}, {
'ip': '192.168.3.11',
'port': '4145',
'country_code': '',
'country': 'Unknown',
'type': 'socks4'
}, {
'ip': '172.16.58.3',
'port': '8080',
'country_code': 'IN',
'country': 'India',
'type': 'http'
}, {
'ip': '192.168.3.11',
'port': '21231',
'country_code': 'GE',
'country': 'Georgia',
'type': 'https'
}, {
'ip': '172.16.31.10',
'port': '8080',
'country_code': 'BR',
'country': 'Brazil',
'type': 'http'
}, {
'ip': '192.168.127.12',
'port': '4145',
'country_code': '',
'country': 'Unknown',
'type': 'socks4'
}, {
'ip': '172.16.31.10',
'port': '1080',
'country_code': 'SG',
'country': 'Singapore',
'type': 'http'
}, {
'ip': '172.16.17.32',
'port': '32480',
'country_code': 'GN',
'country': 'Guinea',
'type': 'https'
}, {
'ip': '192.168.3.11',
'port': '46174',
'country_code': 'ID',
'country': 'Indonesia',
'type': 'https'
}, {
'ip': '172.16.58.3',
'port': '8080',
'country_code': 'GR',
'country': 'Greece',
'type': 'http'
}, {
'ip': '192.168.3.11',
'port': '80',
'country_code': 'TH',
'country': 'Thailand',
'type': 'http'
}, {
'ip': '172.16.58.3',
'port': '1080',
'country_code': '',
'country': 'Unknown',
'type': 'socks4'
}, {
'ip': '192.168.3.11',
'port': '4145',
'country_code': '',
'country': 'Unknown',
'type': 'socks4'
}, {
'ip': '172.16.58.3',
'port': '8080',
'country_code': 'BD',
'country': 'Bangladesh',
'type': 'http'
}, {
'ip': '172.16.31.10',
'port': '9050',
'country_code': '',
'country': 'Unknown',
'type': 'socks4'
}, {
'ip': '192.168.3.11',
'port': '8080',
'country_code': 'ID',
'country': 'Indonesia',
'type': 'http'
}, {
'ip': '172.16.17.32',
'port': '8080',
'country_code': 'IN',
'country': 'India',
'type': 'http'
}, {
'ip': '192.168.127.12',
'port': '60596',
'country_code': 'TR',
'country': 'Turkey',
'type': 'https'
}, {
'ip': '172.16.58.3',
'port': '15294',
'country_code': '',
'country': 'Unknown',
'type': 'socks4'
}, {
'ip': '192.168.127.12',
'port': '47034',
'country_code': 'RU',
'country': 'Russian Federation',
'type': 'https'
}, {
'ip': '192.168.127.12',
'port': '18656',
'country_code': '',
'country': 'Unknown',
'type': 'socks4'
}, {
'ip': '192.168.3.11',
'port': '40304',
'country_code': '',
'country': 'Unknown',
'type': 'socks4'
}, {
'ip': '172.16.31.10',
'port': '8080',
'country_code': 'PK',
'country': 'Pakistan',
'type': 'http'
}, {
'ip': '192.168.127.12',
'port': '37764',
'country_code': '',
'country': 'Unknown',
'type': 'socks4'
}, {
'ip': '172.16.58.3',
'port': '3128',
'country_code': 'ID',
'country': 'Indonesia',
'type': 'http'
}, {
'ip': '192.168.3.11',
'port': '80',
'country_code': 'HK',
'country': 'Hong Kong',
'type': 'http'
}, {
'ip': '172.16.17.32',
'port': '8080',
'country_code': 'BR',
'country': 'Brazil',
'type': 'https'
}, {
'ip': '192.168.3.11',
'port': '80',
'country_code': 'IR',
'country': 'Iran',
'type': 'http'
}, {
'ip': '172.16.17.32',
'port': '4145',
'country_code': '',
'country': 'Unknown',
'type': 'socks4'
}, {
'ip': '192.168.3.11',
'port': '53281',
'country_code': 'RU',
'country': 'Russian Federation',
'type': 'https'
}, {
'ip': '192.168.127.12',
'port': '1080',
'country_code': '',
'country': 'Unknown',
'type': 'socks4'
}, {
'ip': '172.16.17.32',
'port': '80',
'country_code': 'JP',
'country': 'Japan',
'type': 'http'
}, {
'ip': '192.168.127.12',
'port': '57928',
'country_code': 'UA',
'country': 'Ukraine',
'type': 'socks4'
}, {
'ip': '172.16.58.3',
'port': '4145',
'country_code': 'SY',
'country': 'Syrian Arab Republic',
'type': 'socks4'
}, {
'ip': '172.16.17.32',
'port': '61755',
'country_code': 'IN',
'country': 'India',
'type': 'https'
}, {
'ip': '172.16.58.3',
'port': '8080',
'country_code': 'DE',
'country': 'Germany',
'type': 'http'
}, {
'ip': '172.16.58.3',
'port': '4145',
'country_code': 'HK',
'country': 'Hong Kong',
'type': 'socks4'
}, {
'ip': '172.16.17.32',
'port': '41258',
'country_code': 'RU',
'country': 'Russian Federation',
'type': 'http'
}, {
'ip': '172.16.31.10',
'port': '1080',
'country_code': 'IN',
'country': 'India',
'type': 'socks4'
}, {
'ip': '172.16.58.3',
'port': '59299',
'country_code': 'IN',
'country': 'India',
'type': 'socks4'
}, {
'ip': '172.16.31.10',
'port': '8080',
'country_code': 'ID',
'country': 'Indonesia',
'type': 'http'
}, {
'ip': '172.16.31.10',
'port': '33994',
'country_code': 'RU',
'country': 'Russian Federation',
'type': 'https'
}, {
'ip': '192.168.127.12',
'port': '4145',
'country_code': 'IN',
'country': 'India',
'type': 'socks4'
}, {
'ip': '172.16.17.32',
'port': '46439',
'country_code': 'US',
'country': 'United States',
'type': 'https'
}, {
'ip': '192.168.3.11',
'port': '23500',
'country_code': 'BY',
'country': 'Belarus',
'type': 'http'
}, {
'ip': '192.168.127.12',
'port': '33896',
'country_code': 'KZ',
'country': 'Kazakhstan',
'type': 'socks4'
}, {
'ip': '172.16.58.3',
'port': '9991',
'country_code': 'CO',
'country': 'Colombia',
'type': 'https'
}, {
'ip': '192.168.3.11',
'port': '48740',
'country_code': 'TR',
'country': 'Turkey',
'type': 'socks4'
}, {
'ip': '192.168.3.11',
'port': '31045',
'country_code': 'DE',
'country': 'Germany',
'type': 'https'
}, {
'ip': '172.16.17.32',
'port': '61918',
'country_code': 'RU',
'country': 'Russian Federation',
'type': 'http'
}, {
'ip': '172.16.17.32',
'port': '60051',
'country_code': 'CM',
'country': 'Cameroon',
'type': 'http'
}, {
'ip': '172.16.17.32',
'port': '47810',
'country_code': 'BD',
'country': 'Bangladesh',
'type': 'http'
}, {
'ip': '172.16.17.32',
'port': '23500',
'country_code': 'MZ',
'country': 'Mozambique',
'type': 'http'
}, {
'ip': '192.168.3.11',
'port': '8081',
'country_code': 'BR',
'country': 'Brazil',
'type': 'http'
}, {
'ip': '192.168.3.11',
'port': '38324',
'country_code': 'IN',
'country': 'India',
'type': 'https'
}, {
'ip': '192.168.127.12',
'port': '8080',
'country_code': 'TR',
'country': 'Turkey',
'type': 'http'
}, {
'ip': '192.168.127.12',
'port': '31777',
'country_code': 'IN',
'country': 'India',
'type': 'http'
}, {
'ip': '172.16.58.3',
'port': '4145',
'country_code': 'PL',
'country': 'Poland',
'type': 'socks4'
}, {
'ip': '172.16.17.32',
'port': '4145',
'country_code': 'BR',
'country': 'Brazil',
'type': 'socks4'
}, {
'ip': '192.168.3.11',
'port': '40080',
'country_code': 'VN',
'country': 'Vietnam',
'type': 'https'
}, {
'ip': '192.168.3.11',
'port': '3128',
'country_code': 'VN',
'country': 'Vietnam',
'type': 'https'
}, {
'ip': '192.168.127.12',
'port': '60815',
'country_code': 'MN',
'country': 'Mongolia',
'type': 'https'
}, {
'ip': '192.168.127.12',
'port': '52581',
'country_code': 'BD',
'country': 'Bangladesh',
'type': 'https'
}, {
'ip': '172.16.17.32',
| |
<gh_stars>0
import cqc.pythonLib as cqc
from simulaqron.settings import simulaqron_settings
from simulaqron.network import Network as SimulaNetwork
from qunetsim.backends.rw_lock import RWLock
from qunetsim.backends.safe_dict import SafeDict
from qunetsim.objects.qubit import Qubit
class CQCBackend(object):
"""
The SimulaQron CQC backend
"""
    class Hosts(SafeDict):
        """Process-wide singleton registry mapping host ids to Host objects."""
        # There only should be one instance of Hosts
        __instance = None
        @staticmethod
        def get_instance():
            # Lazily create the singleton on first access.
            if CQCBackend.Hosts.__instance is not None:
                return CQCBackend.Hosts.__instance
            else:
                return CQCBackend.Hosts()
        def __init__(self):
            # Direct construction is only legal once; afterwards callers
            # must use get_instance().
            if CQCBackend.Hosts.__instance is not None:
                raise Exception("Call get instance to get this class!")
            CQCBackend.Hosts.__instance = self
            SafeDict.__init__(self)
    class CQCConnections(SafeDict):
        """Singleton registry mapping host ids to their cqc.CQCConnection."""
        # There only should be one instance of Hosts
        __instance = None
        @staticmethod
        def get_instance():
            # Lazily create the singleton on first access.
            if CQCBackend.CQCConnections.__instance is not None:
                return CQCBackend.CQCConnections.__instance
            else:
                return CQCBackend.CQCConnections()
        def __init__(self):
            # Direct construction is only legal once; afterwards callers
            # must use get_instance().
            if CQCBackend.CQCConnections.__instance is not None:
                raise Exception("Call get instance to get this class!")
            CQCBackend.CQCConnections.__instance = self
            SafeDict.__init__(self)
    class EntanglementIDs(SafeDict):
        """Singleton registry of pending EPR qubit ids, keyed 'sender:receiver'."""
        # There only should be one instance of Hosts
        __instance = None
        @staticmethod
        def get_instance():
            # Lazily create the singleton on first access.
            if CQCBackend.EntanglementIDs.__instance is not None:
                return CQCBackend.EntanglementIDs.__instance
            else:
                return CQCBackend.EntanglementIDs()
        def __init__(self):
            # Direct construction is only legal once; afterwards callers
            # must use get_instance().
            if CQCBackend.EntanglementIDs.__instance is not None:
                raise Exception("Call get instance to get this class!")
            CQCBackend.EntanglementIDs.__instance = self
            SafeDict.__init__(self)
# SimulaQron comes with an own network simulator
# has to be kept in sync with QuNetSim network
backend_network = None
backend_network_lock = RWLock()
    def __init__(self):
        # All registries are process-wide singletons, so every CQCBackend
        # instance shares the same hosts/connections/entanglement state.
        self._hosts = CQCBackend.Hosts.get_instance()
        self._cqc_connections = CQCBackend.CQCConnections.get_instance()
        # keys are from : to, where from is the host calling create EPR
        # NOTE(review): attribute name is misspelled ("entaglement"); kept
        # as-is because the other methods reference it by this exact name.
        self._entaglement_ids = CQCBackend.EntanglementIDs.get_instance()
        self._stopped = False
def start(self, **kwargs):
"""
Starts Backends which have to run in an own thread or process before they
can be used.
Args:
nodes(List): A list of hosts in the network.
"""
print('Starting SimulaQron Network...')
nodes = kwargs['nodes']
CQCBackend.backend_network_lock.acquire_write()
simulaqron_settings.default_settings()
CQCBackend.backend_network = SimulaNetwork(nodes=nodes, force=True)
CQCBackend.backend_network.start()
CQCBackend.backend_network_lock.release_write()
def stop(self):
"""
Stops Backends which are running in an own thread or process.
"""
if not self._stopped:
CQCBackend.backend_network_lock.acquire_write()
CQCBackend.backend_network.stop()
self._stopped = True
CQCBackend.backend_network_lock.release_write()
    def add_host(self, host):
        """
        Adds a host to the backend.

        Opens a CQC connection for the host eagerly and registers both the
        connection and the host in the shared singleton registries.

        Args:
            host (Host): New Host which should be added.
        """
        connection = cqc.CQCConnection(host.host_id)
        self._cqc_connections.add_to_dict(host.host_id, connection)
        self._hosts.add_to_dict(host.host_id, host)
    def create_qubit(self, host_id):
        """
        Creates a new Qubit of the type of the backend.

        Args:
            host_id (String): Id of the host to whom the qubit belongs.

        Returns:
            Qubit of backend type. Note: this is the raw ``cqc.qubit``
            object, not the QuNetSim ``Qubit`` wrapper.
        """
        return cqc.qubit(self._cqc_connections.get_from_dict(host_id))
    def send_qubit_to(self, qubit, from_host_id, to_host_id):
        """
        Sends a qubit to a new host.

        Transfers the backend qubit over CQC and rebinds the wrapper's
        ``qubit`` and ``host`` attributes to the receiving side.

        Args:
            qubit (Qubit): Qubit to be sent.
            from_host_id (String): From the starting host.
            to_host_id (String): New host of the qubit.
        """
        cqc_from_host = self._cqc_connections.get_from_dict(from_host_id)
        cqc_to_host = self._cqc_connections.get_from_dict(to_host_id)
        # Send on the sender's connection, then pick the qubit up on the
        # receiver's connection and rebind the wrapper in place.
        cqc_from_host.sendQubit(qubit.qubit, cqc_to_host.name)
        qubit.qubit = cqc_to_host.recvQubit()
        qubit.host = self._hosts.get_from_dict(to_host_id)
def create_EPR(self, host_a_id, host_b_id, q_id=None, block=False):
"""
Creates an EPR pair for two qubits and returns one of the qubits.
Args:
host_a_id (String): ID of the first host who gets the EPR state.
host_b_id (String): ID of the second host who gets the EPR state.
q_id (String): Optional id which both qubits should have.
block (bool): Determines if the created pair should be blocked or not.
Returns:
Returns a qubit. The qubit belongs to host a. To get the second
qubit of host b, the receive_epr function has to be called.
"""
cqc_host_a = self._cqc_connections.get_from_dict(host_a_id)
cqc_host_b = self._cqc_connections.get_from_dict(host_b_id)
host_a = self._hosts.get_from_dict(host_a_id)
q = cqc_host_a.createEPR(cqc_host_b.name)
qubit = Qubit(host_a, qubit=q, q_id=q_id, blocked=block)
# add the ID to a list, so the next returned qubit from recv EPR
# gets assigned the right id
self.store_ent_id(cqc_host_a, cqc_host_b, qubit)
return qubit
def store_ent_id(self, cqc_host_a, cqc_host_b, qubit):
key = cqc_host_a.name + ':' + cqc_host_b.name
ent_list = self._entaglement_ids.get_from_dict(key)
if ent_list is not None:
ent_list.append(qubit.id)
else:
ent_list = [qubit.id]
self._entaglement_ids.add_to_dict(key, ent_list)
def receive_epr(self, host_id, sender_id, q_id=None, block=False):
"""
Called after create EPR in the receiver, to receive the other EPR pair.
Args:
host_id (String): ID of the first host who gets the EPR state.
sender_id (String): ID of the sender of the EPR pair.
q_id (String): Optional id which both qubits should have.
block (bool): Determines if the created pair should be blocked or not.
Returns:
Returns an EPR qubit with the other Host.
"""
cqc_host = self._cqc_connections.get_from_dict(host_id)
host = self._hosts.get_from_dict(host_id)
q = cqc_host.recvEPR()
key = sender_id + ':' + cqc_host.name
ent_list = self._entaglement_ids.get_from_dict(key)
if ent_list is None:
raise Exception("Internal Error!")
id = None
id = ent_list.pop(0)
if q_id is not None and q_id != id:
raise ValueError("q_id doesn't match id!")
self._entaglement_ids.add_to_dict(key, ent_list)
return Qubit(host, qubit=q, q_id=id, blocked=block)
    def flush(self, host_id):
        """
        CQC specific function.

        Flushes the pending command buffer of the CQC connection that
        belongs to ``host_id``.
        """
        self._cqc_connections.get_from_dict(host_id).flush()
##########################
# Gate definitions #
#########################
    def I(self, qubit):
        """
        Perform the Identity gate on a qubit.

        Args:
            qubit (Qubit): Qubit on which the gate should be applied.
        """
        qubit.qubit.I()
    def X(self, qubit):
        """
        Perform the Pauli X gate on a qubit.

        Args:
            qubit (Qubit): Qubit on which the gate should be applied.
        """
        qubit.qubit.X()
    def Y(self, qubit):
        """
        Perform the Pauli Y gate on a qubit.

        Args:
            qubit (Qubit): Qubit on which the gate should be applied.
        """
        qubit.qubit.Y()
    def Z(self, qubit):
        """
        Perform the Pauli Z gate on a qubit.

        Args:
            qubit (Qubit): Qubit on which the gate should be applied.
        """
        qubit.qubit.Z()
    def H(self, qubit):
        """
        Perform the Hadamard gate on a qubit.

        Args:
            qubit (Qubit): Qubit on which the gate should be applied.
        """
        qubit.qubit.H()
    def T(self, qubit):
        """
        Perform the T gate on a qubit.

        Args:
            qubit (Qubit): Qubit on which the gate should be applied.
        """
        qubit.qubit.T()
    def K(self, qubit):
        """
        Perform the K gate on a qubit.

        Args:
            qubit (Qubit): Qubit on which the gate should be applied.
        """
        qubit.qubit.K()
def rx(self, qubit, steps):
"""
Perform a rotation pauli x gate with an angle of phi.
Args:
qubit (Qubit): Qubit on which gate should be applied to.
steps (int): Amount of rotation in Rad.
"""
# convert to cqc unit
if steps < 0:
steps = 256 + steps
qubit.qubit.rot_X(steps)
def ry(self, qubit, steps):
"""
Perform a rotation pauli y gate with an angle of phi.
Args:
qubit (Qubit): Qubit on which gate should be applied to.
steps (int): Amount of rotation in Rad.
"""
# convert to cqc unit
# steps = phi * 256.0 / (2.0 * np.pi)
if steps < 0:
steps = 256 + steps
qubit.qubit.rot_Y(steps)
    def rz(self, qubit, steps):
        """
        Perform a rotation pauli z gate with an angle of phi.

        Args:
            qubit (Qubit): Qubit on which gate should be applied to.
            steps (int): Amount of rotation in CQC steps
                (steps = phi * 256.0 / (2.0 * np.pi)).
        """
        # convert to cqc unit
        # steps = phi * 256.0 / (2.0 * np.pi)
        if steps < 0:
            steps = 256 + steps
        qubit.qubit.rot_Z(steps)
    def cnot(self, qubit, target):
        """
        Applies a controlled x gate to the target qubit.

        Args:
            qubit (Qubit): Qubit to control cnot (control qubit).
            target (Qubit): Qubit on which the cnot gate should be applied.
        """
        qubit.qubit.cnot(target.qubit)
    def cphase(self, qubit, target):
        """
        Applies a controlled z gate to the target qubit.

        Args:
            qubit (Qubit): Qubit to control cphase (control qubit).
            target (Qubit): Qubit on which the cphase gate should be applied.
        """
        qubit.qubit.cphase(target.qubit)
def custom_gate(self, qubit, gate):
"""
Applies a custom gate to the qubit.
Args:
qubit(Qubit): Qubit to which the gate is applied.
gate(np.ndarray): 2x2 array of the gate.
"""
raise (EnvironmentError("Not implemented for this backend!"))
def custom_controlled_gate(self, qubit, target, gate):
"""
Applies a custom gate to the target qubit, controlled by the qubit.
Args:
qubit(Qubit): Qubit to control the gate.
target(Qubit): Qubit on which the gate is applied.
gate(nd.array): 2x2 array for the gate applied to target.
"""
raise (EnvironmentError("Not implemented for this backend!"))
def custom_two_qubit_gate(self, qubit1, qubit2, gate):
"""
Applies a custom two qubit gate to qubit1 \\otimes qubit2.
Args:
qubit1(Qubit): First qubit of the gate.
qubit2(Qubit): Second qubit of the gate.
gate(np.ndarray): 4x4 array for the gate applied.
"""
raise (EnvironmentError("Not implemented for this backend!"))
def custom_controlled_two_qubit_gate(self, qubit, target_1, target_2, gate):
"""
Applies a custom gate to the target qubit, controlled by the qubit.
Args:
qubit (Qubit): Qubit to control the gate.
target_1 (Qubit): Qubit on which the gate is applied.
target_2 (Qubit): Qubit on which the gate is applied.
gate (nd.array): 4x4 array for the | |
coincides with the schedule the first
execution_date will be start_date, otherwise it will be start_date +
interval.
"""
dag = DAG(
dag_id='test_scheduler_auto_align_1',
start_date=timezone.datetime(2016, 1, 1, 10, 10, 0),
schedule_interval="4 5 * * *"
)
DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
dr = dag_file_processor.create_dag_run(dag)
self.assertIsNotNone(dr)
self.assertEqual(dr.execution_date, timezone.datetime(2016, 1, 2, 5, 4))
dag = DAG(
dag_id='test_scheduler_auto_align_2',
start_date=timezone.datetime(2016, 1, 1, 10, 10, 0),
schedule_interval="10 10 * * *"
)
DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
dag.clear()
dr = dag_file_processor.create_dag_run(dag)
self.assertIsNotNone(dr)
self.assertEqual(dr.execution_date, timezone.datetime(2016, 1, 1, 10, 10))
    def test_process_dags_not_create_dagrun_for_subdags(self):
        # The scheduler must only create runs for the parent DAG; subdag
        # runs are created elsewhere (by the operator), not by _process_dags.
        dag = self.dagbag.get_dag('test_subdag_operator')
        scheduler = DagFileProcessor(dag_ids=[dag.dag_id], log=mock.MagicMock())
        # Stub out the per-run work; only DagRun creation matters here.
        scheduler._process_task_instances = mock.MagicMock()
        scheduler.manage_slas = mock.MagicMock()
        scheduler._process_dags([dag] + dag.subdags)
        with create_session() as session:
            sub_dagruns = (
                session.query(DagRun).filter(DagRun.dag_id == dag.subdags[0].dag_id).count()
            )
            self.assertEqual(0, sub_dagruns)
            parent_dagruns = (
                session.query(DagRun).filter(DagRun.dag_id == dag.dag_id).count()
            )
            self.assertGreater(parent_dagruns, 0)
    @patch.object(TaskInstance, 'handle_failure')
    def test_execute_on_failure_callbacks(self, mock_ti_handle_failure):
        # A FailureCallbackRequest handed to execute_on_failure_callbacks
        # must be routed to TaskInstance.handle_failure with its message.
        dagbag = DagBag(dag_folder="/dev/null", include_examples=True)
        dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
        with create_session() as session:
            session.query(TaskInstance).delete()
            dag = dagbag.get_dag('example_branch_operator')
            task = dag.get_task(task_id='run_this_first')
            # Persist a running TI so the callback has something to act on.
            ti = TaskInstance(task, DEFAULT_DATE, State.RUNNING)
            session.add(ti)
            session.commit()
            requests = [
                FailureCallbackRequest(
                    full_filepath="A",
                    simple_task_instance=SimpleTaskInstance(ti),
                    msg="Message"
                )
            ]
            dag_file_processor.execute_on_failure_callbacks(dagbag, requests)
        mock_ti_handle_failure.assert_called_once_with(
            "Message",
            conf.getboolean('core', 'unit_test_mode'),
            mock.ANY
        )
    def test_process_file_should_failure_callback(self):
        # End-to-end: process_file with a failure callback request must run
        # the DAG's on_failure_callback (which writes to the callback file).
        dag_file = os.path.join(
            os.path.dirname(os.path.realpath(__file__)), '../dags/test_on_failure_callback.py'
        )
        dagbag = DagBag(dag_folder=dag_file, include_examples=False)
        dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
        with create_session() as session, NamedTemporaryFile(delete=False) as callback_file:
            session.query(TaskInstance).delete()
            dag = dagbag.get_dag('test_om_failure_callback_dag')
            task = dag.get_task(task_id='test_om_failure_callback_task')
            ti = TaskInstance(task, DEFAULT_DATE, State.RUNNING)
            session.add(ti)
            session.commit()
            requests = [
                FailureCallbackRequest(
                    full_filepath=dag.full_filepath,
                    simple_task_instance=SimpleTaskInstance(ti),
                    msg="Message"
                )
            ]
            # Close so the callback (in the DAG file) can reopen it for writing.
            callback_file.close()
            with mock.patch.dict("os.environ", {"AIRFLOW_CALLBACK_FILE": callback_file.name}):
                dag_file_processor.process_file(dag_file, requests)
            with open(callback_file.name) as callback_file2:
                content = callback_file2.read()
            self.assertEqual("Callback fired", content)
            os.remove(callback_file.name)
    def test_should_parse_only_unpaused_dags(self):
        # When one of two DAGs in a file is paused, process_file must only
        # return and schedule the unpaused one.
        dag_file = os.path.join(
            os.path.dirname(os.path.realpath(__file__)), '../dags/test_multiple_dags.py'
        )
        dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
        dagbag = DagBag(dag_folder=dag_file, include_examples=False)
        dagbag.sync_to_db()
        with create_session() as session:
            session.query(TaskInstance).delete()
            # Pause dag_1 directly in the DB.
            (
                session.query(DagModel)
                .filter(DagModel.dag_id == "test_multiple_dags__dag_1")
                .update({DagModel.is_paused: True}, synchronize_session=False)
            )
        simple_dags, import_errors_count = dag_file_processor.process_file(
            file_path=dag_file, failure_callback_requests=[]
        )
        with create_session() as session:
            tis = session.query(TaskInstance).all()
        self.assertEqual(0, import_errors_count)
        self.assertEqual(['test_multiple_dags__dag_2'], [dag.dag_id for dag in simple_dags])
        self.assertEqual({'test_multiple_dags__dag_2'}, {ti.dag_id for ti in tis})
def test_should_mark_dummy_task_as_success(self):
dag_file = os.path.join(
os.path.dirname(os.path.realpath(__file__)), '../dags/test_only_dummy_tasks.py'
)
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
with create_session() as session:
session.query(TaskInstance).delete()
session.query(DagModel).delete()
dagbag = DagBag(dag_folder=dag_file, include_examples=False)
dagbag.sync_to_db()
simple_dags, import_errors_count = dag_file_processor.process_file(
file_path=dag_file, failure_callback_requests=[]
)
with create_session() as session:
tis = session.query(TaskInstance).all()
self.assertEqual(0, import_errors_count)
self.assertEqual(['test_only_dummy_tasks'], [dag.dag_id for dag in simple_dags])
self.assertEqual(5, len(tis))
self.assertEqual({
('test_task_a', 'success'),
('test_task_b', None),
('test_task_c', 'success'),
('test_task_on_execute', 'scheduled'),
('test_task_on_success', 'scheduled'),
}, {(ti.task_id, ti.state) for ti in tis})
for state, start_date, end_date, duration in [(ti.state, ti.start_date, ti.end_date, ti.duration) for
ti in tis]:
if state == 'success':
self.assertIsNotNone(start_date)
self.assertIsNotNone(end_date)
self.assertEqual(0.0, duration)
else:
self.assertIsNone(start_date)
self.assertIsNone(end_date)
self.assertIsNone(duration)
dag_file_processor.process_file(
file_path=dag_file, failure_callback_requests=[]
)
with create_session() as session:
tis = session.query(TaskInstance).all()
self.assertEqual(5, len(tis))
self.assertEqual({
('test_task_a', 'success'),
('test_task_b', 'success'),
('test_task_c', 'success'),
('test_task_on_execute', 'scheduled'),
('test_task_on_success', 'scheduled'),
}, {(ti.task_id, ti.state) for ti in tis})
for state, start_date, end_date, duration in [(ti.state, ti.start_date, ti.end_date, ti.duration) for
ti in tis]:
if state == 'success':
self.assertIsNotNone(start_date)
self.assertIsNotNone(end_date)
self.assertEqual(0.0, duration)
else:
self.assertIsNone(start_date)
self.assertIsNone(end_date)
self.assertIsNone(duration)
@pytest.mark.quarantined
class TestDagFileProcessorQueriesCount(unittest.TestCase):
    """
    These tests are designed to detect changes in the number of queries for different DAG files.
    Each test has saved queries count in the table/spreadsheets. If you make a change that affected the number
    of queries, please update the tables.
    These tests allow easy detection when a change is made that affects the performance of the
    DagFileProcessor.
    """
    def setUp(self) -> None:
        # Clean DB so query counts are not skewed by leftover state.
        clear_db_runs()
        clear_db_pools()
        clear_db_dags()
        clear_db_sla_miss()
        clear_db_errors()
    @parameterized.expand(
        [
            # pylint: disable=bad-whitespace
            # expected, dag_count, task_count, start_ago, schedule_interval, shape
            # One DAG with one task per DAG file
            ([ 1, 1, 1, 1], 1, 1, "1d", "None", "no_structure"), # noqa
            ([ 1, 1, 1, 1], 1, 1, "1d", "None", "linear"), # noqa
            ([ 9, 5, 5, 5], 1, 1, "1d", "@once", "no_structure"), # noqa
            ([ 9, 5, 5, 5], 1, 1, "1d", "@once", "linear"), # noqa
            ([ 9, 12, 15, 18], 1, 1, "1d", "30m", "no_structure"), # noqa
            ([ 9, 12, 15, 18], 1, 1, "1d", "30m", "linear"), # noqa
            ([ 9, 12, 15, 18], 1, 1, "1d", "30m", "binary_tree"), # noqa
            ([ 9, 12, 15, 18], 1, 1, "1d", "30m", "star"), # noqa
            ([ 9, 12, 15, 18], 1, 1, "1d", "30m", "grid"), # noqa
            # One DAG with five tasks per DAG file
            ([ 1, 1, 1, 1], 1, 5, "1d", "None", "no_structure"), # noqa
            ([ 1, 1, 1, 1], 1, 5, "1d", "None", "linear"), # noqa
            ([ 9, 5, 5, 5], 1, 5, "1d", "@once", "no_structure"), # noqa
            ([10, 6, 6, 6], 1, 5, "1d", "@once", "linear"), # noqa
            ([ 9, 12, 15, 18], 1, 5, "1d", "30m", "no_structure"), # noqa
            ([10, 14, 18, 22], 1, 5, "1d", "30m", "linear"), # noqa
            ([10, 14, 18, 22], 1, 5, "1d", "30m", "binary_tree"), # noqa
            ([10, 14, 18, 22], 1, 5, "1d", "30m", "star"), # noqa
            ([10, 14, 18, 22], 1, 5, "1d", "30m", "grid"), # noqa
            # 10 DAGs with 10 tasks per DAG file
            ([ 1, 1, 1, 1], 10, 10, "1d", "None", "no_structure"), # noqa
            ([ 1, 1, 1, 1], 10, 10, "1d", "None", "linear"), # noqa
            ([81, 41, 41, 41], 10, 10, "1d", "@once", "no_structure"), # noqa
            ([91, 51, 51, 51], 10, 10, "1d", "@once", "linear"), # noqa
            ([81, 111, 111, 111], 10, 10, "1d", "30m", "no_structure"), # noqa
            ([91, 131, 131, 131], 10, 10, "1d", "30m", "linear"), # noqa
            ([91, 131, 131, 131], 10, 10, "1d", "30m", "binary_tree"), # noqa
            ([91, 131, 131, 131], 10, 10, "1d", "30m", "star"), # noqa
            ([91, 131, 131, 131], 10, 10, "1d", "30m", "grid"), # noqa
            # pylint: enable=bad-whitespace
        ]
    )
    def test_process_dags_queries_count(
        self, expected_query_counts, dag_count, task_count, start_ago, schedule_interval, shape
    ):
        # The PERF_* env vars parameterize the elastic performance DAG file;
        # _process_dags is run once per expected count to cover repeated
        # scheduler passes (query counts can differ after the first pass).
        with mock.patch.dict("os.environ", {
            "PERF_DAGS_COUNT": str(dag_count),
            "PERF_TASKS_COUNT": str(task_count),
            "PERF_START_AGO": start_ago,
            "PERF_SCHEDULE_INTERVAL": schedule_interval,
            "PERF_SHAPE": shape,
        }), conf_vars({
            ('scheduler', 'use_job_schedule'): 'True',
        }):
            dagbag = DagBag(dag_folder=ELASTIC_DAG_FILE, include_examples=False)
            processor = DagFileProcessor([], mock.MagicMock())
            for expected_query_count in expected_query_counts:
                with assert_queries_count(expected_query_count):
                    processor._process_dags(dagbag.dags.values())
    @parameterized.expand(
        [
            # pylint: disable=bad-whitespace
            # expected, dag_count, task_count, start_ago, schedule_interval, shape
            # One DAG with two tasks per DAG file
            ([ 5, 5, 5, 5], 1, 1, "1d", "None", "no_structure"), # noqa
            ([ 5, 5, 5, 5], 1, 1, "1d", "None", "linear"), # noqa
            ([15, 9, 9, 9], 1, 1, "1d", "@once", "no_structure"), # noqa
            ([15, 9, 9, 9], 1, 1, "1d", "@once", "linear"), # noqa
            ([15, 18, 21, 24], 1, 1, "1d", "30m", "no_structure"), # noqa
            ([15, 18, 21, 24], 1, 1, "1d", "30m", "linear"), # noqa
            # One DAG with five tasks per DAG file
            ([ 5, 5, 5, 5], 1, 5, "1d", "None", "no_structure"), # noqa
            ([ 5, 5, 5, 5], 1, 5, "1d", "None", "linear"), # noqa
            ([15, 9, 9, 9], 1, 5, "1d", "@once", "no_structure"), # noqa
            ([16, 10, 10, 10], 1, 5, "1d", "@once", "linear"), # noqa
            ([15, 18, 21, 24], 1, 5, "1d", "30m", "no_structure"), # noqa
            ([16, 20, 24, 28], 1, 5, "1d", "30m", "linear"), # noqa
            # 10 DAGs with 10 tasks per DAG file
            ([ 5, 5, 5, 5], 10, 10, "1d", "None", "no_structure"), # noqa
            ([ 5, 5, 5, 5], 10, 10, "1d", "None", "linear"), # noqa
            ([87, 45, 45, 45], 10, 10, "1d", "@once", "no_structure"), # noqa
            ([97, 55, 55, 55], 10, 10, "1d", "@once", "linear"), # noqa
            ([87, 117, 117, 117], 10, 10, "1d", "30m", "no_structure"), # noqa
            ([97, 137, 137, 137], 10, 10, "1d", "30m", "linear"), # noqa
            # pylint: enable=bad-whitespace
        ]
    )
    def test_process_file_queries_count(
        self, expected_query_counts, dag_count, task_count, start_ago, schedule_interval, shape
    ):
        # Same setup as above, but measures the full process_file() path
        # (parsing included) instead of only _process_dags.
        with mock.patch.dict("os.environ", {
            "PERF_DAGS_COUNT": str(dag_count),
            "PERF_TASKS_COUNT": str(task_count),
            "PERF_START_AGO": start_ago,
            "PERF_SCHEDULE_INTERVAL": schedule_interval,
            "PERF_SHAPE": shape,
        }), conf_vars({
            ('scheduler', 'use_job_schedule'): 'True'
        }):
            processor = DagFileProcessor([], mock.MagicMock())
            for expected_query_count in expected_query_counts:
                with assert_queries_count(expected_query_count):
                    processor.process_file(ELASTIC_DAG_FILE, [])
@pytest.mark.usefixtures("disable_load_example")
class TestSchedulerJob(unittest.TestCase):
    def setUp(self):
        # Start each test from a clean metadata DB.
        clear_db_runs()
        clear_db_pools()
        clear_db_dags()
        clear_db_sla_miss()
        clear_db_errors()
        # Speed up some tests by not running the tasks, just look at what we
        # enqueue!
        self.null_exec = MockExecutor()
    @classmethod
    @patch("airflow.models.dagbag.settings.STORE_SERIALIZED_DAGS", True)
    def setUpClass(cls):
        # Ensure the DAGs we are looking at from the DB are up-to-date
        non_serialized_dagbag = DagBag(store_serialized_dags=False, include_examples=False)
        non_serialized_dagbag.sync_to_db()
        # Tests read DAGs back through their serialized representation.
        cls.dagbag = DagBag(store_serialized_dags=True)
def test_is_alive(self):
job = | |
<reponame>adrn/astrobase
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''rfclass.py - <NAME> (<EMAIL>) - Dec 2017
License: MIT. See the LICENSE file for more details.
Does variable classification using random forests. Two types of classification
are supported:
- Variable classification using non-periodic features: this is used to perform a
binary classification between non-variable and variable. Uses the features in
varclass/features.py and varclass/starfeatures.py.
- Periodic variable classification using periodic features: this is used to
perform multi-class classification for periodic variables using the features
in varclass/periodicfeatures.py and varclass/starfeatures.py. The classes
recognized are listed in PERIODIC_VARCLASSES below and were generated from
manual classification run on various HATNet, HATSouth and HATPI fields.
'''
#############
## LOGGING ##
#############
import logging
from datetime import datetime
from traceback import format_exc
# setup a logger
LOGGER = None      # set by set_logger_parent(); LOG* functions fall back to print()
LOGMOD = __name__  # logger-name suffix for this module
DEBUG = False      # when True, LOGDEBUG prints even without a LOGGER
def set_logger_parent(parent_name):
    '''Attach this module's logger beneath *parent_name* in the logger tree.'''
    global LOGGER
    LOGGER = logging.getLogger('%s.%s' % (parent_name, LOGMOD))
def LOGDEBUG(message):
    '''Log *message* at DEBUG level, or print it when DEBUG is set.'''
    if LOGGER:
        LOGGER.debug(message)
        return
    if DEBUG:
        stamp = datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ')
        print('[%s - DBUG] %s' % (stamp, message))
def LOGINFO(message):
    '''Log *message* at INFO level, falling back to a timestamped print.'''
    if LOGGER:
        LOGGER.info(message)
        return
    stamp = datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ')
    print('[%s - INFO] %s' % (stamp, message))
def LOGERROR(message):
    '''Log *message* at ERROR level, falling back to a timestamped print.'''
    if LOGGER:
        LOGGER.error(message)
        return
    stamp = datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ')
    print('[%s - ERR!] %s' % (stamp, message))
def LOGWARNING(message):
    '''Log *message* at WARNING level, falling back to a timestamped print.'''
    if LOGGER:
        LOGGER.warning(message)
        return
    stamp = datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ')
    print('[%s - WRN!] %s' % (stamp, message))
def LOGEXCEPTION(message):
    '''Log *message* plus the active exception's traceback.'''
    if LOGGER:
        LOGGER.exception(message)
        return
    stamp = datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ')
    print('[%s - EXC!] %s\nexception was: %s' % (stamp, message, format_exc()))
#############
## IMPORTS ##
#############
from time import time as unixtime
import glob
import os.path
import os
import shutil
import itertools
try:
import cPickle as pickle
except:
import pickle
try:
from tqdm import tqdm
TQDM = True
except:
TQDM = False
pass
import numpy as np
import numpy.random as npr
# seed the numpy random generator
# we'll use RANDSEED for scipy.stats distribution functions as well
RANDSEED = 0xdecaff  # fixed seed so train/test splits and CV are reproducible
npr.seed(RANDSEED)
from scipy.stats import randint as sp_randint
# scikit imports
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import KFold, StratifiedKFold, RandomizedSearchCV
from sklearn.model_selection import train_test_split
from operator import itemgetter
from sklearn.metrics import r2_score, median_absolute_error, \
precision_score, recall_score, confusion_matrix, f1_score
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
#######################
## UTILITY FUNCTIONS ##
#######################
# Utility function to report best scores
# modified from a snippet taken from:
# http://scikit-learn.org/stable/auto_examples/model_selection/plot_randomized_search.html
def gridsearch_report(results, n_top=3):
    '''Log the top *n_top* ranked models from a RandomizedSearchCV
    ``cv_results_``-style dict (rank, mean/std score, parameters).'''
    for rank in range(1, n_top + 1):
        for idx in np.flatnonzero(results['rank_test_score'] == rank):
            LOGINFO("Model with rank: {0}".format(rank))
            LOGINFO("Mean validation score: {0:.3f} (std: {1:.3f})".format(
                results['mean_test_score'][idx],
                results['std_test_score'][idx]))
            LOGINFO("Parameters: {0}".format(results['params'][idx]))
###################################
## NON-PERIODIC VAR FEATURE LIST ##
###################################
# default set of non-periodic features pulled from each varfeatures pickle
NONPERIODIC_FEATURES_TO_COLLECT = [
    # variability indices
    'stetsonj',
    'stetsonk',
    'amplitude',
    'magnitude_ratio',
    'linear_fit_slope',
    'eta_normal',
    'percentile_difference_flux_percentile',
    # magnitude-distribution shape
    'mad',
    'skew',
    'kurtosis',
    'mag_iqr',
    'beyond1std',
    # colors
    'grcolor',
    'gicolor',
    'ricolor',
    'bvcolor',
    'jhcolor',
    'jkcolor',
    'hkcolor',
    'gkcolor',
    # astrometry
    'propermotion',
]
########################
## FEATURE COLLECTION ##
########################
def collect_features(
        featuresdir,
        magcol,
        outfile,
        pklglob='varfeatures-*.pkl',
        featurestouse=NONPERIODIC_FEATURES_TO_COLLECT,
        maxobjects=None,
        labeldict=None,
        labeltype='binary',
):
    '''This collects variability features into arrays.
    featuresdir is the directory where all the varfeatures pickles are. Use
    pklglob to specify the glob to search for. varfeatures pickles contain
    objectids, a light curve magcol, and features as dict key-vals. The lcproc
    module can be used to produce these.
    magcol is the light curve magnitude col key to use when looking inside each
    varfeatures pickle.
    Each varfeature pickle can contain any combination of non-periodic, stellar,
    and periodic features; these must have the same names as elements in the
    list of strings provided in featurestouse. This tries to get all the
    features listed in NONPERIODIC_FEATURES_TO_COLLECT by default. If
    featurestouse is not None, gets only the features listed in this kwarg
    instead.
    maxobjects controls how many pickles to process.
    If labeldict is not None, it must be a dict with the following key:val
    list:
    '<objectid>':<label value>
    for each objectid collected from the varfeatures pickles. This will turn the
    collected information into a training set for classifiers.
    Example: to carry out non-periodic variable feature collection of fake LCS
    prepared by fakelcs.generation, use the value of the 'isvariable' dict elem
    from fakelcinfo.pkl here, like so:
    labeldict={x:y for x,y in zip(fakelcinfo['objectid'],
                                  fakelcinfo['isvariable'])}
    labeltype is either 'binary' or 'classes' for binary/multi-class
    classification respectively.

    Returns the feature dict (also pickled to outfile), containing the
    per-feature arrays, 'features_array', and optionally 'labels_array'.
    '''
    # list of input pickles generated by varfeatures in lcproc.py
    pklist = glob.glob(os.path.join(featuresdir, pklglob))
    if maxobjects:
        pklist = pklist[:maxobjects]
    # fancy progress bar with tqdm if present
    if TQDM:
        listiterator = tqdm(pklist)
    else:
        listiterator = pklist
    # go through all the varfeatures arrays
    feature_dict = {'objectids':[],'magcol':magcol, 'availablefeatures':[]}
    LOGINFO('collecting features for magcol: %s' % magcol)
    for pkl in listiterator:
        with open(pkl,'rb') as infd:
            varf = pickle.load(infd)
        # update the objectid list
        objectid = varf['objectid']
        if objectid not in feature_dict['objectids']:
            feature_dict['objectids'].append(objectid)
        thisfeatures = varf[magcol]
        if featurestouse and len(featurestouse) > 0:
            featurestoget = featurestouse
        else:
            featurestoget = NONPERIODIC_FEATURES_TO_COLLECT
        # collect all the features for this magcol/objectid combination
        for feature in featurestoget:
            # update the global feature list if necessary
            if ((feature not in feature_dict['availablefeatures']) and
                (feature in thisfeatures)):
                feature_dict['availablefeatures'].append(feature)
                feature_dict[feature] = []
            # NOTE(review): if a feature is present in some pickles but
            # missing in others, the per-feature lists can end up with
            # different lengths and column_stack below will misalign or
            # fail — confirm all pickles share the same feature set.
            if feature in thisfeatures:
                feature_dict[feature].append(
                    thisfeatures[feature]
                )
    # now that we've collected all the objects and their features, turn the list
    # into arrays, and then concatenate them
    for feat in feature_dict['availablefeatures']:
        feature_dict[feat] = np.array(feature_dict[feat])
    feature_dict['objectids'] = np.array(feature_dict['objectids'])
    # one column per available feature, one row per object
    feature_array = np.column_stack([feature_dict[feat] for feat in
                                     feature_dict['availablefeatures']])
    feature_dict['features_array'] = feature_array
    # if there's a labeldict available, use it to generate a label array. this
    # feature collection is now a training set.
    if isinstance(labeldict, dict):
        labelarray = np.zeros(feature_dict['objectids'].size, dtype=np.int64)
        # populate the labels for each object in the training set
        for ind, objectid in enumerate(feature_dict['objectids']):
            if objectid in labeldict:
                # if this is a binary classifier training set, convert bools to
                # ones and zeros
                if labeltype == 'binary':
                    if labeldict[objectid]:
                        labelarray[ind] = 1
                # otherwise, use the actual class label integer
                elif labeltype == 'classes':
                    labelarray[ind] = labeldict[objectid]
        feature_dict['labels_array'] = labelarray
    # record the collection settings alongside the data for provenance
    feature_dict['kwargs'] = {'pklglob':pklglob,
                              'featurestouse':featurestouse,
                              'maxobjects':maxobjects,
                              'labeltype':labeltype}
    # write the info to the output pickle
    with open(outfile,'wb') as outfd:
        pickle.dump(feature_dict, outfd, pickle.HIGHEST_PROTOCOL)
    # return the feature_dict
    return feature_dict
#################################
## TRAINING THE RF CLASSIFIERS ##
#################################
def train_rf_classifier(
        collected_features,
        test_fraction=0.25,
        n_crossval_iterations=20,
        n_kfolds=5,
        crossval_scoring_metric='f1',
        classifier_to_pickle=None,
        nworkers=-1,
):
    '''Train a random forest and keep the best model found by cross-validation.

    Steps:

    - split the training set into test/train samples (stratified so both
      keep the same class fractions)
    - run RandomizedSearchCV with stratified k-fold CV over the
      hyperparameter space below
    - keep the random forest with the best CV performance
    - evaluate that forest (recall/precision/F1/confusion matrix) on the
      held-out test sample

    `collected_features` is either the dict produced by collect_features (or
    a function producing a similar dict) or the path to a pickle of it.

    By default, this is tuned for binary classification. Change the
    crossval_scoring_metric to another metric (probably 'accuracy') for
    multi-class classification, e.g. for periodic variable classification.
    See the link below to specify the scoring parameter (this can either be
    a string or an actual scorer object):

    http://scikit-learn.org/stable/modules/model_evaluation.html#scoring-parameter

    Returns a dict containing the best classifier, its hyperparameters, the
    train/test samples, and its test-set scores; the same dict is also
    written to `classifier_to_pickle` if that is given.
    '''
    # accept either a pickle path or an already-loaded feature dict
    if isinstance(collected_features, str) and os.path.exists(collected_features):
        with open(collected_features, 'rb') as infd:
            fdict = pickle.load(infd)
    elif isinstance(collected_features, dict):
        fdict = collected_features
    else:
        LOGERROR("can't figure out the input collected_features arg")
        return None

    features = fdict['features_array']
    labels = fdict['labels_array']

    # stratified split keeps the variable/nonvariable fractions equal in
    # both the training and testing samples
    train_x, test_x, train_y, test_y = train_test_split(
        features,
        labels,
        test_size=test_fraction,
        random_state=RANDSEED,
        stratify=labels
    )

    # the estimator whose hyperparameters will be searched over
    forest = RandomForestClassifier(n_jobs=nworkers, random_state=RANDSEED)

    # hyperparameter search space; sp_randint entries are sampled anew for
    # each search iteration
    search_space = {
        "max_depth": [3, 4, 5, None],
        "n_estimators": sp_randint(100, 2000),
        "max_features": sp_randint(1, 5),
        "min_samples_split": sp_randint(2, 11),
        "min_samples_leaf": sp_randint(2, 11),
    }

    # randomized hyperparameter search with stratified k-fold CV on the
    # training sample only
    searcher = RandomizedSearchCV(
        forest,
        param_distributions=search_space,
        n_iter=n_crossval_iterations,
        scoring=crossval_scoring_metric,
        cv=StratifiedKFold(n_splits=n_kfolds,
                           shuffle=True,
                           random_state=RANDSEED),
        random_state=RANDSEED
    )

    LOGINFO('running grid-search CV to optimize RF hyperparameters...')
    search_result = searcher.fit(train_x, train_y)

    # log a summary of the classifiers' CV performance
    gridsearch_report(search_result.cv_results_)

    # winning model after CV
    best_forest = search_result.best_estimator_

    # evaluate the winning model on the held-out test sample
    predicted_y = best_forest.predict(test_x)

    outdict = {
        'features': features,
        'labels': labels,
        'feature_names': fdict['availablefeatures'],
        'magcol': fdict['magcol'],
        'objectids': fdict['objectids'],
        'kwargs': {'test_fraction': test_fraction,
                   'n_crossval_iterations': n_crossval_iterations,
                   'n_kfolds': n_kfolds,
                   'crossval_scoring_metric': crossval_scoring_metric,
                   'nworkers': nworkers},
        'collect_kwargs': fdict['kwargs'],
        'testing_features': test_x,
        'testing_labels': test_y,
        'training_features': train_x,
        'training_labels': train_y,
        'best_classifier': best_forest,
        'best_score': search_result.best_score_,
        'best_hyperparams': search_result.best_params_,
        'best_recall': recall_score(test_y, predicted_y),
        'best_precision': precision_score(test_y, predicted_y),
        'best_f1': f1_score(test_y, predicted_y),
        'best_confmatrix': confusion_matrix(test_y, predicted_y),
    }

    # persist the classifier, its train/test sets, and its stats if requested
    if classifier_to_pickle:
        with open(classifier_to_pickle, 'wb') as outfd:
            pickle.dump(outdict, outfd, pickle.HIGHEST_PROTOCOL)

    # return this classifier and accompanying info
    return outdict
def apply_rf_classifier(classifier,
varfeaturesdir,
outpickle,
maxobjects=None):
'''This applys an RF classifier trained using train_rf_classifier
to pickles in varfeaturesdir.
classifier is the output dict or pickle from get_rf_classifier. This will
contain | |
self.nodeInfo = nodeInfo
self.action = action
self.time_stamp = time_stamp
  def read(self, iprot):
    # Deserialize this struct from the given input protocol.
    # Fast path: use the C-accelerated binary decoder when the protocol,
    # transport, field spec, and fastbinary module all support it.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Slow path: generic field-by-field decoding keyed on the field id.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRUCT:
          self.nodeInfo = NodeInfo()
          self.nodeInfo.read(iprot)
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.I32:
          self.action = iprot.readI32()
        else:
          iprot.skip(ftype)
      elif fid == 3:
        if ftype == TType.I64:
          self.time_stamp = iprot.readI64()
        else:
          iprot.skip(ftype)
      else:
        # unknown field id: skip for forward compatibility
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    # Serialize this struct to the given output protocol.
    # Fast path: C-accelerated binary encoding when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('ProfileRequest')
    # fields are only emitted when set (None fields are omitted entirely)
    if self.nodeInfo is not None:
      oprot.writeFieldBegin('nodeInfo', TType.STRUCT, 1)
      self.nodeInfo.write(oprot)
      oprot.writeFieldEnd()
    if self.action is not None:
      oprot.writeFieldBegin('action', TType.I32, 2)
      oprot.writeI32(self.action)
      oprot.writeFieldEnd()
    if self.time_stamp is not None:
      oprot.writeFieldBegin('time_stamp', TType.I64, 3)
      oprot.writeI64(self.time_stamp)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    # Raise if any required field is unset; time_stamp is optional.
    if self.nodeInfo is None:
      raise TProtocol.TProtocolException(message='Required field nodeInfo is unset!')
    if self.action is None:
      raise TProtocol.TProtocolException(message='Required field action is unset!')
    return
  def __hash__(self):
    # Generated hash combining all fields with the 17/31 scheme.
    # NOTE(review): instances are mutable, so the hash changes if fields
    # change — do not mutate while the struct is in a dict/set.
    value = 17
    value = (value * 31) ^ hash(self.nodeInfo)
    value = (value * 31) ^ hash(self.action)
    value = (value * 31) ^ hash(self.time_stamp)
    return value
  def __repr__(self):
    # Render as ClassName(field=value, ...).
    # Python 2 only: dict.iteritems() does not exist in Python 3.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    # Structural equality: same class and all fields equal.
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    # Python 2 requires __ne__ to be defined explicitly alongside __eq__.
    return not (self == other)
class GetInfoOptions:
  """
  Thrift-generated options struct for a get-info request.

  Attributes:
   - num_err_choice: I32 selector for how many errors to include
  """
  # Field spec used by the optional C-accelerated fastbinary codec;
  # the tuple index corresponds to the thrift field id.
  thrift_spec = (
    None, # 0
    (1, TType.I32, 'num_err_choice', None, None, ), # 1
  )
  def __init__(self, num_err_choice=None,):
    self.num_err_choice = num_err_choice
  def read(self, iprot):
    # Fast path: C-accelerated binary decoding when supported.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Slow path: generic field-by-field decoding.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.I32:
          self.num_err_choice = iprot.readI32()
        else:
          iprot.skip(ftype)
      else:
        # unknown field id: skip for forward compatibility
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    # Fast path: C-accelerated binary encoding when supported.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('GetInfoOptions')
    if self.num_err_choice is not None:
      oprot.writeFieldBegin('num_err_choice', TType.I32, 1)
      oprot.writeI32(self.num_err_choice)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    # No required fields.
    return
  def __hash__(self):
    # NOTE(review): mutable struct; hash changes if fields change.
    value = 17
    value = (value * 31) ^ hash(self.num_err_choice)
    return value
  def __repr__(self):
    # Python 2 only: dict.iteritems() does not exist in Python 3.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class LogLevel:
  """
  Thrift-generated struct describing a (possibly temporary) log-level change.

  Attributes:
   - action: I32 enum selecting what to do with the level (required)
   - target_log_level: level to set
   - reset_log_level_timeout_secs: seconds before reverting the change
   - reset_log_level_timeout_epoch: absolute epoch at which to revert
   - reset_log_level: level to revert to after the timeout
  """
  # Field spec used by the optional C-accelerated fastbinary codec;
  # the tuple index corresponds to the thrift field id.
  thrift_spec = (
    None, # 0
    (1, TType.I32, 'action', None, None, ), # 1
    (2, TType.STRING, 'target_log_level', None, None, ), # 2
    (3, TType.I32, 'reset_log_level_timeout_secs', None, None, ), # 3
    (4, TType.I64, 'reset_log_level_timeout_epoch', None, None, ), # 4
    (5, TType.STRING, 'reset_log_level', None, None, ), # 5
  )
  def __init__(self, action=None, target_log_level=None, reset_log_level_timeout_secs=None, reset_log_level_timeout_epoch=None, reset_log_level=None,):
    self.action = action
    self.target_log_level = target_log_level
    self.reset_log_level_timeout_secs = reset_log_level_timeout_secs
    self.reset_log_level_timeout_epoch = reset_log_level_timeout_epoch
    self.reset_log_level = reset_log_level
  def read(self, iprot):
    # Fast path: C-accelerated binary decoding when supported.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Slow path: generic field-by-field decoding keyed on the field id.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.I32:
          self.action = iprot.readI32()
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRING:
          self.target_log_level = iprot.readString().decode('utf-8')
        else:
          iprot.skip(ftype)
      elif fid == 3:
        if ftype == TType.I32:
          self.reset_log_level_timeout_secs = iprot.readI32()
        else:
          iprot.skip(ftype)
      elif fid == 4:
        if ftype == TType.I64:
          self.reset_log_level_timeout_epoch = iprot.readI64()
        else:
          iprot.skip(ftype)
      elif fid == 5:
        if ftype == TType.STRING:
          self.reset_log_level = iprot.readString().decode('utf-8')
        else:
          iprot.skip(ftype)
      else:
        # unknown field id: skip for forward compatibility
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    # Fast path: C-accelerated binary encoding when supported.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('LogLevel')
    # fields are only emitted when set (None fields are omitted entirely)
    if self.action is not None:
      oprot.writeFieldBegin('action', TType.I32, 1)
      oprot.writeI32(self.action)
      oprot.writeFieldEnd()
    if self.target_log_level is not None:
      oprot.writeFieldBegin('target_log_level', TType.STRING, 2)
      oprot.writeString(self.target_log_level.encode('utf-8'))
      oprot.writeFieldEnd()
    if self.reset_log_level_timeout_secs is not None:
      oprot.writeFieldBegin('reset_log_level_timeout_secs', TType.I32, 3)
      oprot.writeI32(self.reset_log_level_timeout_secs)
      oprot.writeFieldEnd()
    if self.reset_log_level_timeout_epoch is not None:
      oprot.writeFieldBegin('reset_log_level_timeout_epoch', TType.I64, 4)
      oprot.writeI64(self.reset_log_level_timeout_epoch)
      oprot.writeFieldEnd()
    if self.reset_log_level is not None:
      oprot.writeFieldBegin('reset_log_level', TType.STRING, 5)
      oprot.writeString(self.reset_log_level.encode('utf-8'))
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    # Only 'action' is required; all other fields are optional.
    if self.action is None:
      raise TProtocol.TProtocolException(message='Required field action is unset!')
    return
  def __hash__(self):
    # NOTE(review): mutable struct; hash changes if fields change.
    value = 17
    value = (value * 31) ^ hash(self.action)
    value = (value * 31) ^ hash(self.target_log_level)
    value = (value * 31) ^ hash(self.reset_log_level_timeout_secs)
    value = (value * 31) ^ hash(self.reset_log_level_timeout_epoch)
    value = (value * 31) ^ hash(self.reset_log_level)
    return value
  def __repr__(self):
    # Python 2 only: dict.iteritems() does not exist in Python 3.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class LogConfig:
  """
  Thrift-generated struct holding per-logger LogLevel settings.

  Attributes:
   - named_logger_level: map of logger name -> LogLevel
  """
  # Field spec used by the optional C-accelerated fastbinary codec;
  # the tuple index corresponds to the thrift field id (id 1 is unused).
  thrift_spec = (
    None, # 0
    None, # 1
    (2, TType.MAP, 'named_logger_level', (TType.STRING,None,TType.STRUCT,(LogLevel, LogLevel.thrift_spec)), None, ), # 2
  )
  def __init__(self, named_logger_level=None,):
    self.named_logger_level = named_logger_level
  def read(self, iprot):
    # Fast path: C-accelerated binary decoding when supported.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Slow path: generic decoding; the map field is read element by element.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 2:
        if ftype == TType.MAP:
          self.named_logger_level = {}
          (_ktype630, _vtype631, _size629 ) = iprot.readMapBegin()
          for _i633 in xrange(_size629):
            _key634 = iprot.readString().decode('utf-8')
            _val635 = LogLevel()
            _val635.read(iprot)
            self.named_logger_level[_key634] = _val635
          iprot.readMapEnd()
        else:
          iprot.skip(ftype)
      else:
        # unknown field id: skip for forward compatibility
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    # Fast path: C-accelerated binary encoding when supported.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('LogConfig')
    if self.named_logger_level is not None:
      oprot.writeFieldBegin('named_logger_level', TType.MAP, 2)
      oprot.writeMapBegin(TType.STRING, TType.STRUCT, len(self.named_logger_level))
      for kiter636,viter637 in self.named_logger_level.items():
        oprot.writeString(kiter636.encode('utf-8'))
        viter637.write(oprot)
      oprot.writeMapEnd()
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    # No required fields.
    return
  def __hash__(self):
    # NOTE(review): hashing a dict field raises TypeError when it is set;
    # generated code, kept as-is.
    value = 17
    value = (value * 31) ^ hash(self.named_logger_level)
    return value
  def __repr__(self):
    # Python 2 only: dict.iteritems() does not exist in Python 3.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class TopologyHistoryInfo:
  """
  Thrift-generated struct carrying a list of historical topology ids.

  Attributes:
   - topo_ids: list of topology id strings
  """
  # Field spec used by the optional C-accelerated fastbinary codec;
  # the tuple index corresponds to the thrift field id.
  thrift_spec = (
    None, # 0
    (1, TType.LIST, 'topo_ids', (TType.STRING,None), None, ), # 1
  )
  def __init__(self, topo_ids=None,):
    self.topo_ids = topo_ids
  def read(self, iprot):
    # Fast path: C-accelerated binary decoding when supported.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Slow path: generic decoding; the list field is read element by element.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.LIST:
          self.topo_ids = []
          (_etype641, _size638) = iprot.readListBegin()
          for _i642 in xrange(_size638):
            _elem643 = iprot.readString().decode('utf-8')
            self.topo_ids.append(_elem643)
          iprot.readListEnd()
        else:
          iprot.skip(ftype)
      else:
        # unknown field id: skip for forward compatibility
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    # Fast path: C-accelerated binary encoding when supported.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('TopologyHistoryInfo')
    if self.topo_ids is not None:
      oprot.writeFieldBegin('topo_ids', TType.LIST, 1)
      oprot.writeListBegin(TType.STRING, len(self.topo_ids))
      for iter644 in self.topo_ids:
        oprot.writeString(iter644.encode('utf-8'))
      oprot.writeListEnd()
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    # No required fields.
    return
  def __hash__(self):
    # NOTE(review): hashing a list field raises TypeError when it is set;
    # generated code, kept as-is.
    value = 17
    value = (value * 31) ^ hash(self.topo_ids)
    return value
  def __repr__(self):
    # Python 2 only: dict.iteritems() does not exist in Python 3.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class DRPCRequest:
"""
Attributes:
- func_args
- request_id
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'func_args', None, None, ), # 1
(2, TType.STRING, 'request_id', None, None, ), # 2
)
  def __init__(self, func_args=None, request_id=None,):
    # func_args: serialized arguments for the DRPC function call
    # request_id: id used to correlate the result with this request
    self.func_args = func_args
    self.request_id = request_id
  def read(self, iprot):
    # Deserialize this struct from the given input protocol.
    # Fast path: C-accelerated binary decoding when supported.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Slow path: generic field-by-field decoding keyed on the field id.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.func_args = iprot.readString().decode('utf-8')
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRING:
          self.request_id = iprot.readString().decode('utf-8')
        else:
          iprot.skip(ftype)
      else:
        # unknown field id: skip for forward compatibility
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, | |
str
"""
return self._auth_realm
@auth_realm.setter
def auth_realm(self, auth_realm):
"""Sets the auth_realm of this CreateLdapServerXo.
The SASL realm to bind to. Required if authScheme is CRAM_MD5 or DIGEST_MD5 # noqa: E501
:param auth_realm: The auth_realm of this CreateLdapServerXo. # noqa: E501
:type: str
"""
self._auth_realm = auth_realm
@property
def auth_username(self):
"""Gets the auth_username of this CreateLdapServerXo. # noqa: E501
This must be a fully qualified username if simple authentication is used. Required if authScheme other than none. # noqa: E501
:return: The auth_username of this CreateLdapServerXo. # noqa: E501
:rtype: str
"""
return self._auth_username
@auth_username.setter
def auth_username(self, auth_username):
"""Sets the auth_username of this CreateLdapServerXo.
This must be a fully qualified username if simple authentication is used. Required if authScheme other than none. # noqa: E501
:param auth_username: The auth_username of this CreateLdapServerXo. # noqa: E501
:type: str
"""
self._auth_username = auth_username
@property
def connection_timeout_seconds(self):
"""Gets the connection_timeout_seconds of this CreateLdapServerXo. # noqa: E501
How long to wait before timeout # noqa: E501
:return: The connection_timeout_seconds of this CreateLdapServerXo. # noqa: E501
:rtype: int
"""
return self._connection_timeout_seconds
@connection_timeout_seconds.setter
def connection_timeout_seconds(self, connection_timeout_seconds):
"""Sets the connection_timeout_seconds of this CreateLdapServerXo.
How long to wait before timeout # noqa: E501
:param connection_timeout_seconds: The connection_timeout_seconds of this CreateLdapServerXo. # noqa: E501
:type: int
"""
if self.local_vars_configuration.client_side_validation and connection_timeout_seconds is None: # noqa: E501
raise ValueError("Invalid value for `connection_timeout_seconds`, must not be `None`") # noqa: E501
if (self.local_vars_configuration.client_side_validation and
connection_timeout_seconds is not None and connection_timeout_seconds > 3600): # noqa: E501
raise ValueError("Invalid value for `connection_timeout_seconds`, must be a value less than or equal to `3600`") # noqa: E501
if (self.local_vars_configuration.client_side_validation and
connection_timeout_seconds is not None and connection_timeout_seconds < 1): # noqa: E501
raise ValueError("Invalid value for `connection_timeout_seconds`, must be a value greater than or equal to `1`") # noqa: E501
self._connection_timeout_seconds = connection_timeout_seconds
@property
def connection_retry_delay_seconds(self):
"""Gets the connection_retry_delay_seconds of this CreateLdapServerXo. # noqa: E501
How long to wait before retrying # noqa: E501
:return: The connection_retry_delay_seconds of this CreateLdapServerXo. # noqa: E501
:rtype: int
"""
return self._connection_retry_delay_seconds
@connection_retry_delay_seconds.setter
def connection_retry_delay_seconds(self, connection_retry_delay_seconds):
"""Sets the connection_retry_delay_seconds of this CreateLdapServerXo.
How long to wait before retrying # noqa: E501
:param connection_retry_delay_seconds: The connection_retry_delay_seconds of this CreateLdapServerXo. # noqa: E501
:type: int
"""
if self.local_vars_configuration.client_side_validation and connection_retry_delay_seconds is None: # noqa: E501
raise ValueError("Invalid value for `connection_retry_delay_seconds`, must not be `None`") # noqa: E501
if (self.local_vars_configuration.client_side_validation and
connection_retry_delay_seconds is not None and connection_retry_delay_seconds < 0): # noqa: E501
raise ValueError("Invalid value for `connection_retry_delay_seconds`, must be a value greater than or equal to `0`") # noqa: E501
self._connection_retry_delay_seconds = connection_retry_delay_seconds
@property
def max_incidents_count(self):
"""Gets the max_incidents_count of this CreateLdapServerXo. # noqa: E501
How many retry attempts # noqa: E501
:return: The max_incidents_count of this CreateLdapServerXo. # noqa: E501
:rtype: int
"""
return self._max_incidents_count
@max_incidents_count.setter
def max_incidents_count(self, max_incidents_count):
"""Sets the max_incidents_count of this CreateLdapServerXo.
How many retry attempts # noqa: E501
:param max_incidents_count: The max_incidents_count of this CreateLdapServerXo. # noqa: E501
:type: int
"""
if self.local_vars_configuration.client_side_validation and max_incidents_count is None: # noqa: E501
raise ValueError("Invalid value for `max_incidents_count`, must not be `None`") # noqa: E501
if (self.local_vars_configuration.client_side_validation and
max_incidents_count is not None and max_incidents_count < 0): # noqa: E501
raise ValueError("Invalid value for `max_incidents_count`, must be a value greater than or equal to `0`") # noqa: E501
self._max_incidents_count = max_incidents_count
@property
def user_base_dn(self):
"""Gets the user_base_dn of this CreateLdapServerXo. # noqa: E501
The relative DN where user objects are found (e.g. ou=people). This value will have the Search base DN value appended to form the full User search base DN. # noqa: E501
:return: The user_base_dn of this CreateLdapServerXo. # noqa: E501
:rtype: str
"""
return self._user_base_dn
@user_base_dn.setter
def user_base_dn(self, user_base_dn):
"""Sets the user_base_dn of this CreateLdapServerXo.
The relative DN where user objects are found (e.g. ou=people). This value will have the Search base DN value appended to form the full User search base DN. # noqa: E501
:param user_base_dn: The user_base_dn of this CreateLdapServerXo. # noqa: E501
:type: str
"""
self._user_base_dn = user_base_dn
@property
def user_subtree(self):
"""Gets the user_subtree of this CreateLdapServerXo. # noqa: E501
Are users located in structures below the user base DN? # noqa: E501
:return: The user_subtree of this CreateLdapServerXo. # noqa: E501
:rtype: bool
"""
return self._user_subtree
@user_subtree.setter
def user_subtree(self, user_subtree):
"""Sets the user_subtree of this CreateLdapServerXo.
Are users located in structures below the user base DN? # noqa: E501
:param user_subtree: The user_subtree of this CreateLdapServerXo. # noqa: E501
:type: bool
"""
self._user_subtree = user_subtree
@property
def user_object_class(self):
"""Gets the user_object_class of this CreateLdapServerXo. # noqa: E501
LDAP class for user objects # noqa: E501
:return: The user_object_class of this CreateLdapServerXo. # noqa: E501
:rtype: str
"""
return self._user_object_class
@user_object_class.setter
def user_object_class(self, user_object_class):
"""Sets the user_object_class of this CreateLdapServerXo.
LDAP class for user objects # noqa: E501
:param user_object_class: The user_object_class of this CreateLdapServerXo. # noqa: E501
:type: str
"""
self._user_object_class = user_object_class
@property
def user_ldap_filter(self):
"""Gets the user_ldap_filter of this CreateLdapServerXo. # noqa: E501
LDAP search filter to limit user search # noqa: E501
:return: The user_ldap_filter of this CreateLdapServerXo. # noqa: E501
:rtype: str
"""
return self._user_ldap_filter
@user_ldap_filter.setter
def user_ldap_filter(self, user_ldap_filter):
"""Sets the user_ldap_filter of this CreateLdapServerXo.
LDAP search filter to limit user search # noqa: E501
:param user_ldap_filter: The user_ldap_filter of this CreateLdapServerXo. # noqa: E501
:type: str
"""
self._user_ldap_filter = user_ldap_filter
@property
def user_id_attribute(self):
"""Gets the user_id_attribute of this CreateLdapServerXo. # noqa: E501
This is used to find a user given its user ID # noqa: E501
:return: The user_id_attribute of this CreateLdapServerXo. # noqa: E501
:rtype: str
"""
return self._user_id_attribute
@user_id_attribute.setter
def user_id_attribute(self, user_id_attribute):
"""Sets the user_id_attribute of this CreateLdapServerXo.
This is used to find a user given its user ID # noqa: E501
:param user_id_attribute: The user_id_attribute of this CreateLdapServerXo. # noqa: E501
:type: str
"""
self._user_id_attribute = user_id_attribute
@property
def user_real_name_attribute(self):
"""Gets the user_real_name_attribute of this CreateLdapServerXo. # noqa: E501
This is used to find a real name given the user ID # noqa: E501
:return: The user_real_name_attribute of this CreateLdapServerXo. # noqa: E501
:rtype: str
"""
return self._user_real_name_attribute
@user_real_name_attribute.setter
def user_real_name_attribute(self, user_real_name_attribute):
"""Sets the user_real_name_attribute of this CreateLdapServerXo.
This is used to find a real name given the user ID # noqa: E501
:param user_real_name_attribute: The user_real_name_attribute of this CreateLdapServerXo. # noqa: E501
:type: str
"""
self._user_real_name_attribute = user_real_name_attribute
@property
def user_email_address_attribute(self):
"""Gets the user_email_address_attribute of this CreateLdapServerXo. # noqa: E501
This is used to find an email address given the user ID # noqa: E501
:return: The user_email_address_attribute of this CreateLdapServerXo. # noqa: E501
:rtype: str
"""
return self._user_email_address_attribute
@user_email_address_attribute.setter
def user_email_address_attribute(self, user_email_address_attribute):
"""Sets the user_email_address_attribute of this CreateLdapServerXo.
This is used to find an email address given the user ID # noqa: E501
:param user_email_address_attribute: The user_email_address_attribute of this CreateLdapServerXo. # noqa: E501
:type: str
"""
self._user_email_address_attribute = user_email_address_attribute
@property
def user_password_attribute(self):
"""Gets the user_password_attribute of this CreateLdapServerXo. # noqa: E501
If this field is blank the user will be authenticated against a bind with the LDAP server # noqa: E501
:return: The user_password_attribute of this CreateLdapServerXo. # noqa: E501
:rtype: str
"""
return self._user_password_attribute
@user_password_attribute.setter
def user_password_attribute(self, user_password_attribute):
"""Sets the user_password_attribute of this CreateLdapServerXo.
If this field is blank the user will be authenticated against a bind with the LDAP server # noqa: E501
:param user_password_attribute: The user_password_attribute of this CreateLdapServerXo. # noqa: E501
:type: str
"""
self._user_password_attribute = user_password_attribute
@property
def ldap_groups_as_roles(self):
"""Gets the ldap_groups_as_roles of this CreateLdapServerXo. # noqa: E501
Denotes whether LDAP assigned roles are used as NXRM roles # noqa: E501
:return: The ldap_groups_as_roles of this CreateLdapServerXo. # noqa: E501
:rtype: bool
"""
return self._ldap_groups_as_roles
@ldap_groups_as_roles.setter
def ldap_groups_as_roles(self, ldap_groups_as_roles):
"""Sets the ldap_groups_as_roles of this CreateLdapServerXo.
Denotes whether LDAP assigned roles are used as NXRM roles # noqa: E501
:param ldap_groups_as_roles: The ldap_groups_as_roles of this CreateLdapServerXo. # noqa: E501
:type: bool
"""
| |
# Source: ChildMindInstitute/SM_EEG — SM_openSMILE/openSMILE_preprocessing/noise_replacement/noise_replacement.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
noise_replacement.py
Script to replace silenced noises in data sound files.
Author:
– <NAME>, 2016–2017 (<EMAIL>)
© 2016–2017, Child Mind Institute, Apache v2.0 License
Created on Mon Dec 19 17:00:00 2016
"""
import os, sys
if os.path.abspath('../../..') not in sys.path:
sys.path.append(os.path.abspath('../../..'))
import csv, fftnoise, math, numpy as np, pandas as pd, pydub, random
from xml.etree import ElementTree as ET
from scipy import signal
from scipy.io import wavfile
from SM_openSMILE.openSMILE_preprocessing.noise_replacement import \
generate_sample as gs
from SM_openSMILE.openSMILE_runSM import openSMILE_csv as oS_c
def analyze_and_generate(path):
    """
    Function to find ambient clips, get their amplitude and power spectrum,
    and generate an ambient mask based on this information.

    Parameters
    ----------
    path : string
        the absolute path of the original soundfile

    Returns
    -------
    mask : pydub audio segment
        the generated ambient mask
    """
    print(''.join(["Analyzing ", path]))
    input_data = wavfile.read(path)
    # split the stereo signal into its two channels
    audio_l = input_data[1][:, 0]
    audio_r = input_data[1][:, 1]
    rate = input_data[0]
    ambience = get_ambient_clips(path)
    silence_table(path, ambience)
    # accumulate every ambient clip into one sample per channel.
    # BUGFIX: np.concatenate returns a new array rather than extending in
    # place; the previous version discarded that return value, so only the
    # first ambient clip ever contributed to the generated mask.
    sample_l = None
    sample_r = None
    for start, stop in ambience:
        if sample_l is None:
            sample_l = audio_l[start:stop]
            sample_r = audio_r[start:stop]
        else:
            sample_l = np.concatenate((sample_l, audio_l[start:stop]))
            sample_r = np.concatenate((sample_r, audio_r[start:stop]))
    # assumes at least one ambient clip was found — TODO confirm upstream
    fft_l = np.fft.fft(sample_l)
    fft_r = np.fft.fft(sample_r)
    l_sample, l_Pxx = signal.periodogram(fft_l, rate)
    r_sample, r_Pxx = signal.periodogram(fft_r, rate)
    # synthesize noise matching each channel's measured power spectrum
    l_sound = fftnoise.fftnoise(l_Pxx)
    r_sound = fftnoise.fftnoise(r_Pxx)
    out_file = os.path.join(os.path.dirname(path), "gen_mask.wav")
    wavfile.write(out_file, rate, np.transpose(np.array([l_sound, r_sound])))
    mask = pydub.AudioSegment.from_wav(out_file)
    original = pydub.AudioSegment.from_wav(path)
    print("build baseline")
    # baseline = the original with the marked ambient spans cut out
    baseline = build_new_soundfile(original, rate, None, get_ambient_clips(
                                                         path))
    # match the mask's loudness to the baseline, then extend it to cover
    # the full length of the original recording
    mask = mask - abs(mask.dBFS - baseline.dBFS)
    mask = grow_mask(mask, len(original))
    mask.export(out_file, format="wav")
    return mask
def borders_frames_to_ms(borders, rate):
    """
    Convert a list of (start, stop) pairs from frames to milliseconds.

    Parameters
    ----------
    borders : list
        a list of 2-item lists or tuples, each item of which is a number of
        frames

    rate : float
        frames per millisecond (fps / 1000)

    Returns
    -------
    frame_borders : list
        a list of 2-item tuples in milliseconds; starts are floored and
        stops are ceiled so the converted span never shrinks
    """
    return [(math.floor(start / rate), math.ceil(stop / rate))
            for start, stop in borders]
def borders_ms_to_frames(borders, rate):
    """
    Convert a list of (start, stop) pairs from milliseconds to frames.

    Parameters
    ----------
    borders : list
        a list of 2-item lists or tuples, each item of which is a number of
        milliseconds

    rate : float
        frames per millisecond (fps / 1000)

    Returns
    -------
    frame_borders : list
        a list of 2-item tuples in frames; starts are floored and stops are
        ceiled so the converted span never shrinks
    """
    return [(math.floor(start * rate), math.ceil(stop * rate))
            for start, stop in borders]
def build_new_soundfile(with_silence, rate, mask, borders):
    """
    Given a soundfile, an optional mask, and a list of time-pairs,
    concatenate the segments outside of the time-pairs, replacing
    the time-pair marked segments with the mask, if applicable.

    Parameters
    ----------
    with_silence : pydub audio segment
        the segment to reconstruct
    rate : float
        frames per millisecond (fps / 1000)
    mask : pydub audio segment or None
        the mask segment to fill from
    borders : list of 2 item lists or tuples or None
        the time-pairs, in milliseconds, marking the beginning and ends of
        segments to cut, or nothing to cut

    Returns
    -------
    new_sound : pydub audio segment
        the reconstructed segment
    """
    # nothing to cut: hand back the original unchanged
    if (not borders):
        print("No marked segments.")
        return with_silence
    # all arithmetic below is in frames, not milliseconds
    borders = borders_ms_to_frames(borders, rate)
    segmented_sound = []
    seg_start = 0
    if(borders[0][0] == 0):
        seg_stop = borders[0][1]
        # NOTE(review): the triple-quoted block below is disabled code (a
        # bare string expression, not a docstring); with it disabled, a
        # first border starting at frame 0 only advances seg_start.
        """
        print(''.join(["initial building with segment [0:",
        str(seg_stop), "] out of ", str(len(with_silence) * rate)]))
        segmented_sound.append(with_silence.get_sample_slice(0, seg_stop))
        """
        seg_start = seg_stop
    for pair in borders:
        if(pair[0] > 0):
            try:
                # keep the audio between the previous border and this one
                segmented_sound.append(with_silence.get_sample_slice(seg_start,
                    pair[0]))
                print(''.join(["building with segment [", str(seg_start), ":",
                    str(pair[0]), "]"]))
                if mask:
                    print(''.join([str(math.ceil(pair[1])), ' - ',
                        str(math.floor(pair[0]))]))
                    # replace the cut span with a same-length piece of mask
                    masked_segment = fill_in(mask, (math.ceil(pair[1]) -
                        math.floor(pair[0])), rate)
                    segmented_sound.append(masked_segment)
                    print(''.join(["building with mask [", str(pair[0]), ":",
                        str(pair[1]), "]"]))
            except NameError:
                # NOTE(review): unclear what raises NameError here — looks
                # like a leftover guard; the fallback fills from the mask only.
                segmented_sound.append(fill_in(mask, math.ceil(pair[1]), rate))
                print(''.join(["building with mask [", str(pair[0]), ":",
                    str(len(with_silence) * rate), "]"]))
        seg_start = pair[1]
    # trailing audio after the last border, if any
    if(seg_start < (len(with_silence) * rate)):
        try:
            segmented_sound.append(with_silence.get_sample_slice(seg_start,
                None))
            print(''.join(["final building with segment [", str(seg_start),
                ":", str(len(
                with_silence) * rate), "]"]))
        except:
            # NOTE(review): bare except — presumably guards get_sample_slice
            # running past the end; consider narrowing the exception type.
            print("This is the end.")
    # stitch the collected segments back together in order
    if len(segmented_sound) > 1:
        new_sound = segmented_sound[0]
        for i, v in enumerate(segmented_sound):
            if i > 0:
                new_sound = new_sound + segmented_sound[i]
    else:
        # NOTE(review): raises IndexError if segmented_sound is empty
        new_sound = segmented_sound[0]
    return new_sound
def fill_in(mask, duration, rate):
    """
    Clip a section of the given duration out of a mask.

    Parameters
    ----------
    mask : pydub audio segment
        the mask to clip from
    duration : int
        the required duration in frames
    rate : float
        frames per millisecond (fps / 1000)

    Returns
    -------
    mask : pydub audio segment
        the mask clipped to the specified duration
    """
    total_frames = math.ceil(len(mask) * rate)
    print(str(total_frames))
    print(str(duration))
    if duration >= total_frames:
        # the mask is no longer than the requested span; take it from the top
        offset = 0
    else:
        # pick a random window so repeated fills do not sound identical
        offset = random.randrange(0, (total_frames - math.floor(duration)), 1)
    print(''.join(["fill in ", str(offset), ":", str((offset + duration))]))
    return mask.get_sample_slice(math.floor(offset),
                                 math.ceil(offset + duration))
def get_ambient_clips(path):
    """
    Find sections of ambient noise at least 2 seconds long.

    Parameters
    ----------
    path : string
        absolute path to waveform file to process

    Returns
    -------
    ambience : list of tuples
        a list of (start-frame, stop-frame) tuples of ambient segments
    """
    # bail out early if we cannot read the file
    if not os.access(path, os.R_OK):
        print(''.join([' !!! ', path,
                       ' : insufficient permission to read']))
        return []
    print(''.join([' Reading ', path]))
    input_data = wavfile.read(path)
    print(' read')
    # amplitude values of the left channel only
    audio = input_data[1][:, 0]
    print(''.join([' left channel: ', str(len(audio))]))
    print(' calculating envelope')
    # magnitude of the analytic signal gives the amplitude envelope
    envelope = np.abs(signal.hilbert(audio))
    threshold = np.median(envelope)
    starts = []
    stops = []
    ambience = []
    looking_for_start = True
    print(' finding ambient segments')
    for index, point in enumerate(envelope):
        if looking_for_start:
            # a nonzero dip below the median opens a candidate segment
            if point < threshold and point != 0:
                looking_for_start = False
                starts.append(index)
        elif point > threshold:
            # close the segment only once it spans >= 88200 frames
            # (2 seconds at 44.1 kHz)
            if index >= starts[-1] + 88200:
                looking_for_start = True
                stops.append(index)
    # pair each recorded stop with its start (a dangling start is dropped)
    for begin, end in zip(starts, stops):
        ambience.append((begin, end))
    return ambience
def grow_mask(mask, size):
    """
    Extend an ambient clip into a clone mask of at least the requested length.

    Parameters
    ----------
    mask : pydub audio segment
        the ambient clip from which to create the clone
    size : int
        how many milliseconds the clone should last

    Returns
    -------
    mask : pydub audio segment
        an ambient clone mask at least ``size`` milliseconds long
    """
    print("grow mask")
    # append a reversed copy of the clip until it is long enough; the
    # reversal keeps the splice points continuous
    while True:
        if len(mask) >= size:
            return mask
        mask = mask + mask.reverse()
def replace_silence(original, mask, rate):
    """
    Rebuild a clip with its silent stretches replaced from a mask.

    Parameters
    ----------
    original : pydub audio segment
        the original sound file
    mask : pydub audio segment
        the ambient clip from which to replace the silence
    rate : float
        frames per millisecond (fps / 1000)

    Returns
    -------
    new_sound : pydub audio segment
        the original sound with silence replaced from the specified mask
    silence_borders : list
        a list of start and stop times, in milliseconds, of silent segments
    """
    # anything quieter than -60 dBFS for at least 1 ms counts as silence
    silence_borders = pydub.silence.detect_silence(
        original, min_silence_len=1, silence_thresh=-60)
    print("build new sound")
    print(borders_ms_to_frames(silence_borders, rate))
    return (build_new_soundfile(original, rate, mask, silence_borders),
            silence_borders)
def silence_table(top_dir, silence_borders):
    """
    Write detected silence borders out as a CSV next to the input file.

    Parameters
    ----------
    top_dir : string
        path to the processed ``.wav`` file; the output path is derived by
        replacing the ``.wav`` extension with ``_silences.csv``
    silence_borders : list
        a list of 2-item lists or tuples of start/stop times, one pair per
        CSV row

    Returns
    -------
    None
    """
    # BUG FIX: the previous top_dir.strip('.wav') stripped any run of the
    # characters '.', 'w', 'a', 'v' from BOTH ends of the path (e.g.
    # 'wave.wav' -> 'e'); remove only the '.wav' suffix instead.
    if top_dir.endswith('.wav'):
        stem = top_dir[:-len('.wav')]
    else:
        stem = top_dir
    out_path = '_'.join([stem, "silences.csv"])
    # newline='' lets csv.writer control line endings (avoids \r\r\n on
    # Windows)
    with open(out_path, "w", newline="") as f:
        writer = csv.writer(f)
        writer.writerows(silence_borders)
def check_conditions(directory, conditions):
    """
    Check whether a directory name matches any known, accounted-for condition.

    Parameters
    ----------
    directory : string
        the name of a directory that is the condition to check for
    conditions : list of strings
        a list of known and accounted-for conditions

    Returns
    -------
    directory : string or None
        the directory name if no known condition matches (i.e. it is
        unaccounted for), otherwise None
    """
    # a known condition appearing as a substring means it is accounted for
    if any(condition in directory for condition in conditions):
        return None
    return directory
def build_adultTalk_dataframe(adults_removed_dict):
"""
Function to build a dataframe specifying which conditions included an
audibly speaking adult.
Parameters
----------
adults_removed_dict : dictionary
a dictionary of {URSI, list of string} pairs in which each string is
the name of a file in which an adult was audible
Returns
-------
adults_removed_df : pandas data frame
a dataframe with one row per URSI and one column per condition
indicating whether an adult spoke during that condition
"""
conditions = ["button no", "button w", "vocal no", "vocal w"]
| |
#
# Copyright 2004,2005 <NAME> <<EMAIL>>
#
# This file forms part of Infotrope Polymer.
#
# Infotrope Polymer is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Infotrope Polymer is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Infotrope Polymer; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
import wx
import infotrope.acap
import polymer.addressbook
import wx.stc
import email.Utils
import email.Header
import email.Parser
#import email.MIMEText
import time
import StringIO
import polymer.encode
import polymer.dialogs
import infotrope.encoding
import infotrope.message
import weakref
# StyledTextCtrl argument-count variant; only the literal value is visible
# here — TODO confirm intended meaning against the code that reads it.
stcargs = 2
# Widget IDs for the three recipient-kind choices on the compose window.
ID_TO = wx.NewId()
ID_CC = wx.NewId()
ID_BCC = wx.NewId()
# Maps a recipient-kind widget ID to the message header name it fills.
recipient_to_name = {
    ID_TO: 'To',
    ID_CC: 'Cc',
    ID_BCC: 'Bcc'
}
# Command IDs for the composer frame's menus and buttons.
ID_MESSAGE_SEND = wx.NewId()
ID_MESSAGE_CLOSE = wx.NewId()
ID_MESSAGE_SAVE = wx.NewId()
ID_MESSAGE_QUIT = wx.NewId()
ID_MESSAGE_TO = wx.NewId()
ID_MESSAGE_CHECK = wx.NewId()
ID_MESSAGE_FORMAT = wx.NewId()
ID_MESSAGE_ATTACH = wx.NewId()
ID_MESSAGE_SAVESIG = wx.NewId()
class Attachment:
    """A file-backed attachment being added to an outgoing message.

    Tracks the source path, display filename and MIME metadata.  MIME
    detection is lazy: find_mime() runs the first time part() or image()
    needs the type.  (Python 2 era code: str.decode() charset sniffing is
    deliberate.)
    """
    def __init__( self, path, filename=None ):
        # path may be None (subclasses wrap non-file payloads)
        self.path = path
        self.filename = filename
        if self.filename is None and self.path is not None:
            import os.path
            self.filename = os.path.basename(self.path)
        self.type = None                 # MIME major type, e.g. 'image'
        self.subtype = None              # MIME subtype, e.g. 'png'
        self.description = None          # optional content description
        self.disposition = 'attachment'  # content-disposition value
        self.encoding = None             # encoding guessed by mimetypes
        self.transfer_encoding = None
        self.uris = []                   # URIs this attachment was saved to
    def set_type( self, mime_type ):
        """Split a 'major/sub' MIME string into type and subtype."""
        ( self.type, self.subtype ) = mime_type.split('/')
    def find_mime( self ):
        """Guess MIME type/subtype (and charset for text) for self.path.

        Detection order: imghdr, then sndhdr, then mimetypes, then the wx
        MIME-types database, finally application/octet-stream.
        """
        import imghdr
        self.extension = imghdr.what( open( self.path, "rb" ) )
        if self.extension is not None:
            self.type = 'image'
        else:
            import sndhdr
            self.extension = sndhdr.what( self.path )
            if self.extension is not None:
                self.type = 'audio'
                # sndhdr returns a tuple; first field is the format name
                self.extension = self.extension[0]
            else:
                import mimetypes
                extbr = self.path.rfind( '.' )
                self.extension = self.path[extbr+1:]
                t = mimetypes.guess_type( self.path )
                if t[0] is not None:
                    self.set_type( t[0] )
                    self.encoding = t[1]
                else:
                    ft = wx.TheMimeTypesManager.GetFileTypeFromExtension( self.extension )
                    if ft is not None:
                        self.set_type( ft.GetMimeType() )
                    else:
                        # nothing recognised the file; treat as opaque bytes
                        self.type = 'application'
                        self.subtype = 'octet-stream'
        if self.subtype is None:
            # imghdr/sndhdr set only the major type; look the subtype up
            # from the extension via the wx MIME database
            ft = wx.TheMimeTypesManager.GetFileTypeFromExtension( self.extension )
            self.subtype = ft.GetMimeType()[ft.GetMimeType().index('/')+1:]
        if self.type == 'text':
            # sniff the charset from the first ~1KB (rounded to a full line)
            f = open( self.path )
            s = f.read(1024) + f.readline()
            self.charset = 'x-unknown'
            for x in ['us-ascii','utf8','iso-8859-1']:
                try:
                    s.decode( x )
                    self.charset = x
                    break
                except:
                    # decode failed: try the next candidate charset
                    pass
    def part( self ):
        """Build and return an infotrope.message.BasePart for this file."""
        if self.type is None:
            self.find_mime()
        p = infotrope.message.BasePart()
        if self.type =='text':
            # text is read and decoded to unicode with the sniffed charset
            p.raw_data = open(self.path).read()
            p.raw_data = p.raw_data.decode( self.charset )
        else:
            p.raw_data = open(self.path,"rb").read()
        p.mtype = self.type
        p.msubtype = self.subtype
        p.dtype = self.disposition
        p.dparams['filename'] = self.filename
        if self.description is not None:
            p.description = self.description
        for u in self.uris:
            p.saved_as( u )
        return p
    def image( self ):
        """Return the UI icon matching this attachment's MIME type."""
        if self.type is None:
            self.find_mime()
        return wx.GetApp().get_mime_icon(self.type, self.subtype)
    def saved_as( self, u ):
        # NOTE(review): replaces the whole list — u appears to be a list of
        # URIs (cf. the loop in part() and AttachmentMessage.saved_as);
        # TODO confirm callers never pass a single URI.
        self.uris = u
class AttachmentMessage(Attachment):
    """An attachment wrapping an existing message (or one of its parts).

    Used when forwarding: instead of reading a file from disk, the payload
    is taken from the source message via infotrope.message.MessagePart.
    """
    def __init__( self, msg, part=None ):
        # default the description to the forwarded message's subject
        descr = msg.envelope().Subject
        if descr is None:
            descr = u'Forwarded message'
        filename = None
        if part is not None:
            filename = part.filename()
            descr = part.description
        Attachment.__init__( self, None, filename or descr )
        self.filename = filename
        self.msg = msg
        # either the explicit part or the whole message body structure
        self.spart = part or msg.parts()
        self.description = descr
        self.mpart = infotrope.message.MessagePart( self.msg, self.spart )
    def find_mime( self ):
        """Take the MIME type directly from the wrapped source part."""
        self.set_type( self.spart.type.lower()+'/'+self.spart.subtype.lower() )
    def part( self ):
        """Return the prebuilt MessagePart wrapper."""
        return self.mpart
    def saved_as( self, us ):
        # forward each saved-to URI to the wrapped part
        for u in us:
            self.mpart.saved_as( u )
class AttachDropTarget( wx.PyDropTarget ):
    """Drop target that turns dragged URLs into attachments on a composer.

    Accepts imap(s) message/part URLs and local file:// URLs; anything else
    is ignored.  Holds only a weak reference to the composer window so the
    drop target does not keep the frame alive.
    """
    def __init__(self, msgbase):
        import polymer.dragdrop
        wx.PyDropTarget.__init__(self)
        # weakref: the frame owns this target, not the other way round
        self.msgbase = weakref.ref(msgbase)
        self.data = polymer.dragdrop.URLDataObject()
        self.SetDataObject(self.data)
    def OnDragOver(self, x, y, d):
        # always advertise a copy; the drag source is never moved
        return wx.DragCopy
    def OnDrop(self, x, y):
        return True
    def OnData(self, x, y, r):
        """Decode the dropped URLs and attach each supported one."""
        try:
            import infotrope.url
            if not self.GetData():
                return wx.DragNone
            ut = self.data.GetURLs()
            uris = [infotrope.url.URL(utx) for utx in ut]
            for u in uris:
                if u.scheme in ['imap','imaps']:
                    # only whole messages or message parts can be attached
                    if u.type not in ['MESSAGE','SECTION','PARTIAL']:
                        continue
                    srv = wx.GetApp().connection(u)
                    mbx = srv.mailbox(u.mailbox)
                    # a stale UIDVALIDITY means the UID may now refer to a
                    # different message; skip rather than attach the wrong one
                    if mbx.uidvalidity()!=u.uidvalidity:
                        continue
                    msg = mbx[u.uid]
                    part = None
                    if u.section:
                        part = msg.parts().find_id(u.section)
                    self.msgbase().add_attachment(AttachmentMessage(msg, part))
                elif u.scheme == 'file':
                    if u.server:
                        import socket
                        # only accept file URLs that refer to this host
                        if u.server!='localhost' and u.server!=socket.getfqdn():
                            continue
                    self.msgbase().add_attachment(Attachment(u.path.decode('urlencode')))
                else:
                    continue
            return wx.DragCopy
        except:
            # NOTE(review): bare except silently rejects the drop on any
            # error; consider logging before falling through to DragNone
            pass
        return wx.DragNone
class MessageBase( wx.Frame ):
def __init__( self, parent ):
wx.Frame.__init__( self, parent, -1, "New Message - Infotrope Polymer", size=(600,400), name='polymer' )
self.CreateStatusBar()
self.SetIcon( wx.GetApp().icon )
menu = wx.Menu()
menu.Append( ID_MESSAGE_SEND, "&Send", "Send this message" )
menu.Append( ID_MESSAGE_SAVE, "Sa&ve", "Save this message as a draft and continue" )
menu.Append( ID_MESSAGE_CLOSE, "&Close", "Save this message as a draft and close" )
menu.Append( ID_MESSAGE_QUIT, "&Discard", "Discard this message and close" )
tools = wx.Menu()
tools.Append( ID_MESSAGE_CHECK, "&Check Addresses", "Check addresses in this message" )
tools.AppendCheckItem( ID_MESSAGE_FORMAT, "&Flowed Format", "Use Flowed Format" )
tools.Check( ID_MESSAGE_FORMAT, True )
tools.Append( ID_MESSAGE_ATTACH, "&Attach File", "Attach a file to this message" )
tools.Append( ID_MESSAGE_SAVESIG, "&Save Signature", "Save this signature" )
menuBar = wx.MenuBar()
menuBar.Append( menu, "&File" )
menuBar.Append( tools, "&Tools" )
self.SetMenuBar( menuBar )
p = wx.Panel( self, -1 )
self._sizer = wx.BoxSizer( wx.VERTICAL )
self._header_sizer = wx.FlexGridSizer( cols = 2, hgap = 5, vgap = 5 )
self._header_sizer.Add( wx.StaticText( p, -1, "From" ) )
froms = wx.GetApp().personalities().entries()
while len(froms)==0:
# No identies setup, but we want to send email.
dlg = polymer.dialogs.MessageDialog( parent, "You're trying to send an email, but I don't know your email address yet.\nYou'll need these details handy.", "Warning - Infotrope Polymer", wx.OK|wx.ICON_INFORMATION )
dlg.ShowModal()
dlg = polymer.personality.IdentityEditCreate( self )
dlg.ShowModal()
l = len(wx.GetApp().personalities())
froms = wx.GetApp().personalities().entries()
self._from_sel_id = wx.NewId()
froms_ui = [ polymer.encode.encode_ui( x.decode('utf-8') ) for x in froms ]
from_ch = self.select_identity()
self._from_sel = wx.Choice( p, self._from_sel_id, choices=froms_ui )
self._from_sel.SetStringSelection( froms_ui[ from_ch ] )
e = wx.GetApp().personalities()[ froms[ from_ch ] ]
tmp_from = email.Utils.formataddr( (e['personality.Real-Name'], e['personality.Return-Address']) )
self._from = wx.TextCtrl( p, -1, tmp_from, style=wx.TE_READONLY )
self._from_sizer = wx.BoxSizer( wx.HORIZONTAL )
self._from_sizer.Add( self._from_sel, 0 )
self._from_sizer.Add( self._from, 99, wx.EXPAND )
self._from_sizer.Add( wx.Button( p, ID_MESSAGE_SEND, "Send" ), 0, wx.LEFT, border=5 )
self._header_sizer.Add( self._from_sizer, 1, wx.EXPAND )
all_recips = []
self.headers = []
for id in ID_TO,ID_CC,ID_BCC:
rs = self.get_recipients( id )
rsb = self.get_base_recipients( id, e )
if rs is None:
rs = rsb
elif rsb is not None:
rs += rsb
if rs is not None:
rs = [ r for r in rs if r._email_address not in all_recips ]
all_recips += [ r._email_address for r in rs ]
for r in rs:
ch = wx.Choice( p, -1, choices=recipient_to_name.values(), name=recipient_to_name[id])
recip = polymer.addressbook.RecipientCtrl( p, -1, r )
self._header_sizer.Add( ch, 0, wx.ALIGN_RIGHT )
self._header_sizer.Add( recip, 1, wx.EXPAND )
self.headers.append( (ch,recip) )
self.add_header(p,None)
subj = self.get_subject()
self._subject = wx.TextCtrl( p, -1, subj )
self._header_sizer.Add( wx.StaticText( p, -1, "Subject" ) )
self._header_sizer.Add( self._subject, 1, wx.EXPAND )
self.update_title()
self._subject_changed = False
self._header_sizer.AddGrowableCol( 1 )
self._sizer.Add( self._header_sizer, 0, wx.ADJUST_MINSIZE|wx.EXPAND|wx.ALL, 10 )
#self._text = wx.TextCtrl( p, -1, txte, style = wx.TE_MULTILINE|wx.TE_PROCESS_TAB|wx.TE_NOHIDESEL )
self._sash = wx.SplitterWindow( p, -1 )
self._text = wx.stc.StyledTextCtrl( self._sash, -1 )
self._text.SetWrapMode( wx.stc.STC_WRAP_WORD )
self._text.SetMargins( 0, 0 )
for i in range(3):
self._text.SetMarginWidth( i, 0 )
if 'unicode' in wx.PlatformInfo:
self._text.SetCodePage( wx.stc.STC_CP_UTF8 )
self._text.SetText( '' )
self.add_text( self._text )
self.update_sig()
self._text.SetSavePoint()
self._attachments = self.get_attachments()
self._attach_drop1 = AttachDropTarget(self)
self._attach_panel = wx.Panel( self._sash, -1, style=wx.SUNKEN_BORDER )
self._attach_sizer = wx.BoxSizer( wx.VERTICAL )
p1 = wx.Panel( self._attach_panel, -1, style=wx.RAISED_BORDER )
ss = wx.BoxSizer( wx.HORIZONTAL )
ss.Add( wx.StaticText( p1, -1, "Attachments. Show as:" ), 0, wx.ALL, border=5 )
c = wx.Choice( p1, -1, choices=[ 'Icons','List','Detail' ] )
wx.EVT_CHOICE( c, -1, self.change_attach_display )
ss.Add( c, 0, wx.ALL, border=5 )
p1.SetSizer( ss )
ss.Fit( p1 )
p1.SetAutoLayout( True )
self._attach_sizer.Add( p1, 0, wx.EXPAND )
self._attach_list = wx.ListCtrl( self._attach_panel, -1, style=wx.LC_SMALL_ICON )
self._attach_sizer.Add( self._attach_list, 1, wx.EXPAND )
self._attach_panel.SetSizer( self._attach_sizer )
self._attach_panel.SetAutoLayout( True )
self._attach_sizer.Fit( self._attach_panel )
self._attach_panel.SetDropTarget(self._attach_drop1)
self._sizer.Add( self._sash, 1, wx.ADJUST_MINSIZE|wx.GROW|wx.EXPAND )
self._attach_placeholder = wx.Panel( p, -1, style=wx.RAISED_BORDER )
ss = wx.BoxSizer( wx.HORIZONTAL )
ss.Add( wx.StaticText( self._attach_placeholder, -1, "No attachments (drag to here)", ), 1, wx.ALL|wx.EXPAND, border=5 )
self._attach_placeholder.SetSizer(ss)
self._attach_placeholder.SetAutoLayout(True)
self._attach_drop2 = AttachDropTarget(self)
self._attach_placeholder.SetDropTarget(self._attach_drop2)
ss.Fit(self._attach_placeholder)
self._sizer.Add( self._attach_placeholder, 0, wx.EXPAND )
self._sash.SplitHorizontally( self._text, self._attach_panel, -50 )
p.SetSizer( self._sizer )
p.SetAutoLayout( 1 )
self._sizer.Fit( p )
self._content_panel = p
wx.EVT_MENU( self, ID_MESSAGE_SEND, self.message_send )
| |
compound dtype
# normally for cpd dtype, __resolve_dtype__ takes a list of DtypeSpec objects
cpd_type = [dict(name='cpd_float', dtype=np.dtype('float64')),
dict(name='cpd_int', dtype=np.dtype('int32'))]
kwargs['dtype'] = HDF5IO.__resolve_dtype__(cpd_type, my_list[0])
dset = self.file.create_dataset('dset%d' % self.dset_counter, data=np.array(my_list, **kwargs))
if H5PY_3 and isinstance(my_list[0], str):
return StrDataset(dset, None) # return a wrapper to read data as str instead of bytes
else:
# NOTE: h5py.Dataset with compound dtype are read as numpy arrays with compound dtype, not tuples
return dset
def _wrap_check(self, my_list):
# getitem on h5dataset backed data will return np.array
kwargs = dict()
if isinstance(my_list[0], str):
kwargs['dtype'] = H5_TEXT
elif isinstance(my_list[0], tuple):
cpd_type = [dict(name='cpd_float', dtype=np.dtype('float64')),
dict(name='cpd_int', dtype=np.dtype('int32'))]
kwargs['dtype'] = np.dtype([(x['name'], x['dtype']) for x in cpd_type])
# compound dtypes with str are read as bytes, see https://github.com/h5py/h5py/issues/1751
return np.array(my_list, **kwargs)
class TestVectorIndex(TestCase):
    """Unit tests for constructing VectorIndex objects."""

    def test_init_empty(self):
        """An empty index gets a generated description and empty data."""
        target = VectorData(name='foo', description='foo column')
        index = VectorIndex(name='foo_index', target=target, data=list())
        self.assertEqual(index.name, 'foo_index')
        self.assertEqual(index.description, "Index for VectorData 'foo'")
        self.assertIs(index.target, target)
        self.assertListEqual(index.data, list())

    def test_init_data(self):
        """An index with data slices its target into ragged rows."""
        target = VectorData(name='foo', description='foo column', data=['a', 'b', 'c'])
        index = VectorIndex(name='foo_index', target=target, data=[2, 3])
        self.assertListEqual(index.data, [2, 3])
        self.assertListEqual(index[0], ['a', 'b'])
        self.assertListEqual(index[1], ['c'])
class TestDoubleIndex(TestCase):
    """Tests for an index-of-an-index (doubly ragged) column."""

    def test_index(self):
        # row 1 has three entries
        # the first entry has two sub-entries
        # the first sub-entry has two values, the second sub-entry has one value
        # the second entry has one sub-entry, which has one value
        data_col = VectorData(name='foo', description='foo column', data=['a11', 'a12', 'a21', 'b11'])
        inner = VectorIndex(name='foo_index', target=data_col, data=[2, 3, 4])
        outer = VectorIndex(name='foo_index_index', target=inner, data=[2, 3])
        self.assertListEqual(inner[0], ['a11', 'a12'])
        self.assertListEqual(inner[1], ['a21'])
        self.assertListEqual(inner[2], ['b11'])
        self.assertListEqual(outer[0], [['a11', 'a12'], ['a21']])
        self.assertListEqual(outer[1], [['b11']])

    def test_add_vector(self):
        # appending a doubly nested value must grow the data column and
        # both levels of index consistently
        data_col = VectorData(name='foo', description='foo column', data=['a11', 'a12', 'a21', 'b11'])
        inner = VectorIndex(name='foo_index', target=data_col, data=[2, 3, 4])
        outer = VectorIndex(name='foo_index_index', target=inner, data=[2, 3])
        outer.add_vector([['c11', 'c12', 'c13'], ['c21', 'c22']])
        self.assertListEqual(data_col.data, ['a11', 'a12', 'a21', 'b11', 'c11', 'c12', 'c13', 'c21', 'c22'])
        self.assertListEqual(inner.data, [2, 3, 4, 7, 9])
        self.assertListEqual(inner[3], ['c11', 'c12', 'c13'])
        self.assertListEqual(inner[4], ['c21', 'c22'])
        self.assertListEqual(outer.data, [2, 3, 5])
        self.assertListEqual(outer[2], [['c11', 'c12', 'c13'], ['c21', 'c22']])
class TestDTDoubleIndex(TestCase):
    """DynamicTable behavior around doubly-indexed columns."""

    def _make_columns(self):
        """Build the data column and its two levels of index."""
        data_col = VectorData(name='foo', description='foo column', data=['a11', 'a12', 'a21', 'b11'])
        inner = VectorIndex(name='foo_index', target=data_col, data=[2, 3, 4])
        outer = VectorIndex(name='foo_index_index', target=inner, data=[2, 3])
        return data_col, inner, outer

    def _check_table(self, table, data_col, outer):
        """Item access resolves to the top index; attribute access to data."""
        self.assertIs(table['foo'], outer)
        self.assertIs(table.foo, data_col)
        self.assertListEqual(table['foo'][0], [['a11', 'a12'], ['a21']])
        self.assertListEqual(table[0, 'foo'], [['a11', 'a12'], ['a21']])
        self.assertListEqual(table[1, 'foo'], [['b11']])

    def test_double_index(self):
        data_col, inner, outer = self._make_columns()
        table = DynamicTable('table0', 'an example table', columns=[data_col, inner, outer])
        self._check_table(table, data_col, outer)

    def test_double_index_reverse(self):
        # column order at construction must not matter
        data_col, inner, outer = self._make_columns()
        table = DynamicTable('table0', 'an example table', columns=[outer, inner, data_col])
        self._check_table(table, data_col, outer)

    def test_double_index_colnames(self):
        data_col, inner, outer = self._make_columns()
        bar = VectorData(name='bar', description='bar column', data=[1, 2])
        table = DynamicTable('table0', 'an example table', columns=[data_col, inner, outer, bar],
                             colnames=['foo', 'bar'])
        # indices are reordered to precede their data column
        self.assertTupleEqual(table.columns, (outer, inner, data_col, bar))

    def test_double_index_reverse_colnames(self):
        data_col, inner, outer = self._make_columns()
        bar = VectorData(name='bar', description='bar column', data=[1, 2])
        table = DynamicTable('table0', 'an example table', columns=[outer, inner, data_col, bar],
                             colnames=['bar', 'foo'])
        self.assertTupleEqual(table.columns, (bar, outer, inner, data_col))
class TestDTDoubleIndexSkipMiddle(TestCase):
    """A table must reject an index chain with a missing middle link."""

    def test_index(self):
        data_col = VectorData(name='foo', description='foo column', data=['a11', 'a12', 'a21', 'b11'])
        inner = VectorIndex(name='foo_index', target=data_col, data=[2, 3, 4])
        outer = VectorIndex(name='foo_index_index', target=inner, data=[2, 3])
        # pass the outer index and the data but omit the inner index
        expected = "Found VectorIndex 'foo_index_index' but not its target 'foo_index'"
        with self.assertRaisesWith(ValueError, expected):
            DynamicTable('table0', 'an example table', columns=[outer, data_col])
class TestDynamicTableAddIndexRoundTrip(H5RoundTripMixin, TestCase):
    """Round-trip a table whose column was added with index=True."""

    def setUpContainer(self):
        container = DynamicTable('table0', 'an example table')
        container.add_column('foo', 'an int column', index=True)
        container.add_row(foo=[1, 2, 3])
        return container
class TestDynamicTableAddEnumRoundTrip(H5RoundTripMixin, TestCase):
    """Round-trip a table with an enumerated column."""

    def setUpContainer(self):
        container = DynamicTable('table0', 'an example table')
        container.add_column('bar', 'an enumerable column', enum=True)
        # repeated values exercise the enum encoding
        for value in ('a', 'b', 'a', 'c'):
            container.add_row(bar=value)
        return container
class TestDynamicTableAddEnum(TestCase):
    """Tests converting enumerated columns to pandas DataFrames."""

    def test_enum(self):
        table = DynamicTable('table0', 'an example table')
        table.add_column('bar', 'an enumerable column', enum=True)
        values = ['a', 'b', 'a', 'c']
        for value in values:
            table.add_row(bar=value)
        actual = table.to_dataframe()
        expected = pd.DataFrame(data={'bar': values},
                                index=pd.Series(name='id', data=[0, 1, 2, 3]))
        pd.testing.assert_frame_equal(expected, actual)

    def test_enum_index(self):
        table = DynamicTable('table0', 'an example table')
        table.add_column('bar', 'an indexed enumerable column', enum=True, index=True)
        rows = [['a', 'a', 'a'],
                ['b', 'b', 'b', 'b'],
                ['c', 'c']]
        for row in rows:
            table.add_row(bar=row)
        actual = table.to_dataframe()
        expected = pd.DataFrame(data={'bar': rows},
                                index=pd.Series(name='id', data=[0, 1, 2]))
        pd.testing.assert_frame_equal(expected, actual)
class TestDynamicTableInitIndexRoundTrip(H5RoundTripMixin, TestCase):
    """Round-trip a table constructed with an explicit index column."""

    def setUpContainer(self):
        data_col = VectorData(name='foo', description='foo column', data=['a', 'b', 'c'])
        index = VectorIndex(name='foo_index', target=data_col, data=[2, 3])
        # NOTE: on construct, columns are ordered such that indices go before data, so create the table that way
        # for proper comparison of the columns list
        return DynamicTable('table0', 'an example table', columns=[index, data_col])
class TestDoubleIndexRoundtrip(H5RoundTripMixin, TestCase):
    """Round-trip a table with a doubly-indexed column."""

    def setUpContainer(self):
        data_col = VectorData(name='foo', description='foo column', data=['a11', 'a12', 'a21', 'b11'])
        inner = VectorIndex(name='foo_index', target=data_col, data=[2, 3, 4])
        outer = VectorIndex(name='foo_index_index', target=inner, data=[2, 3])
        # NOTE: on construct, columns are ordered such that indices go before data, so create the table that way
        # for proper comparison of the columns list
        return DynamicTable('table0', 'an example table', columns=[outer, inner, data_col])
class TestDataIOColumns(H5RoundTripMixin, TestCase):
    """Round-trip DynamicTable columns whose data are wrapped in H5DataIO."""
    def setUpContainer(self):
        # chunked dataset with a fill value
        self.chunked_data = H5DataIO(
            data=[i for i in range(10)],
            chunks=(3,),
            fillvalue=-1,
        )
        # compressed dataset with shuffle and fletcher32 filters enabled
        self.compressed_data = H5DataIO(
            data=np.arange(10),
            compression=1,
            shuffle=True,
            fletcher32=True,
            allow_plugin_filters=True,
        )
        foo = VectorData(name='foo', description='chunked column', data=self.chunked_data)
        bar = VectorData(name='bar', description='chunked column', data=self.compressed_data)
        # NOTE: on construct, columns are ordered such that indices go before data, so create the table that way
        # for proper comparison of the columns list
        table = DynamicTable('table0', 'an example table', columns=[foo, bar])
        table.add_row(foo=1, bar=1)
        return table
    def test_roundtrip(self):
        """Check the HDF5 chunk/filter settings survived the round trip."""
        super().test_roundtrip()
        with h5py.File(self.filename, 'r') as f:
            chunked_dset = f['foo']
            self.assertTrue(np.all(chunked_dset[:] == self.chunked_data.data))
            self.assertEqual(chunked_dset.chunks, (3,))
            self.assertEqual(chunked_dset.fillvalue, -1)
            compressed_dset = f['bar']
            self.assertTrue(np.all(compressed_dset[:] == self.compressed_data.data))
            # integer compression levels are reported by h5py as 'gzip'
            self.assertEqual(compressed_dset.compression, 'gzip')
            self.assertEqual(compressed_dset.shuffle, True)
            self.assertEqual(compressed_dset.fletcher32, True)
class TestDataIOIndexedColumns(H5RoundTripMixin, TestCase):
    """Round-trip H5DataIO-wrapped 3-D columns that sit behind VectorIndex."""
    def setUpContainer(self):
        # chunked 5x2x3 dataset with a fill value
        self.chunked_data = H5DataIO(
            data=np.arange(30).reshape(5, 2, 3),
            chunks=(1, 1, 3),
            fillvalue=-1,
        )
        # compressed 5x2x3 dataset with shuffle and fletcher32 filters
        self.compressed_data = H5DataIO(
            data=np.arange(30).reshape(5, 2, 3),
            compression=1,
            shuffle=True,
            fletcher32=True,
            allow_plugin_filters=True,
        )
        foo = VectorData(name='foo', description='chunked column', data=self.chunked_data)
        foo_ind = VectorIndex(name='foo_index', target=foo, data=[2, 3, 4])
        bar = VectorData(name='bar', description='chunked column', data=self.compressed_data)
        bar_ind = VectorIndex(name='bar_index', target=bar, data=[2, 3, 4])
        # NOTE: on construct, columns are ordered such that indices go before data, so create the table that way
        # for proper comparison of the columns list
        table = DynamicTable('table0', 'an example table', columns=[foo_ind, foo, bar_ind, bar])
        # check for add_row
        table.add_row(foo=np.arange(30).reshape(5, 2, 3), bar=np.arange(30).reshape(5, 2, 3))
        return table
    def test_roundtrip(self):
        """Check the HDF5 chunk/filter settings survived the round trip."""
        super().test_roundtrip()
        with h5py.File(self.filename, 'r') as f:
            chunked_dset = f['foo']
            self.assertTrue(np.all(chunked_dset[:] == self.chunked_data.data))
            self.assertEqual(chunked_dset.chunks, (1, 1, 3))
            self.assertEqual(chunked_dset.fillvalue, -1)
            compressed_dset = f['bar']
            self.assertTrue(np.all(compressed_dset[:] == self.compressed_data.data))
            # integer compression levels are reported by h5py as 'gzip'
            self.assertEqual(compressed_dset.compression, 'gzip')
            self.assertEqual(compressed_dset.shuffle, True)
            self.assertEqual(compressed_dset.fletcher32, True)
class TestDataIOIndex(H5RoundTripMixin, TestCase):
    """Round-trip a table whose data, indices and ids are all H5DataIO-wrapped
    with maxshape set, so the datasets stay appendable after writing."""
    def setUpContainer(self):
        # chunked, growable (maxshape None on axis 0) data column
        self.chunked_data = H5DataIO(
            data=np.arange(30).reshape(5, 2, 3),
            chunks=(1, 1, 3),
            fillvalue=-1,
            maxshape=(None, 2, 3)
        )
        # matching growable index for the chunked column
        self.chunked_index_data = H5DataIO(
            data=np.array([2, 3, 5], dtype=np.uint),
            chunks=(2, ),
            fillvalue=np.uint(10),
            maxshape=(None,)
        )
        # compressed, growable data column
        self.compressed_data = H5DataIO(
            data=np.arange(30).reshape(5, 2, 3),
            compression=1,
            shuffle=True,
            fletcher32=True,
            allow_plugin_filters=True,
            maxshape=(None, 2, 3)
        )
        # matching growable index for the compressed column
        self.compressed_index_data = H5DataIO(
            data=np.array([2, 4, 5], dtype=np.uint),
            compression=1,
            shuffle=True,
            fletcher32=False,
            allow_plugin_filters=True,
            maxshape=(None,)
        )
        foo = VectorData(name='foo', description='chunked column', data=self.chunked_data)
        foo_ind = VectorIndex(name='foo_index', target=foo, data=self.chunked_index_data)
        bar = VectorData(name='bar', description='chunked column', data=self.compressed_data)
        bar_ind = VectorIndex(name='bar_index', target=bar, data=self.compressed_index_data)
        # NOTE: on construct, columns are ordered such that indices go before data, so create the table that way
        # for proper comparison of the columns list
        table = DynamicTable('table0', 'an example table', columns=[foo_ind, foo, bar_ind, bar],
                             id=H5DataIO(data=[0, 1, 2], chunks=True, maxshape=(None,)))
        # check for add_row
        table.add_row(foo=np.arange(30).reshape(5, 2, 3),
                      bar=np.arange(30).reshape(5, 2, 3))
        return table
    def test_append(self, cache_spec=False):
        """Write the container to an HDF5 file, read the container from the file, and append to it."""
        with HDF5IO(self.filename, manager=get_manager(), mode='w') as write_io:
            write_io.write(self.container, cache_spec=cache_spec)
        # reopen in append mode; kept on self — presumably closed by the
        # mixin's teardown, TODO confirm
        self.reader = HDF5IO(self.filename, manager=get_manager(), mode='a')
        read_table = self.reader.read()
        data = np.arange(30, 60).reshape(5, 2, 3)
        # append a new ragged row through the file-backed table
        read_table.add_row(foo=data, bar=data)
        np.testing.assert_array_equal(read_table['foo'][-1], data)
class TestDTRReferences(TestCase):
    def setUp(self):
        # fixed output file name for each test; removed again in tearDown
        self.filename = 'test_dtr_references.h5'
    def tearDown(self):
        # delete the HDF5 file written during the test, if any
        remove_test_file(self.filename)
def test_dtr_references(self):
"""Test roundtrip of a table with a ragged DTR to another table containing a column of references."""
group1 = Container('group1')
group2 = Container('group2')
table1 = DynamicTable(
name='table1',
description='test table | |
dict's keys
if not labels_set.issubset(set(labels_mapping)):
# because there's some label in labels
# that's not in labels_mapping
print('found labels in {} not in labels_mapping, '
'skipping file'.format(filename))
continue
print('making .spect file for {}'.format(filename))
if 'freq_cutoffs' in spect_params:
dat = spect_utils.butter_bandpass_filter(dat,
spect_params['freq_cutoffs'][0],
spect_params['freq_cutoffs'][1],
fs)
specgram_params = {'fft_size': spect_params['fft_size'],
'step_size': spect_params['step_size']}
if 'thresh' in spect_params:
specgram_params['thresh'] = spect_params['thresh']
if 'transform_type' in spect_params:
specgram_params['transform_type'] = spect_params['transform_type']
spect, freq_bins, time_bins = spect_utils.spectrogram(dat, fs,
**specgram_params)
if 'freq_cutoffs' in spect_params:
f_inds = np.nonzero((freq_bins >= spect_params['freq_cutoffs'][0]) &
(freq_bins < spect_params['freq_cutoffs'][1]))[0] # returns tuple
spect = spect[f_inds, :]
freq_bins = freq_bins[f_inds]
this_labels = [labels_mapping[label]
for label in this_labels_str]
this_labeled_timebins = make_labeled_timebins_vector(this_labels,
onsets,
offsets,
time_bins,
labels_mapping['silent_gap_label'])
if not 'timebin_dur' in locals():
timebin_dur = np.around(np.mean(np.diff(time_bins)), decimals=3)
else:
curr_timebin_dur = np.around(np.mean(np.diff(time_bins)), decimals=3)
# below truncates any decimal place past decade
curr_timebin_dur = np.trunc(curr_timebin_dur
* decade) / decade
if not np.allclose(curr_timebin_dur, timebin_dur):
raise ValueError("duration of timebin in file {}, {}, did not "
"match duration of timebin from other .mat "
"files, {}.".format(curr_timebin_dur,
filename,
timebin_dur))
spect_dur = time_bins.shape[-1] * timebin_dur
spect_dict = {'spect': spect,
'freq_bins': freq_bins,
'time_bins': time_bins,
'labels': this_labels_str,
'labeled_timebins': this_labeled_timebins,
'timebin_dur': timebin_dur,
'spect_params': spect_params,
'labels_mapping': labels_mapping}
spect_dict_filename = os.path.join(
os.path.normpath(output_dir),
os.path.basename(filename) + '.spect')
joblib.dump(spect_dict, spect_dict_filename)
spect_files.append((spect_dict_filename, spect_dur, this_labels_str))
spect_files_path = os.path.join(output_dir, 'spect_files')
joblib.dump(spect_files, spect_files_path)
return spect_files_path
def make_data_dicts(output_dir,
total_train_set_duration,
validation_set_duration,
test_set_duration,
labelset,
spect_files=None):
"""function that loads data and saves in dictionaries
Parameters
----------
output_dir : str
path to output_dir containing .spect files
total_train_set_duration : float
validation_set_duration : float
test_set_duration : float
all in seconds
labelset : list
of str, labels used
spect_files : str
full path to file containing 'spect_files' list of tuples
saved by function make_spects_from_list_of_files.
Default is None, in which case this function looks for
a file named 'spect_files' in output_dir.
Returns
-------
saved_data_dict_paths : dict
with keys {'train', 'test', 'val'} and values being the path
to which the data_dict was saved
Saves three 'data_dict' files (train, validation, and test)
in output_dir, with following structure:
spects : list
of ndarray, spectrograms from audio files
filenames : list
same length as spects, filename of each audio file that was converted to spectrogram
freq_bins : ndarray
vector of frequencies where each value is a bin center. Same for all spectrograms
time_bins : list
of ndarrays, each a vector of times where each value is a bin center.
One for each spectrogram
labelset : list
of strings, labels corresponding to each spectrogram
labeled_timebins : list
of ndarrays, each same length as time_bins but value is a label for that bin.
In other words, the labels vector is mapped onto the time_bins vector for the
spectrogram.
X : ndarray
X_train, X_val, or X_test, depending on which data_dict you are looking at.
Some number of spectrograms concatenated, enough so that the total duration
of the spectrogram in time bins is equal to or greater than the target duration.
If greater than target, then X is truncated so it is equal to the target.
Y : ndarray
Concatenated labeled_timebins vectors corresponding to the spectrograms in X.
spect_ID_vector : ndarray
Vector where each element is an ID for a song. Used to randomly grab subsets
of data of a target duration while still having the subset be composed of
individual songs as much as possible. So this vector will look like:
[0, 0, 0, ..., 1, 1, 1, ... , n, n, n] where n is equal to or (a little) less
than the length of spects. spect_ID_vector.shape[-1] is the same as X.shape[-1]
and Y.shape[0].
timebin_dur : float
duration of a timebin in seconds from spectrograms
spect_params : dict
parameters for computing spectrogram as specified in config.ini file.
Will be checked against .ini file when running other cli such as learn_curve.py
labels_mapping : dict
maps str labels for syllables to consecutive integers.
As explained in docstring for make_spects_from_list_of_files.
"""
if not os.path.isdir(output_dir):
raise NotADirectoryError('{} not recognized '
'as a directory'.format(output_dir))
if spect_files is None:
spect_files = glob(os.path.join(output_dir,'spect_files'))
if spect_files == []: # if glob didn't find anything
raise FileNotFoundError("did not find 'spect_files' file in {}"
.format(output_dir))
elif len(spect_files) > 1:
raise ValueError("found than more than one 'spect_files' in {}:\n{}"
.format(output_dir, spect_files))
else:
spect_files = spect_files[0]
if not os.path.isfile(spect_files):
raise FileNotFoundError('{} not recognized as a file'
.format(spect_files))
spect_files = joblib.load(spect_files)
total_spects_dur = sum([spect[1] for spect in spect_files])
total_dataset_dur = sum([total_train_set_duration,
validation_set_duration,
test_set_duration])
if total_spects_dur < total_dataset_dur:
raise ValueError('Total duration of all .cbin files, {} seconds,'
' is less than total target duration of '
'training, validation, and test sets, '
'{} seconds'
.format(total_spects_dur, total_dataset_dur))
# main loop that gets datasets
iter = 1
all_labels_err = ('Did not successfully divide data into training, '
'validation, and test sets of sufficient duration '
'after 1000 iterations.'
' Try increasing the total size of the data set.')
while 1:
spect_files_copy = copy.deepcopy(spect_files)
train_spects = []
val_spects = []
test_spects = []
total_train_dur = 0
val_dur = 0
test_dur = 0
choice = ['train', 'val', 'test']
while 1:
# pop tuples off cbins_used list and append to randomly-chosen
# list, either train, val, or test set.
# Do this until the total duration for each data set is equal
# to or greater than the target duration for each set.
try:
ind = random.randint(0, len(spect_files_copy)-1)
except ValueError:
if len(spect_files_copy) == 0:
print('Ran out of spectrograms while dividing data into training, '
'validation, and test sets of specified durations. Iteration {}'
.format(iter))
iter += 1
break # do next iteration
else:
raise
a_spect = spect_files_copy.pop(ind)
which_set = random.randint(0, len(choice)-1)
which_set = choice[which_set]
if which_set == 'train':
train_spects.append(a_spect)
total_train_dur += a_spect[1] # ind 1 is duration
if total_train_dur >= total_train_set_duration:
choice.pop(choice.index('train'))
elif which_set == 'val':
val_spects.append(a_spect)
val_dur += a_spect[1] # ind 1 is duration
if val_dur >= validation_set_duration:
choice.pop(choice.index('val'))
elif which_set == 'test':
test_spects.append(a_spect)
test_dur += a_spect[1] # ind 1 is duration
if test_dur >= test_set_duration:
choice.pop(choice.index('test'))
if len(choice) < 1:
if np.sum(total_train_dur +
val_dur +
test_dur) < total_dataset_dur:
raise ValueError('Loop to find subsets completed but '
'total duration of subsets is less than '
'total duration specified by config file.')
else:
break
if iter > 1000:
raise ValueError('Could not find subsets of sufficient duration in '
'less than 1000 iterations.')
# make sure no contamination between data sets.
# If this is true, each set of filenames should be disjoint from others
train_spect_files = [tup[0] for tup in train_spects] # tup = a tuple
val_spect_files = [tup[0] for tup in val_spects]
test_spect_files = [tup[0] for tup in test_spects]
assert set(train_spect_files).isdisjoint(val_spect_files)
assert set(train_spect_files).isdisjoint(test_spect_files)
assert set(val_spect_files).isdisjoint(test_spect_files)
# make sure that each set contains all classes we
# want the network to learn
train_labels = itertools.chain.from_iterable(
[spect[2] for spect in train_spects])
train_labels = set(train_labels) # make set to get unique values
val_labels = itertools.chain.from_iterable(
[spect[2] for spect in val_spects])
val_labels = set(val_labels)
test_labels = itertools.chain.from_iterable(
[spect[2] for spect in test_spects])
test_labels = set(test_labels)
if train_labels != set(labelset):
iter += 1
if iter > 1000:
raise ValueError(all_labels_err)
else:
print('Train labels did not contain all labels in labelset. '
'Getting new training set. Iteration {}'
.format(iter))
continue
elif val_labels != set(labelset):
iter += 1
if iter > 1000:
raise ValueError(all_labels_err)
else:
print('Validation labels did not contain all labels in labelset. '
'Getting new validation set. Iteration {}'
.format(iter))
continue
elif test_labels != set(labelset):
iter += 1
if iter > 1000:
raise ValueError(all_labels_err)
else:
print('Test labels did not contain all labels in labelset. '
'Getting new test set. Iteration {}'
.format(iter))
continue
else:
break
saved_data_dict_paths = {}
for dict_name, spect_list, target_dur in zip(['train','val','test'],
[train_spects,val_spects,test_spects],
[total_train_set_duration,
validation_set_duration,
test_set_duration]):
spects = []
filenames = []
all_time_bins = []
labels = []
labeled_timebins = []
spect_ID_vector = []
for spect_ind, spect_file in enumerate(spect_list):
spect_dict = joblib.load(spect_file[0])
spects.append(spect_dict['spect'])
filenames.append(spect_file[0])
all_time_bins.append(spect_dict['time_bins'])
labels.append(spect_dict['labels'])
labeled_timebins.append(spect_dict['labeled_timebins'])
spect_ID_vector.extend([spect_ind] * spect_dict['time_bins'].shape[-1])
if 'freq_bins' in locals():
assert np.array_equal(spect_dict['freq_bins'], freq_bins)
else:
freq_bins = spect_dict['freq_bins']
if 'labels_mapping' in locals():
assert spect_dict['labels_mapping'] | |
botApiResponse object as second member
"""
data = {
}
return self.response(self.sendRequest("logOut", data), bool)
def close(self, ):
    """Close the bot instance before moving it from one local server to another.

    Delete the webhook first so the bot is not relaunched after a server
    restart; Telegram answers error 429 during the first 10 minutes after
    launch. Requires no parameters.
    [See Telegram API](https://core.telegram.org/bots/api#close)

    **Returns:**
    - A `tuple`: on success a `bool` as first member and a botApiResponse
      object as second member.
    """
    return self.response(self.sendRequest("close", {}), bool)
def sendMessage(self, chat_id: Union[int, str, ], text: str, reply_markup: Union[types.InlineKeyboardMarkup, types.ReplyKeyboardMarkup, types.ReplyKeyboardRemove, types.ForceReply, ] = None, parse_mode: str = None, entities: list = None, disable_web_page_preview: bool = None, disable_notification: bool = None, reply_to_message_id: int = None, allow_sending_without_reply: bool = None):
    """Send a text message; on success the sent Message is returned.

    [See Telegram API](https://core.telegram.org/bots/api#sendmessage)

    Args:
        chat_id: Target chat id or @channelusername.
        text: Message text, 1-4096 characters after entities parsing.
        reply_markup: Inline keyboard, custom reply keyboard, keyboard
            removal or force-reply object.
        parse_mode: Entity-parsing mode; instance default when None.
        entities: JSON-serialized special entities, alternative to parse_mode.
        disable_web_page_preview: Disable link previews; instance default
            when None.
        disable_notification: Send silently; instance default when None.
        reply_to_message_id: Id of the original message when replying.
        allow_sending_without_reply: Send even if the replied-to message is
            not found.

    **Returns:**
    - A `tuple`: a `types.Message` on success and a botApiResponse object.
    """
    payload = {
        "chat_id": chat_id,
        "text": text,
        "parse_mode": self.default_parse_mode if parse_mode is None else parse_mode,
        "entities": entities,
        "disable_web_page_preview": (self.default_disable_web_preview
                                     if disable_web_page_preview is None
                                     else disable_web_page_preview),
        "disable_notification": (self.default_disable_notifications
                                 if disable_notification is None
                                 else disable_notification),
        "reply_to_message_id": reply_to_message_id,
        "allow_sending_without_reply": allow_sending_without_reply,
        "reply_markup": helper.toDict(reply_markup, True),
    }
    return self.response(self.sendRequest("sendMessage", payload), types.Message)
def forwardMessage(self, chat_id: Union[int, str, ], message_id: int, from_chat_id: Union[int, str, ], disable_notification: bool = None):
    """Forward a message of any kind (service messages cannot be forwarded).

    On success the sent Message is returned.
    [See Telegram API](https://core.telegram.org/bots/api#forwardmessage)

    Args:
        chat_id: Target chat id or @channelusername.
        message_id: Message identifier in the chat specified in from_chat_id.
        from_chat_id: Chat id (or @channelusername) where the original
            message was sent.
        disable_notification: Send silently; instance default when None.

    **Returns:**
    - A `tuple`: a `types.Message` on success and a botApiResponse object.
    """
    payload = {
        "chat_id": chat_id,
        "from_chat_id": from_chat_id,
        "disable_notification": (self.default_disable_notifications
                                 if disable_notification is None
                                 else disable_notification),
        "message_id": message_id,
    }
    return self.response(self.sendRequest("forwardMessage", payload), types.Message)
def copyMessage(self, chat_id: Union[int, str, ], message_id: int, from_chat_id: Union[int, str, ], caption: str = None, reply_markup: Union[types.InlineKeyboardMarkup, types.ReplyKeyboardMarkup, types.ReplyKeyboardRemove, types.ForceReply, ] = None, parse_mode: str = None, caption_entities: list = None, disable_notification: bool = None, reply_to_message_id: int = None, allow_sending_without_reply: bool = None):
    """Copy a message of any kind (service and invoice messages excluded).

    Analogous to forwardMessage, but the copy carries no link to the
    original message. Returns the MessageId of the sent message on success.
    [See Telegram API](https://core.telegram.org/bots/api#copymessage)

    Args:
        chat_id: Target chat id or @channelusername.
        message_id: Message identifier in the chat specified in from_chat_id.
        from_chat_id: Chat id (or @channelusername) of the original message.
        caption: New media caption, 0-1024 characters after entities
            parsing; original caption kept when omitted.
        reply_markup: Inline keyboard, custom reply keyboard, keyboard
            removal or force-reply object.
        parse_mode: Entity-parsing mode for the new caption; instance
            default when None.
        caption_entities: JSON-serialized special entities, alternative to
            parse_mode.
        disable_notification: Send silently; instance default when None.
        reply_to_message_id: Id of the original message when replying.
        allow_sending_without_reply: Send even if the replied-to message is
            not found.

    **Returns:**
    - A `tuple`: a `types.MessageId` on success and a botApiResponse object.
    """
    payload = {
        "chat_id": chat_id,
        "from_chat_id": from_chat_id,
        "message_id": message_id,
        "caption": caption,
        "parse_mode": self.default_parse_mode if parse_mode is None else parse_mode,
        "caption_entities": caption_entities,
        "disable_notification": (self.default_disable_notifications
                                 if disable_notification is None
                                 else disable_notification),
        "reply_to_message_id": reply_to_message_id,
        "allow_sending_without_reply": allow_sending_without_reply,
        "reply_markup": helper.toDict(reply_markup, True),
    }
    return self.response(self.sendRequest("copyMessage", payload), types.MessageId)
def sendPhoto(self, chat_id: Union[int, str, ], photo: Union[types.InputFile, str, ], caption: str = None, reply_markup: Union[types.InlineKeyboardMarkup, types.ReplyKeyboardMarkup, types.ReplyKeyboardRemove, types.ForceReply, ] = None, parse_mode: str = None, caption_entities: list = None, disable_notification: bool = None, reply_to_message_id: int = None, allow_sending_without_reply: bool = None):
    """Send a photo; on success the sent Message is returned.

    [See Telegram API](https://core.telegram.org/bots/api#sendphoto)

    Args:
        chat_id: Target chat id or @channelusername.
        photo: file_id on Telegram servers, an HTTP URL, or a new upload
            via multipart/form-data. At most 10 MB; width + height at most
            10000; width/height ratio at most 20.
        caption: Photo caption, 0-1024 characters after entities parsing.
        reply_markup: Inline keyboard, custom reply keyboard, keyboard
            removal or force-reply object.
        parse_mode: Entity-parsing mode for the caption; instance default
            when None.
        caption_entities: JSON-serialized special entities, alternative to
            parse_mode.
        disable_notification: Send silently; instance default when None.
        reply_to_message_id: Id of the original message when replying.
        allow_sending_without_reply: Send even if the replied-to message is
            not found.

    **Returns:**
    - A `tuple`: a `types.Message` on success and a botApiResponse object.
    """
    payload = {
        "chat_id": chat_id,
        "photo": helper.toDict(photo, True),
        "caption": caption,
        "parse_mode": self.default_parse_mode if parse_mode is None else parse_mode,
        "caption_entities": caption_entities,
        "disable_notification": (self.default_disable_notifications
                                 if disable_notification is None
                                 else disable_notification),
        "reply_to_message_id": reply_to_message_id,
        "allow_sending_without_reply": allow_sending_without_reply,
        "reply_markup": helper.toDict(reply_markup, True),
    }
    return self.response(self.sendRequest("sendPhoto", payload), types.Message)
def sendAudio(self, chat_id: Union[int, str, ], audio: Union[types.InputFile, str, ], caption: str | |
<reponame>QuentinAndre/pysprite<filename>pysprite/psprite.py
import numpy as np
import math
def deviation(data, u):
    """Sample standard deviation of *data* about the supplied mean *u*.

    Uses the Bessel-corrected (n - 1) denominator.
    """
    squared_error = sum((value - u) ** 2 for value in data)
    return (squared_error / (len(data) - 1)) ** 0.5
def deviation_dict(data, u):
    """Sample standard deviation about mean *u* for a {value: count} histogram.

    Counts are the dict values; the Bessel-corrected denominator is the
    total count minus one.
    """
    total_count = sum(data.values())
    weighted_sq = sum(count * (value - u) ** 2 for value, count in data.items())
    return (weighted_sq / (total_count - 1)) ** 0.5
def SPRITE(u, mean_decimals, sd, sd_decimals, n, min_value, max_value, restrictions=[], random_start="Yes",
min_start="No"):
scale = range(min_value, max_value + 1)
if u > max_value or u < min_value:
return "Your mean is outside the scale"
##GRIM test and possible totals
if round(round(u * n, 0) / n, mean_decimals) != u:
return "grim test failed"
lower = u - .5 / (10 ** mean_decimals)
upper = u + .5 / (10 ** mean_decimals)
l_bound = int(math.ceil(lower * n))
if lower < 0:
l_bound = int(lower * n)
u_bound = int(upper * n)
if upper < 0:
u_bound = int(math.floor(upper * n))
if restrictions:
for i in scale:
if i not in restrictions:
break
start = np.array((n - len(restrictions)) * [i])
random_sum = np.random.choice(range(l_bound, u_bound + 1))
loop_count = 0
if sum(start) + sum(restrictions) == random_sum:
random = start
elif sum(start) + sum(restrictions) > random_sum:
return "Your restrictions are impossible given the mean"
else:
escape = False
while True:
if escape:
break
step = np.random.permutation([0] * (n - 1 - len(restrictions)) + [1])
while True:
loop_count += 1
temp = start + step
if loop_count > 10000:
return "Your restrictions may be impossible"
if max(temp) > max(scale):
break
while True:
X = True
for i in restrictions:
if i in temp:
X = False
if X == True:
break
temp = temp + step
if max(temp) > max(scale):
break
if sum(temp) + sum(restrictions) > random_sum:
break
start = temp
if sum(start) + sum(restrictions) == random_sum:
escape = True
random = start
break
break
else:
##create skew,flat, and random distributions
####################skew
if random_start == "No":
if l_bound == u_bound:
local_u = l_bound / float(n)
if max(scale) - local_u < local_u - min(scale):
skew = [max(scale)]
else:
skew = [min(scale)]
for i in range(n - 1):
if np.mean(skew) <= local_u:
skew.append(max(scale))
else:
skew.append(min(scale))
skew.sort()
if sum(skew) == l_bound:
pass
else:
diff = l_bound - sum(skew)
if diff < 0:
skew[-1] = skew[-1] + diff
else:
skew[0] = skew[0] + diff
else:
max_sd = 0
max_skew = []
for i in range(l_bound, u_bound + 1):
local_u = i / float(n)
if max(scale) - local_u < local_u - min(scale):
temp_skew = [max(scale)]
else:
temp_skew = [min(scale)]
for ii in range(n - 1):
if np.mean(temp_skew) <= local_u:
temp_skew.append(max(scale))
else:
temp_skew.append(min(scale))
temp_skew.sort()
if sum(temp_skew) == i:
if deviation(temp_skew, local_u) > max_sd:
max_sd = deviation(temp_skew, local_u)
max_skew = temp_skew
else:
diff = i - sum(temp_skew)
if diff < 0:
temp_skew[-1] = temp_skew[-1] + diff
else:
temp_skew[0] = temp_skew[0] + diff
if deviation(temp_skew, local_u) > max_sd:
max_sd = deviation(temp_skew, local_u)
max_skew = temp_skew
skew = max_skew
#################################flat
if l_bound == u_bound:
local_u = l_bound / float(n)
flat = n * [int(local_u)]
if l_bound > 0:
while sum(flat) < l_bound:
flat.sort()
flat[0] = flat[0] + 1
else:
while sum(flat) > l_bound:
flat.sort(reverse=True)
flat[0] = flat[0] - 1
else:
min_sd = 1000
min_skew = []
for i in range(l_bound, u_bound + 1):
local_u = i / float(n)
temp_flat = n * [int(local_u)]
if l_bound > 0:
while sum(temp_flat) < i:
temp_flat.sort()
temp_flat[0] = temp_flat[0] + 1
else:
while sum(temp_flat) > i:
temp_flat.sort(reverse=True)
temp_flat[0] = temp_flat[0] - 1
if deviation(temp_flat, local_u) < min_sd:
min_sd = deviation(temp_flat, local_u)
min_skew = temp_flat
flat = min_skew
#####################random
random_sum = np.random.choice(range(l_bound, u_bound + 1))
random = np.array(n * [min(scale)])
if sum(random) == random_sum:
pass
else:
while True:
temp_random = random + np.random.permutation([0] * (n - 1) + [1])
if max(temp_random) > max(scale):
continue
random = temp_random
if sum(random) == random_sum:
break
if not restrictions:
if random_start == 'No':
if min_start == "Yes":
initial = flat
closest_sd = deviation(random, np.mean(flat))
closest = flat
else:
differences = [abs(deviation(flat, np.mean(skew)) - sd), abs(deviation(random, np.mean(random)) - sd),
abs(deviation(skew, np.mean(skew)) - sd)]
closest = [flat, random, skew][differences.index(min(differences))]
closest_sd = deviation(closest, np.mean(closest))
initial = closest
else:
initial = random
closest_sd = deviation(random, np.mean(random))
closest = random
else:
initial = random
data = {}
for i in range(min(scale), max(scale) + 1):
data[i] = 0
for i in initial:
data[i] = data.get(i) + 1
for i in restrictions:
data[i] = data.get(i, 0) + 1
count = 0
true_u = sum([i * data[i] for i in data]) / float(sum(data.values()))
data_sd = deviation_dict(data, true_u)
if restrictions:
closest_sd = data_sd
closest = data
if round(data_sd, sd_decimals) == sd:
return ['solution', data]
##random walk
while True:
count += 1
if count > 50000:
return ["no solution", closest, closest_sd]
if data_sd > sd:
if np.random.random() > .5:
for first in np.random.permutation(scale[:-2]):
if data[first] != 0:
break
if data[first] == 0:
return "first selection error"
for second in np.random.permutation(scale[scale.index(first) + 2:]):
if data[second] != 0:
break
if data[second] == 0:
continue
while True:
if first + 1 not in restrictions and second - 1 not in restrictions \
and first not in restrictions and second not in restrictions \
and data[first] > 0 and data[second] > 0:
data[first] = data[first] - 1
data[first + 1] = data[first + 1] + 1
data[second] = data[second] - 1
data[second - 1] = data[second - 1] + 1
break
else:
first = first - 1
second = second + 1
if data.get(first) >= 0 and data.get(second) >= 0:
continue
else:
break
else:
for first in np.random.permutation(scale[2:]):
if data[first] != 0:
break
if data[first] == 0:
return "first selection error"
for second in np.random.permutation(scale[:scale.index(first) - 1]):
if data[second] != 0:
break
if data[second] == 0:
continue
while True:
if first - 1 not in restrictions and second + 1 not in restrictions \
and first not in restrictions and second not in restrictions \
and data[first] > 0 and data[second] > 0:
data[first] = data[first] - 1
data[first - 1] = data[first - 1] + 1
data[second] = data[second] - 1
data[second + 1] = data[second + 1] + 1
break
else:
first = first + 1
second = second - 1
if data.get(first) >= 0 and data.get(second) >= 0:
continue
else:
break
else:
for first in np.random.permutation(scale[1:-1]):
if data[first] != 0:
break
if data[first] == 0:
return "first selection error"
for second in np.random.permutation(scale[1:-1]):
if data[second] != 0:
if first == second:
if data[first] > 1:
break
else:
continue
else:
break
if first == second:
if data[first] > 1:
pass
else:
continue
if data[second] == 0:
continue
if first >= second:
while True:
if first + 1 not in restrictions and second - 1 not in restrictions \
and first not in restrictions and second not in restrictions \
and data[first] > 0 and data[second] > 0:
data[first] = data[first] - 1
data[first + 1] = data[first + 1] + 1
data[second] = data[second] - 1
data[second - 1] = data[second - 1] + 1
break
else:
first = first + 1
second = second - 1
if data.get(first) >= 0 and data.get(second) >= 0 and data.get(first + 1) and data.has_key(
second - 1):
continue
else:
break
else:
while True:
if first - 1 not in restrictions and second + 1 not in restrictions \
and first not in restrictions and second not in restrictions \
and data[first] > 0 and data[second] > 0:
data[first] = data[first] - 1
data[first - 1] = data[first - 1] + 1
data[second] = data[second] - 1
data[second + 1] = data[second + 1] + 1
break
else:
first = first - 1
second = second + 1
if data.get(first) >= 0 and data.get(second) >= 0 and data.has_key(first - 1) and data.has_key(
second + 1):
continue
else:
break
data_sd = deviation_dict(data, true_u)
if abs(sd - data_sd) < abs(sd - closest_sd):
closest = data
closest_sd = data_sd
if round(data_sd, sd_decimals) | |
<reponame>arthur-hav/QtB3Poker<filename>server/server.py<gh_stars>1-10
#!/usr/bin/env python3
import datetime
import importlib
import itertools
import json
import logging
import os
import queue
import struct
import threading
import time
from base64 import b64encode
from math import floor
from urllib.parse import quote_plus

import pika
import redis
import requests
import yaml
from cryptography.fernet import Fernet
from deuces import Card, evaluator
from pymongo import MongoClient
logger = logging.getLogger()
def urand():
    """Return a uniform float in [0, 1) derived from 4 bytes of os.urandom."""
    raw, = struct.unpack('I', os.urandom(4))
    return raw / 2 ** 32
def get_db():
    """Return the ``bordeaux_poker_db`` MongoDB database handle.

    Credentials are read from ``server/conf.yml``. The user and password
    are percent-escaped before being embedded in the connection URI, as
    required by pymongo (raw ':', '@', '/' or '%' in the userinfo raise
    InvalidURI). The config file handle is closed deterministically.
    """
    with open('server/conf.yml') as conf_file:
        read_yaml = yaml.safe_load(conf_file)
    mongodb_user = read_yaml['mongodb']['user']
    mongodb_password = read_yaml['mongodb']['password']
    uri = 'mongodb://%s:%s@127.0.0.1' % (quote_plus(mongodb_user),
                                         quote_plus(mongodb_password))
    pmc = MongoClient(uri)
    return pmc.bordeaux_poker_db
class Deck:
    """A 52-card deck of deuces-encoded card ints.

    Cards are built rank-major ('2h', '2d', '2s', '2c', '3h', ...) and
    dealt from the front of the list.
    """

    def __init__(self):
        # Rank-major construction order; the original enumerate indices
        # were unused, so plain iteration is used.
        self.cards = [Card.new(rank + suit)
                      for rank in '23456789TJQKA'
                      for suit in 'hdsc']

    def pop(self):
        """Deal (remove and return) the top card of the deck."""
        return self.cards.pop(0)

    def fisher_yates_shuffle_improved(self):
        """Shuffle the deck in place with Fisher-Yates, driven by urand()."""
        amnt_to_shuffle = len(self.cards)
        while amnt_to_shuffle > 1:
            i = int(floor(urand() * amnt_to_shuffle))
            amnt_to_shuffle -= 1
            self.cards[i], self.cards[amnt_to_shuffle] = self.cards[amnt_to_shuffle], self.cards[i]

    def remove_card(self, card):
        """Remove the first occurrence of *card* from the deck."""
        self.cards.remove(card)
class Player:
    """Seat state for one player plus the RabbitMQ plumbing that feeds
    the player's actions into ``action_queue``.
    """

    def __init__(self, game, nick, queue_id, key, chips):
        super().__init__()
        self.hand = None  # dealt cards; set by deal()
        self.amount_bet = 0  # chips committed over the whole hand
        self.street_amount_bet = 0  # chips committed on the current street
        self.is_folded = False
        self.acted_street = False
        self.chips = chips
        if key:
            self.key = Fernet(key)
            self.disconnected = False
        else:
            # No session key yet: treat the player as disconnected until a
            # 'reconnect' message arrives (see put_action).
            self.key = None
            self.disconnected = True
        self.game = game
        self.nick = nick
        self.queue_id = queue_id  # also the AMQP routing-key suffix
        self.action_queue = queue.Queue()

    def deal(self, deck):
        """Deal this player a three-card hand from *deck*."""
        self.hand = [deck.pop(), deck.pop(), deck.pop()]

    def put_sb(self):
        """Post the small blind (5), capped by the remaining stack."""
        amount_sb = min(5, self.chips)
        self.amount_bet += amount_sb
        self.street_amount_bet += amount_sb
        self.chips -= amount_sb

    def put_bb(self):
        """Post the big blind (10), capped by the remaining stack."""
        amount_bb = min(10, self.chips)
        self.amount_bet += amount_bb
        self.street_amount_bet += amount_bb
        self.chips -= amount_bb

    def act(self, gamehand):
        """Block for this player's next action and apply it to *gamehand*.

        Waits up to ``gamehand.timeout`` seconds (0 for a disconnected
        player, so they fold immediately). A timeout counts as a fold and
        marks the player disconnected. Recognized actions: 'c' = check/call,
        'r...' = raise, anything else = fold.
        """
        if self.disconnected:
            timeout = 0
        else:
            timeout = gamehand.timeout
        try:
            action = self.action_queue.get(timeout=timeout).decode('utf-8').strip()
            self.disconnected = False
        except queue.Empty:
            action = 'f'
            self.disconnected = True
        if action.lower() == 'c':
            # Call caps at the player's stack (all-in for less).
            amount_called = min(gamehand.max_amount_bet - self.amount_bet, self.chips)
            self.amount_bet += amount_called
            self.street_amount_bet += amount_called
            self.chips -= amount_called
            gamehand.hand_document['actions'][gamehand.street_act].append({'code': 'C',
                                                                           'player': self.queue_id,
                                                                           'amount': amount_called})
            gamehand.last_action = 'check' if not amount_called else 'call'
        elif action.lower().startswith('r'):
            # assumes wire format "r <amount>" — [2:] skips 'r' and the
            # separator character; TODO confirm against the client protocol.
            amount_raised = min(int(action.lower()[2:]), self.chips)
            self.amount_bet += amount_raised
            self.street_amount_bet += amount_raised
            self.chips -= amount_raised
            gamehand.hand_document['actions'][gamehand.street_act].append({'code': 'R',
                                                                           'player': self.queue_id,
                                                                           'amount': amount_raised})
            gamehand.last_action = 'raise'
        else:
            self.is_folded = True
            gamehand.hand_document['actions'][gamehand.street_act].append({'code': 'F',
                                                                           'player': self.queue_id})
            gamehand.last_action = 'fold'

    def read_queue(self, code, credentials):
        """Consume this player's AMQP queue forever, feeding put_action.

        Declares/binds an auto-delete queue 'game.<queue_id>' on the
        'poker_exchange' exchange. start_consuming() blocks, so this is
        presumably run on a dedicated thread — TODO confirm at call site.
        """
        connection = pika.BlockingConnection(pika.ConnectionParameters('localhost', 5672, code,
                                                                       credentials=credentials))
        channel = connection.channel()
        channel.queue_declare('game' + '.' + self.queue_id, auto_delete=True)
        channel.queue_bind(exchange='poker_exchange',
                           queue='game.' + self.queue_id,
                           routing_key='game.' + self.queue_id)
        channel.basic_consume(queue='game.' + self.queue_id, on_message_callback=self.put_action, auto_ack=True)
        channel.start_consuming()

    def put_action(self, ch, method, properties, body):
        """AMQP delivery callback: handle reconnects, queue everything else.

        A literal b'reconnect' refreshes the session key from redis and
        asks the game to repeat state to this player; any other payload is
        queued raw for act() to decode.
        """
        if body == b'reconnect':
            self.disconnected = False
            r = redis.Redis()
            key = r.get(f'session.{self.queue_id}.key')
            # Keep the old key if redis has no fresh one.
            self.key = Fernet(key) if key else self.key
            self.game.repeat(self.queue_id)
            return
        self.action_queue.put(body)
class GameHand:
    """State machine for a single hand of a double-board variant (two flops,
    two turns, one river; three hole cards per player).

    Mutates the shared player list in place (button rotation, busted-player
    removal) and records every action into ``hand_document`` which is written
    to MongoDB at the end of the hand.
    """
    def __init__(self, players, game, deck, mongo_db, tourney_id):
        self.players = players
        self.game = game
        self.flop1 = []
        self.flop2 = []
        self.turn1 = []
        self.turn2 = []
        self.river = []
        self.deck = deck
        self.max_amount_bet = 0  # highest total commitment this hand
        self.prev_raise = 10  # previous raise size (limit-style preflop raises)
        self.min_raise = 15
        self.timeout = 30  # seconds a player has to act (60 postflop)
        self.mongo_db = mongo_db.hands
        self.last_action = None
        self.hand_document = {'actions': {'blinds': [], 'preflop': [], 'flop': [], 'turn': [], 'river': []},
                              'winnings': {},
                              'hands': {},
                              'chips': {},
                              'board': [],
                              'tourney_id': tourney_id,
                              'date': datetime.datetime.utcnow()}
        self.street_act = 'blinds'
        for player in self.players:
            player.to_act = False
            self.hand_document['chips'][player.queue_id] = player.chips
            self.hand_document['winnings'][player.queue_id] = 0
    def _deal(self):
        """Common street setup: drop busted players and reset per-street state."""
        self.last_action = None
        # Iterate a copy because we remove from self.players while looping.
        for p in list(self.players):
            if not p.chips and not p.amount_bet:
                self.players.remove(p)
        for p in self.players:
            p.acted_street = False
            p.street_amount_bet = 0
    def deal_preflop(self):
        """Rotate the button, post blinds, deal hole cards.

        Returns the first player to act, or None if nobody can act
        (e.g. everyone is all-in from the blinds).
        """
        self._deal()
        # Move the old first player to the back: button rotation.
        self.players.append(self.players.pop(0))
        # Heads-up: the button posts the small blind, so order is reversed.
        if len(self.players) == 2:
            self.players.reverse()
        for i, p in enumerate(self.players):
            p.is_folded = False
            p.amount_bet = 0
            if i == 0:
                p.put_sb()
                self.hand_document['actions'][self.street_act].append(
                    {'code': 'SB', 'amount': p.amount_bet, 'player': p.queue_id})
            if i == 1:
                p.put_bb()
                self.hand_document['actions'][self.street_act].append(
                    {'code': 'BB', 'amount': p.amount_bet, 'player': p.queue_id})
            self.max_amount_bet = max(self.max_amount_bet, p.amount_bet)
            p.deal(self.deck)
            self.hand_document['hands'][p.queue_id] = ''.join(Card.int_to_str(c) for c in p.hand)
        players_actable = [p for p in self.players if p.chips and p.amount_bet < self.max_amount_bet]
        if not players_actable:
            # Undo the heads-up reversal so postflop order is correct.
            if len(self.players) == 2:
                self.players.reverse()
            return None
        # First to act preflop is the seat after the big blind with chips left.
        i = 2
        while self.players[i % len(self.players)].chips == 0:
            i += 1
        self.street_act = 'preflop'
        to_act = self.players[i % len(self.players)]
        return to_act
    def act(self, player):
        """Let *player* act, then return the next player to act or None when
        the betting round is closed."""
        player.act(self)
        # Preflop raises follow a growing limit (prev+min); postflop is no-limit.
        if player.amount_bet > self.max_amount_bet and not self.flop1:
            self.min_raise, self.prev_raise = self.prev_raise + self.min_raise, self.min_raise
        self.max_amount_bet = max(self.max_amount_bet, player.amount_bet)
        player.acted_street = True
        # Walk the table in order starting after the actor, wrapping around.
        index = self.players.index(player)
        for p in self.players[index + 1:] + self.players[:index]:
            if p.is_folded:
                continue
            if not p.chips:
                continue
            if p.amount_bet == self.max_amount_bet and p.acted_street:
                continue
            return p
        return None
    def calc_bet_pot(self):
        """Return [(bet_level, pot_size), ...] -- one entry per side pot.

        Bet levels are the all-in players' (smaller) commitments plus the
        current max; each pot holds the chips committed between consecutive
        levels.
        """
        bet_amounts = []
        for p in self.players:
            if not p.chips and p.amount_bet < self.max_amount_bet:
                bet_amounts.append(p.amount_bet)
        bet_amounts.append(self.max_amount_bet)
        pot_amounts = [0] * len(bet_amounts)
        for player in self.players:
            prev_bet = 0
            for i, amount in enumerate(bet_amounts):
                amount = max(min(amount - prev_bet, player.amount_bet - prev_bet), 0)
                pot_amounts[i] += amount
                prev_bet = bet_amounts[i]
        return list(zip(bet_amounts, pot_amounts))
    def calc_prev_street_pot(self):
        """Chips committed on earlier streets (excludes current-street bets)."""
        return sum(p.amount_bet - p.street_amount_bet for p in self.players)
    def get_btn_player(self):
        """The button: first seat heads-up preflop, otherwise the last seat."""
        if len(self.players) == 2 and not self.flop1:
            return self.players[0]
        return self.players[-1]
    def send_state(self, to_act, showdown=None):
        """Broadcast the public table state; send each live player their
        private view (hole cards, amounts to call / min raise).

        *showdown* is the winning 5-card combination when revealing hands.
        """
        btn_player = self.get_btn_player()
        common = {
            'board': ''.join(
                Card.int_to_str(c) for c in self.flop1 + self.flop2 + self.turn1 + self.turn2 + self.river),
            'active': to_act.nick if to_act else None,
            'prev_pot': self.calc_prev_street_pot(),
            'pot': [bp[1] for bp in self.calc_bet_pot()],
            'players': [
                {'chips': p.chips,
                 'bet': p.street_amount_bet,
                 'name': p.nick,
                 'is_folded': p.is_folded,
                 'disconnected': p.disconnected,
                 'btn': p == btn_player} for p in self.players
            ],
            'last_action': self.last_action,
        }
        private = {}
        if showdown:
            # At showdown, all live hands become public.
            common['winning_hand'] = ''.join(Card.int_to_str(c) for c in showdown)
            for i, player in enumerate(self.players):
                if not player.is_folded:
                    common['players'][i]['holes'] = ''.join(Card.int_to_str(c) for c in player.hand)
        else:
            for i, player in enumerate(self.players):
                # Postflop: no-limit (min raise = double the deficit, floor 10).
                if self.flop1:
                    min_raise = max(2 * (self.max_amount_bet - player.amount_bet), 10)
                else:  # preflop
                    min_raise = self.min_raise + self.max_amount_bet - player.amount_bet
                min_raise = min(min_raise, player.chips)
                to_call = min(self.max_amount_bet - player.amount_bet, player.chips)
                private[player.queue_id] = {'to_call': to_call,
                                            'min_raise': min_raise,
                                            'nl_raise': bool(self.flop1),
                                            'holes': ''.join(Card.int_to_str(c) for c in player.hand)
                                            }
        self.game.broadcast(common)
        for p_id, v in private.items():
            self.game.send_player(p_id, v)
    def deal_flop(self):
        """Deal both two-card flops; return the first actor or None."""
        self.send_state(None)
        # Undo the heads-up preflop reversal: button acts last postflop.
        if len(self.players) == 2:
            self.players.reverse()
        self.timeout = 60
        self._deal()
        self.flop1 = [self.deck.pop(), self.deck.pop()]
        self.flop2 = [self.deck.pop(), self.deck.pop()]
        self.hand_document['board'].append(''.join(Card.int_to_str(c) for c in self.flop1) + '/' +
                                           ''.join(Card.int_to_str(c) for c in self.flop2))
        players_actable = [p for p in self.players if p.chips and not p.is_folded]
        self.street_act = 'flop'
        return players_actable[0] if len(players_actable) > 1 else None
    def deal_turn(self):
        """Deal one turn card per board; return the first actor or None."""
        self.send_state(None)
        self._deal()
        self.turn1 = [self.deck.pop()]
        self.turn2 = [self.deck.pop()]
        self.hand_document['board'].append(''.join(Card.int_to_str(c) for c in self.turn1) + '/' +
                                           ''.join(Card.int_to_str(c) for c in self.turn2))
        players_actable = [p for p in self.players if p.chips and not p.is_folded]
        self.street_act = 'turn'
        return players_actable[0] if len(players_actable) > 1 else None
    def deal_river(self):
        """Deal the single shared river card; return the first actor or None."""
        self.send_state(None)
        self._deal()
        self.river = [self.deck.pop()]
        players_actable = [p for p in self.players if p.chips and not p.is_folded]
        self.hand_document['board'].append(''.join(Card.int_to_str(c) for c in self.river))
        self.street_act = 'river'
        return players_actable[0] if len(players_actable) > 1 else None
    def check_all_folded(self):
        """If only one player remains, pay them the pot, persist the hand and
        return True; otherwise return False."""
        if len([p for p in self.players if not p.is_folded]) == 1:
            winner = [p for p in self.players if not p.is_folded][0]
            for p in self.players:
                self.hand_document['winnings'][p.queue_id] -= p.amount_bet
            winner.chips += self.calc_bet_pot()[0][1]
            # NOTE(review): the reversal restores postflop seat order when the
            # hand ended preflop heads-up; calc_bet_pot is called both before
            # and after -- order of these statements is load-bearing.
            if len(self.players) == 2 and not self.flop1:
                self.players.reverse()
            self.hand_document['winnings'][winner.queue_id] += self.calc_bet_pot()[0][1]
            self.mongo_db.insert_one(self.hand_document)
            self.send_state(None)
            time.sleep(1)
            return True
        return False
    def showdown(self):
        """Evaluate every live hand over all board combinations, award each
        side pot to its best hand, reveal, and persist the hand document."""
        ev = evaluator.Evaluator()
        for p in self.players:
            self.hand_document['winnings'][p.queue_id] -= p.amount_bet
        player_ranks = {}
        players_comb = {}
        # Best rank over: any 2 of 3 hole cards x either flop x either turn
        # x any 3 of the 5 resulting board cards (lower rank = better).
        for player in self.players:
            if player.is_folded:
                continue
            for comb in itertools.combinations(player.hand, 2):
                for flop in self.flop1, self.flop2:
                    for turn in self.turn1, self.turn2:
                        for bcomb in itertools.combinations(flop + turn + self.river, 3):
                            rank = ev.evaluate(cards=list(comb), board=list(bcomb))
                            if not player_ranks.get(player.queue_id) or rank < player_ranks[player.queue_id]:
                                player_ranks[player.queue_id] = rank
                                players_comb[player.queue_id] = list(comb) + list(bcomb)
        last_amount_bet = 0
        # Award each side pot in turn, consuming amount_bet as we go
        # (players' amount_bet is deliberately destroyed here).
        for i, (bet, pot) in enumerate(self.calc_bet_pot()):
            min_rank = None
            min_player = None
            for player in self.players:
                if player.is_folded or player.amount_bet < bet - last_amount_bet:
                    player.amount_bet = 0
                    continue
                if min_rank is None or player_ranks[player.queue_id] < min_rank:
                    min_rank = player_ranks[player.queue_id]
                    min_player = player
                player.amount_bet -= bet - last_amount_bet
            last_amount_bet = bet
            self.hand_document['winnings'][min_player.queue_id] += pot
            min_player.chips += pot
            self.send_state(None, showdown=players_comb[min_player.queue_id])
            time.sleep(3.5)
        self.mongo_db.insert_one(self.hand_document)
class Game:
    def __init__(self, players, code, credentials, game_config):
        """Set up a tournament: build Player objects (session keys from
        redis), record the tourney in MongoDB, wire up the RabbitMQ
        exchange/queues and start one consumer thread per player.

        *code* is the RabbitMQ virtual host; *game_config* supplies
        'start_chips'.
        """
        r = redis.Redis()
        self.credentials = credentials
        self.players = [Player(self,
                               p['login'],
                               str(p['_id']),
                               r.get(f'session.{p["_id"]}.key'),
                               int(game_config['start_chips']))
                        for p in players]
        self.connection = pika.BlockingConnection(pika.ConnectionParameters('localhost', 5672, code,
                                                                            credentials=credentials))
        self.game_config = game_config
        self.start_time = datetime.datetime.utcnow()
        self.code = code
        self.mongo_db = get_db()
        self.tourney_id = self.mongo_db.tourneys.insert_one({'placements': {},
                                                             'players': [p.queue_id for p in self.players],
                                                             'game': os.getpid(),
                                                             'date': self.start_time}).inserted_id
        self.last_msg_private = {}
        self.last_msg_public = None
        self.blind_augment = None
        self.channel = self.connection.channel()
        self.channel.exchange_declare(exchange='poker_exchange', exchange_type='topic')
        self.channel.queue_declare('public', auto_delete=True)
        self.channel.queue_bind(exchange='poker_exchange',
                                queue='public',
                                routing_key='public')
        for p in self.players:
            # One durable per-player copy of the public feed, plus a consumer
            # thread reading that player's private 'game.<id>' queue.
            self.channel.queue_declare(f'public.{p.queue_id}')
            self.channel.queue_bind(exchange='poker_exchange',
                                    queue=f'public.{p.queue_id}',
                                    routing_key='public')
            rabbit_consumer = threading.Thread(target=p.read_queue, args=(code, credentials,))
            rabbit_consumer.start()
def broadcast(self, msg):
self.last_msg_public = msg
self.channel.basic_publish(exchange='poker_exchange',
routing_key='public',
body=json.dumps(msg).encode('utf-8'))
def send_player(self, p_id, msg):
for p in self.players:
if p.queue_id | |
Constraint(expr= m.b227 - m.b233 + m.b278 <= 1)
# Machine-generated Pyomo transitivity cuts: each constraint b_ij - b_ik + b_jk <= 1
# over binary ordering variables. Do not edit by hand -- regenerate instead.
m.c2059 = Constraint(expr= m.b227 - m.b234 + m.b279 <= 1)
m.c2060 = Constraint(expr= m.b228 - m.b229 + m.b280 <= 1)
m.c2061 = Constraint(expr= m.b228 - m.b230 + m.b281 <= 1)
m.c2062 = Constraint(expr= m.b228 - m.b231 + m.b282 <= 1)
m.c2063 = Constraint(expr= m.b228 - m.b232 + m.b283 <= 1)
m.c2064 = Constraint(expr= m.b228 - m.b233 + m.b284 <= 1)
m.c2065 = Constraint(expr= m.b228 - m.b234 + m.b285 <= 1)
m.c2066 = Constraint(expr= m.b229 - m.b230 + m.b286 <= 1)
m.c2067 = Constraint(expr= m.b229 - m.b231 + m.b287 <= 1)
m.c2068 = Constraint(expr= m.b229 - m.b232 + m.b288 <= 1)
m.c2069 = Constraint(expr= m.b229 - m.b233 + m.b289 <= 1)
m.c2070 = Constraint(expr= m.b229 - m.b234 + m.b290 <= 1)
m.c2071 = Constraint(expr= m.b230 - m.b231 + m.b291 <= 1)
m.c2072 = Constraint(expr= m.b230 - m.b232 + m.b292 <= 1)
m.c2073 = Constraint(expr= m.b230 - m.b233 + m.b293 <= 1)
m.c2074 = Constraint(expr= m.b230 - m.b234 + m.b294 <= 1)
m.c2075 = Constraint(expr= m.b231 - m.b232 + m.b295 <= 1)
m.c2076 = Constraint(expr= m.b231 - m.b233 + m.b296 <= 1)
m.c2077 = Constraint(expr= m.b231 - m.b234 + m.b297 <= 1)
m.c2078 = Constraint(expr= m.b232 - m.b233 + m.b298 <= 1)
m.c2079 = Constraint(expr= m.b232 - m.b234 + m.b299 <= 1)
m.c2080 = Constraint(expr= m.b233 - m.b234 + m.b300 <= 1)
m.c2081 = Constraint(expr= m.b235 - m.b236 + m.b246 <= 1)
m.c2082 = Constraint(expr= m.b235 - m.b237 + m.b247 <= 1)
m.c2083 = Constraint(expr= m.b235 - m.b238 + m.b248 <= 1)
m.c2084 = Constraint(expr= m.b235 - m.b239 + m.b249 <= 1)
m.c2085 = Constraint(expr= m.b235 - m.b240 + m.b250 <= 1)
m.c2086 = Constraint(expr= m.b235 - m.b241 + m.b251 <= 1)
m.c2087 = Constraint(expr= m.b235 - m.b242 + m.b252 <= 1)
m.c2088 = Constraint(expr= m.b235 - m.b243 + m.b253 <= 1)
m.c2089 = Constraint(expr= m.b235 - m.b244 + m.b254 <= 1)
m.c2090 = Constraint(expr= m.b235 - m.b245 + m.b255 <= 1)
m.c2091 = Constraint(expr= m.b236 - m.b237 + m.b256 <= 1)
m.c2092 = Constraint(expr= m.b236 - m.b238 + m.b257 <= 1)
m.c2093 = Constraint(expr= m.b236 - m.b239 + m.b258 <= 1)
m.c2094 = Constraint(expr= m.b236 - m.b240 + m.b259 <= 1)
m.c2095 = Constraint(expr= m.b236 - m.b241 + m.b260 <= 1)
m.c2096 = Constraint(expr= m.b236 - m.b242 + m.b261 <= 1)
m.c2097 = Constraint(expr= m.b236 - m.b243 + m.b262 <= 1)
m.c2098 = Constraint(expr= m.b236 - m.b244 + m.b263 <= 1)
m.c2099 = Constraint(expr= m.b236 - m.b245 + m.b264 <= 1)
m.c2100 = Constraint(expr= m.b237 - m.b238 + m.b265 <= 1)
m.c2101 = Constraint(expr= m.b237 - m.b239 + m.b266 <= 1)
m.c2102 = Constraint(expr= m.b237 - m.b240 + m.b267 <= 1)
m.c2103 = Constraint(expr= m.b237 - m.b241 + m.b268 <= 1)
m.c2104 = Constraint(expr= m.b237 - m.b242 + m.b269 <= 1)
m.c2105 = Constraint(expr= m.b237 - m.b243 + m.b270 <= 1)
m.c2106 = Constraint(expr= m.b237 - m.b244 + m.b271 <= 1)
m.c2107 = Constraint(expr= m.b237 - m.b245 + m.b272 <= 1)
m.c2108 = Constraint(expr= m.b238 - m.b239 + m.b273 <= 1)
m.c2109 = Constraint(expr= m.b238 - m.b240 + m.b274 <= 1)
m.c2110 = Constraint(expr= m.b238 - m.b241 + m.b275 <= 1)
m.c2111 = Constraint(expr= m.b238 - m.b242 + m.b276 <= 1)
m.c2112 = Constraint(expr= m.b238 - m.b243 + m.b277 <= 1)
m.c2113 = Constraint(expr= m.b238 - m.b244 + m.b278 <= 1)
m.c2114 = Constraint(expr= m.b238 - m.b245 + m.b279 <= 1)
m.c2115 = Constraint(expr= m.b239 - m.b240 + m.b280 <= 1)
m.c2116 = Constraint(expr= m.b239 - m.b241 + m.b281 <= 1)
m.c2117 = Constraint(expr= m.b239 - m.b242 + m.b282 <= 1)
m.c2118 = Constraint(expr= m.b239 - m.b243 + m.b283 <= 1)
m.c2119 = Constraint(expr= m.b239 - m.b244 + m.b284 <= 1)
m.c2120 = Constraint(expr= m.b239 - m.b245 + m.b285 <= 1)
m.c2121 = Constraint(expr= m.b240 - m.b241 + m.b286 <= 1)
m.c2122 = Constraint(expr= m.b240 - m.b242 + m.b287 <= 1)
m.c2123 = Constraint(expr= m.b240 - m.b243 + m.b288 <= 1)
m.c2124 = Constraint(expr= m.b240 - m.b244 + m.b289 <= 1)
m.c2125 = Constraint(expr= m.b240 - m.b245 + m.b290 <= 1)
m.c2126 = Constraint(expr= m.b241 - m.b242 + m.b291 <= 1)
m.c2127 = Constraint(expr= m.b241 - m.b243 + m.b292 <= 1)
m.c2128 = Constraint(expr= m.b241 - m.b244 + m.b293 <= 1)
m.c2129 = Constraint(expr= m.b241 - m.b245 + m.b294 <= 1)
m.c2130 = Constraint(expr= m.b242 - m.b243 + m.b295 <= 1)
m.c2131 = Constraint(expr= m.b242 - m.b244 + m.b296 <= 1)
m.c2132 = Constraint(expr= m.b242 - m.b245 + m.b297 <= 1)
m.c2133 = Constraint(expr= m.b243 - m.b244 + m.b298 <= 1)
m.c2134 = Constraint(expr= m.b243 - m.b245 + m.b299 <= 1)
m.c2135 = Constraint(expr= m.b244 - m.b245 + m.b300 <= 1)
m.c2136 = Constraint(expr= m.b246 - m.b247 + m.b256 <= 1)
m.c2137 = Constraint(expr= m.b246 - m.b248 + m.b257 <= 1)
m.c2138 = Constraint(expr= m.b246 - m.b249 + m.b258 <= 1)
m.c2139 = Constraint(expr= m.b246 - m.b250 + m.b259 <= 1)
m.c2140 = Constraint(expr= m.b246 - m.b251 + m.b260 <= 1)
m.c2141 = Constraint(expr= m.b246 - m.b252 + m.b261 <= 1)
m.c2142 = Constraint(expr= m.b246 - m.b253 + m.b262 <= 1)
m.c2143 = Constraint(expr= m.b246 - m.b254 + m.b263 <= 1)
m.c2144 = Constraint(expr= m.b246 - m.b255 + m.b264 <= 1)
m.c2145 = Constraint(expr= m.b247 - m.b248 + m.b265 <= 1)
m.c2146 = Constraint(expr= m.b247 - m.b249 + m.b266 <= 1)
m.c2147 = Constraint(expr= m.b247 - m.b250 + m.b267 <= 1)
m.c2148 = Constraint(expr= m.b247 - m.b251 + m.b268 <= 1)
m.c2149 = Constraint(expr= m.b247 - m.b252 + m.b269 <= 1)
m.c2150 = Constraint(expr= m.b247 - m.b253 + m.b270 <= 1)
m.c2151 = Constraint(expr= m.b247 - m.b254 + m.b271 <= 1)
m.c2152 = Constraint(expr= m.b247 - m.b255 + m.b272 <= 1)
m.c2153 = Constraint(expr= m.b248 - m.b249 + m.b273 <= 1)
m.c2154 = Constraint(expr= m.b248 - m.b250 + m.b274 <= 1)
m.c2155 = Constraint(expr= m.b248 - m.b251 + m.b275 <= 1)
m.c2156 = Constraint(expr= m.b248 - m.b252 + m.b276 <= 1)
m.c2157 = Constraint(expr= m.b248 - m.b253 + m.b277 <= 1)
m.c2158 = Constraint(expr= m.b248 - m.b254 + m.b278 <= 1)
m.c2159 = Constraint(expr= m.b248 - m.b255 + m.b279 <= 1)
m.c2160 = Constraint(expr= m.b249 - m.b250 + m.b280 <= 1)
m.c2161 = Constraint(expr= m.b249 - m.b251 + m.b281 <= 1)
m.c2162 = Constraint(expr= m.b249 - m.b252 + m.b282 <= 1)
m.c2163 = Constraint(expr= m.b249 - m.b253 + m.b283 <= 1)
m.c2164 = Constraint(expr= m.b249 - m.b254 + m.b284 <= 1)
m.c2165 = Constraint(expr= m.b249 - m.b255 + m.b285 <= 1)
m.c2166 = Constraint(expr= m.b250 - m.b251 + m.b286 <= 1)
m.c2167 = Constraint(expr= m.b250 - m.b252 + m.b287 <= 1)
m.c2168 = Constraint(expr= m.b250 - m.b253 + m.b288 <= 1)
m.c2169 = Constraint(expr= m.b250 - m.b254 + m.b289 <= 1)
m.c2170 = Constraint(expr= m.b250 - m.b255 + m.b290 <= 1)
m.c2171 = Constraint(expr= m.b251 - m.b252 + m.b291 <= 1)
m.c2172 = Constraint(expr= m.b251 - m.b253 + m.b292 <= 1)
m.c2173 = Constraint(expr= m.b251 - m.b254 + m.b293 <= 1)
m.c2174 = Constraint(expr= m.b251 - m.b255 + m.b294 <= 1)
m.c2175 = Constraint(expr= m.b252 - m.b253 + m.b295 <= 1)
m.c2176 = Constraint(expr= m.b252 - m.b254 + m.b296 <= 1)
m.c2177 = Constraint(expr= m.b252 - m.b255 + m.b297 <= 1)
m.c2178 = Constraint(expr= m.b253 - m.b254 + m.b298 <= 1)
m.c2179 = Constraint(expr= m.b253 - m.b255 + m.b299 <= 1)
m.c2180 = Constraint(expr= m.b254 - m.b255 + m.b300 <= 1)
m.c2181 = Constraint(expr= m.b256 - m.b257 + m.b265 <= 1)
m.c2182 = Constraint(expr= m.b256 - m.b258 + m.b266 <= 1)
m.c2183 = Constraint(expr= m.b256 - m.b259 + m.b267 <= 1)
m.c2184 = Constraint(expr= m.b256 - m.b260 + m.b268 <= 1)
m.c2185 = Constraint(expr= m.b256 - m.b261 + m.b269 <= 1)
m.c2186 = Constraint(expr= m.b256 - m.b262 + m.b270 <= 1)
m.c2187 = Constraint(expr= m.b256 - m.b263 + m.b271 <= 1)
m.c2188 = Constraint(expr= m.b256 - m.b264 + m.b272 <= 1)
m.c2189 = Constraint(expr= m.b257 - m.b258 + m.b273 <= 1)
m.c2190 = Constraint(expr= m.b257 - m.b259 + m.b274 <= 1)
m.c2191 = Constraint(expr= m.b257 - m.b260 + m.b275 <= 1)
m.c2192 = Constraint(expr= m.b257 - m.b261 + m.b276 <= 1)
m.c2193 = Constraint(expr= m.b257 - m.b262 + m.b277 <= 1)
m.c2194 = Constraint(expr= m.b257 - m.b263 + m.b278 <= 1)
m.c2195 = Constraint(expr= m.b257 - m.b264 + m.b279 <= 1)
m.c2196 = Constraint(expr= m.b258 - m.b259 + m.b280 <= 1)
m.c2197 = Constraint(expr= m.b258 - m.b260 + m.b281 <= 1)
m.c2198 = Constraint(expr= m.b258 - m.b261 + m.b282 <= 1)
m.c2199 = Constraint(expr= m.b258 - m.b262 + m.b283 <= 1)
m.c2200 = Constraint(expr= m.b258 | |
# Command line script for checking agreement in the Icelandic pronunciation dictionary, which is in a tab-seperated .csv format
# Errors, discrepancies and suspicious-looking entries printed to a text file.
# The two arguments are by default 'ice_pron_dict_complete_2106.csv' and 'check_agreement_output.txt' (overwritten every time the script is run!)
# These arguments can also be specified by user like so:
# $ python check_agreement.py --PEDI_file=my_PEDI_file.csv --output_file=my_output_file.txt
# Note that this script does not overwrite anything in the .csv file which is checked. Some manual checking of the output will be required.
# For more info, see README.md
import sys
import argparse
# Recognized derivational prefixes (PREFIX column validation).
prefixes = ['af', 'al', 'all', 'and', 'auð', 'einka', 'endur', 'fjar',
            'fjöl', 'for', 'frum', 'gagn', 'mis', 'ná', 'ó', 'sam', 'sí', 'tor', 'van', 'ör']
# Orthographic consonant/vowel clusters of interest.
clusters = ['áf','ág','óf','óg','úf','úg','fld','gns','gts','fns','kts','fts','kkts','lds','llds',
            'lfr','lfs','lks','lps','lsks','lts','llts','mds','mmds','mps','nds','nnds','ngds','ngn',
            'nks','nsks','nnsks','pts','ppts','rfs','rfst','rgs','rks','rrks','rkst','rkts','rls','rmd',
            'rms','rmt','rnd','rnsk','rps','rpst','rpts','rsk','rsks','rskt','rsl','rsn','rst','rrst',
            'rsts','rts','rrts','sks','sps','stk','sts']
# Allowed values for the boolean-like CSV columns (stored as strings).
tf = ['true', 'false']
languages = ['IS', 'GB', 'DE', 'FR', 'IT', 'DK', 'NL', 'NO', 'SE', 'ES']
variants = ['standard_clear', 'standard_cas', 'north_clear', 'north_cas', 'northeast_clear',
            'northeast_cas', 'south_clear','south_cas', 'all']
POS = ['n', 'lo', 'so', 'fn', 'ao', 'ns', 'to', 'fs', 'st', 'none']
compound_attrs = ['head', 'modifier', 'both', 'none']
# Full X-SAMPA symbol inventory allowed in transcriptions.
symbols = ['p', 'p_h', 't', 't_h', 'c', 'c_h', 'k', 'k_h', 'v', 'f', 'D', 'T', 's',
           'j', 'C', 'G', 'x', 'h', 'm', 'n', 'J', 'N', 'm_0', 'n_0', 'J_0', 'N_0', 'l',
           'l_0', 'r', 'r_0', 'I', 'I:', 'i', 'i:', 'E', 'E:', 'a', 'a:', 'Y', 'Y:',
           '9', '9:', 'u', 'u:', 'O', 'O:', 'au', 'au:', 'ou', 'ou:', 'ei', 'ei:', 'ai',
           'ai:', '9i', '9i:', 'Yi', 'Oi']
vowels = ['I', 'I:', 'i', 'i:', 'E', 'E:', 'a', 'a:', 'Y', 'Y:',
          '9', '9:', 'u', 'u:', 'O', 'O:', 'au', 'au:', 'ou', 'ou:', 'ei', 'ei:', 'ai',
          'ai:', '9i', '9i:', 'Yi', 'Oi']
# Accumulators filled while scanning the dictionary in main().
word_dict = {}
identical_entries = []
identical_exc_compound = []
identical_exc_attr = []
identical_exc_prefix = []
modifiers = []
heads = []
# Lists of symbols below are for checking whether inconsistencies in pronunciation of modifiers are due to regular
# phenomena and should therefore not be listed as errors
# e.g. 'sann' pronounced [s a J] and not [s a n] in 'sanngjarnt', due to assimilation
# Dicts contain variants of same sounds, which are substituted depending on sound environment
voiced_2_unvoiced = {'D':'T', 'G':'x', 'v':'f'}
back_2_front = {'k':'c', 'N k':'J c'}
# Lists of sound categories that can impact other sounds
unvoiced_sounds = ['p_h', 't_h', 'c_h', 'k_h', 'f', 'T', 's', 'C', 'x', 'h']
vowels_and_voiced = ['I', 'I:', 'i', 'i:', 'E', 'E:', 'a', 'a:', 'Y', 'Y:',
                     '9', '9:', 'u', 'u:', 'O', 'O:', 'au', 'au:', 'ou', 'ou:', 'ei', 'ei:', 'ai',
                     'ai:', '9i', '9i:', 'Yi', 'Oi', 'm', 'n', 'v', 'l', 'j']
front_vowels = ['I', 'I:', 'i', 'i:', 'E', 'E:', 'ei', 'ei:', 'ai', 'ai:', 'j']
# Grouped together here, the possible sounds that can follow an aspirated plosive in Icelandic
vowels_and_vrj = ['I', 'I:', 'i', 'i:', 'E', 'E:', 'a', 'a:', 'Y', 'Y:',
                  '9', '9:', 'u', 'u:', 'O', 'O:', 'au', 'au:', 'ou', 'ou:', 'ei', 'ei:', 'ai',
                  'ai:', '9i', '9i:', 'Yi', 'Oi', 'v', 'j', 'r']
def is_unvoiced_variant(modifier, compound):
    """Return True when *compound* begins with *modifier* whose final voiced
    consonant has been devoiced (D->T, G->x, v->f) before an unvoiced sound."""
    last = modifier[-1]
    if last not in voiced_2_unvoiced:
        return False
    devoiced_prefix = modifier[:-1] + voiced_2_unvoiced[last]
    # NOTE(review): compound[len(modifier) + 1] compares a single character
    # against multi-character symbols such as 'p_h' -- confirm this is intended.
    return compound.startswith(devoiced_prefix) and compound[len(modifier) + 1] in unvoiced_sounds
def no_aspiration(modifier, compound):
    """Return True when a modifier-final aspirated plosive (p_h/t_h/k_h)
    loses its aspiration inside *compound* (optionally with the preceding
    long vowel shortened) before a sound that is not a vowel, v, j or r."""
    if modifier[-3:] in ['p_h', 't_h', 'k_h']:
        unasp_mod = modifier[:-3]
        mod_len = len(unasp_mod)
        # BUG FIX: short_unasp_mod/short_mod_len were only assigned when the
        # modifier contained a long vowel, but the elif below referenced them
        # unconditionally, raising UnboundLocalError for short-vowel modifiers.
        short_unasp_mod = None
        short_mod_len = 0
        if ':' in unasp_mod[-5:]:
            short_unasp_mod = unasp_mod.replace(':', '')
            short_mod_len = len(short_unasp_mod)
        if compound.startswith(unasp_mod) and compound[mod_len + 1] not in vowels_and_vrj:
            return True
        elif short_unasp_mod is not None and compound.startswith(short_unasp_mod) \
                and compound[short_mod_len + 1] not in vowels_and_vrj:
            return True
    return False
def is_short_vowel_mod(modifier, compound):
    """Return True when *modifier*'s final long vowel is shortened inside
    *compound* before a consonant (possibly also devoiced)."""
    if ':' not in modifier[-5:]:
        return False
    shortened = modifier.replace(':', '')
    shortened_fits = compound.startswith(shortened) and compound[len(shortened) + 1] not in vowels
    return shortened_fits or is_unvoiced_variant(shortened, compound)
def is_short_vowel_head(head, compound):
    """Return True when *compound* ends with the short-vowel form of *head*
    (i.e. *head* with all length marks ':' removed)."""
    return ':' in head and compound.endswith(head.replace(':', ''))
def is_voiced_variant(modifier, compound):
    """Return True when a modifier-final devoiced sonorant (n_0/m_0/l_0/r_0)
    is voiced inside *compound* before a vowel or voiced consonant."""
    if modifier[-3:] not in ['n_0', 'm_0', 'l_0', 'r_0']:
        return False
    voiced_form = modifier[:-2]  # strip the '_0' devoicing diacritic
    if not compound.startswith(voiced_form):
        return False
    return compound[len(voiced_form) + 1] in vowels_and_voiced
def is_front_variant(modifier, compound):
    """Return True when a modifier-final back consonant is fronted
    (k->c, 'N k'->'J c') inside *compound* before a front vowel or j."""
    mod_len = len(modifier)
    # BUG FIX: next_sound was unbound when the compound was too short,
    # raising UnboundLocalError in the conditions below; default to '' so
    # those conditions simply fail.
    next_sound = ''
    # BUG FIX: the guard was `len(compound) > mod_len`, which still allowed
    # an IndexError at compound[mod_len + 1] when the lengths differed by 1.
    if len(compound) > mod_len + 1:
        next_sound = compound[mod_len + 1]
        if len(compound) > (mod_len + 2):
            if compound[mod_len + 2] != ' ':
                next_sound += compound[mod_len + 2]
    if modifier[-1] in back_2_front and compound.startswith(modifier[:-1] + back_2_front[modifier[-1]]) \
            and next_sound in front_vowels:
        return True
    elif modifier[-3:] in back_2_front and compound.startswith(modifier[:-3] + back_2_front[modifier[-3:]]) \
            and next_sound in front_vowels:
        return True
    return False
def no_plosive(modifier, compound):
    """Return True when a modifier-final 'N k' cluster drops its plosive
    before 's' in *compound* (e.g. [N k] + s -> [N s])."""
    if not modifier.endswith('N k'):
        return False
    return compound.startswith(modifier[:-2] + ' s')
def is_assimilated(modifier, compound):
    """Return True when modifier-final 'n' surfaces as a velar/palatal nasal
    (N/J, possibly devoiced) before k/c in *compound* -- place assimilation,
    e.g. 'sann' [s a n] -> [s a J] in 'sanngjarnt'. Long vowels in *modifier*
    are shortened before comparing."""
    sampa = modifier.replace(':', '') if ':' in modifier[-5:] else modifier
    mod_symbols = sampa.split(' ')
    comp_symbols = compound.split(' ')
    n_mod = len(mod_symbols)
    stem = ' '.join(mod_symbols[:-1])
    return (mod_symbols[-1] == 'n'
            and compound.startswith(stem)
            and comp_symbols[n_mod - 1] in ['N', 'J', 'N_0', 'J_0']
            and comp_symbols[n_mod] in ['k', 'c', 'k_h', 'c_h'])
def is_high_vowel(modifier, compound):
    """Return True when modifier-final 'I' is raised to 'i' in *compound*
    before a velar (N) or palatal (J) nasal."""
    if modifier[-1] != 'I':
        return False
    raised_stem = modifier[:-1] + 'i'
    return compound.startswith((raised_stem + ' N', raised_stem + ' J'))
def is_softened(modifier, compound):
    """Return True when modifier-final 'k' is softened to 'G' before 'D'
    in *compound* (fricativization, e.g. before a dental)."""
    return modifier[-1] == 'k' and compound.startswith(modifier[:-1] + 'G D')
def is_regular_pron(modifier, compound):
    """Return True when the modifier's pronunciation inside *compound* differs
    from its citation form only by one of the regular phonological processes
    checked by the helpers above (so it should not be reported as an error)."""
    checks = (
        is_unvoiced_variant,
        is_short_vowel_mod,
        is_voiced_variant,
        is_front_variant,
        no_aspiration,
        no_plosive,
        is_assimilated,
        is_high_vowel,
        is_softened,
    )
    # Same short-circuit order as the original elif chain.
    return any(check(modifier, compound) for check in checks)
def main(PEDI_file, output_file):
prev_line = None
org_stdout = sys.stdout
outfile = open(output_file, 'w')
sys.stdout = outfile
with open(PEDI_file, 'r') as f:
next(f)
for line in f:
word, sampa, pos, variant, is_compound, compound_attr, prefix, language, valid, rest = line.split('\t', 9)
# Checking all symbols in phonetic transcription are allowable
sampa_symbols = sampa.split(' ')
for s in sampa_symbols:
if s not in symbols:
print('Warning! Non-allowable phonetic symbol', s, 'for entry:')
print(line)
# Checking options in other columns are allowable
if pos not in POS or variant not in variants or is_compound not in tf \
or compound_attr not in compound_attrs \
or prefix not in tf or language not in languages or valid not in tf:
print('Warning! Non-allowable option for entry:')
print(line)
# Storing all entries for all word forms
line = line.split('\t', 9)
if valid == 'true':
if word in word_dict:
word_dict[word].append((line))
else:
word_dict[word] = []
word_dict[word].append((line))
# Checking for identical entries here:
if line == prev_line:
identical_entries.append(line)
elif prev_line and line[:4] == prev_line[:4] and line[5:] == prev_line[5:]:
identical_exc_compound.append(line)
elif prev_line and line[:2] == prev_line[:2] and line[5] != prev_line[5]:
identical_exc_attr.append(line)
elif prev_line and line[:5] == prev_line[:5] and line[6:] == prev_line[6:]:
identical_exc_prefix.append(line)
prev_line = line
# Collecting words marked heads and modifiers to later check agreement when used in compounds:
if compound_attr == 'modifier':
modifiers.append((word, sampa, variant))
elif compound_attr == 'head':
heads.append((word, sampa, variant))
elif compound_attr == 'both':
modifiers.append((word, sampa, variant))
heads.append((word, sampa, variant))
# Checking for missing dialectal variant entries, or entries that should be marked 'all' for PRON_VARIANT
print('Only one entry, PRON_VARIANT not \'all\':')
count = 0
for word in word_dict:
if len(word_dict[word]) == 1 and word_dict[word][0][3] != 'all':
print(word_dict[word])
count += 1
elif len(word_dict[word]) == 2 and word_dict[word][0][2] != word_dict[word][1][2] and word_dict[word][0][3] != 'all':
print(word_dict[word])
count += 1
if count == 0:
print('No instances found.')
# Checking for multiple entries for the same word form and dialectal variant
# (these will include homographs, which should be ignored)
print('Multiple entries (possibly homographs):')
count = 0
for word in word_dict:
if len(word_dict[word]) > 1:
variant_list = []
sampa_list = []
for entry in word_dict[word]:
variant_list.append(entry[3])
sampa_list.append(entry[1])
if len(set(sampa_list)) > len(set(variant_list)):
print(word_dict[word])
count += 1
if count == 0:
print('No instances found.')
# Checking for multiple entries, where one is marked 'all' for PRON_VARIANT
print('Multiple entries, one marked \'all\':')
count = 0
for word in word_dict:
if len(word_dict[word]) > 1:
entry_list = []
for entry in word_dict[word]:
entry_list.append(entry[3])
if len(set(entry_list)) > 1 and 'all' in entry_list:
print(word_dict[word])
count += 1
if count == 0:
print('No instances found.')
# Checking for multiple entries, where none are marked 'standard_clear' for PRON_VARIANT
print('Multiple entries, none marked \'standard clear\':')
count = 0
for word in word_dict:
if len(word_dict[word]) > 1:
entry_list = []
for entry in word_dict[word]:
entry_list.append(entry[3])
if len(set(entry_list)) > 1 and 'standard_clear' not in entry_list:
print(word_dict[word])
count += 1
if count == 0:
print('No instances found.')
# Checking for identical entries:
print('Identical entries:')
count = 0
for | |
"""
ClassExpression :=
Class |
ObjectIntersectionOf | ObjectUnionOf | ObjectComplementOf | ObjectOneOf |
ObjectSomeValuesFrom | ObjectAllValuesFrom | ObjectHasValue | ObjectHasSelf |
ObjectMinCardinality | ObjectMaxCardinality | ObjectExactCardinality |
DataSomeValuesFrom | DataAllValuesFrom | DataHasValue |
DataMinCardinality | DataMaxCardinality | DataExactCardinality
ObjectIntersectionOf := 'ObjectIntersectionOf' '(' ClassExpression ClassExpression { ClassExpression } ')'
ObjectUnionOf := 'ObjectUnionOf' '(' ClassExpression ClassExpression { ClassExpression } ')'
ObjectComplementOf := 'ObjectComplementOf' '(' ClassExpression ')'
ObjectOneOf := 'ObjectOneOf' '(' Individual { Individual }')'
ObjectSomeValuesFrom := 'ObjectSomeValuesFrom' '(' ObjectPropertyExpression ClassExpression ')'
ObjectAllValuesFrom := 'ObjectAllValuesFrom' '(' ObjectPropertyExpression ClassExpression ')'
ObjectHasValue := 'ObjectHasValue' '(' ObjectPropertyExpression Individual ')'
ObjectHasSelf := 'ObjectHasSelf' '(' ObjectPropertyExpression ')'
ObjectMinCardinality := 'ObjectMinCardinality' '(' nonNegativeInteger ObjectPropertyExpression [ ClassExpression ] ')'
ObjectMaxCardinality := 'ObjectMaxCardinality' '(' nonNegativeInteger ObjectPropertyExpression [ ClassExpression ] ')'
ObjectExactCardinality := 'ObjectExactCardinality' '(' nonNegativeInteger ObjectPropertyExpression [ ClassExpression ] ')'
DataSomeValuesFrom := 'DataSomeValuesFrom' '(' DataPropertyExpression { DataPropertyExpression } DataRange ')'
DataAllValuesFrom := 'DataAllValuesFrom' '(' DataPropertyExpression { DataPropertyExpression } DataRange ')'
DataHasValue := 'DataHasValue' '(' DataPropertyExpression Literal ')'
DataMinCardinality := 'DataMinCardinality' '(' nonNegativeInteger DataPropertyExpression [ DataRange ] ')'
DataMaxCardinality := 'DataMaxCardinality' '(' nonNegativeInteger DataPropertyExpression [ DataRange ] ')'
DataExactCardinality := 'DataExactCardinality' '(' nonNegativeInteger DataPropertyExpression [ DataRange ] ')'
HasKey := 'HasKey' '(' axiomAnnotations ClassExpression '(' { ObjectPropertyExpression } ')' '(' { DataPropertyExpression } ')' ')'
"""
from dataclasses import dataclass
from typing import List, ClassVar, Union, Optional, ForwardRef
from rdflib import URIRef, OWL, Graph, RDF
from rdflib.term import BNode, Literal as RDFLiteral
from funowl.base.fun_owl_base import FunOwlBase
from funowl.base.list_support import ListWrapper
from funowl.converters.rdf_converter import SEQ
from funowl.dataproperty_expressions import DataPropertyExpression
from funowl.dataranges import DataRange
from funowl.general_definitions import NonNegativeInteger
from funowl.identifiers import IRI
from funowl.individuals import Individual
from funowl.literals import Literal
from funowl.objectproperty_expressions import ObjectPropertyExpression
# TODO: find out why we can't import this and/or why the types that are currently wrapped in ForwardRef below don't
# work if they are plain strings. Maybe we need 3.8?
from funowl.terminals.TypingHelper import proc_forwards
from funowl.writers import FunctionalWriter
class Class(IRI):
    """An OWL class named by an IRI (ClassExpression := Class | ...);
    serialized to RDF with rdf:type owl:Class."""
    v: IRI.types() = IRI.v_field()
    rdf_type: ClassVar[URIRef] = OWL.Class
@dataclass
class ObjectIntersectionOf(FunOwlBase):
    """Functional syntax: ObjectIntersectionOf( CE1 CE2 { CE } ) -- at least
    two class expressions. The hand-written variadic __init__ takes
    precedence over the dataclass-generated one."""
    classExpressions: List["ClassExpression"]
    def __init__(self, *classExpression: "ClassExpression") -> None:
        # NOTE(review): sibling ObjectUnionOf wraps its arguments in
        # ListWrapper(..., ClassExpression); the plain list() here skips that
        # coercion -- confirm whether the asymmetry is intentional.
        self.classExpressions = list(classExpression)
        super().__init__()
    def to_functional(self, w: FunctionalWriter) -> FunctionalWriter:
        """Emit ObjectIntersectionOf(...) after enforcing arity >= 2."""
        self.list_cardinality(self.classExpressions, 'exprs', 2)
        return w.func(self, lambda: w.iter(self.classExpressions))
    def to_rdf(self, g: Graph, emit_type_arc: bool = False) -> BNode:
        # _:x rdf:type owl:Class .
        # _:x owl:intersectionOf T(SEQ CE1 ... CEn) .
        subj = BNode()
        g.add((subj, RDF.type, OWL.Class))
        g.add((subj, OWL.intersectionOf, SEQ(g, self.classExpressions)))
        return subj
@dataclass
class ObjectUnionOf(FunOwlBase):
    """OWL ObjectUnionOf( CE1 ... CEn ): individuals belonging to at least one
    of the supplied class expressions (minimum of two)."""
    classExpressions: List["ClassExpression"]

    def __init__(self, *classExpression: "ClassExpression") -> None:
        self.classExpressions = ListWrapper(classExpression, ClassExpression)
        super().__init__()

    def to_functional(self, w: FunctionalWriter) -> FunctionalWriter:
        """Emit 'ObjectUnionOf( CE1 ... CEn )'; at least two members required."""
        self.list_cardinality(self.classExpressions, 'exprs', 2)
        return w.func(self, lambda: w.iter(self.classExpressions))

    def to_rdf(self, g: Graph, emit_type_arc: bool = False) -> BNode:
        """OWL 2 RDF mapping:
           _:x rdf:type owl:Class .
           _:x owl:unionOf T(SEQ CE1 ... CEn) .
        """
        subj = BNode()
        g.add((subj, RDF.type, OWL.Class))
        g.add((subj, OWL.unionOf, SEQ(g, self.classExpressions)))
        return subj
@dataclass
class ObjectComplementOf(FunOwlBase):
    """OWL ObjectComplementOf( CE ): everything that is not an instance of CE."""
    classExpression: ForwardRef("ClassExpression")

    def to_functional(self, w: FunctionalWriter) -> FunctionalWriter:
        """Emit 'ObjectComplementOf( CE )'."""
        return w.func(self, lambda: w + self.classExpression)

    def to_rdf(self, g: Graph, emit_type_arc: bool = False) -> BNode:
        """OWL 2 RDF mapping:
           _:x rdf:type owl:Class .
           _:x owl:complementOf T(CE) .
        """
        node = BNode()
        g.add((node, RDF.type, OWL.Class))
        g.add((node, OWL.complementOf, self.classExpression.to_rdf(g)))
        return node
@dataclass(init=False)
class ObjectOneOf(FunOwlBase):
    """OWL ObjectOneOf( a1 ... an ): the class whose members are exactly the
    enumerated individuals."""
    individuals: List[Individual.types()]

    def __init__(self, *individual: Individual) -> None:
        self.individuals = list(individual)
        super().__init__()

    def to_functional(self, w: FunctionalWriter) -> FunctionalWriter:
        """Emit 'ObjectOneOf( a1 ... an )'."""
        return w.func(self, lambda: w.iter(self.individuals))

    def to_rdf(self, g: Graph, emit_type_arc: bool = False) -> BNode:
        """OWL 2 RDF mapping:
           _:x rdf:type owl:Class .
           _:x owl:oneOf T(SEQ a1 ... an) .
        """
        node = BNode()
        g.add((node, RDF.type, OWL.Class))
        g.add((node, OWL.oneOf, SEQ(g, self.individuals)))
        return node
@dataclass
class ObjectSomeValuesFrom(FunOwlBase):
    """OWL ObjectSomeValuesFrom( OPE CE ): existential restriction --
    individuals with at least one OPE-successor belonging to CE."""
    objectPropertyExpression: ObjectPropertyExpression
    classExpression: ForwardRef("ClassExpression")
    coercion_allowed: ClassVar[bool] = True

    def to_functional(self, w: FunctionalWriter) -> FunctionalWriter:
        """Emit 'ObjectSomeValuesFrom( OPE CE )'."""
        return w.func(self, lambda: w + self.objectPropertyExpression + self.classExpression)

    def to_rdf(self, g: Graph, emit_type_arc: bool = False) -> BNode:
        """OWL 2 RDF mapping:
           _:x rdf:type owl:Restriction .
           _:x owl:onProperty T(OPE) .
           _:x owl:someValuesFrom T(CE) .
        """
        node = BNode()
        g.add((node, RDF.type, OWL.Restriction))
        g.add((node, OWL.onProperty, self.objectPropertyExpression.to_rdf(g)))
        g.add((node, OWL.someValuesFrom, self.classExpression.to_rdf(g)))
        return node
@dataclass
class ObjectAllValuesFrom(FunOwlBase):
    """OWL ObjectAllValuesFrom( OPE CE ): universal restriction --
    individuals all of whose OPE-successors belong to CE."""
    objectPropertyExpression: ObjectPropertyExpression
    classExpression: ForwardRef("ClassExpression")

    def to_functional(self, w: FunctionalWriter) -> FunctionalWriter:
        """Emit 'ObjectAllValuesFrom( OPE CE )'."""
        return w.func(self, lambda: w + self.objectPropertyExpression + self.classExpression)

    def to_rdf(self, g: Graph, emit_type_arc: bool = False) -> BNode:
        """OWL 2 RDF mapping:
           _:x rdf:type owl:Restriction .
           _:x owl:onProperty T(OPE) .
           _:x owl:allValuesFrom T(CE) .
        """
        node = BNode()
        g.add((node, RDF.type, OWL.Restriction))
        g.add((node, OWL.onProperty, self.objectPropertyExpression.to_rdf(g)))
        g.add((node, OWL.allValuesFrom, self.classExpression.to_rdf(g)))
        return node
@dataclass
class ObjectHasValue(FunOwlBase):
    """OWL ObjectHasValue( OPE a ): individuals connected by OPE to the
    specific individual a."""
    objectPropertyExpression: ObjectPropertyExpression
    individual: Individual

    def to_functional(self, w: FunctionalWriter) -> FunctionalWriter:
        """Emit 'ObjectHasValue( OPE a )'."""
        return w.func(self, lambda: w + self.objectPropertyExpression + self.individual)

    def to_rdf(self, g: Graph, emit_type_arc: bool = False) -> BNode:
        """OWL 2 RDF mapping:
           _:x rdf:type owl:Restriction .
           _:x owl:onProperty T(OPE) .
           _:x owl:hasValue T(a) .
        """
        node = BNode()
        g.add((node, RDF.type, OWL.Restriction))
        g.add((node, OWL.onProperty, self.objectPropertyExpression.to_rdf(g)))
        g.add((node, OWL.hasValue, self.individual.to_rdf(g)))
        return node
@dataclass
class ObjectHasSelf(FunOwlBase):
    """OWL ObjectHasSelf( OPE ): individuals related to themselves via OPE."""
    objectPropertyExpression: ObjectPropertyExpression

    def to_functional(self, w: FunctionalWriter) -> FunctionalWriter:
        """Emit 'ObjectHasSelf( OPE )'."""
        return w.func(self, lambda: w + self.objectPropertyExpression)

    def to_rdf(self, g: Graph, emit_type_arc: bool = False) -> BNode:
        """OWL 2 RDF mapping:
           _:x rdf:type owl:Restriction .
           _:x owl:onProperty T(OPE) .
           _:x owl:hasSelf "true"^^xsd:boolean .
        """
        node = BNode()
        g.add((node, RDF.type, OWL.Restriction))
        g.add((node, OWL.onProperty, self.objectPropertyExpression.to_rdf(g)))
        g.add((node, OWL.hasSelf, RDFLiteral(True)))
        return node
@dataclass
class ObjectMinCardinality(FunOwlBase):
    """OWL ObjectMinCardinality( n OPE [ CE ] ): "at least n" restriction,
    optionally qualified by a class expression."""
    min_: NonNegativeInteger
    objectPropertyExpression: ObjectPropertyExpression
    classExpression: Optional["ClassExpression"] = None

    def to_functional(self, w: FunctionalWriter) -> FunctionalWriter:
        """Emit 'ObjectMinCardinality( n OPE [CE] )'; the CE is optional."""
        return w.func(self, lambda: (w + self.min_ + self.objectPropertyExpression).opt(self.classExpression))

    def to_rdf(self, g: Graph, emit_type_arc: bool = False) -> BNode:
        """OWL 2 RDF mapping: unqualified form uses owl:minCardinality;
        qualified form uses owl:minQualifiedCardinality plus owl:onClass."""
        node = BNode()
        g.add((node, RDF.type, OWL.Restriction))
        g.add((node, OWL.onProperty, self.objectPropertyExpression.to_rdf(g)))
        if not self.classExpression:
            g.add((node, OWL.minCardinality, self.min_.to_rdf(g)))
        else:
            g.add((node, OWL.minQualifiedCardinality, self.min_.to_rdf(g)))
            g.add((node, OWL.onClass, self.classExpression.to_rdf(g)))
        return node
@dataclass
class ObjectMaxCardinality(FunOwlBase):
    """OWL ObjectMaxCardinality( n OPE [ CE ] ): "at most n" restriction,
    optionally qualified by a class expression."""
    max_: NonNegativeInteger
    objectPropertyExpression: ObjectPropertyExpression
    classExpression: Optional["ClassExpression"] = None

    def to_functional(self, w: FunctionalWriter) -> FunctionalWriter:
        """Emit 'ObjectMaxCardinality( n OPE [CE] )'; the CE is optional."""
        return w.func(self, lambda: (w + self.max_ + self.objectPropertyExpression).opt(self.classExpression))

    def to_rdf(self, g: Graph, emit_type_arc: bool = False) -> BNode:
        """OWL 2 RDF mapping: unqualified form uses owl:maxCardinality;
        qualified form uses owl:maxQualifiedCardinality plus owl:onClass."""
        node = BNode()
        g.add((node, RDF.type, OWL.Restriction))
        g.add((node, OWL.onProperty, self.objectPropertyExpression.to_rdf(g)))
        if not self.classExpression:
            g.add((node, OWL.maxCardinality, self.max_.to_rdf(g)))
        else:
            g.add((node, OWL.maxQualifiedCardinality, self.max_.to_rdf(g)))
            g.add((node, OWL.onClass, self.classExpression.to_rdf(g)))
        return node
@dataclass
class ObjectExactCardinality(FunOwlBase):
    """OWL ObjectExactCardinality( n OPE [ CE ] ): "exactly n" restriction,
    optionally qualified by a class expression."""
    card: NonNegativeInteger
    objectPropertyExpression: ObjectPropertyExpression
    classExpression: Optional["ClassExpression"] = None

    def to_functional(self, w: FunctionalWriter) -> FunctionalWriter:
        """Emit 'ObjectExactCardinality( n OPE [CE] )'; the CE is optional."""
        return w.func(self, lambda: (w + self.card + self.objectPropertyExpression).opt(self.classExpression))

    def to_rdf(self, g: Graph, emit_type_arc: bool = False) -> BNode:
        """OWL 2 RDF mapping: unqualified form uses owl:cardinality;
        qualified form uses owl:qualifiedCardinality plus owl:onClass."""
        node = BNode()
        g.add((node, RDF.type, OWL.Restriction))
        g.add((node, OWL.onProperty, self.objectPropertyExpression.to_rdf(g)))
        if not self.classExpression:
            g.add((node, OWL.cardinality, self.card.to_rdf(g)))
        else:
            g.add((node, OWL.qualifiedCardinality, self.card.to_rdf(g)))
            g.add((node, OWL.onClass, self.classExpression.to_rdf(g)))
        return node
@dataclass
class DataSomeValuesFrom(FunOwlBase):
    """OWL DataSomeValuesFrom( DPE1 ... DPEn DR ): existential restriction over
    one or more data properties.  The LAST positional argument is the data
    range; all preceding arguments are data property expressions.
    """
    dataPropertyExpressions: List[DataPropertyExpression]
    dataRange: DataRange

    def __init__(self, *dataPropertyExpressions: Union[DataPropertyExpression, DataRange]) -> None:
        # Split the varargs: everything before the final argument is a DPE.
        self.dataPropertyExpressions = list(dataPropertyExpressions[:-1])
        self.dataRange = dataPropertyExpressions[-1]
        # Consistency fix: every other hand-written constructor in this module
        # (ObjectIntersectionOf, ObjectUnionOf, ObjectOneOf) finishes with
        # super().__init__(); this one previously omitted it.
        super().__init__()

    def to_functional(self, w: FunctionalWriter) -> FunctionalWriter:
        """Emit 'DataSomeValuesFrom( DPE1 ... DPEn DR )'."""
        return w.func(self, lambda: w.iter(self.dataPropertyExpressions) + self.dataRange)

    def to_rdf(self, g: Graph, emit_type_arc: bool = False) -> BNode:
        """OWL 2 RDF mapping -- single property:
           _:x rdf:type owl:Restriction .
           _:x owl:onProperty T(DPE) .
           _:x owl:someValuesFrom T(DR) .
        Two or more properties use owl:onProperties with an RDF list instead.
        """
        subj = BNode()
        g.add((subj, RDF.type, OWL.Restriction))
        if len(self.dataPropertyExpressions) >= 2:
            g.add((subj, OWL.onProperties, SEQ(g, self.dataPropertyExpressions)))
        else:
            g.add((subj, OWL.onProperty, self.dataPropertyExpressions[0].to_rdf(g)))
        g.add((subj, OWL.someValuesFrom, self.dataRange.to_rdf(g)))
        return subj
@dataclass
class DataAllValuesFrom(FunOwlBase):
    """OWL DataAllValuesFrom( DPE1 ... DPEn DR ): universal restriction over
    one or more data properties.  The LAST positional argument is the data
    range; all preceding arguments are data property expressions.
    """
    dataPropertyExpressions: List[DataPropertyExpression]
    dataRange: DataRange

    def __init__(self, *dataPropertyExpressions: Union[DataPropertyExpression, DataRange]) -> None:
        # Split the varargs: everything before the final argument is a DPE.
        self.dataPropertyExpressions = list(dataPropertyExpressions[:-1])
        self.dataRange = dataPropertyExpressions[-1]
        # Consistency fix: match the other hand-written constructors in this
        # module, which all call super().__init__(); it was omitted here.
        super().__init__()

    def to_functional(self, w: FunctionalWriter) -> FunctionalWriter:
        """Emit 'DataAllValuesFrom( DPE1 ... DPEn DR )'."""
        return w.func(self, lambda: w.iter(self.dataPropertyExpressions) + self.dataRange)

    def to_rdf(self, g: Graph, emit_type_arc: bool = False) -> BNode:
        """OWL 2 RDF mapping -- single property:
           _:x rdf:type owl:Restriction .
           _:x owl:onProperty T(DPE) .
           _:x owl:allValuesFrom T(DR) .
        Two or more properties use owl:onProperties with an RDF list instead.
        """
        subj = BNode()
        g.add((subj, RDF.type, OWL.Restriction))
        if len(self.dataPropertyExpressions) >= 2:
            g.add((subj, OWL.onProperties, SEQ(g, self.dataPropertyExpressions)))
        else:
            g.add((subj, OWL.onProperty, self.dataPropertyExpressions[0].to_rdf(g)))
        g.add((subj, OWL.allValuesFrom, self.dataRange.to_rdf(g)))
        return subj
@dataclass
class DataHasValue(FunOwlBase):
    """OWL DataHasValue( DPE lt ): individuals having the literal lt as a
    value of data property DPE."""
    dataPropertyExpression: DataPropertyExpression
    literal: Literal

    def to_functional(self, w: FunctionalWriter) -> FunctionalWriter:
        """Emit 'DataHasValue( DPE lt )'."""
        return w.func(self, lambda: (w + self.dataPropertyExpression + self.literal))

    def to_rdf(self, g: Graph, emit_type_arc: bool = False) -> BNode:
        """OWL 2 RDF mapping:
           _:x rdf:type owl:Restriction .
           _:x owl:onProperty T(DPE) .
           _:x owl:hasValue T(lt) .
        """
        node = BNode()
        g.add((node, RDF.type, OWL.Restriction))
        g.add((node, OWL.onProperty, self.dataPropertyExpression.to_rdf(g)))
        g.add((node, OWL.hasValue, self.literal.to_rdf(g)))
        return node
@dataclass
class DataMinCardinality(FunOwlBase):
min_: NonNegativeInteger
dataPropertyExpression: DataPropertyExpression
dataRange: Optional[DataRange] = None
def to_functional(self, w: FunctionalWriter) -> FunctionalWriter:
return w.func(self, lambda: (w + self.min_ + self.dataPropertyExpression).opt(self.dataRange))
def to_rdf(self, g: Graph, emit_type_arc: bool | |
<filename>tests/metadata/test_ddl.py
#!/usr/bin/env python
# Copyright (c) 2012 Cloudera, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Impala tests for DDL statements
import logging
import pytest
import shlex
import time
from tests.common.test_result_verifier import *
from subprocess import call
from tests.common.test_vector import *
from tests.common.test_dimensions import ALL_NODES_ONLY
from tests.common.impala_test_suite import *
from tests.common.skip import *
from tests.util.filesystem_utils import WAREHOUSE
# Validates DDL statements (create, drop)
class TestDdlStatements(ImpalaTestSuite):
  """End-to-end tests for Impala DDL statements (CREATE/DROP/ALTER on
  databases, tables, views, functions, and data sources)."""
  # Databases these tests create; dropped again by cleanup().
  TEST_DBS = ['ddl_test_db', 'alter_table_test_db', 'alter_table_test_db2',
      'function_ddl_test', 'udf_test', 'data_src_test']
@classmethod
def get_workload(self):
return 'functional-query'
  @classmethod
  def add_test_dimensions(cls):
    """Constrains the test matrix: one exec-option configuration (adding a
    sync_ddl=1 variant only on exhaustive runs) and uncompressed text only."""
    super(TestDdlStatements, cls).add_test_dimensions()
    sync_ddl_opts = [0, 1]
    if cls.exploration_strategy() != 'exhaustive':
      # Only run with sync_ddl on exhaustive since it increases test runtime.
      sync_ddl_opts = [0]
    cls.TestMatrix.add_dimension(create_exec_option_dimension(
      cluster_sizes=ALL_NODES_ONLY,
      disable_codegen_options=[False],
      batch_sizes=[0],
      sync_ddl=sync_ddl_opts))
    # There is no reason to run these tests using all dimensions.
    cls.TestMatrix.add_dimension(create_uncompressed_text_dimension(cls.get_workload()))
  def setup_method(self, method):
    """Per-test setup: drop leftover test DBs/dirs and snapshot the count of
    queries in the EXCEPTION state (re-checked in teardown_method)."""
    self.cleanup()
    # Get the current number of queries that are in the 'EXCEPTION' state. Used for
    # verification after running each test case.
    self.start_exception_count = self.query_exception_count()
    # NOTE(review): cleanup() above already calls cleanup_hdfs_dirs(); this
    # second call looks redundant -- confirm before removing.
    self.cleanup_hdfs_dirs()
  def teardown_method(self, method):
    """Per-test teardown: assert no query entered the EXCEPTION state during
    the test (compared against the snapshot taken in setup_method)."""
    end_exception_count = self.query_exception_count()
    # The number of exceptions may be < than what was in setup if the queries in the
    # EXCEPTION state were bumped out of the FINISHED list. We should never see an
    # increase in the number of queries in the exception state.
    assert end_exception_count <= self.start_exception_count
def query_exception_count(self):
"""Returns the number of occurrences of 'EXCEPTION' on the debug /queries page"""
return len(re.findall('EXCEPTION',
self.impalad_test_service.read_debug_webpage('queries')))
def cleanup(self):
map(self.cleanup_db, self.TEST_DBS)
self.cleanup_hdfs_dirs()
def cleanup_hdfs_dirs(self):
# Cleanup the test table HDFS dirs between test runs so there are no errors the next
# time a table is created with the same location. This also helps remove any stale
# data from the last test run.
self.hdfs_client.delete_file_dir("test-warehouse/part_data/", recursive=True)
self.hdfs_client.delete_file_dir("test-warehouse/t1_tmp1/", recursive=True)
self.hdfs_client.delete_file_dir("test-warehouse/t_part_tmp/", recursive=True)
  @skip_if_s3_hdfs_client # S3: missing coverage: drop table/database
  @pytest.mark.execute_serially
  def test_drop_cleans_hdfs_dirs(self):
    """Verifies DROP TABLE removes the table directory but keeps the database
    directory, and DROP DATABASE removes the database directory."""
    # Start from a known-clean state.
    self.hdfs_client.delete_file_dir("test-warehouse/ddl_test_db.db/", recursive=True)
    assert not self.hdfs_client.exists("test-warehouse/ddl_test_db.db/")
    self.client.execute('use default')
    self.client.execute('create database ddl_test_db')
    # Verify the db directory exists
    assert self.hdfs_client.exists("test-warehouse/ddl_test_db.db/")
    self.client.execute("create table ddl_test_db.t1(i int)")
    # Verify the table directory exists
    assert self.hdfs_client.exists("test-warehouse/ddl_test_db.db/t1/")
    # Dropping the table removes the table's directory and preserves the db's directory
    self.client.execute("drop table ddl_test_db.t1")
    assert not self.hdfs_client.exists("test-warehouse/ddl_test_db.db/t1/")
    assert self.hdfs_client.exists("test-warehouse/ddl_test_db.db/")
    # Dropping the db removes the db's directory
    self.client.execute("drop database ddl_test_db")
    assert not self.hdfs_client.exists("test-warehouse/ddl_test_db.db/")
  @skip_if_s3_insert
  @pytest.mark.execute_serially
  def test_create(self, vector):
    """Runs the QueryTest/create .test file against a fresh ddl_test_db."""
    # Some statements in the .test file are expected to fail; keep going.
    vector.get_value('exec_option')['abort_on_error'] = False
    self.__create_db_synced('ddl_test_db', vector)
    self.run_test_case('QueryTest/create', vector, use_db='ddl_test_db',
       multiple_impalad=self.__use_multiple_impalad(vector))
  @pytest.mark.execute_serially
  def test_sync_ddl_drop(self, vector):
    """Verifies the catalog gets updated properly when dropping objects with sync_ddl
    enabled"""
    # Create WITHOUT sync_ddl so the drop below can race a statestore heartbeat.
    self.client.set_configuration({'sync_ddl': 0})
    if IS_DEFAULT_FS:
      self.client.execute('create database ddl_test_db')
    else:
      self.client.execute("create database ddl_test_db location "
          "'%s/ddl_test_db.db'" % WAREHOUSE)
    self.client.set_configuration({'sync_ddl': 1})
    # Drop the database immediately after creation (within a statestore heartbeat) and
    # verify the catalog gets updated properly.
    self.client.execute('drop database ddl_test_db')
    assert 'ddl_test_db' not in self.client.execute("show databases").data
  # TODO: don't use hdfs_client
  @skip_if_s3_insert  # S3: missing coverage: alter table
  @pytest.mark.execute_serially
  def test_alter_table(self, vector):
    """Runs the QueryTest/alter-table .test file; seeds an HDFS partition
    directory that does not follow the key=value naming convention."""
    # Some statements in the .test file are expected to fail; keep going.
    vector.get_value('exec_option')['abort_on_error'] = False
    # Create directory for partition data that does not use the (key=value)
    # format.
    self.hdfs_client.make_dir("test-warehouse/part_data/", permission=777)
    self.hdfs_client.create_file("test-warehouse/part_data/data.txt", file_data='1984')
    # Create test databases
    self.__create_db_synced('alter_table_test_db', vector)
    self.__create_db_synced('alter_table_test_db2', vector)
    self.run_test_case('QueryTest/alter-table', vector, use_db='alter_table_test_db',
      multiple_impalad=self.__use_multiple_impalad(vector))
  @pytest.mark.execute_serially
  def test_views_ddl(self, vector):
    """Runs the QueryTest/views-ddl .test file against a fresh ddl_test_db."""
    # Some statements in the .test file are expected to fail; keep going.
    vector.get_value('exec_option')['abort_on_error'] = False
    self.__create_db_synced('ddl_test_db', vector)
    self.run_test_case('QueryTest/views-ddl', vector, use_db='ddl_test_db',
      multiple_impalad=self.__use_multiple_impalad(vector))
  @pytest.mark.execute_serially
  def test_functions_ddl(self, vector):
    """Runs the QueryTest/functions-ddl .test file in a dedicated database."""
    self.__create_db_synced('function_ddl_test', vector)
    self.run_test_case('QueryTest/functions-ddl', vector, use_db='function_ddl_test',
       multiple_impalad=self.__use_multiple_impalad(vector))
  @pytest.mark.execute_serially
  def test_create_drop_function(self, vector):
    """Creates, runs, and drops the same UDF repeatedly to exercise the lib
    cache mechanism (see create_drop_ddl)."""
    create_fn_stmt = ("create function f() returns int "
        "location '%s/libTestUdfs.so' symbol='NoArgs'" % WAREHOUSE)
    select_stmt = "select f() from functional.alltypes limit 10"
    # '%s' placeholder is filled with "if exists" / "" by create_drop_ddl.
    drop_fn_stmt = "drop function %s f()"
    self.create_drop_ddl(vector, "udf_test", [create_fn_stmt], [drop_fn_stmt],
        select_stmt)
  @pytest.mark.execute_serially
  def test_create_drop_data_src(self, vector):
    """Creates, runs, and drops the same external data source repeatedly to
    exercise the lib cache mechanism (see create_drop_ddl)."""
    create_ds_stmt = ("CREATE DATA SOURCE test_data_src "
        "LOCATION '%s/data-sources/test-data-source.jar' "
        "CLASS 'com.cloudera.impala.extdatasource.AllTypesDataSource' "
        "API_VERSION 'V1'" % WAREHOUSE)
    create_tbl_stmt = """CREATE TABLE data_src_tbl (x int)
        PRODUCED BY DATA SOURCE test_data_src"""
    # '%s' placeholders are filled with "if exists" / "" by create_drop_ddl.
    drop_ds_stmt = "drop data source %s test_data_src"
    drop_tbl_stmt = "drop table %s data_src_tbl"
    select_stmt = "select * from data_src_tbl limit 1"
    # The table depends on the data source, so create source first, drop it last.
    create_stmts = [create_ds_stmt, create_tbl_stmt]
    drop_stmts = [drop_tbl_stmt, drop_ds_stmt]
    self.create_drop_ddl(vector, "data_src_test", create_stmts, drop_stmts,
        select_stmt)
  def create_drop_ddl(self, vector, db_name, create_stmts, drop_stmts, select_stmt):
    """Runs CREATE/DROP DDL commands repeatedly to exercise the lib cache.

    create_stmts: CREATE statements, executed in order on each iteration.
    drop_stmts: DROP statements, executed in order; each must contain a '%s'
        placeholder that is filled with "if exists" (initial cleanup) or "".
    select_stmt: a single statement run after executing the CREATE statements.
    """
    # TODO: it's hard to tell that the cache is working (i.e. if it did nothing to drop
    # the cache, these tests would still pass). Testing that is a bit harder and requires
    # us to update the udf binary in the middle.
    self.__create_db_synced(db_name, vector)
    self.client.set_configuration(vector.get_value('exec_option'))
    self.client.execute("use %s" % (db_name,))
    for drop_stmt in drop_stmts: self.client.execute(drop_stmt % ("if exists"))
    for i in xrange(1, 10):
      for create_stmt in create_stmts: self.client.execute(create_stmt)
      self.client.execute(select_stmt)
    for drop_stmt in drop_stmts: self.client.execute(drop_stmt % (""))
  @pytest.mark.execute_serially
  def test_create_alter_bulk_partition(self, vector):
    """Adds many partitions, alters one (file format and location), inserts,
    and verifies the row count; covers IMPALA-741."""
    # Change the scale depending on the exploration strategy, with 50 partitions this
    # takes a few minutes to run, with 10 partitions it takes ~50s for two configurations.
    num_parts = 50
    if self.exploration_strategy() != 'exhaustive': num_parts = 10
    self.client.execute("use default")
    self.client.execute("drop table if exists foo_part")
    self.client.execute("create table foo_part(i int) partitioned by(j int, s string)")
    # Add some partitions (first batch of two)
    for i in xrange(num_parts / 5):
      start = time.time()
      self.client.execute("alter table foo_part add partition(j=%d, s='%s')" % (i, i))
      print 'ADD PARTITION #%d exec time: %s' % (i, time.time() - start)
    # Modify one of the partitions
    self.client.execute("""alter table foo_part partition(j=1, s='1')
      set fileformat parquetfile""")
    # Alter one partition to a non-existent location twice (IMPALA-741)
    self.hdfs_client.delete_file_dir("tmp/dont_exist1/", recursive=True)
    self.hdfs_client.delete_file_dir("tmp/dont_exist2/", recursive=True)
    self.execute_query_expect_success(self.client,
      "alter table foo_part partition(j=1,s='1') set location '/tmp/dont_exist1'")
    self.execute_query_expect_success(self.client,
      "alter table foo_part partition(j=1,s='1') set location '/tmp/dont_exist2'")
    # Add some more partitions
    for i in xrange(num_parts / 5, num_parts):
      start = time.time()
      self.client.execute("alter table foo_part add partition(j=%d,s='%s')" % (i,i))
      print 'ADD PARTITION #%d exec time: %s' % (i, time.time() - start)
    # Insert data and verify it shows up.
    self.client.execute("insert into table foo_part partition(j=1, s='1') select 1")
    assert '1' == self.execute_scalar("select count(*) from foo_part")
    self.client.execute("drop table foo_part")
  @pytest.mark.execute_serially
  def test_create_alter_tbl_properties(self, vector):
    """Verifies TBLPROPERTIES / SERDEPROPERTIES set at CREATE time and via
    ALTER TABLE SET, including duplicate-key last-wins behavior."""
    self.__create_db_synced('alter_table_test_db', vector)
    self.client.execute("use alter_table_test_db")
    # Specify TBLPROPERTIES and SERDEPROPERTIES at CREATE time
    self.client.execute("""create table test_alter_tbl (i int)
    with serdeproperties ('s1'='s2', 's3'='s4')
    tblproperties ('p1'='v0', 'p1'='v1')""")
    properties = self.__get_tbl_properties('test_alter_tbl')
    assert len(properties) == 2
    # The transient_lastDdlTime is variable, so don't verify the value.
    assert 'transient_lastDdlTime' in properties
    del properties['transient_lastDdlTime']
    # Duplicate key 'p1': the later value wins.
    assert {'p1': 'v1'} == properties
    properties = self.__get_serde_properties('test_alter_tbl')
    assert {'s1': 's2', 's3': 's4'} == properties
    # Modify the SERDEPROPERTIES using ALTER TABLE SET.
    self.client.execute("alter table test_alter_tbl set serdeproperties "\
        "('s1'='new', 's5'='s6')")
    properties = self.__get_serde_properties('test_alter_tbl')
    assert {'s1': 'new', 's3': 's4', 's5': 's6'} == properties
    # Modify the TBLPROPERTIES using ALTER TABLE SET.
    self.client.execute("alter table test_alter_tbl set tblproperties "\
        "('prop1'='val1', 'p2'='val2', 'p2'='val3', ''='')")
    properties = self.__get_tbl_properties('test_alter_tbl')
    assert 'transient_lastDdlTime' in properties
    assert properties['p1'] == 'v1'
    assert properties['prop1'] == 'val1'
    assert properties['p2'] == 'val3'
    assert properties[''] == ''
@classmethod
def __use_multiple_impalad(cls, vector):
return vector.get_value('exec_option')['sync_ddl'] == 1
  @classmethod
  def __create_db_synced(cls, db_name, vector):
    """Creates a database using synchronized DDL to ensure all nodes have the test
    database available for use before executing the .test file(s).
    """
    cls.client.execute('use default')
    cls.client.set_configuration({'sync_ddl': 1})
    if IS_DEFAULT_FS:
      cls.client.execute("create database %s" % db_name)
    else:
      # Non-default filesystems need an explicit warehouse location.
      cls.client.execute("create database %s location "
          "'%s/%s.db'" % (db_name, WAREHOUSE, db_name))
    # Restore the exec options requested by the current test vector.
    cls.client.set_configuration(vector.get_value('exec_option'))
  def __get_tbl_properties(self, table_name):
    """Extracts the table properties mapping from the output of DESCRIBE FORMATTED"""
    # 'Table Parameters:' is the section header DESCRIBE FORMATTED prints
    # before the table-property rows.
    return self.__get_properties('Table Parameters:', table_name)
def __get_serde_properties(self, table_name):
"""Extracts the serde properties mapping from the output of DESCRIBE FORMATTED"""
return self.__get_properties('Storage | |
11:00",
"2009/9/26 12:00",
"2009/9/26 13:00",
"2009/9/26 14:00",
"2009/9/26 15:00",
"2009/9/26 16:00",
"2009/9/26 17:00",
"2009/9/26 18:00",
"2009/9/26 19:00",
"2009/9/26 20:00",
"2009/9/26 21:00",
"2009/9/26 22:00",
"2009/9/26 23:00",
"2009/9/27 0:00",
"2009/9/27 1:00",
"2009/9/27 2:00",
"2009/9/27 3:00",
"2009/9/27 4:00",
"2009/9/27 5:00",
"2009/9/27 6:00",
"2009/9/27 7:00",
"2009/9/27 8:00",
"2009/9/27 9:00",
"2009/9/27 10:00",
"2009/9/27 11:00",
"2009/9/27 12:00",
"2009/9/27 13:00",
"2009/9/27 14:00",
"2009/9/27 15:00",
"2009/9/27 16:00",
"2009/9/27 17:00",
"2009/9/27 18:00",
"2009/9/27 19:00",
"2009/9/27 20:00",
"2009/9/27 21:00",
"2009/9/27 22:00",
"2009/9/27 23:00",
"2009/9/28 0:00",
"2009/9/28 1:00",
"2009/9/28 2:00",
"2009/9/28 3:00",
"2009/9/28 4:00",
"2009/9/28 5:00",
"2009/9/28 6:00",
"2009/9/28 7:00",
"2009/9/28 8:00",
"2009/9/28 9:00",
"2009/9/28 10:00",
"2009/9/28 11:00",
"2009/9/28 12:00",
"2009/9/28 13:00",
"2009/9/28 14:00",
"2009/9/28 15:00",
"2009/9/28 16:00",
"2009/9/28 17:00",
"2009/9/28 18:00",
"2009/9/28 19:00",
"2009/9/28 20:00",
"2009/9/28 21:00",
"2009/9/28 22:00",
"2009/9/28 23:00",
"2009/9/29 0:00",
"2009/9/29 1:00",
"2009/9/29 2:00",
"2009/9/29 3:00",
"2009/9/29 4:00",
"2009/9/29 5:00",
"2009/9/29 6:00",
"2009/9/29 7:00",
"2009/9/29 8:00",
"2009/9/29 9:00",
"2009/9/29 10:00",
"2009/9/29 11:00",
"2009/9/29 12:00",
"2009/9/29 13:00",
"2009/9/29 14:00",
"2009/9/29 15:00",
"2009/9/29 16:00",
"2009/9/29 17:00",
"2009/9/29 18:00",
"2009/9/29 19:00",
"2009/9/29 20:00",
"2009/9/29 21:00",
"2009/9/29 22:00",
"2009/9/29 23:00",
"2009/9/30 0:00",
"2009/9/30 1:00",
"2009/9/30 2:00",
"2009/9/30 3:00",
"2009/9/30 4:00",
"2009/9/30 5:00",
"2009/9/30 6:00",
"2009/9/30 7:00",
"2009/9/30 8:00",
"2009/9/30 9:00",
"2009/9/30 10:00",
"2009/9/30 11:00",
"2009/9/30 12:00",
"2009/9/30 13:00",
"2009/9/30 14:00",
"2009/9/30 15:00",
"2009/9/30 16:00",
"2009/9/30 17:00",
"2009/9/30 18:00",
"2009/9/30 19:00",
"2009/9/30 20:00",
"2009/9/30 21:00",
"2009/9/30 22:00",
"2009/9/30 23:00",
"2009/10/1 0:00",
"2009/10/1 1:00",
"2009/10/1 2:00",
"2009/10/1 3:00",
"2009/10/1 4:00",
"2009/10/1 5:00",
"2009/10/1 6:00",
"2009/10/1 7:00",
"2009/10/1 8:00",
"2009/10/1 9:00",
"2009/10/1 10:00",
"2009/10/1 11:00",
"2009/10/1 12:00",
"2009/10/1 13:00",
"2009/10/1 14:00",
"2009/10/1 15:00",
"2009/10/1 16:00",
"2009/10/1 17:00",
"2009/10/1 18:00",
"2009/10/1 19:00",
"2009/10/1 20:00",
"2009/10/1 21:00",
"2009/10/1 22:00",
"2009/10/1 23:00",
"2009/10/2 0:00",
"2009/10/2 1:00",
"2009/10/2 2:00",
"2009/10/2 3:00",
"2009/10/2 4:00",
"2009/10/2 5:00",
"2009/10/2 6:00",
"2009/10/2 7:00",
"2009/10/2 8:00",
"2009/10/2 9:00",
"2009/10/2 10:00",
"2009/10/2 11:00",
"2009/10/2 12:00",
"2009/10/2 13:00",
"2009/10/2 14:00",
"2009/10/2 15:00",
"2009/10/2 16:00",
"2009/10/2 17:00",
"2009/10/2 18:00",
"2009/10/2 19:00",
"2009/10/2 20:00",
"2009/10/2 21:00",
"2009/10/2 22:00",
"2009/10/2 23:00",
"2009/10/3 0:00",
"2009/10/3 1:00",
"2009/10/3 2:00",
"2009/10/3 3:00",
"2009/10/3 4:00",
"2009/10/3 5:00",
"2009/10/3 6:00",
"2009/10/3 7:00",
"2009/10/3 8:00",
"2009/10/3 9:00",
"2009/10/3 10:00",
"2009/10/3 11:00",
"2009/10/3 12:00",
"2009/10/3 13:00",
"2009/10/3 14:00",
"2009/10/3 15:00",
"2009/10/3 16:00",
"2009/10/3 17:00",
"2009/10/3 18:00",
"2009/10/3 19:00",
"2009/10/3 20:00",
"2009/10/3 21:00",
"2009/10/3 22:00",
"2009/10/3 23:00",
"2009/10/4 0:00",
"2009/10/4 1:00",
"2009/10/4 2:00",
"2009/10/4 3:00",
"2009/10/4 4:00",
"2009/10/4 5:00",
"2009/10/4 6:00",
"2009/10/4 7:00",
"2009/10/4 8:00",
"2009/10/4 9:00",
"2009/10/4 10:00",
"2009/10/4 11:00",
"2009/10/4 12:00",
"2009/10/4 13:00",
"2009/10/4 14:00",
"2009/10/4 15:00",
"2009/10/4 16:00",
"2009/10/4 17:00",
"2009/10/4 18:00",
"2009/10/4 19:00",
"2009/10/4 20:00",
"2009/10/4 21:00",
"2009/10/4 22:00",
"2009/10/4 23:00",
"2009/10/5 0:00",
"2009/10/5 1:00",
"2009/10/5 2:00",
"2009/10/5 3:00",
"2009/10/5 4:00",
"2009/10/5 5:00",
"2009/10/5 6:00",
"2009/10/5 7:00",
"2009/10/5 8:00",
"2009/10/5 9:00",
"2009/10/5 10:00",
"2009/10/5 11:00",
"2009/10/5 12:00",
"2009/10/5 13:00",
"2009/10/5 14:00",
"2009/10/5 15:00",
"2009/10/5 16:00",
"2009/10/5 17:00",
"2009/10/5 18:00",
"2009/10/5 19:00",
"2009/10/5 20:00",
"2009/10/5 21:00",
"2009/10/5 22:00",
"2009/10/5 23:00",
"2009/10/6 0:00",
"2009/10/6 1:00",
"2009/10/6 2:00",
"2009/10/6 3:00",
"2009/10/6 4:00",
"2009/10/6 5:00",
"2009/10/6 6:00",
"2009/10/6 7:00",
"2009/10/6 8:00",
"2009/10/6 9:00",
"2009/10/6 10:00",
"2009/10/6 11:00",
"2009/10/6 12:00",
"2009/10/6 13:00",
"2009/10/6 14:00",
"2009/10/6 15:00",
"2009/10/6 16:00",
"2009/10/6 17:00",
"2009/10/6 18:00",
"2009/10/6 19:00",
"2009/10/6 20:00",
"2009/10/6 21:00",
"2009/10/6 22:00",
"2009/10/6 23:00",
"2009/10/7 0:00",
"2009/10/7 1:00",
"2009/10/7 2:00",
"2009/10/7 3:00",
"2009/10/7 4:00",
"2009/10/7 5:00",
"2009/10/7 6:00",
"2009/10/7 7:00",
"2009/10/7 8:00",
"2009/10/7 9:00",
"2009/10/7 10:00",
"2009/10/7 11:00",
"2009/10/7 12:00",
"2009/10/7 13:00",
"2009/10/7 14:00",
"2009/10/7 15:00",
"2009/10/7 16:00",
"2009/10/7 17:00",
"2009/10/7 18:00",
"2009/10/7 19:00",
"2009/10/7 20:00",
"2009/10/7 21:00",
"2009/10/7 22:00",
"2009/10/7 23:00",
"2009/10/8 0:00",
"2009/10/8 1:00",
"2009/10/8 2:00",
"2009/10/8 3:00",
"2009/10/8 4:00",
"2009/10/8 5:00",
"2009/10/8 6:00",
"2009/10/8 7:00",
"2009/10/8 8:00",
"2009/10/8 9:00",
"2009/10/8 10:00",
"2009/10/8 11:00",
"2009/10/8 12:00",
"2009/10/8 13:00",
"2009/10/8 14:00",
"2009/10/8 15:00",
"2009/10/8 16:00",
"2009/10/8 17:00",
"2009/10/8 18:00",
"2009/10/8 19:00",
"2009/10/8 20:00",
"2009/10/8 21:00",
"2009/10/8 22:00",
"2009/10/8 23:00",
"2009/10/9 0:00",
"2009/10/9 1:00",
"2009/10/9 2:00",
"2009/10/9 3:00",
"2009/10/9 4:00",
"2009/10/9 5:00",
"2009/10/9 6:00",
"2009/10/9 7:00",
"2009/10/9 8:00",
"2009/10/9 9:00",
"2009/10/9 10:00",
"2009/10/9 11:00",
"2009/10/9 12:00",
"2009/10/9 13:00",
"2009/10/9 14:00",
"2009/10/9 15:00",
"2009/10/9 16:00",
"2009/10/9 17:00",
"2009/10/9 18:00",
"2009/10/9 19:00",
"2009/10/9 20:00",
"2009/10/9 21:00",
"2009/10/9 22:00",
"2009/10/9 23:00",
"2009/10/10 0:00",
"2009/10/10 1:00",
"2009/10/10 2:00",
"2009/10/10 3:00",
"2009/10/10 4:00",
"2009/10/10 5:00",
"2009/10/10 6:00",
"2009/10/10 7:00",
"2009/10/10 8:00",
"2009/10/10 9:00",
"2009/10/10 10:00",
"2009/10/10 11:00",
"2009/10/10 12:00",
"2009/10/10 13:00",
"2009/10/10 14:00",
"2009/10/10 15:00",
"2009/10/10 16:00",
"2009/10/10 17:00",
"2009/10/10 18:00",
"2009/10/10 19:00",
"2009/10/10 20:00",
"2009/10/10 21:00",
"2009/10/10 22:00",
"2009/10/10 23:00",
"2009/10/11 0:00",
"2009/10/11 1:00",
"2009/10/11 2:00",
"2009/10/11 3:00",
"2009/10/11 4:00",
"2009/10/11 5:00",
"2009/10/11 6:00",
"2009/10/11 7:00",
"2009/10/11 8:00",
"2009/10/11 9:00",
"2009/10/11 10:00",
"2009/10/11 11:00",
"2009/10/11 12:00",
"2009/10/11 13:00",
"2009/10/11 14:00",
"2009/10/11 15:00",
"2009/10/11 16:00",
"2009/10/11 17:00",
"2009/10/11 18:00",
"2009/10/11 19:00",
"2009/10/11 20:00",
"2009/10/11 21:00",
"2009/10/11 22:00",
"2009/10/11 23:00",
"2009/10/12 0:00",
"2009/10/12 1:00",
"2009/10/12 2:00",
"2009/10/12 3:00",
"2009/10/12 4:00",
"2009/10/12 5:00",
"2009/10/12 6:00",
"2009/10/12 7:00",
"2009/10/12 8:00",
"2009/10/12 9:00",
"2009/10/12 10:00",
"2009/10/12 11:00",
"2009/10/12 12:00",
"2009/10/12 13:00",
"2009/10/12 14:00",
"2009/10/12 15:00",
"2009/10/12 16:00",
"2009/10/12 17:00",
"2009/10/12 18:00",
"2009/10/12 19:00",
"2009/10/12 20:00",
"2009/10/12 21:00",
"2009/10/12 22:00",
"2009/10/12 23:00",
"2009/10/13 0:00",
"2009/10/13 1:00",
"2009/10/13 2:00",
"2009/10/13 3:00",
"2009/10/13 4:00",
"2009/10/13 5:00",
"2009/10/13 6:00",
"2009/10/13 7:00",
"2009/10/13 8:00",
"2009/10/13 9:00",
"2009/10/13 10:00",
"2009/10/13 11:00",
"2009/10/13 12:00",
"2009/10/13 13:00",
"2009/10/13 14:00",
"2009/10/13 15:00",
"2009/10/13 16:00",
"2009/10/13 17:00",
"2009/10/13 18:00",
"2009/10/13 19:00",
"2009/10/13 20:00",
"2009/10/13 21:00",
"2009/10/13 22:00",
"2009/10/13 23:00",
"2009/10/14 0:00",
"2009/10/14 1:00",
"2009/10/14 2:00",
"2009/10/14 3:00",
"2009/10/14 4:00",
"2009/10/14 5:00",
"2009/10/14 6:00",
"2009/10/14 7:00",
"2009/10/14 8:00",
"2009/10/14 9:00",
"2009/10/14 10:00",
"2009/10/14 11:00",
"2009/10/14 12:00",
"2009/10/14 13:00",
"2009/10/14 14:00",
"2009/10/14 15:00",
"2009/10/14 16:00",
"2009/10/14 17:00",
"2009/10/14 18:00",
"2009/10/14 19:00",
"2009/10/14 20:00",
"2009/10/14 21:00",
"2009/10/14 22:00",
"2009/10/14 23:00",
"2009/10/15 0:00",
"2009/10/15 1:00",
"2009/10/15 2:00",
"2009/10/15 3:00",
"2009/10/15 4:00",
"2009/10/15 5:00",
"2009/10/15 6:00",
"2009/10/15 7:00",
"2009/10/15 8:00",
"2009/10/15 9:00",
"2009/10/15 10:00",
"2009/10/15 11:00",
"2009/10/15 12:00",
"2009/10/15 13:00",
"2009/10/15 14:00",
"2009/10/15 15:00",
"2009/10/15 16:00",
"2009/10/15 17:00",
"2009/10/15 18:00",
"2009/10/15 19:00",
"2009/10/15 20:00",
"2009/10/15 21:00",
"2009/10/15 22:00",
"2009/10/15 23:00",
"2009/10/16 0:00",
"2009/10/16 1:00",
"2009/10/16 2:00",
"2009/10/16 3:00",
"2009/10/16 4:00",
"2009/10/16 5:00",
"2009/10/16 6:00",
"2009/10/16 7:00",
"2009/10/16 8:00",
"2009/10/16 9:00",
"2009/10/16 10:00",
"2009/10/16 11:00",
"2009/10/16 12:00",
"2009/10/16 13:00",
"2009/10/16 14:00",
"2009/10/16 15:00",
"2009/10/16 16:00",
"2009/10/16 17:00",
"2009/10/16 18:00",
"2009/10/16 19:00",
"2009/10/16 20:00",
"2009/10/16 21:00",
"2009/10/16 22:00",
"2009/10/16 23:00",
"2009/10/17 0:00",
"2009/10/17 1:00",
"2009/10/17 2:00",
"2009/10/17 3:00",
"2009/10/17 4:00",
"2009/10/17 5:00",
"2009/10/17 6:00",
"2009/10/17 7:00",
"2009/10/17 8:00",
"2009/10/17 9:00",
"2009/10/17 10:00",
"2009/10/17 11:00",
"2009/10/17 12:00",
"2009/10/17 13:00",
"2009/10/17 14:00",
"2009/10/17 15:00",
"2009/10/17 16:00",
"2009/10/17 17:00",
"2009/10/17 18:00",
"2009/10/17 19:00",
"2009/10/17 20:00",
"2009/10/17 21:00",
"2009/10/17 22:00",
"2009/10/17 23:00",
"2009/10/18 0:00",
"2009/10/18 1:00",
"2009/10/18 2:00",
"2009/10/18 3:00",
"2009/10/18 4:00",
"2009/10/18 5:00",
"2009/10/18 6:00",
"2009/10/18 7:00",
"2009/10/18 8:00",
]
# Drop the constant year prefix so labels read "MM/DD HH:MM".
timeData = [stamp.replace("2009/", "") for stamp in timeData]
rainfallData = [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.005,
0.017,
0.017,
0.017,
0.017,
0.011,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.021,
0.026,
0.03,
0.036,
0.036,
0.195,
0.221,
0.019,
0.013,
0.017,
0.03,
0.03,
0.03,
0.046,
0.045,
0.038,
0.084,
0.045,
0.045,
0.037,
0.034,
0.035,
0.036,
0.044,
0.052,
0.048,
0.109,
0.033,
0.029,
0.04,
0.042,
0.042,
0.042,
0.073,
0.076,
0.062,
0.066,
0.066,
0.075,
0.096,
0.128,
0.121,
0.128,
0.14,
0.226,
0.143,
0.097,
0.018,
0,
0,
0,
0,
0,
0.018,
0.047,
0.054,
0.054,
0.054,
0.036,
0.185,
0.009,
0.038,
0.061,
0.077,
0.091,
0.126,
0.69,
0.182,
0.349,
0.231,
0.146,
0.128,
0.167,
0.1,
0.075,
0.071,
0.071,
0.117,
0.01,
0.002,
0.002,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
| |
<gh_stars>1-10
from __future__ import print_function, division, absolute_import, unicode_literals
from builtins import bytes, dict, object, range, map, input#, str
from future.utils import itervalues, viewitems, iteritems, listvalues, listitems
from io import open
import numpy as np
import math
import random
from time import sleep
from numba import cuda, guvectorize
from numba import jit, complex64, int64, float32
from scipy import constants, interpolate
import sdmpy
from rfpipe import calibration
from astropy import time, coordinates
import logging
logger = logging.getLogger(__name__)
logger.setLevel(20)
# Manage the astropy cached IERS data. The rfnodes do not have direct
# network access so we disable the check for out-of-date IERS files.
# This means the data need to be downloaded and put in the right
# spot via some machine that can get on the network.
import astropy.utils.iers
astropy.utils.iers.conf.auto_max_age = None
def update_iers_cache():
    """Refresh the cached astropy IERS files.

    Must be run from a machine with internet access; the rfnodes have
    no direct network connectivity (see module-level note above where
    auto_max_age is disabled).
    """
    import astropy.utils.data

    # Force a re-download of the IERS A table into the local cache.
    astropy.utils.data.download_file(astropy.utils.iers.IERS_A_URL,
                                     cache='update')

    # Run a throwaway UVW calculation so astropy also fetches any other
    # ancillary files it needs for coordinate transforms.
    antpos = np.array([[-1604008.7431, -5042135.8194, 3553403.7084],
                       [-1601315.9011, -5041985.30447, 3554808.3081]])
    calc_uvw_astropy(time.Time.now(), (0.0, 0.0), antpos)
def getsdm(*args, **kwargs):
    """ Wrap sdmpy.SDM to get around schema change error.

    Tries a normal sdmpy.SDM construction first; if that raises (e.g. an
    SDM schema-version mismatch), retries with XSD validation disabled
    (use_xsd=False).
    """
    try:
        sdm = sdmpy.SDM(*args, **kwargs)
    except Exception:
        # Was a bare "except:", which also swallowed SystemExit and
        # KeyboardInterrupt; Exception is the widest sensible net here.
        kwargs['use_xsd'] = False
        sdm = sdmpy.SDM(*args, **kwargs)
    return sdm
def phase_shift(data, uvw=None, dl=None, dm=None, dw=None, ints=None):
    """ Applies a phase shift to data for a given (dl, dm).

    data: writable complex visibility array, shape (nint, nbl, nchan, npol);
    modified in place.
    Either supply uvw (tuple of per-(bl, chan) u, v, w arrays) together
    with the offsets dl and dm, or a precomputed per-(bl, chan) phase
    term dw.
    ints: integration indices to shift (default: all integrations).
    """
    data = np.require(data, requirements='W')
    if ints is None:
        ints = list(range(len(data)))

    if (uvw is not None) and (dl is not None) and (dm is not None):
        # uvw arrays must match the (baseline, channel) axes of data
        assert data.shape[1] == uvw[0].shape[0]
        assert data.shape[2] == uvw[0].shape[1]
        u, v, w = uvw
        _phaseshiftlm_jit(data, u, v, w, dl, dm, ints=ints)
    elif dw is not None:
        assert data.shape[1] == dw.shape[0]
        assert data.shape[2] == dw.shape[1]
        _phaseshiftdw_jit(data, dw, ints=ints)
    else:
        # logger.warn is deprecated; logger.warning is the supported API
        logger.warning("phase_shift requires either uvw/dl/dm or dw")
@jit(nogil=True, nopython=True, cache=True)
def _phaseshiftlm_jit(data, u, v, w, dl, dm, ints):
    """Rotate visibilities in place for a sky offset (dl, dm).

    data: complex array (nint, nbl, nchan, npol), modified in place.
    u, v, w: per-(baseline, channel) coordinates. w is accepted but not
    used: the w-dependent term is commented out below.
    ints: integration indices to rotate.
    No-op when both dl and dm are exactly zero.
    """
    sh = data.shape
    if (dl != 0.) or (dm != 0.):
        for j in range(sh[1]):
            for k in range(sh[2]):
                # w term omitted (would be: + np.sqrt(1-dl**2-dm**2)*w[j, k])
                frot = np.exp(-2j*np.pi*(dl*u[j, k] + dm*v[j, k]))
                for i in ints:
                    for l in range(sh[3]):  # iterate over pols
                        # phasor unwraps phase at (dl, dm) per (bl, chan)
                        data[i, j, k, l] = data[i, j, k, l] * frot
@jit(nogil=True, nopython=True, cache=True)
def _phaseshiftdw_jit(data, dw, ints):
    """Rotate visibilities in place by a precomputed phase term dw.

    data: complex array (nint, nbl, nchan, npol), modified in place.
    dw: per-(baseline, channel) phase term, applied as exp(-2j*pi*dw).
    ints: integration indices to rotate.
    """
    sh = data.shape
    for j in range(sh[1]):
        for k in range(sh[2]):
            # Original author's open question left as-is:
            frot = np.exp(-2j*np.pi*dw[j, k]) # Q: which sign out front? needs 2pi?
            for i in ints:
                for l in range(sh[3]): # iterate over pols
                    # phasor unwraps phase at (dl, dm) per (bl, chan)
                    data[i, j, k, l] = data[i, j, k, l] * frot
def meantsub(data, mode='mean'):
    """ Subtract mean visibility in time.

    mode can be set in prefs.timesub as None (no subtraction), 'mean',
    '2pt', 'cs<n>'/'ls<n>' (n-piece cubic/linear spline of piecewise
    means), 'linfit', 'cubfit', or 'splfit<n>' (n-piece cubic spline
    least-squares fit). data is modified in place and also returned.
    """
    # TODO: make outlier resistant to avoid oversubtraction
    if mode is None:  # was "mode == None"; identity test is correct for None
        logger.info('No visibility subtraction done.')
    elif mode == 'mean':
        logger.info('Subtracting mean visibility in time.')
        _meantsub_jit(np.require(data, requirements='W'))
    elif mode == '2pt':
        logger.info('Subtracting 2pt time trend in visibility.')
        _2ptsub_jit(np.require(data, requirements='W'))
    elif mode.startswith('cs') or mode.startswith('ls'):
        # Accept for example 'cs5' to specify 5-piece processing
        try:
            npiece = int(mode[2:])
        except ValueError:
            npiece = 4
        if mode[:2] == 'cs':
            kind = 'cubic'
        elif mode[:2] == 'ls':
            kind = 'linear'
        logger.info("Subtracting {0}-piece {1} spline time trend in visibility".format(npiece,kind))
        assert len(data) > npiece, "Too few integrations for {0}-piece spline sub".format(npiece)
        nint, nbl, nchan, npol = data.shape
        # Piecewise means feed an interp1d spline evaluated at every int.
        dataavg = np.empty((npiece, nbl, nchan, npol), dtype=np.complex64)
        _cssub0_jit(np.require(data, requirements='W'), dataavg, npiece)
        spline = interpolate.interp1d(np.array([(nint//npiece)*(i+0.5) for i in range(npiece)]),
                                      dataavg, axis=0, fill_value='extrapolate',
                                      kind=kind)
        dataavginterp = spline(range(len(data)))
        _cssub1_jit(data, dataavginterp)
    elif mode == 'linfit':
        logger.info("Subtracting linear fit to visibility vs time")
        nint, nbl, nchan, npol = data.shape
        # Basis: constant + normalized time ramp
        basis = np.empty((nint,2),dtype=np.float32)
        basis[:,0] = 1.0
        basis[:,1] = np.arange(nint)/float(nint)
        _lssub_jit(data,basis)
    elif mode == 'cubfit':
        logger.info("Subtracting cubic fit to visibility vs time")
        nint, nbl, nchan, npol = data.shape
        # Basis: Legendre polynomials P0..P3 on t in [-1, 1)
        basis = np.empty((nint,4),dtype=np.float32)
        t = 2.0*np.arange(nint)/float(nint) - 1.0
        basis[:,0] = 1.0
        basis[:,1] = t
        basis[:,2] = 0.5*(3.0*(t**2) - 1.0)
        basis[:,3] = 0.5*(5.0*(t**3) - 3.0*t)
        _lssub_jit(data,basis)
    elif mode.startswith('splfit'):
        try:
            npiece = int(mode[6:])
        except ValueError:
            npiece = 2
        logger.info("Subtracting {0}-piece cubic spline fit to visibility vs time".format(npiece))
        nint, nbl, nchan, npol = data.shape
        # Basis: cubic polynomial plus one truncated-cubic term per knot
        nb = npiece - 1
        basis = np.empty((nint,nb+4),dtype=np.float32)
        cfunc = np.empty((nint,nb),dtype=np.float32)
        t = np.arange(nint) / float(nint)
        for i in range(nb):
            t0 = (i+1.0) / (nb+1.0)
            ctmp = (t - t0)**3
            ctmp[np.where(t<t0)] = 0.0  # zero below the knot
            cfunc[:,i] = ctmp
        basis[:,0] = 1.0
        basis[:,1] = t
        basis[:,2] = t**2
        basis[:,3] = t**3
        for i in range(nb):
            basis[:,i+4] = cfunc[:,i]
        _lssub_jit(data,basis)
    else:
        # logger.warn is deprecated; logger.warning is the supported API
        logger.warning("meantsub mode not recognized")

    return data
@jit(nogil=True, nopython=True, cache=True)
def _meantsub_jit(data):
    """ Calculate mean in time (ignoring zeros) and subtract in place.

    data: complex array (nint, nbl, nchan, npol). Exactly-zero samples
    are treated as flagged: they are excluded from the mean and left
    untouched by the subtraction.
    Could ultimately parallelize by computing only on subset of data.
    """
    nint, nbl, nchan, npol = data.shape
    for i in range(nbl):
        for j in range(nchan):
            for k in range(npol):
                # accumulate sum and count of unflagged (nonzero) samples
                ss = complex64(0)
                weight = int64(0)
                for l in range(nint):
                    ss += data[l, i, j, k]
                    if data[l, i, j, k] != 0j:
                        weight += 1
                if weight > 0:
                    mean = ss/weight
                else:
                    mean = complex64(0)
                # subtract only when the mean is nonzero, and only from
                # unflagged samples (flagged zeros stay zero)
                if mean:
                    for l in range(nint):
                        if data[l, i, j, k] != 0j:
                            data[l, i, j, k] -= mean
@jit(nogil=True, nopython=True, cache=True)
def _2ptsub_jit(data):
    """ Calculate 2-pt time trend and evaluate to subtract at each time.

    For each (baseline, channel, pol) slice, the means of the first and
    second halves of the time axis (zeros treated as flagged) define a
    line that is evaluated and subtracted per integration. If either
    half has no unflagged samples, the whole slice is zeroed instead.
    """
    nint, nbl, nchan, npol = data.shape
    for i in range(nbl):
        for j in range(nchan):
            for k in range(npol):
                # first half mean
                ss1 = complex64(0)
                weight1 = int64(0)
                for l in range(0, nint//2):
                    ss1 += data[l, i, j, k]
                    if data[l, i, j, k] != 0j:
                        weight1 += 1
                if weight1 > 0:
                    mean1 = ss1/weight1
                else:
                    mean1 = complex64(0)
                # second half mean
                ss2 = complex64(0)
                weight2 = int64(0)
                for l in range(nint//2, nint):
                    ss2 += data[l, i, j, k]
                    if data[l, i, j, k] != 0j:
                        weight2 += 1
                if weight2 > 0:
                    mean2 = ss2/weight2
                else:
                    mean2 = complex64(0)
                # calc mean per int: line through the two half-means,
                # centered at nint//2
                if mean1 and mean2:
                    slope = (mean2-mean1)/(nint//2)
                    mean0 = (mean2+mean1)/2
                    for l in range(nint):
                        if data[l, i, j, k] != 0j:
                            data[l, i, j, k] -= slope*(l-nint//2) + mean0
                else: # or just blank data
                    for l in range(nint):
                        data[l, i, j, k] = 0j
@jit(nogil=True, nopython=True, cache=True)
def _2ptinterp_jit(data):
    """ Calculate 2-pt time trend and evaluate to subtract at each time.

    For each (baseline, channel, pol) slice, the means of the first and
    second halves of the time axis (zeros treated as flagged) anchor a
    line at the half centers (nint//4 and 3*nint//4), which is evaluated
    (extrapolating at the ends) and subtracted in place. Slices with a
    fully flagged half are zeroed, matching _2ptsub_jit.
    """
    nint, nbl, nchan, npol = data.shape
    for i in range(nbl):
        for j in range(nchan):
            for k in range(npol):
                # first half mean (zeros treated as flagged)
                ss1 = complex64(0)
                weight1 = int64(0)
                for l in range(0, nint//2):
                    ss1 += data[l, i, j, k]
                    if data[l, i, j, k] != 0j:
                        weight1 += 1
                if weight1 > 0:
                    mean1 = ss1/weight1
                else:
                    mean1 = complex64(0)
                # second half mean
                ss2 = complex64(0)
                weight2 = int64(0)
                for l in range(nint//2, nint):
                    ss2 += data[l, i, j, k]
                    if data[l, i, j, k] != 0j:
                        weight2 += 1
                if weight2 > 0:
                    mean2 = ss2/weight2
                else:
                    mean2 = complex64(0)
                # The original built a scipy interp1d here (unsupported in
                # numba nopython mode) and then referenced undefined names
                # slope/mean0 (NameError). Compute the same linear trend
                # through (nint//4, mean1) and (3*nint//4, mean2) directly.
                if mean1 and mean2:
                    slope = (mean2 - mean1)/(nint//2)
                    for l in range(nint):
                        if data[l, i, j, k] != 0j:
                            data[l, i, j, k] -= mean1 + slope*(l - nint//4)
                else:  # blank slices lacking a valid mean in either half
                    for l in range(nint):
                        data[l, i, j, k] = 0j
@jit(nogil=True, nopython=True, cache=True)
def _cssub0_jit(data, dataavg, npiece):
""" Use scipy calculate n-point mean as input to spline estimate.
zeroed data is treated as flagged
"""
nint, nbl, nchan, npol = data.shape
piece = nint//npiece
for i in range(nbl):
for j in range(nchan):
for k in range(npol):
# mean in each piece
for pp in range(npiece):
ss = complex64(0)
weight = int64(0)
for l in range(pp*piece, (pp+1)*piece):
ss += data[l, i, j, k]
if data[l, i, j, k] != 0j:
weight += 1
if weight > 0:
dataavg[pp, i, j, k] = ss/weight
else:
# option 1: replace with nearest, if possible
if j > 0: # nearest channel
dataavg[pp, i, j, k] = dataavg[pp, i, j-1, k]
elif pp > 0: # nearest time piece
dataavg[pp, i, j, k] = dataavg[pp-1, i, j, k]
elif i > 0: # nearest time piece
dataavg[pp, i, | |
LAING DIGIT ONE': 43505,
'MYANMAR TAI LAING DIGIT SEVEN': 43511,
'MYANMAR TAI LAING DIGIT SIX': 43510,
'MYANMAR TAI LAING DIGIT THREE': 43507,
'MYANMAR TAI LAING DIGIT TWO': 43506,
'MYANMAR TAI LAING DIGIT ZERO': 43504,
'NABATAEAN CRUCIFORM NUMBER FOUR': 67755,
'NABATAEAN LETTER ALEPH': 67713,
'NABATAEAN LETTER AYIN': 67735,
'NABATAEAN LETTER BETH': 67715,
'NABATAEAN LETTER DALETH': 67717,
'NABATAEAN LETTER FINAL ALEPH': 67712,
'NABATAEAN LETTER FINAL BETH': 67714,
'NABATAEAN LETTER FINAL HE': 67718,
'NABATAEAN LETTER FINAL KAPH': 67726,
'NABATAEAN LETTER FINAL LAMEDH': 67728,
'NABATAEAN LETTER FINAL MEM': 67730,
'NABATAEAN LETTER FINAL NUN': 67732,
'NABATAEAN LETTER FINAL SHIN': 67740,
'NABATAEAN LETTER FINAL YODH': 67724,
'NABATAEAN LETTER GIMEL': 67716,
'NABATAEAN LETTER HE': 67719,
'NABATAEAN LETTER HETH': 67722,
'NABATAEAN LETTER KAPH': 67727,
'NABATAEAN LETTER LAMEDH': 67729,
'NABATAEAN LETTER MEM': 67731,
'NABATAEAN LETTER NUN': 67733,
'NABATAEAN LETTER PE': 67736,
'NABATAEAN LETTER QOPH': 67738,
'NABATAEAN LETTER RESH': 67739,
'NABATAEAN LETTER SADHE': 67737,
'NABATAEAN LETTER SAMEKH': 67734,
'NABATAEAN LETTER SHIN': 67741,
'NABATAEAN LETTER TAW': 67742,
'NABATAEAN LETTER TETH': 67723,
'NABATAEAN LETTER WAW': 67720,
'NABATAEAN LETTER YODH': 67725,
'NABATAEAN LETTER ZAYIN': 67721,
'NABATAEAN NUMBER FIVE': 67756,
'NABATAEAN NUMBER FOUR': 67754,
'NABATAEAN NUMBER ONE': 67751,
'NABATAEAN NUMBER ONE HUNDRED': 67759,
'NABATAEAN NUMBER TEN': 67757,
'NABATAEAN NUMBER THREE': 67753,
'NABATAEAN NUMBER TWENTY': 67758,
'NABATAEAN NUMBER TWO': 67752,
'NAIL POLISH': 128133,
'NAK': 983092,
'NAME BADGE': 128219,
'NATIONAL PARK': 127966,
'NBH': 983127,
'NBSP': 983196,
'NECKTIE': 128084,
'NEGATIVE ACKNOWLEDGE': 983091,
'NEGATIVE CIRCLED LATIN CAPITAL LETTER A': 127312,
'NEGATIVE CIRCLED LATIN CAPITAL LETTER B': 127313,
'NEGATIVE CIRCLED LATIN CAPITAL LETTER C': 127314,
'NEGATIVE CIRCLED LATIN CAPITAL LETTER D': 127315,
'NEGATIVE CIRCLED LATIN CAPITAL LETTER E': 127316,
'NEGATIVE CIRCLED LATIN CAPITAL LETTER F': 127317,
'NEGATIVE CIRCLED LATIN CAPITAL LETTER G': 127318,
'NEGATIVE CIRCLED LATIN CAPITAL LETTER I': 127320,
'NEGATIVE CIRCLED LATIN CAPITAL LETTER J': 127321,
'NEGATIVE CIRCLED LATIN CAPITAL LETTER K': 127322,
'NEGATIVE CIRCLED LATIN CAPITAL LETTER L': 127323,
'NEGATIVE CIRCLED LATIN CAPITAL LETTER M': 127324,
'NEGATIVE CIRCLED LATIN CAPITAL LETTER N': 127325,
'NEGATIVE CIRCLED LATIN CAPITAL LETTER O': 127326,
'NEGATIVE CIRCLED LATIN CAPITAL LETTER Q': 127328,
'NEGATIVE CIRCLED LATIN CAPITAL LETTER R': 127329,
'NEGATIVE CIRCLED LATIN CAPITAL LETTER S': 127330,
'NEGATIVE CIRCLED LATIN CAPITAL LETTER T': 127331,
'NEGATIVE CIRCLED LATIN CAPITAL LETTER U': 127332,
'NEGATIVE CIRCLED LATIN CAPITAL LETTER V': 127333,
'NEGATIVE CIRCLED LATIN CAPITAL LETTER W': 127334,
'NEGATIVE CIRCLED LATIN CAPITAL LETTER X': 127335,
'NEGATIVE CIRCLED LATIN CAPITAL LETTER Y': 127336,
'NEGATIVE CIRCLED LATIN CAPITAL LETTER Z': 127337,
'NEGATIVE SQUARED AB': 127374,
'NEGATIVE SQUARED CROSS MARK': 10062,
'NEGATIVE SQUARED LATIN CAPITAL LETTER A': 127344,
'NEGATIVE SQUARED LATIN CAPITAL LETTER B': 127345,
'NEGATIVE SQUARED LATIN CAPITAL LETTER C': 127346,
'NEGATIVE SQUARED LATIN CAPITAL LETTER D': 127347,
'NEGATIVE SQUARED LATIN CAPITAL LETTER E': 127348,
'NEGATIVE SQUARED LATIN CAPITAL LETTER F': 127349,
'NEGATIVE SQUARED LATIN CAPITAL LETTER G': 127350,
'NEGATIVE SQUARED LATIN CAPITAL LETTER H': 127351,
'NEGATIVE SQUARED LATIN CAPITAL LETTER I': 127352,
'NEGATIVE SQUARED LATIN CAPITAL LETTER K': 127354,
'NEGATIVE SQUARED LATIN CAPITAL LETTER N': 127357,
'NEGATIVE SQUARED LATIN CAPITAL LETTER O': 127358,
'NEGATIVE SQUARED LATIN CAPITAL LETTER Q': 127360,
'NEGATIVE SQUARED LATIN CAPITAL LETTER R': 127361,
'NEGATIVE SQUARED LATIN CAPITAL LETTER S': 127362,
'NEGATIVE SQUARED LATIN CAPITAL LETTER T': 127363,
'NEGATIVE SQUARED LATIN CAPITAL LETTER U': 127364,
'NEGATIVE SQUARED LATIN CAPITAL LETTER V': 127365,
'NEGATIVE SQUARED LATIN CAPITAL LETTER W': 127366,
'NEGATIVE SQUARED LATIN CAPITAL LETTER X': 127367,
'NEGATIVE SQUARED LATIN CAPITAL LETTER Y': 127368,
'NEGATIVE SQUARED LATIN CAPITAL LETTER Z': 127369,
'NEGATIVE SQUARED WC': 127375,
'NEL': 983131,
'NERD FACE': 129299,
'NEUTRAL FACE': 128528,
'NEW LINE': 983063,
'NEW MOON SYMBOL': 127761,
'NEW MOON WITH FACE': 127770,
'NEWLINE LEFT': 11154,
'NEWLINE RIGHT': 11155,
'NEWSPAPER': 128240,
'NEXT LINE': 983130,
'NIGHT WITH STARS': 127747,
'NL': 983066,
'NNBSP': 983223,
'NO BICYCLES': 128691,
'NO BREAK HERE': 983126,
'NO ENTRY SIGN': 128683,
'NO MOBILE PHONES': 128245,
'NO ONE UNDER EIGHTEEN SYMBOL': 128286,
'NO PEDESTRIANS': 128695,
'NO PIRACY': 128370,
'NO SMOKING SYMBOL': 128685,
'NON-POTABLE WATER SYMBOL': 128689,
'NORDIC MARK SIGN': 8379,
'NORTH EAST POINTING BUD': 128610,
'NORTH EAST POINTING LEAF': 128594,
'NORTH EAST POINTING VINE LEAF': 128602,
'NORTH EAST SANS-SERIF ARROW': 129109,
'NORTH EAST TRIANGLE-HEADED ARROW': 11111,
'NORTH EAST TRIANGLE-HEADED ARROW TO BAR': 11127,
'NORTH WEST POINTING BUD': 128608,
'NORTH WEST POINTING LEAF': 128592,
'NORTH WEST POINTING VINE LEAF': 128600,
'NORTH WEST SANS-SERIF ARROW': 129108,
'NORTH WEST TRIANGLE-HEADED ARROW': 11110,
'NORTH WEST TRIANGLE-HEADED ARROW TO BAR': 11126,
'NORTHEAST-POINTING AIRPLANE': 128746,
'NOSE': 128067,
'NOTCHED LEFT SEMICIRCLE WITH THREE DOTS': 128323,
'NOTCHED RIGHT SEMICIRCLE WITH THREE DOTS': 128324,
'NOTE': 128456,
'NOTE PAD': 128458,
'NOTE PAGE': 128457,
'NOTEBOOK': 128211,
'NOTEBOOK WITH DECORATIVE COVER': 128212,
'NUL': 983041,
'NULL': 983040,
'NUT AND BOLT': 128297,
'OCTOPUS': 128025,
'ODEN': 127842,
'OFFICE BUILDING': 127970,
'OIL DRUM': 128738,
'OK HAND SIGN': 128076,
'OLD HUNGARIAN CAPITAL LETTER A': 68736,
'OLD HUNGARIAN CAPITAL LETTER AA': 68737,
'OLD HUNGARIAN CAPITAL LETTER AK': 68756,
'OLD HUNGARIAN CAPITAL LETTER AMB': 68739,
'OLD HUNGARIAN CAPITAL LETTER AND': 68744,
'OLD HUNGARIAN CAPITAL LETTER CLOSE E': 68746,
'OLD HUNGARIAN CAPITAL LETTER E': 68745,
'OLD HUNGARIAN CAPITAL LETTER EB': 68738,
'OLD HUNGARIAN CAPITAL LETTER EC': 68740,
'OLD HUNGARIAN CAPITAL LETTER ECH': 68777,
'OLD HUNGARIAN CAPITAL LETTER ECS': 68742,
'OLD HUNGARIAN CAPITAL LETTER ED': 68743,
'OLD HUNGARIAN CAPITAL LETTER EE': 68747,
'OLD HUNGARIAN CAPITAL LETTER EF': 68748,
'OLD HUNGARIAN CAPITAL LETTER EG': 68749,
'OLD HUNGARIAN CAPITAL LETTER EGY': 68750,
'OLD HUNGARIAN CAPITAL LETTER EH': 68751,
'OLD HUNGARIAN CAPITAL LETTER EJ': 68754,
'OLD HUNGARIAN CAPITAL LETTER EK': 68755,
'OLD HUNGARIAN CAPITAL LETTER EL': 68758,
'OLD HUNGARIAN CAPITAL LETTER ELY': 68759,
'OLD HUNGARIAN CAPITAL LETTER EM': 68760,
'OLD HUNGARIAN CAPITAL LETTER EMP': 68769,
'OLD HUNGARIAN CAPITAL LETTER EN': 68761,
'OLD HUNGARIAN CAPITAL LETTER ENC': 68741,
'OLD HUNGARIAN CAPITAL LETTER ENT': 68775,
'OLD HUNGARIAN CAPITAL LETTER ENT-SHAPED SIGN': 68785,
'OLD HUNGARIAN CAPITAL LETTER ENY': 68762,
'OLD HUNGARIAN CAPITAL LETTER EP': 68768,
'OLD HUNGARIAN CAPITAL LETTER ER': 68770,
'OLD HUNGARIAN CAPITAL LETTER ES': 68772,
'OLD HUNGARIAN CAPITAL LETTER ESZ': 68773,
'OLD HUNGARIAN CAPITAL LETTER ET': 68774,
'OLD HUNGARIAN CAPITAL LETTER ETY': 68776,
'OLD HUNGARIAN CAPITAL LETTER EV': 68782,
'OLD HUNGARIAN CAPITAL LETTER EZ': 68783,
'OLD HUNGARIAN CAPITAL LETTER EZS': 68784,
'OLD HUNGARIAN CAPITAL LETTER I': 68752,
'OLD HUNGARIAN CAPITAL LETTER II': 68753,
'OLD HUNGARIAN CAPITAL LETTER NIKOLSBURG OE': 68765,
'OLD HUNGARIAN CAPITAL LETTER NIKOLSBURG UE': 68780,
'OLD HUNGARIAN CAPITAL LETTER O': 68763,
'OLD HUNGARIAN CAPITAL LETTER OEE': 68767,
'OLD HUNGARIAN CAPITAL LETTER OO': 68764,
'OLD HUNGARIAN CAPITAL LETTER RUDIMENTA OE': 68766,
'OLD HUNGARIAN CAPITAL LETTER RUDIMENTA UE': 68781,
'OLD HUNGARIAN CAPITAL LETTER SHORT ER': 68771,
'OLD HUNGARIAN CAPITAL LETTER U': 68778,
'OLD HUNGARIAN CAPITAL LETTER UNK': 68757,
'OLD HUNGARIAN CAPITAL LETTER US': 68786,
'OLD HUNGARIAN CAPITAL LETTER UU': 68779,
'OLD HUNGARIAN NUMBER FIFTY': 68861,
'OLD HUNGARIAN NUMBER FIVE': 68859,
'OLD HUNGARIAN NUMBER ONE': 68858,
'OLD HUNGARIAN NUMBER ONE HUNDRED': 68862,
'OLD HUNGARIAN NUMBER ONE THOUSAND': 68863,
'OLD HUNGARIAN NUMBER TEN': 68860,
'OLD HUNGARIAN SMALL LETTER A': 68800,
'OLD HUNGARIAN SMALL LETTER AA': 68801,
'OLD HUNGARIAN SMALL LETTER AK': 68820,
'OLD HUNGARIAN SMALL LETTER AMB': 68803,
'OLD HUNGARIAN SMALL LETTER AND': 68808,
'OLD HUNGARIAN SMALL LETTER CLOSE E': 68810,
'OLD HUNGARIAN SMALL LETTER E': 68809,
'OLD HUNGARIAN SMALL LETTER EB': 68802,
'OLD HUNGARIAN SMALL LETTER EC': 68804,
'OLD HUNGARIAN SMALL LETTER ECH': 68841,
'OLD HUNGARIAN SMALL LETTER ECS': 68806,
'OLD HUNGARIAN SMALL LETTER ED': 68807,
'OLD HUNGARIAN SMALL LETTER EE': 68811,
'OLD HUNGARIAN SMALL LETTER EF': 68812,
'OLD HUNGARIAN SMALL LETTER EG': 68813,
'OLD HUNGARIAN SMALL LETTER EGY': 68814,
'OLD HUNGARIAN SMALL LETTER EH': 68815,
'OLD HUNGARIAN SMALL LETTER EJ': 68818,
'OLD HUNGARIAN SMALL LETTER EK': 68819,
'OLD HUNGARIAN SMALL LETTER EL': 68822,
'OLD HUNGARIAN SMALL LETTER ELY': 68823,
'OLD HUNGARIAN SMALL LETTER EM': 68824,
'OLD HUNGARIAN SMALL LETTER EMP': 68833,
'OLD HUNGARIAN SMALL LETTER EN': 68825,
'OLD HUNGARIAN SMALL LETTER ENC': 68805,
'OLD HUNGARIAN SMALL LETTER ENT': 68839,
'OLD HUNGARIAN SMALL LETTER ENT-SHAPED SIGN': 68849,
'OLD HUNGARIAN SMALL LETTER ENY': 68826,
'OLD HUNGARIAN SMALL LETTER EP': 68832,
'OLD HUNGARIAN SMALL LETTER ER': 68834,
'OLD HUNGARIAN SMALL LETTER ES': 68836,
'OLD HUNGARIAN SMALL LETTER ESZ': 68837,
'OLD HUNGARIAN SMALL LETTER ET': 68838,
'OLD HUNGARIAN SMALL LETTER ETY': 68840,
'OLD HUNGARIAN SMALL LETTER EV': 68846,
'OLD HUNGARIAN SMALL LETTER EZ': 68847,
'OLD HUNGARIAN SMALL LETTER EZS': 68848,
'OLD HUNGARIAN SMALL LETTER I': 68816,
'OLD HUNGARIAN SMALL LETTER II': 68817,
'OLD HUNGARIAN SMALL LETTER NIKOLSBURG OE': 68829,
'OLD HUNGARIAN SMALL LETTER NIKOLSBURG UE': 68844,
'OLD HUNGARIAN SMALL LETTER O': 68827,
'OLD HUNGARIAN SMALL LETTER OEE': 68831,
'OLD HUNGARIAN SMALL LETTER OO': 68828,
'OLD HUNGARIAN SMALL LETTER RUDIMENTA OE': 68830,
'OLD HUNGARIAN SMALL LETTER RUDIMENTA UE': 68845,
'OLD HUNGARIAN SMALL LETTER SHORT ER': 68835,
'OLD HUNGARIAN SMALL LETTER U': 68842,
'OLD HUNGARIAN SMALL LETTER UNK': 68821,
'OLD HUNGARIAN SMALL LETTER US': 68850,
'OLD HUNGARIAN SMALL LETTER UU': 68843,
'OLD ITALIC LETTER ESS': 66335,
'OLD KEY': 128477,
'OLD NORTH ARABIAN LETTER AIN': 68242,
'OLD NORTH ARABIAN LETTER ALEF': 68241,
'OLD NORTH ARABIAN LETTER BEH': 68232,
'OLD NORTH ARABIAN LETTER DAD': 68243,
'OLD NORTH ARABIAN LETTER DAL': 68245,
'OLD NORTH ARABIAN LETTER ES-1': 68234,
'OLD NORTH ARABIAN LETTER ES-2': 68230,
'OLD NORTH ARABIAN LETTER ES-3': 68239,
'OLD NORTH ARABIAN LETTER FEH': 68240,
'OLD NORTH ARABIAN LETTER GEEM': 68244,
'OLD NORTH ARABIAN LETTER GHAIN': 68246,
'OLD NORTH ARABIAN LETTER HAH': 68226,
'OLD NORTH ARABIAN LETTER HEH': 68224,
'OLD NORTH ARABIAN LETTER KAF': 68235,
'OLD NORTH ARABIAN LETTER KHAH': 68237,
'OLD NORTH ARABIAN LETTER LAM': 68225,
'OLD NORTH ARABIAN LETTER MEEM': 68227,
'OLD NORTH ARABIAN LETTER NOON': 68236,
'OLD NORTH ARABIAN LETTER QAF': 68228,
'OLD NORTH ARABIAN LETTER REH': 68231,
'OLD NORTH ARABIAN LETTER SAD': 68238,
'OLD NORTH ARABIAN LETTER TAH': 68247,
'OLD NORTH ARABIAN LETTER TEH': 68233,
'OLD NORTH ARABIAN LETTER | |
Diabetology">Cardiovascular Diabetology</a></i></td>
<td>Cardiology</td>
<td><a href="/wiki/BioMed_Central" title="BioMed Central">BioMed Central</a></td>
<td>English</td>
<td>2002-present
</td></tr>
<tr>
<td><i><a href="/wiki/Cephalalgia_(journal)" title="Cephalalgia (journal)">Cephalalgia</a></i></td>
<td>Headache</td>
<td><a href="/wiki/SAGE_Publications" class="mw-redirect" title="SAGE Publications">SAGE Publications</a></td>
<td>English</td>
<td>1981-present
</td></tr>
<tr>
<td><i><a href="/wiki/Chest_(journal)" title="Chest (journal)">Chest</a></i></td>
<td>Cardiology, Respiratory Health</td>
<td><a href="/wiki/American_College_of_Chest_Physicians" title="American College of Chest Physicians">American College of Chest Physicians</a></td>
<td>English</td>
<td>1935-present
</td></tr>
<tr>
<td><i><a href="/wiki/Child:_Care,_Health_and_Development" title="Child: Care, Health and Development">Child: Care, Health and Development</a></i></td>
<td>Pediatrics</td>
<td><a href="/wiki/Wiley-Blackwell" title="Wiley-Blackwell">Wiley-Blackwell</a></td>
<td>English</td>
<td>1975-present
</td></tr>
<tr>
<td><i><a href="/wiki/Chinese_Medical_Journal" title="Chinese Medical Journal">Chinese Medical Journal</a></i></td>
<td>Medicine</td>
<td><a href="/wiki/Chinese_Medical_Association" class="mw-redirect" title="Chinese Medical Association">Chinese Medical Association</a>, <a href="/w/index.php?title=Wolters_Kluwer_Medknow&action=edit&redlink=1" class="new" title="Wolters Kluwer Medknow (page does not exist)">Wolters Kluwer Medknow</a></td>
<td>English</td>
<td>1887-present
</td></tr>
<tr>
<td><i><a href="/wiki/Chronic_Illness_(journal)" title="Chronic Illness (journal)">Chronic Illness</a></i></td>
<td>Chronic Illness</td>
<td><a href="/wiki/SAGE_Publications" class="mw-redirect" title="SAGE Publications">SAGE Publications</a></td>
<td>English</td>
<td>2005-present
</td></tr>
<tr>
<td><i><a href="/wiki/Circulation_(journal)" title="Circulation (journal)">Circulation</a></i></td>
<td>Cardiology</td>
<td><a href="/wiki/Lippincott_Williams_%26_Wilkins" title="Lippincott Williams & Wilkins">Lippincott Williams & Wilkins</a></td>
<td>English</td>
<td>1950-present
</td></tr>
<tr>
<td><i><a href="/wiki/The_Cleft_Palate-Craniofacial_Journal" title="The Cleft Palate-Craniofacial Journal">The Cleft Palate-Craniofacial Journal</a></i></td>
<td>Craniofacial Medicine</td>
<td><a href="/wiki/Allen_Press" title="Allen Press">Allen Press</a></td>
<td>English</td>
<td>1964-present
</td></tr>
<tr>
<td><i><a href="/wiki/Clinical_Anatomy" title="Clinical Anatomy">Clinical Anatomy</a></i></td>
<td>Medicine</td>
<td><a href="/wiki/Wiley-Liss" class="mw-redirect" title="Wiley-Liss">Wiley-Liss</a></td>
<td>English</td>
<td>1988-present
</td></tr>
<tr>
<td><i><a href="/wiki/Clinical_and_Experimental_Gastroenterology" title="Clinical and Experimental Gastroenterology">Clinical and Experimental Gastroenterology</a></i></td>
<td>Gastroenterology</td>
<td><a href="/wiki/Dove_Medical_Press" title="Dove Medical Press">Dove Medical Press</a></td>
<td>English</td>
<td>2008-present
</td></tr>
<tr>
<td><i><a href="/wiki/Clinical_and_Translational_Science" title="Clinical and Translational Science">Clinical and Translational Science</a></i>
</td>
<td>Medicine
</td>
<td><a href="/wiki/Wiley-Blackwell" title="Wiley-Blackwell">Wiley-Blackwell</a>
</td>
<td>English
</td>
<td>2008-Present
</td></tr>
<tr>
<td><i><a href="/wiki/Clinical_Breast_Cancer" title="Clinical Breast Cancer">Clinical Breast Cancer</a></i></td>
<td>Oncology</td>
<td><a href="/wiki/Elsevier" title="Elsevier">Elsevier</a></td>
<td>English</td>
<td>2000-present
</td></tr>
<tr>
<td><i><a href="/wiki/Clinical_Case_Studies" title="Clinical Case Studies">Clinical Case Studies</a></i></td>
<td>Clinical medicine</td>
<td><a href="/wiki/Sage_Publications" class="mw-redirect" title="Sage Publications">Sage Publications</a></td>
<td>English</td>
<td>2002-present
</td></tr>
<tr>
<td><i><a href="/wiki/Clinical_Chemistry_(journal)" title="Clinical Chemistry (journal)">Clinical Chemistry</a></i></td>
<td>Medicinal Chemistry</td>
<td><a href="/wiki/American_Association_for_Clinical_Chemistry" title="American Association for Clinical Chemistry">American Association for Clinical Chemistry</a></td>
<td>English</td>
<td>1955-present
</td></tr>
<tr>
<td><i><a href="/wiki/Clinical_Colorectal_Cancer" title="Clinical Colorectal Cancer">Clinical Colorectal Cancer</a></i></td>
<td>Oncology</td>
<td><a href="/wiki/Elsevier" title="Elsevier">Elsevier</a></td>
<td>English</td>
<td>2001-present
</td></tr>
<tr>
<td><i><a href="/wiki/Clinical_Gastroenterology_and_Hepatology" title="Clinical Gastroenterology and Hepatology">Clinical Gastroenterology and Hepatology</a></i></td>
<td>Gastroenterology, Hepatology</td>
<td><a href="/wiki/Elsevier" title="Elsevier">Elsevier</a></td>
<td>English</td>
<td>2003-present
</td></tr>
<tr>
<td><i><a href="/wiki/Clinical_Genitourinary_Cancer" title="Clinical Genitourinary Cancer">Clinical Genitourinary Cancer</a></i></td>
<td>Oncology</td>
<td><a href="/wiki/Elsevier" title="Elsevier">Elsevier</a></td>
<td>English</td>
<td>2002-present
</td></tr>
<tr>
<td><i><a href="/wiki/The_Clinical_Journal_of_Pain" title="The Clinical Journal of Pain">The Clinical Journal of Pain</a></i></td>
<td>Pain Management</td>
<td><a href="/wiki/Lippincott_Williams_%26_Wilkins" title="Lippincott Williams & Wilkins">Lippincott Williams &amp; Wilkins</a></td>
<td>English</td>
<td>1985-present
</td></tr>
<tr>
<td><i><a href="/wiki/Clinical_Leukemia" title="Clinical Leukemia">Clinical Leukemia</a></i></td>
<td>Oncology</td>
<td><a href="/wiki/CIG_Media_Group" title="CIG Media Group">CIG Media Group</a></td>
<td>English</td>
<td>2006-2009
</td></tr>
<tr>
<td><i><a href="/wiki/Clinical_Lung_Cancer" title="Clinical Lung Cancer">Clinical Lung Cancer</a></i></td>
<td>Oncology</td>
<td><a href="/wiki/Elsevier" title="Elsevier">Elsevier</a></td>
<td>English</td>
<td>1999-present
</td></tr>
<tr>
<td><i><a href="/wiki/Clinical_Lymphoma,_Myeloma_%26_Leukemia" title="Clinical Lymphoma, Myeloma & Leukemia">Clinical Lymphoma, Myeloma & Leukemia</a></i></td>
<td>Oncology</td>
<td><a href="/wiki/Elsevier" title="Elsevier">Elsevier</a></td>
<td>English</td>
<td>2000-present
</td></tr>
<tr>
<td><i><a href="/wiki/Clinical_Medicine:_Oncology" class="mw-redirect" title="Clinical Medicine: Oncology">Clinical Medicine: Oncology</a></i></td>
<td>Oncology</td>
<td><a href="/wiki/Libertas_Academica" title="Libertas Academica">Libertas Academica</a></td>
<td>English</td>
<td>2007-present
</td></tr>
<tr>
<td><i><a href="/wiki/Clinical_Microbiology_Reviews" title="Clinical Microbiology Reviews">Clinical Microbiology Reviews</a></i></td>
<td>Infectious Disease</td>
<td><a href="/wiki/American_Society_for_Microbiology" title="American Society for Microbiology">American Society for Microbiology</a></td>
<td>English</td>
<td>1988-present
</td></tr>
<tr>
<td><i><a href="/wiki/Clinical_Ovarian_Cancer" class="mw-redirect" title="Clinical Ovarian Cancer">Clinical Ovarian Cancer</a></i></td>
<td>Oncology</td>
<td><a href="/wiki/Elsevier" title="Elsevier">Elsevier</a></td>
<td>English</td>
<td>2008-present
</td></tr>
<tr>
<td><i><a href="/wiki/Clinical_Pharmacology:_Advances_and_Applications" title="Clinical Pharmacology: Advances and Applications">Clinical Pharmacology: Advances and Applications</a></i></td>
<td>Pharmacology</td>
<td><a href="/wiki/Dove_Medical_Press" title="Dove Medical Press">Dove Medical Press</a></td>
<td>English</td>
<td>2010-present
</td></tr>
<tr>
<td><i><a href="/wiki/Clinical_Pharmacology_%26_Therapeutics" title="Clinical Pharmacology & Therapeutics">Clinical Pharmacology & Therapeutics</a></i>
</td>
<td>Pharmacology
</td>
<td><a href="/wiki/Wiley-Blackwell" title="Wiley-Blackwell">Wiley-Blackwell</a>
</td>
<td>English
</td>
<td>1960-present
</td></tr>
<tr>
<td><i><a href="/wiki/Clinical_Science_(journal)" title="Clinical Science (journal)">Clinical Science</a></i></td>
<td>Medicine</td>
<td><a href="/wiki/Portland_Press" title="Portland Press">Portland Press</a></td>
<td>English</td>
<td>1909-present
</td></tr>
<tr>
<td><i><a href="/wiki/Clinical_Toxicology" title="Clinical Toxicology">Clinical Toxicology</a></i></td>
<td>Toxicology</td>
<td><a href="/wiki/Taylor_and_Francis_Group" class="mw-redirect" title="Taylor and Francis Group">Taylor and Francis Group</a></td>
<td>English</td>
<td>1968-present
</td></tr>
<tr>
<td><i><a href="/wiki/Comprehensive_Therapy" title="Comprehensive Therapy">Comprehensive Therapy</a></i></td>
<td>Medicine</td>
<td><a href="/wiki/Humana_Press" title="Humana Press">Humana Press</a></td>
<td>English</td>
<td>1975-2010
</td></tr>
<tr>
<td><i><a href="/wiki/Contemporary_Clinical_Trials" title="Contemporary Clinical Trials">Contemporary Clinical Trials</a></i></td>
<td>Research Design</td>
<td><a href="/wiki/Elsevier" title="Elsevier">Elsevier</a></td>
<td>English</td>
<td>1980-present
</td></tr>
<tr>
<td><i><a href="/wiki/COPD:_Journal_of_Chronic_Obstructive_Pulmonary_Disease" title="COPD: Journal of Chronic Obstructive Pulmonary Disease">COPD: Journal of Chronic Obstructive Pulmonary Disease</a></i></td>
<td>Respiratory Health</td>
<td><a href="/wiki/Informa_Healthcare" class="mw-redirect" title="Informa Healthcare">Informa Healthcare</a></td>
<td>English</td>
<td>2004-present
</td></tr>
<tr>
<td><i><a href="/wiki/Critical_Care_Medicine" class="mw-redirect" title="Critical Care Medicine">Critical Care Medicine</a></i></td>
<td>Emergency Medicine</td>
<td><a href="/wiki/Lippincott_Williams_%26_Wilkins" title="Lippincott Williams & Wilkins">Lippincott Williams & Wilkins</a></td>
<td>English</td>
<td>1973-present
</td></tr>
<tr>
<td><i><a href="/wiki/Critical_Reviews_in_Microbiology" title="Critical Reviews in Microbiology">Critical Reviews in Microbiology</a></i></td>
<td>Infectious Disease</td>
<td><a href="/wiki/Taylor_and_Francis_Group" class="mw-redirect" title="Taylor and Francis Group">Taylor and Francis Group</a></td>
<td>English</td>
<td>1971-present
</td></tr>
<tr>
<td><i><a href="/wiki/Critical_Reviews_in_Oncogenesis" title="Critical Reviews in Oncogenesis">Critical Reviews in Oncogenesis</a></i></td>
<td>Oncology</td>
<td><a href="/wiki/Taylor_and_Francis_Group" class="mw-redirect" title="Taylor and Francis Group">Taylor and Francis Group</a></td>
<td>English</td>
<td>1994-present
</td></tr>
<tr>
<td><i><a href="/wiki/Critical_Reviews_in_Toxicology" title="Critical Reviews in Toxicology">Critical Reviews in Toxicology</a></i></td>
<td>Toxicology</td>
<td><a href="/wiki/Taylor_and_Francis_Group" class="mw-redirect" title="Taylor and Francis Group">Taylor and Francis Group</a></td>
<td>English</td>
<td>1971-present
</td></tr>
<tr>
<td><i><a href="/wiki/Current_Gene_Therapy" title="Current Gene Therapy">Current Gene Therapy</a></i></td>
<td>Gene Therapy</td>
<td><a href="/wiki/Bentham_Science_Publishers" title="Bentham Science Publishers">Bentham Science Publishers</a></td>
<td>English</td>
<td>2001-present
</td></tr>
<tr>
<td><i><a href="/wiki/Current_Medical_Research_and_Opinion" title="Current Medical Research and Opinion">Current Medical Research and Opinion</a></i></td>
<td>Medicine</td>
<td><a href="/wiki/Taylor_and_Francis_Group" class="mw-redirect" title="Taylor and Francis Group">Taylor and Francis Group</a></td>
<td>English</td>
<td>1972-present
</td></tr>
<tr>
<td><i><a href="/wiki/Current_Pain_and_Headache_Reports" title="Current Pain and Headache Reports">Current Pain and Headache Reports</a></i></td>
<td>Headache</td>
<td><a href="/wiki/Springer_Science%2BBusiness_Media" title="Springer Science+Business Media">Springer Science+Business Media</a></td>
<td>English</td>
<td>1994-present
</td></tr>
<tr>
<td><i><a href="/wiki/Cutaneous_and_Ocular_Toxicology" title="Cutaneous and Ocular Toxicology">Cutaneous and Ocular Toxicology</a></i></td>
<td>Toxicology</td>
<td><a href="/wiki/Taylor_and_Francis_Group" class="mw-redirect" title="Taylor and Francis Group">Taylor and Francis Group</a></td>
<td>English</td>
<td>1982-present
</td></tr>
<tr>
<td><i><a href="/wiki/DARU_(journal)" class="mw-redirect" title="DARU (journal)">DARU Journal of Pharmaceutical Sciences</a></i></td>
<td>Pharmacy</td>
<td><a href="/wiki/BioMed_Central" title="BioMed Central">BioMed Central</a></td>
<td>English</td>
<td>1990-present
</td></tr>
<tr>
<td><i><a href="/wiki/Deutsche_Medizinische_Wochenschrift" title="Deutsche Medizinische Wochenschrift">Deutsche Medizinische Wochenschrift</a></i></td>
<td>Medicine</td>
<td><a href="/wiki/Thieme_Medical_Publishers" title="Thieme Medical Publishers">Thieme Medical Publishers</a></td>
<td>German</td>
<td>1875-present
</td></tr>
<tr>
<td><i><a href="/wiki/Developmental_Neurorehabilitation" title="Developmental Neurorehabilitation">Developmental Neurorehabilitation</a></i></td>
<td>Neurology, Pediatrics</td>
<td><a href="/wiki/Taylor_and_Francis_Group" class="mw-redirect" title="Taylor and Francis Group">Taylor and Francis Group</a></td>
<td>English</td>
<td>1997-present
</td></tr>
<tr>
<td><i><a href="/wiki/Diabetes_(journal)" title="Diabetes (journal)">Diabetes</a></i></td>
<td>Diabetes</td>
<td><a href="/wiki/American_Diabetes_Association" title="American Diabetes Association">American Diabetes Association</a></td>
<td>English</td>
<td>1952-present
</td></tr>
<tr>
<td><i><a href="/wiki/Diabetes_and_Vascular_Disease_Research" title="Diabetes and Vascular Disease Research">Diabetes and Vascular Disease Research</a></i></td>
<td>Diabetes</td>
<td><a href="/wiki/SAGE_Publications" class="mw-redirect" title="SAGE Publications">SAGE Publications</a></td>
<td>English</td>
<td>2004-present
</td></tr>
<tr>
<td><i><a href="/wiki/Diabetes_Care" title="Diabetes Care">Diabetes Care</a></i></td>
<td>Diabetes</td>
<td><a href="/wiki/American_Diabetes_Association" title="American Diabetes Association">American Diabetes Association</a></td>
<td>English</td>
<td>1978-present
</td></tr>
<tr>
<td><i><a href="/wiki/Diabetes,_Metabolic_Syndrome_and_Obesity:_Targets_and_Therapy" title="Diabetes, Metabolic Syndrome and Obesity: Targets and Therapy">Diabetes, Metabolic Syndrome and Obesity: Targets and Therapy</a></i></td>
<td>Diabetes</td>
<td><a href="/wiki/Dove_Medical_Press" title="Dove Medical Press">Dove Medical Press</a></td>
<td>English</td>
<td>2008-present
</td></tr>
<tr>
<td><i><a href="/wiki/Drug_and_Alcohol_Dependence_(journal)" title="Drug and Alcohol Dependence (journal)">Drug and Alcohol Dependence</a></i></td>
<td>Addiction</td>
<td><a href="/wiki/Elsevier" title="Elsevier">Elsevier</a></td>
<td>English</td>
<td>1975-present
</td></tr>
<tr>
<td><i><a href="/wiki/Emergency_Medicine_Journal" title="Emergency Medicine Journal">Emergency Medicine Journal</a></i></td>
<td>Emergency Medicine</td>
<td><a href="/wiki/BMJ_Group" class="mw-redirect" title="BMJ Group">BMJ Group</a></td>
<td>English</td>
<td>1983-present
</td></tr>
<tr>
<td><i><a href="/wiki/Endocrinology_(journal)" title="Endocrinology (journal)">Endocrinology</a></i></td>
<td>Endocrinology</td>
<td><a href="/wiki/The_Endocrine_Society" class="mw-redirect" title="The Endocrine Society">The Endocrine Society</a></td>
<td>English</td>
<td>1917-present
</td></tr>
<tr>
<td><i><a href="/wiki/Epilepsy_Currents" title="Epilepsy Currents">Epilepsy Currents</a></i></td>
<td>Epilepsy</td>
<td><a href="/wiki/Allen_Press" title="Allen Press">Allen Press</a></td>
<td>English</td>
<td>2001-present
</td></tr>
<tr>
<td><i><a href="/wiki/European_Journal_of_Cancer_Prevention" title="European Journal of Cancer Prevention">European Journal of Cancer Prevention</a></i></td>
<td>Oncology</td>
<td><a href="/wiki/Lippincott_Williams_%26_Wilkins" title="Lippincott Williams & Wilkins">Lippincott Williams & Wilkins</a></td>
<td>English</td>
<td>1991-present
</td></tr>
<tr>
<td><i><a href="/wiki/European_Journal_of_General_Practice" title="European Journal of General Practice">European Journal of General Practice</a></i></td>
<td>Family medicine</td>
<td><a href="/wiki/Taylor_%26_Francis" title="Taylor & Francis">Taylor & Francis</a></td>
<td>English</td>
<td>1995-present
</td></tr>
<tr>
<td><i><a href="/wiki/European_Journal_of_Medical_Research" title="European Journal of Medical Research">European Journal of Medical Research</a></i></td>
<td>Clinical research</td>
<td><a href="/wiki/BioMed_Central" title="BioMed Central">BioMed Central</a></td>
<td>English</td>
<td>1995-present
</td></tr>
<tr>
<td><i><a href="/wiki/European_Journal_of_Palliative_Care" title="European Journal of Palliative Care">European Journal of Palliative Care</a></i></td>
<td>Palliative Care</td>
<td><a href="/wiki/Hayward_Medical_Communications" title="Hayward Medical Communications">Hayward Medical Communications</a></td>
<td>English</td>
<td>1994-present
</td></tr>
<tr>
<td><i><a href="/wiki/European_Journal_of_Physiotherapy" title="European Journal of Physiotherapy">European Journal of Physiotherapy</a></i></td>
<td>Physical Therapy</td>
<td><a href="/wiki/Taylor_and_Francis_Group" class="mw-redirect" title="Taylor and Francis Group">Taylor and Francis Group</a></td>
<td>English</td>
<td>1999-present
</td></tr>
<tr>
<td><i><a href="/wiki/European_Medical_Journal" title="European Medical Journal">European Medical Journal</a></i></td>
<td>Medicine</td>
<td>European Medical Journal</td>
<td>English</td>
<td>2012-present
</td></tr>
<tr>
<td><i><a href="/wiki/European_Radiology" title="European Radiology">European Radiology</a></i></td>
<td>Radiology</td>
<td><a href="/wiki/Springer_Science%2BBusiness_Media" title="Springer Science+Business Media">Springer Science+Business Media</a></td>
<td>English</td>
<td>1991-present
</td></tr>
<tr>
<td><i><a href="/wiki/Expert_Opinion_on_Biological_Therapy" title="Expert Opinion on Biological Therapy">Expert Opinion on Biological Therapy</a></i></td>
<td>Therapeutics</td>
<td><a href="/wiki/Taylor_and_Francis_Group" class="mw-redirect" title="Taylor and Francis Group">Taylor and Francis Group</a></td>
<td>English</td>
<td>2001-present
</td></tr>
<tr>
<td><i><a href="/wiki/Expert_Opinion_on_Drug_Delivery" title="Expert Opinion on Drug Delivery">Expert Opinion on Drug Delivery</a></i></td>
<td>Pharmacology</td>
<td><a href="/wiki/Taylor_and_Francis_Group" class="mw-redirect" title="Taylor and Francis Group">Taylor and Francis Group</a></td>
<td>English</td>
<td>2004-present
</td></tr>
<tr>
<td><i><a href="/wiki/Expert_Opinion_on_Drug_Discovery" title="Expert Opinion on Drug Discovery">Expert Opinion on Drug Discovery</a></i></td>
<td>Pharmacology</td>
<td><a href="/wiki/Taylor_and_Francis_Group" class="mw-redirect" title="Taylor and Francis Group">Taylor and Francis Group</a></td>
<td>English</td>
<td>2006-present
</td></tr>
<tr>
<td><i><a href="/wiki/Expert_Opinion_on_Drug_Metabolism_%26_Toxicology" title="Expert Opinion on Drug Metabolism & Toxicology">Expert Opinion on Drug Metabolism & Toxicology</a></i></td>
<td>Pharmacology</td>
<td><a href="/wiki/Taylor_and_Francis_Group" class="mw-redirect" title="Taylor and Francis Group">Taylor and Francis Group</a></td>
<td>English</td>
<td>2005-present
</td></tr>
<tr>
<td><i><a href="/wiki/Expert_Opinion_on_Drug_Safety" title="Expert Opinion on Drug Safety">Expert Opinion on Drug Safety</a></i></td>
<td>Pharmacology</td>
<td><a href="/wiki/Informa" title="Informa">Informa</a></td>
<td>English</td>
<td>2002-present
</td></tr>
<tr>
<td><i><a href="/wiki/Expert_Opinion_on_Emerging_Drugs" title="Expert Opinion on Emerging Drugs">Expert Opinion on Emerging Drugs</a></i></td>
<td>Pharmacology</td>
<td><a href="/wiki/Informa" title="Informa">Informa</a></td>
<td>English</td>
<td>1996-present
</td></tr>
<tr>
<td><i><a href="/wiki/Expert_Opinion_on_Investigational_Drugs" title="Expert Opinion on Investigational Drugs">Expert Opinion on Investigational Drugs</a></i></td>
<td>Pharmacology</td>
<td><a href="/wiki/Informa" title="Informa">Informa</a></td>
<td>English</td>
<td>1992-present
</td></tr>
<tr>
<td><i><a href="/wiki/Expert_Opinion_on_Medical_Diagnostics" title="Expert Opinion on Medical Diagnostics">Expert Opinion on Medical Diagnostics</a></i></td>
<td>Diagnostics</td>
<td><a href="/wiki/Informa" title="Informa">Informa</a></td>
<td>English</td>
<td>2007-2013
</td></tr>
<tr>
<td><i><a href="/wiki/Expert_Opinion_on_Pharmacotherapy" title="Expert Opinion on Pharmacotherapy">Expert Opinion on Pharmacotherapy</a></i></td>
<td>Pharmacology</td>
<td><a href="/wiki/Informa" title="Informa">Informa</a></td>
<td>English</td>
<td>1999-present
</td></tr>
<tr>
<td><i><a href="/wiki/Expert_Opinion_on_Therapeutic_Patents" title="Expert Opinion on Therapeutic Patents">Expert Opinion on Therapeutic Patents</a></i></td>
<td>Patents</td>
<td><a href="/wiki/Informa" title="Informa">Informa</a></td>
<td>English</td>
<td>1991-present
</td></tr>
<tr>
<td><i><a href="/wiki/Expert_Opinion_on_Therapeutic_Targets" title="Expert Opinion on Therapeutic Targets">Expert Opinion on Therapeutic Targets</a></i></td>
<td>Drug design</td>
<td><a href="/wiki/Informa" title="Informa">Informa</a></td>
<td>English</td>
<td>1997-present
</td></tr>
<tr>
<td><i><a href="/wiki/Expert_Review_of_Clinical_Pharmacology" title="Expert Review of Clinical Pharmacology">Expert Review of Clinical Pharmacology</a></i></td>
<td>Clinical pharmacology</td>
<td><a href="/wiki/Informa" title="Informa">Informa</a></td>
<td>English</td>
<td>2008-present
</td></tr>
<tr>
<td><i><a href="/wiki/Family_Practice_(journal)" title="Family Practice (journal)">Family Practice (journal)</a></i></td>
<td>Family medicine</td>
<td><a href="/wiki/Oxford_University_Press" title="Oxford University Press">Oxford University Press</a></td>
<td>English</td>
<td>1984-present
</td></tr>
<tr>
<td><i><a href="/wiki/Future_Oncology" title="Future Oncology">Future Oncology</a></i></td>
<td>Oncology</td>
<td><a href="/wiki/Future_Medicine_Ltd" class="mw-redirect" title="Future Medicine Ltd">Future Medicine Ltd</a></td>
<td>English</td>
<td>2005-present
</td></tr>
<tr>
<td><i><a href="/wiki/Gastroenterology_(journal)" title="Gastroenterology (journal)">Gastroenterology</a></i></td>
<td>Gastroenterology</td>
<td><a href="/wiki/Elsevier" title="Elsevier">Elsevier</a></td>
<td>English</td>
<td>1943-present
</td></tr>
<tr>
<td><i><a href="/wiki/Gynecologic_Oncology" class="mw-redirect" title="Gynecologic Oncology">Gynecologic Oncology</a></i></td>
<td>Oncology, Gynecology</td>
<td><a href="/wiki/Elsevier" title="Elsevier">Elsevier</a></td>
<td>English</td>
<td>1972-present
</td></tr>
<tr>
<td><i><a href="/wiki/Hand_Surgery_(journal)" title="Hand Surgery (journal)">Hand Surgery</a></i></td>
<td>Surgery</td>
<td><a href="/wiki/World_Scientific" title="World Scientific">World Scientific</a></td>
<td>English</td>
<td>1996-present
</td></tr>
<tr>
<td><i><a href="/wiki/Harefuah" title="Harefuah">Harefuah</a></i></td>
<td>Medicine</td>
<td><a href="/wiki/Israel_Medical_Association" title="Israel Medical Association">Israel Medical Association</a></td>
<td>Hebrew</td>
<td>1920-present
</td></tr>
<tr>
<td><i><a href="/wiki/Heart_(journal)" title="Heart (journal)">Heart</a></i></td>
<td>Cardiology</td>
<td><a href="/wiki/BMJ_Group" class="mw-redirect" title="BMJ Group">BMJ Group</a></td>
<td>English</td>
<td>1939-present
</td></tr>
<tr>
<td><i><a href="/wiki/Hepatitis_Monthly" title="Hepatitis Monthly">Hepatitis Monthly</a></i></td>
<td>Hepatitis</td>
<td><a href="/wiki/Kowsar" title="Kowsar">Kowsar</a></td>
<td>English</td>
<td>2002-present
</td></tr>
<tr>
<td><i><a href="/wiki/Hormone_Research_(journal)" class="mw-redirect" title="Hormone Research (journal)">Hormone Research</a></i></td>
<td>Endocrinology</td>
<td><a href="/wiki/Karger_Publishers" title="Karger Publishers">Karger Publishers</a></td>
<td>English</td>
<td>1970-present
</td></tr>
<tr>
<td><i><a href="/wiki/Hospital_Practice" title="Hospital Practice">Hospital Practice</a></i></td>
<td>Medicine</td>
<td><a href="/wiki/Informa_Healthcare" class="mw-redirect" title="Informa Healthcare">Informa Healthcare</a></td>
<td>English</td>
<td>1966-present
</td></tr>
<tr>
<td><i><a href="/wiki/Human_Pathology" title="Human Pathology">Human Pathology</a></i></td>
<td>Pathology</td>
<td><a href="/wiki/Saunders" title="Saunders">Saunders</a></td>
<td>English</td>
<td>1970-present
</td></tr>
<tr>
<td><i><a href="/wiki/Human_Reproduction" class="mw-redirect" title="Human Reproduction">Human Reproduction</a></i></td>
<td>Reproductive medicine</td>
<td><a href="/wiki/Oxford_University_Press" title="Oxford University Press">Oxford University Press</a></td>
<td>English</td>
<td>1986-present
</td></tr>
<tr>
<td><i><a href="/wiki/Hypertension_(journal)" title="Hypertension (journal)">Hypertension</a></i></td>
<td>Cardiology</td>
<td><a href="/wiki/American_Heart_Association" title="American Heart Association">American Heart Association</a></td>
<td>English</td>
<td>1979-present
</td></tr>
<tr>
<td><i><a href="/wiki/Immunogenetics_(journal)" title="Immunogenetics (journal)">Immunogenetics</a></i></td>
<td>Immunology, Genetics</td>
<td><a href="/wiki/Springer_Science%2BBusiness_Media" title="Springer Science+Business Media">Springer Science+Business Media</a></td>
<td>English</td>
<td>1974-present
</td></tr>
<tr>
<td><i><a href="/wiki/Indian_Journal_of_Anaesthesia" title="Indian Journal of Anaesthesia">Indian Journal of Anaesthesia</a></i></td>
<td>Anaesthesiology</td>
<td><a href="/wiki/Medknow_Publications" title="Medknow Publications">Medknow Publications</a></td>
<td>English</td>
<td>2002-present
</td></tr>
<tr>
<td><i><a href="/wiki/Indian_Journal_of_Dermatology" title="Indian Journal of Dermatology">Indian Journal of Dermatology</a></i></td>
<td>Dermatology</td>
<td><a href="/wiki/Medknow_Publications" title="Medknow Publications">Medknow Publications</a></td>
<td>English</td>
<td>1955-present
</td></tr>
<tr>
<td><i><a href="/wiki/Indian_Journal_of_Dermatology,_Venereology_and_Leprology" title="Indian Journal of Dermatology, Venereology and Leprology">Indian Journal of Dermatology, Venereology and Leprology</a></i></td>
<td>Dermatology</td>
<td><a href="/wiki/Medknow_Publications" title="Medknow Publications">Medknow Publications</a></td>
<td>English</td>
<td>1990-present
</td></tr>
<tr>
<td><i><a href="/wiki/Indian_Journal_of_Gastroenterology" title="Indian Journal of Gastroenterology">Indian Journal of Gastroenterology</a></i></td>
<td>Gastroenterology</td>
<td><a href="/w/index.php?title=Indian_Society_of_Gastroenterology&action=edit&redlink=1" class="new" title="Indian Society of Gastroenterology (page does not exist)">Indian Society of Gastroenterology</a></td>
<td>English</td>
<td>1982-present
</td></tr>
<tr>
<td><i><a href="/wiki/Indian_Journal_of_Medical_Microbiology" title="Indian Journal of Medical Microbiology">Indian Journal of Medical Microbiology</a></i></td>
<td>Infectious Disease</td>
<td><a href="/wiki/Medknow_Publications" title="Medknow Publications">Medknow Publications</a></td>
<td>English</td>
<td>1983-present
</td></tr>
<tr>
<td><i><a href="/wiki/Indian_Journal_of_Medical_Research" title="Indian Journal of Medical Research">Indian Journal of Medical Research</a></i></td>
<td>Medicine</td>
<td><a href="/wiki/Medknow_Publications" title="Medknow Publications">Medknow Publications</a></td>
<td>English</td>
<td>1913-present
</td></tr>
<tr>
<td><i><a href="/wiki/Indian_Journal_of_Medical_Sciences" title="Indian Journal of Medical Sciences">Indian Journal of Medical Sciences</a></i></td>
<td>Medicine</td>
<td><a href="/wiki/Medknow_Publications" title="Medknow Publications">Medknow Publications</a></td>
<td>English</td>
<td>1947-present
</td></tr>
<tr>
<td><i><a href="/wiki/Indian_Journal_of_Ophthalmology" title="Indian Journal of Ophthalmology">Indian Journal of Ophthalmology</a></i></td>
<td>Ophthalmology</td>
<td><a href="/wiki/Medknow_Publications" title="Medknow Publications">Medknow Publications</a></td>
<td>English</td>
<td>1953-present
</td></tr>
<tr>
<td><i><a href="/wiki/Indian_Journal_of_Pharmacology" title="Indian Journal of Pharmacology">Indian Journal of Pharmacology</a></i></td>
<td>Pharmacology</td>
<td><a href="/wiki/Medknow_Publications" title="Medknow Publications">Medknow Publications</a></td>
<td>English</td>
<td>1969-present
</td></tr>
<tr>
<td><i><a href="/wiki/Indian_Pacing_and_Electrophysiology_Journal" title="Indian Pacing and Electrophysiology Journal">Indian Pacing and Electrophysiology Journal</a></i></td>
<td>Cardiology</td>
<td><a href="/wiki/Elsevier" title="Elsevier">Elsevier</a></td>
<td>English</td>
<td>2001-present
</td></tr>
<tr>
<td><i><a href="/wiki/International_Archives_of_Medicine" title="International Archives of Medicine">International Archives of Medicine</a></i></td>
<td>Medicine</td>
<td><a href="/w/index.php?title=IMed.pub&action=edit&redlink=1" class="new" title="IMed.pub (page does not exist)">iMed.pub</a></td>
<td>English</td>
<td>2008-present
</td></tr>
<tr>
<td><i><a href="/wiki/International_Journal_of_Geriatric_Psychiatry" title="International Journal of Geriatric Psychiatry">International Journal of Geriatric Psychiatry</a></i></td>
<td>Geriatrics, Psychology</td>
<td><a href="/wiki/John_Wiley_%26_Sons" title="John Wiley & Sons">John Wiley & Sons</a></td>
<td>English</td>
<td>1986-present
</td></tr>
<tr>
<td><i><a href="/wiki/International_Journal_of_Medical_Sciences" title="International Journal of Medical Sciences">International Journal of Medical Sciences</a></i></td>
<td>Medicine</td>
<td><a href="/wiki/Ivyspring_International_Publisher" title="Ivyspring International Publisher">Ivyspring International Publisher</a></td>
<td>English</td>
<td>2004-present
</td></tr>
<tr>
<td><i><a href="/wiki/International_Journal_of_Obesity" title="International Journal of Obesity">International Journal of Obesity</a></i></td>
<td>Obesity</td>
<td><a href="/wiki/Nature_Publishing_Group" title="Nature Publishing Group">Nature Publishing Group</a></td>
<td>English</td>
<td>1977-present
</td></tr>
<tr>
<td><i><a href="/wiki/International_Journal_of_Psychoanalysis" class="mw-redirect" title="International Journal of Psychoanalysis">International Journal of Psychoanalysis</a></i></td>
<td>Psychology</td>
<td><a href="/wiki/Wiley-Blackwell" title="Wiley-Blackwell">Wiley-Blackwell</a></td>
<td>English</td>
<td>1920-present
</td></tr>
<tr>
<td><i><a href="/wiki/International_Journal_of_Speech-Language_Pathology" title="International Journal of Speech-Language Pathology">International Journal of Speech-Language Pathology</a></i></td>
<td>Speech Pathology</td>
<td><a href="/wiki/Informa" title="Informa">Informa</a></td>
<td>English</td>
<td>1999-present
</td></tr>
<tr>
<td><i><a href="/wiki/International_Journal_of_Surgery" title="International Journal of Surgery">International Journal of Surgery</a></i></td>
<td>Surgery</td>
<td><a href="/wiki/Elsevier" title="Elsevier">Elsevier</a></td>
<td>English</td>
<td>2003-present
</td></tr>
<tr>
<td><i><a href="/wiki/Investigative_Ophthalmology_%26_Visual_Science" title="Investigative Ophthalmology & Visual Science">Investigative Ophthalmology & Visual Science</a></i></td>
<td>Ophthalmology</td>
<td><a href="/w/index.php?title=Cadmus_(publisher)&action=edit&redlink=1" class="new" title="Cadmus | |
in keys])
# make weights if None
if wgts is None:
wgts = odict()
for i, k in enumerate(keys):
wgts[k] = np.ones_like(ydata[k], dtype=np.float)
# fill nans and infs
fill_dict_nans(ydata, wgts=wgts, nan_fill=0.0, inf_fill=0.0)
# setup linsolve equations
eqns = odict([(k, "phi_{}_{} - phi_{}_{}".format(k[0], split_pol(k[2])[0],
k[1], split_pol(k[2])[1])) for i, k in enumerate(keys)])
ls_design_matrix = odict()
# setup linsolve dictionaries
ls_data = odict([(eqns[k], ydata[k]) for i, k in enumerate(keys)])
ls_wgts = odict([(eqns[k], wgts[k]) for i, k in enumerate(keys)])
# get unique gain polarizations
gain_pols = np.unique(list(map(lambda k: list(split_pol(k[2])), keys)))
# set reference antenna phase to zero
if refant is None:
refant = keys[0][0]
assert np.array(list(map(lambda k: refant in k, keys))).any(), "refant {} not found in data and model".format(refant)
for p in gain_pols:
ls_data['phi_{}_{}'.format(refant, p)] = np.zeros_like(list(ydata.values())[0])
ls_wgts['phi_{}_{}'.format(refant, p)] = np.ones_like(list(wgts.values())[0])
# setup linsolve and run
sol = linsolve.LinearSolver(ls_data, wgts=ls_wgts, **ls_design_matrix)
echo("...running linsolve", verbose=verbose)
fit = sol.solve()
echo("...finished linsolve", verbose=verbose)
return fit
def delay_lincal(model, data, wgts=None, refant=None, df=9.765625e4, f0=0., solve_offsets=True, medfilt=True,
                 kernel=(1, 5), verbose=True, antpos=None, four_pol=False, edge_cut=0):
    """
    Solve for per-antenna delays according to the equation
    delay(V_ij,xy^data / V_ij,xy^model) = delay(g_i_x) - delay(g_j_y)
    Can also solve for per-antenna phase offsets with the solve_offsets kwarg.
    Parameters:
    -----------
    model : visibility data of refence model, type=DataContainer
            keys are antenna-pair + polarization tuples, Ex. (1, 2, 'nn').
            values are complex ndarray visibilities.
            these must 2D arrays, with [0] axis indexing time
            and [1] axis indexing frequency.
    data : visibility data of measurements, type=DataContainer
           keys are antenna pair + pol tuples (must match model), values are
           complex ndarray visibilities matching shape of model
    wgts : weights of data, type=DataContainer, [default=None]
           keys are antenna pair + pol tuples (must match model), values are real floats
           matching shape of model and data. These are only used to find delays from
           itegrations that are unflagged for at least two frequency bins. In this case,
           the delays are assumed to have equal weight, otherwise the delays take zero weight.
    refant : antenna number integer to use as reference
        Set the reference antenna to have zero delay, such that its phase is set to identically
        zero across all freqs. By default use the first key in data.
    df : type=float, frequency spacing between channels in Hz
    f0 : type=float, frequency of the first channel in the data (used for offsets)
    medfilt : type=boolean, median filter visiblity ratio before taking fft
    kernel : type=tuple, dtype=int, kernel for multi-dimensional median filter
    antpos : type=dictionary, antpos dictionary. antenna num as key, position vector as value.
    four_pol : type=boolean, if True, fit multiple polarizations together
    edge_cut : int, number of channels to exclude at each band edge in FFT window
    Output:
    -------
    fit : dictionary containing delay (tau_i_x) for each antenna and optionally
          offset (phi_i_x) for each antenna.
    """
    # NOTE(review): solve_offsets, antpos and four_pol are accepted but never read
    # in this body — offsets are always solved; kwargs kept for interface parity.
    echo("...configuring linsolve data for delay_lincal", verbose=verbose)
    # get shared keys
    keys = sorted(set(model.keys()) & set(data.keys()))
    # make unit wgts if None
    if wgts is None:
        wgts = odict()
        for i, k in enumerate(keys):
            # np.float was removed in NumPy 1.24; use the explicit float64 dtype
            # (identical semantics: np.float was an alias for Python float).
            wgts[k] = np.ones_like(data[k], dtype=np.float64)
    # median filter and FFT to get per-baseline delay estimates
    ratio_delays = []
    ratio_offsets = []
    ratio_wgts = []
    for i, k in enumerate(keys):
        ratio = data[k] / model[k]
        # replace nans (e.g. from 0/0) with zero and zero their weights
        nan_select = np.isnan(ratio)
        ratio[nan_select] = 0.0
        wgts[k][nan_select] = 0.0
        # replace infs (e.g. from x/0) with zero and zero their weights
        inf_select = np.isinf(ratio)
        ratio[inf_select] = 0.0
        wgts[k][inf_select] = 0.0
        # get delays via FFT of the data/model ratio
        dly, offset = utils.fft_dly(ratio, df, f0=f0, wgts=wgts[k], medfilt=medfilt, kernel=kernel, edge_cut=edge_cut)
        # set nan delays to zero and zero their weights so they don't bias the fit
        rwgts = np.nanmean(wgts[k], axis=1, keepdims=True)
        isnan = np.isnan(dly)
        dly[isnan] = 0.0
        rwgts[isnan] = 0.0
        offset[isnan] = 0.0
        ratio_delays.append(dly)
        ratio_offsets.append(offset)
        ratio_wgts.append(rwgts)
    ratio_delays = np.array(ratio_delays)
    ratio_offsets = np.array(ratio_offsets)
    ratio_wgts = np.array(ratio_wgts)
    # form ydata: per-baseline delays keyed like the input containers
    ydata = odict(zip(keys, ratio_delays))
    # form wgts
    ywgts = odict(zip(keys, ratio_wgts))
    # setup linsolve equation dictionary: dly(V_ij) = tau_i - tau_j
    eqns = odict([(k, 'tau_{}_{} - tau_{}_{}'.format(k[0], split_pol(k[2])[0],
                                                     k[1], split_pol(k[2])[1])) for i, k in enumerate(keys)])
    # setup design matrix dictionary
    ls_design_matrix = odict()
    # setup linsolve data dictionary
    ls_data = odict([(eqns[k], ydata[k]) for i, k in enumerate(keys)])
    ls_wgts = odict([(eqns[k], ywgts[k]) for i, k in enumerate(keys)])
    # get unique gain polarizations
    gain_pols = np.unique(list(map(lambda k: [split_pol(k[2])[0], split_pol(k[2])[1]], keys)))
    # pin the reference antenna delay to zero to break the degeneracy
    if refant is None:
        refant = keys[0][0]
    assert np.array(list(map(lambda k: refant in k, keys))).any(), "refant {} not found in data and model".format(refant)
    for p in gain_pols:
        ls_data['tau_{}_{}'.format(refant, p)] = np.zeros_like(list(ydata.values())[0])
        ls_wgts['tau_{}_{}'.format(refant, p)] = np.ones_like(list(ywgts.values())[0])
    # setup linsolve and run for the delays
    sol = linsolve.LinearSolver(ls_data, wgts=ls_wgts, **ls_design_matrix)
    echo("...running linsolve", verbose=verbose)
    fit = sol.solve()
    echo("...finished linsolve", verbose=verbose)
    # second solve: per-antenna phase offsets, phi_i - phi_j, same weights
    ydata = odict(zip(keys, ratio_offsets))
    eqns = odict([(k, 'phi_{}_{} - phi_{}_{}'.format(k[0], split_pol(k[2])[0],
                                                     k[1], split_pol(k[2])[1])) for i, k in enumerate(keys)])
    ls_data = odict([(eqns[k], ydata[k]) for i, k in enumerate(keys)])
    ls_wgts = odict([(eqns[k], ywgts[k]) for i, k in enumerate(keys)])
    ls_design_matrix = odict()
    for p in gain_pols:
        # pin the reference antenna offset to zero as well
        ls_data['phi_{}_{}'.format(refant, p)] = np.zeros_like(list(ydata.values())[0])
        ls_wgts['phi_{}_{}'.format(refant, p)] = np.ones_like(list(ywgts.values())[0])
    sol = linsolve.LinearSolver(ls_data, wgts=ls_wgts, **ls_design_matrix)
    echo("...running linsolve", verbose=verbose)
    offset_fit = sol.solve()
    echo("...finished linsolve", verbose=verbose)
    fit.update(offset_fit)
    return fit
def delay_slope_lincal(model, data, antpos, wgts=None, refant=None, df=9.765625e4, f0=0.0, medfilt=True,
kernel=(1, 5), assume_2D=True, four_pol=False, edge_cut=0, time_avg=False,
return_gains=False, gain_ants=[], verbose=True):
"""
Solve for an array-wide delay slope according to the equation
delay(V_ij,xy^data / V_ij,xy^model) = dot(T_x, r_i) - dot(T_y, r_j)
This does not solve for per-antenna delays, but rather a delay slope across the array.
Parameters:
-----------
model : visibility data of refence model, type=DataContainer
keys are antenna-pair + polarization tuples, Ex. (1, 2, 'nn').
values are complex ndarray visibilities.
these must 2D arrays, with [0] axis indexing time
and [1] axis indexing frequency.
data : visibility data of measurements, type=DataContainer
keys are antenna pair + pol tuples (must match model), values are
complex ndarray visibilities matching shape of model
antpos : type=dictionary, antpos dictionary. antenna num as key, position vector as value.
wgts : weights of data, type=DataContainer, [default=None]
keys are antenna pair + pol tuples (must match model), values are real floats
matching shape of model and data. These are only used to find delays from
itegrations that are unflagged for at least two frequency bins. In this case,
the delays are assumed to have equal weight, otherwise the delays take zero weight.
refant : antenna number integer to use as a reference,
The antenna position coordaintes are centered at the reference, such that its phase
is identically zero across all frequencies. If None, use the first key in data as refant.
df : type=float, frequency spacing between channels in Hz
f0 : type=float, frequency of 0th channel in Hz.
Optional, but used to get gains without a delay offset.
medfilt : type=boolean, median filter visiblity ratio before taking fft
kernel : type=tuple, dtype=int, kernel for multi-dimensional median filter
assume_2D : type=boolean, [default=False]
If this is true, all dimensions of antpos beyond the first two will be ignored.
If return_gains is False and assume_2D is False, then the returned variables will
look like T_0, T_1, T_2, etc. corresponding to the dimensions in antpos.
four_pol : type=boolean, if True, fit multiple polarizations together
edge_cut : int, number of channels to exclude at each band edge of vis in FFT window
time_avg : boolean, if True, replace resultant antenna delay slope with the median across time
return_gains : boolean. If True, convert result into a dictionary of gain waterfalls.
gain_ants : list of ant-pol tuples for return_gains dictionary
Output:
-------
if not return_gains:
fit : dictionary containing delay slope (T_x) for each pol [seconds / meter].
If assume_2D is False, then these will be the more general T_0, T_1, T_2, etc.
corresponding to the dimensions in antpos, instead of T_ew or T_ns.
else:
gains: dictionary with gain_ants as keys and gain waterfall arrays as values
"""
echo("...configuring linsolve data for delay_slope_lincal", verbose=verbose)
# get shared keys
keys = sorted(set(model.keys()) & set(data.keys()))
antnums = np.unique(list(antpos.keys()))
# make unit wgts if None
if wgts is None:
wgts = {k: np.ones_like(data[k], dtype=np.float) for k in keys}
# center antenna positions about the reference antenna
if | |
= DataFrame(data, index=dr, columns=list("A"))
df_rev = DataFrame(data, index=dr[[3, 4, 5] + [0, 1, 2]], columns=list("A"))
# index is not monotonic increasing or decreasing
msg = "index must be monotonic increasing or decreasing"
with pytest.raises(ValueError, match=msg):
df_rev.reindex(df.index, method="pad")
with pytest.raises(ValueError, match=msg):
df_rev.reindex(df.index, method="ffill")
with pytest.raises(ValueError, match=msg):
df_rev.reindex(df.index, method="bfill")
with pytest.raises(ValueError, match=msg):
df_rev.reindex(df.index, method="nearest")
def test_reindex_sparse(self):
# https://github.com/pandas-dev/pandas/issues/35286
df = DataFrame(
{"A": [0, 1], "B": pd.array([0, 1], dtype=pd.SparseDtype("int64", 0))}
)
result = df.reindex([0, 2])
expected = DataFrame(
{
"A": [0.0, np.nan],
"B": pd.array([0.0, np.nan], dtype=pd.SparseDtype("float64", 0.0)),
},
index=[0, 2],
)
tm.assert_frame_equal(result, expected)
def test_reindex(self, float_frame):
    """Row reindexing: value preservation, NaN fill for new labels, and corner cases."""
    datetime_series = tm.makeTimeSeries(nper=30)
    newFrame = float_frame.reindex(datetime_series.index)
    # every retained label keeps its value (NaN compared via isnan); new labels are NaN
    for col in newFrame.columns:
        for idx, val in newFrame[col].items():
            if idx in float_frame.index:
                if np.isnan(val):
                    assert np.isnan(float_frame[col][idx])
                else:
                    assert val == float_frame[col][idx]
            else:
                assert np.isnan(val)
    for col, series in newFrame.items():
        assert tm.equalContents(series.index, newFrame.index)
    emptyFrame = float_frame.reindex(Index([]))
    assert len(emptyFrame.index) == 0
    # Cython code should be unit-tested directly
    nonContigFrame = float_frame.reindex(datetime_series.index[::2])
    # same value-preservation check against a non-contiguous subset of labels
    for col in nonContigFrame.columns:
        for idx, val in nonContigFrame[col].items():
            if idx in float_frame.index:
                if np.isnan(val):
                    assert np.isnan(float_frame[col][idx])
                else:
                    assert val == float_frame[col][idx]
            else:
                assert np.isnan(val)
    for col, series in nonContigFrame.items():
        assert tm.equalContents(series.index, nonContigFrame.index)
    # corner cases
    # Same index, copies values but not index if copy=False
    newFrame = float_frame.reindex(float_frame.index, copy=False)
    assert newFrame.index is float_frame.index
    # length zero
    newFrame = float_frame.reindex([])
    assert newFrame.empty
    assert len(newFrame.columns) == len(float_frame.columns)
    # length zero with columns reindexed with non-empty index
    newFrame = float_frame.reindex([])
    newFrame = newFrame.reindex(float_frame.index)
    assert len(newFrame.index) == len(float_frame.index)
    assert len(newFrame.columns) == len(float_frame.columns)
    # pass non-Index
    newFrame = float_frame.reindex(list(datetime_series.index))
    expected = datetime_series.index._with_freq(None)
    tm.assert_index_equal(newFrame.index, expected)
    # copy with no axes
    result = float_frame.reindex()
    tm.assert_frame_equal(result, float_frame)
    assert result is not float_frame
def test_reindex_nan(self):
df = DataFrame(
[[1, 2], [3, 5], [7, 11], [9, 23]],
index=[2, np.nan, 1, 5],
columns=["joe", "jim"],
)
i, j = [np.nan, 5, 5, np.nan, 1, 2, np.nan], [1, 3, 3, 1, 2, 0, 1]
tm.assert_frame_equal(df.reindex(i), df.iloc[j])
df.index = df.index.astype("object")
tm.assert_frame_equal(df.reindex(i), df.iloc[j], check_index_type=False)
# GH10388
df = DataFrame(
{
"other": ["a", "b", np.nan, "c"],
"date": ["2015-03-22", np.nan, "2012-01-08", np.nan],
"amount": [2, 3, 4, 5],
}
)
df["date"] = pd.to_datetime(df.date)
df["delta"] = (pd.to_datetime("2015-06-18") - df["date"]).shift(1)
left = df.set_index(["delta", "other", "date"]).reset_index()
right = df.reindex(columns=["delta", "other", "date", "amount"])
tm.assert_frame_equal(left, right)
def test_reindex_name_remains(self):
s = Series(np.random.rand(10))
df = DataFrame(s, index=np.arange(len(s)))
i = Series(np.arange(10), name="iname")
df = df.reindex(i)
assert df.index.name == "iname"
df = df.reindex(Index(np.arange(10), name="tmpname"))
assert df.index.name == "tmpname"
s = Series(np.random.rand(10))
df = DataFrame(s.T, index=np.arange(len(s)))
i = Series(np.arange(10), name="iname")
df = df.reindex(columns=i)
assert df.columns.name == "iname"
def test_reindex_int(self, int_frame):
    """Int frames keep int64 when no labels are added; NaN introduction upcasts to float64."""
    smaller = int_frame.reindex(int_frame.index[::2])
    assert smaller["A"].dtype == np.int64
    # reindexing back introduces missing rows, so NaN forces float64
    bigger = smaller.reindex(int_frame.index)
    assert bigger["A"].dtype == np.float64
    smaller = int_frame.reindex(columns=["A", "B"])
    assert smaller["A"].dtype == np.int64
def test_reindex_columns(self, float_frame):
    """Column reindexing keeps requested columns, NaN-fills new ones, drops the rest."""
    new_frame = float_frame.reindex(columns=["A", "B", "E"])
    tm.assert_series_equal(new_frame["B"], float_frame["B"])
    # "E" did not exist, so it is created as all-NaN
    assert np.isnan(new_frame["E"]).all()
    assert "C" not in new_frame
    # Length zero
    new_frame = float_frame.reindex(columns=[])
    assert new_frame.empty
def test_reindex_columns_method(self):
    """Reindexing over columns must honor the method= fill argument (GH#14992)."""
    # GH 14992, reindexing over columns ignored method
    df = DataFrame(
        data=[[11, 12, 13], [21, 22, 23], [31, 32, 33]],
        index=[1, 2, 4],
        columns=[1, 2, 4],
    )
    # default method: new columns are all-NaN
    result = df.reindex(columns=range(6))
    expected = DataFrame(
        data=[
            [np.nan, 11, 12, np.nan, 13, np.nan],
            [np.nan, 21, 22, np.nan, 23, np.nan],
            [np.nan, 31, 32, np.nan, 33, np.nan],
        ],
        index=[1, 2, 4],
        columns=range(6),
        dtype=float,
    )
    tm.assert_frame_equal(result, expected)
    # method='ffill': new columns take the previous existing column's values
    result = df.reindex(columns=range(6), method="ffill")
    expected = DataFrame(
        data=[
            [np.nan, 11, 12, 12, 13, 13],
            [np.nan, 21, 22, 22, 23, 23],
            [np.nan, 31, 32, 32, 33, 33],
        ],
        index=[1, 2, 4],
        columns=range(6),
        dtype=float,
    )
    tm.assert_frame_equal(result, expected)
    # method='bfill': new columns take the next existing column's values
    result = df.reindex(columns=range(6), method="bfill")
    expected = DataFrame(
        data=[
            [11, 11, 12, 13, 13, np.nan],
            [21, 21, 22, 23, 23, np.nan],
            [31, 31, 32, 33, 33, np.nan],
        ],
        index=[1, 2, 4],
        columns=range(6),
        dtype=float,
    )
    tm.assert_frame_equal(result, expected)
def test_reindex_axes(self):
# GH 3317, reindexing by both axes loses freq of the index
df = DataFrame(
np.ones((3, 3)),
index=[datetime(2012, 1, 1), datetime(2012, 1, 2), datetime(2012, 1, 3)],
columns=["a", "b", "c"],
)
time_freq = date_range("2012-01-01", "2012-01-03", freq="d")
some_cols = ["a", "b"]
index_freq = df.reindex(index=time_freq).index.freq
both_freq = df.reindex(index=time_freq, columns=some_cols).index.freq
seq_freq = df.reindex(index=time_freq).reindex(columns=some_cols).index.freq
assert index_freq == both_freq
assert index_freq == seq_freq
def test_reindex_fill_value(self):
df = DataFrame(np.random.randn(10, 4))
# axis=0
result = df.reindex(list(range(15)))
assert np.isnan(result.values[-5:]).all()
result = df.reindex(range(15), fill_value=0)
expected = df.reindex(range(15)).fillna(0)
tm.assert_frame_equal(result, expected)
# axis=1
result = df.reindex(columns=range(5), fill_value=0.0)
expected = df.copy()
expected[4] = 0.0
tm.assert_frame_equal(result, expected)
result = df.reindex(columns=range(5), fill_value=0)
expected = df.copy()
expected[4] = 0
tm.assert_frame_equal(result, expected)
result = df.reindex(columns=range(5), fill_value="foo")
expected = df.copy()
expected[4] = "foo"
tm.assert_frame_equal(result, expected)
# other dtypes
df["foo"] = "foo"
result = df.reindex(range(15), fill_value=0)
expected = df.reindex(range(15)).fillna(0)
tm.assert_frame_equal(result, expected)
def test_reindex_dups(self):
    """Reindex on a duplicate-labeled axis raises; direct index assignment is fine (GH#4746)."""
    # GH4746, reindex on duplicate index error messages
    arr = np.random.randn(10)
    df = DataFrame(arr, index=[1, 2, 3, 4, 5, 1, 2, 3, 4, 5])
    # set index is ok
    result = df.copy()
    result.index = list(range(len(df)))
    expected = DataFrame(arr, index=list(range(len(df))))
    tm.assert_frame_equal(result, expected)
    # reindex fails
    msg = "cannot reindex from a duplicate axis"
    with pytest.raises(ValueError, match=msg):
        df.reindex(index=list(range(len(df))))
def test_reindex_with_duplicate_columns(self):
    """Column reindexing of a frame with duplicate column labels raises ValueError."""
    # reindex is invalid!
    df = DataFrame(
        [[1, 5, 7.0], [1, 5, 7.0], [1, 5, 7.0]], columns=["bar", "a", "a"]
    )
    msg = "cannot reindex from a duplicate axis"
    with pytest.raises(ValueError, match=msg):
        df.reindex(columns=["bar"])
    with pytest.raises(ValueError, match=msg):
        df.reindex(columns=["bar", "foo"])
def test_reindex_axis_style(self):
# https://github.com/pandas-dev/pandas/issues/12392
df = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
expected = DataFrame(
{"A": [1, 2, np.nan], "B": [4, 5, np.nan]}, index=[0, 1, 3]
)
result = df.reindex([0, 1, 3])
tm.assert_frame_equal(result, expected)
result = df.reindex([0, 1, 3], axis=0)
tm.assert_frame_equal(result, expected)
result = df.reindex([0, 1, 3], axis="index")
tm.assert_frame_equal(result, expected)
def test_reindex_positional_warns(self):
    """Two positional label lists reindex index then columns, but emit FutureWarning (GH#12392)."""
    # https://github.com/pandas-dev/pandas/issues/12392
    df = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
    expected = DataFrame({"A": [1.0, 2], "B": [4.0, 5], "C": [np.nan, np.nan]})
    with tm.assert_produces_warning(FutureWarning):
        result = df.reindex([0, 1], ["A", "B", "C"])
    tm.assert_frame_equal(result, expected)
def test_reindex_axis_style_raises(self):
    """Conflicting positional/keyword/axis combinations must raise TypeError (GH#12392)."""
    # https://github.com/pandas-dev/pandas/issues/12392
    df = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
    # positional labels plus axis= is ambiguous
    with pytest.raises(TypeError, match="Cannot specify both 'axis'"):
        df.reindex([0, 1], ["A"], axis=1)
    with pytest.raises(TypeError, match="Cannot specify both 'axis'"):
        df.reindex([0, 1], ["A"], axis="index")
    # keyword index/columns plus axis= is also rejected
    with pytest.raises(TypeError, match="Cannot specify both 'axis'"):
        df.reindex(index=[0, 1], axis="index")
    with pytest.raises(TypeError, match="Cannot specify both 'axis'"):
        df.reindex(index=[0, 1], axis="columns")
    with pytest.raises(TypeError, match="Cannot specify both 'axis'"):
        df.reindex(columns=[0, 1], axis="columns")
    with pytest.raises(TypeError, match="Cannot specify both 'axis'"):
        df.reindex(index=[0, 1], columns=[0, 1], axis="columns")
    with pytest.raises(TypeError, match="Cannot specify all"):
        df.reindex([0, 1], [0], ["A"])
    # Mixing styles
    with pytest.raises(TypeError, match="Cannot specify both 'axis'"):
        df.reindex(index=[0, 1], axis="index")
    with pytest.raises(TypeError, match="Cannot specify both 'axis'"):
        df.reindex(index=[0, 1], axis="columns")
    # Duplicates
    with pytest.raises(TypeError, match="multiple values"):
        df.reindex([0, 1], labels=[0, 1])
def test_reindex_single_named_indexer(self):
# https://github.com/pandas-dev/pandas/issues/12392
df = DataFrame({"A": [1, 2, 3], "B": [1, 2, 3]})
result = df.reindex([0, 1], columns=["A"])
expected = DataFrame({"A": [1, 2]})
tm.assert_frame_equal(result, expected)
def test_reindex_api_equivalence(self):
    """labels/axis and index/columns spellings of reindex produce identical results (GH#12392)."""
    # https://github.com/pandas-dev/pandas/issues/12392
    # equivalence of the labels/axis and index/columns API's
    df = DataFrame(
        [[1, 2, 3], [3, 4, 5], [5, 6, 7]],
        index=["a", "b", "c"],
        columns=["d", "e", "f"],
    )
    # five equivalent row reindexes
    res1 = df.reindex(["b", "a"])
    res2 = df.reindex(index=["b", "a"])
    res3 = df.reindex(labels=["b", "a"])
    res4 = df.reindex(labels=["b", "a"], axis=0)
    res5 = df.reindex(["b", "a"], axis=0)
    for res in [res2, res3, res4, res5]:
        tm.assert_frame_equal(res1, res)
    # three equivalent column reindexes
    res1 = df.reindex(columns=["e", "d"])
    res2 = df.reindex(["e", "d"], axis=1)
    res3 = df.reindex(labels=["e", "d"], axis=1)
    for res in [res2, res3]:
        tm.assert_frame_equal(res1, res)
    # two positional label lists still work but warn
    with tm.assert_produces_warning(FutureWarning) as m:
        res1 = df.reindex(["b", "a"], ["e", "d"])
    assert "reindex" in str(m[0].message)
    res2 = df.reindex(columns=["e", "d"], index=["b", "a"])
    res3 = df.reindex(labels=["b", "a"], axis=0).reindex(labels=["e", "d"], axis=1)
    for res in [res2, res3]:
        tm.assert_frame_equal(res1, res)
def test_reindex_boolean(self):
frame = DataFrame(
np.ones((10, 2), dtype=bool), index=np.arange(0, 20, 2), columns=[0, 2]
)
reindexed = frame.reindex(np.arange(10))
assert reindexed.values.dtype == np.object_
assert isna(reindexed[0][1])
reindexed = frame.reindex(columns=range(3))
assert reindexed.values.dtype == np.object_
assert isna(reindexed[1]).all()
def test_reindex_objects(self, float_string_frame):
    """Column reindexing keeps requested object columns and drops unrequested ones."""
    kept = float_string_frame.reindex(columns=["foo", "A", "B"])
    assert "foo" in kept
    dropped = float_string_frame.reindex(columns=["A", "B"])
    assert "foo" not in dropped
def test_reindex_corner(self, int_frame):
    """Corner cases: column-reindexing an empty frame, and float upcast of new int columns."""
    index = Index(["a", "b", "c"])
    dm = DataFrame({}).reindex(index=[1, 2, 3])
    reindexed = dm.reindex(columns=index)
    tm.assert_index_equal(reindexed.columns, index)
    # ints are weird
    smaller = int_frame.reindex(columns=["A", "B", "E"])
    assert smaller["E"].dtype == np.float64
def test_reindex_with_nans(self):
df = DataFrame(
[[1, 2], [3, 4], [np.nan, np.nan], [7, 8], [9, 10]],
columns=["a", "b"],
index=[100.0, 101.0, np.nan, 102.0, 103.0],
)
result = df.reindex(index=[101.0, 102.0, 103.0])
expected = df.iloc[[1, 3, | |
new_vm._push(115792089237316195423570985008687907853269984665640564039457584007913129639935)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, 'OOG')
def test_SHA3_20(self):
    """SHA3(offset=0, size=1) over memory[0] == 0x58 succeeds with the reference digest.

    Dead locals produced by the test generator (price, origin, header) were removed.
    """
    constraints = ConstraintSet()
    world = evm.EVMWorld(constraints)
    address = 0x222222222222222222222222222222222222200
    caller = 0x111111111111111111111111111111111111100
    value = 10000
    bytecode = ' '
    data = 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
    gas = 1000000
    new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
    # push size first so offset ends up on top of the stack
    new_vm._push(1)
    new_vm._push(0)
    new_vm.memory[0] = 88
    last_exception, last_returned = self._execute(new_vm)
    self.assertEqual(last_exception, None)
    self.assertEqual(new_vm.pc, 1)
    # reference Keccak-256 digest of the single byte 0x58 ('X')
    self.assertEqual(new_vm.stack, [38468488817986530247887777414678086216360049057372179296543030553902011157846])
def test_SHA3_21(self):
    """SHA3(offset=1, size=1) over memory[1] == 0x58 succeeds with the reference digest.

    Dead locals produced by the test generator (price, origin, header) were removed.
    """
    constraints = ConstraintSet()
    world = evm.EVMWorld(constraints)
    address = 0x222222222222222222222222222222222222200
    caller = 0x111111111111111111111111111111111111100
    value = 10000
    bytecode = ' '
    data = 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
    gas = 1000000
    new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
    # push size first so offset ends up on top of the stack
    new_vm._push(1)
    new_vm._push(1)
    new_vm.memory[1] = 88
    last_exception, last_returned = self._execute(new_vm)
    self.assertEqual(last_exception, None)
    self.assertEqual(new_vm.pc, 1)
    # reference Keccak-256 digest of the single byte 0x58 ('X')
    self.assertEqual(new_vm.stack, [38468488817986530247887777414678086216360049057372179296543030553902011157846])
def test_SHA3_22(self):
    """SHA3 with size=1 and a huge offset (~2**255) aborts with out-of-gas.

    Dead locals produced by the test generator (price, origin, header) were removed.
    """
    constraints = ConstraintSet()
    world = evm.EVMWorld(constraints)
    address = 0x222222222222222222222222222222222222200
    caller = 0x111111111111111111111111111111111111100
    value = 10000
    bytecode = ' '
    data = 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
    gas = 1000000
    new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
    # push size first so offset ends up on top of the stack
    new_vm._push(1)
    new_vm._push(57896044618658097711785492504343953926634992332820282019728792003956564819952)
    last_exception, last_returned = self._execute(new_vm)
    self.assertEqual(last_exception, 'OOG')
def test_SHA3_23(self):
    """SHA3 with size=1 and a huge offset (~3.6e75) aborts with out-of-gas.

    Dead locals produced by the test generator (price, origin, header) were removed.
    """
    constraints = ConstraintSet()
    world = evm.EVMWorld(constraints)
    address = 0x222222222222222222222222222222222222200
    caller = 0x111111111111111111111111111111111111100
    value = 10000
    bytecode = ' '
    data = 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
    gas = 1000000
    new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
    # push size first so offset ends up on top of the stack
    new_vm._push(1)
    new_vm._push(3618502788666131106986593281521497120414687020801267626233049500247285301263)
    last_exception, last_returned = self._execute(new_vm)
    self.assertEqual(last_exception, 'OOG')
def test_SHA3_24(self):
    """SHA3(offset=16, size=1) over memory[16] == 0x58 succeeds with the reference digest.

    Dead locals produced by the test generator (price, origin, header) were removed.
    """
    constraints = ConstraintSet()
    world = evm.EVMWorld(constraints)
    address = 0x222222222222222222222222222222222222200
    caller = 0x111111111111111111111111111111111111100
    value = 10000
    bytecode = ' '
    data = 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
    gas = 1000000
    new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
    # push size first so offset ends up on top of the stack
    new_vm._push(1)
    new_vm._push(16)
    new_vm.memory[16] = 88
    last_exception, last_returned = self._execute(new_vm)
    self.assertEqual(last_exception, None)
    self.assertEqual(new_vm.pc, 1)
    # reference Keccak-256 digest of the single byte 0x58 ('X')
    self.assertEqual(new_vm.stack, [38468488817986530247887777414678086216360049057372179296543030553902011157846])
def test_SHA3_25(self):
    """SHA3(offset=32, size=1) over memory[32] == 0x58 succeeds with the reference digest.

    Dead locals produced by the test generator (price, origin, header) were removed.
    """
    constraints = ConstraintSet()
    world = evm.EVMWorld(constraints)
    address = 0x222222222222222222222222222222222222200
    caller = 0x111111111111111111111111111111111111100
    value = 10000
    bytecode = ' '
    data = 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
    gas = 1000000
    new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
    # push size first so offset ends up on top of the stack
    new_vm._push(1)
    new_vm._push(32)
    new_vm.memory[32] = 88
    last_exception, last_returned = self._execute(new_vm)
    self.assertEqual(last_exception, None)
    self.assertEqual(new_vm.pc, 1)
    # reference Keccak-256 digest of the single byte 0x58 ('X')
    self.assertEqual(new_vm.stack, [38468488817986530247887777414678086216360049057372179296543030553902011157846])
def test_SHA3_26(self):
    """SHA3(offset=48, size=1) over memory[48] == 0x58 succeeds with the reference digest.

    Dead locals produced by the test generator (price, origin, header) were removed.
    """
    constraints = ConstraintSet()
    world = evm.EVMWorld(constraints)
    address = 0x222222222222222222222222222222222222200
    caller = 0x111111111111111111111111111111111111100
    value = 10000
    bytecode = ' '
    data = 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
    gas = 1000000
    new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
    # push size first so offset ends up on top of the stack
    new_vm._push(1)
    new_vm._push(48)
    new_vm.memory[48] = 88
    last_exception, last_returned = self._execute(new_vm)
    self.assertEqual(last_exception, None)
    self.assertEqual(new_vm.pc, 1)
    # reference Keccak-256 digest of the single byte 0x58 ('X')
    self.assertEqual(new_vm.stack, [38468488817986530247887777414678086216360049057372179296543030553902011157846])
def test_SHA3_27(self):
    """SHA3 with size=1 and a huge offset (~6.1e45) aborts with out-of-gas.

    Dead locals produced by the test generator (price, origin, header) were removed.
    """
    constraints = ConstraintSet()
    world = evm.EVMWorld(constraints)
    address = 0x222222222222222222222222222222222222200
    caller = 0x111111111111111111111111111111111111100
    value = 10000
    bytecode = ' '
    data = 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
    gas = 1000000
    new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
    # push size first so offset ends up on top of the stack
    new_vm._push(1)
    new_vm._push(6089590155545428825848686802984512581899718912)
    last_exception, last_returned = self._execute(new_vm)
    self.assertEqual(last_exception, 'OOG')
def test_SHA3_28(self):
    """SHA3 with a huge size (~2**255) and offset 2**256-1 aborts with out-of-gas.

    Dead locals produced by the test generator (price, origin, header) were removed.
    """
    constraints = ConstraintSet()
    world = evm.EVMWorld(constraints)
    address = 0x222222222222222222222222222222222222200
    caller = 0x111111111111111111111111111111111111100
    value = 10000
    bytecode = ' '
    data = 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
    gas = 1000000
    new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
    # push size first so offset ends up on top of the stack
    new_vm._push(57896044618658097711785492504343953926634992332820282019728792003956564819952)
    new_vm._push(115792089237316195423570985008687907853269984665640564039457584007913129639935)
    last_exception, last_returned = self._execute(new_vm)
    self.assertEqual(last_exception, 'OOG')
def test_SHA3_29(self):
    """SHA3 with a huge size (~2**255) and offset 0 aborts with out-of-gas.

    Dead locals produced by the test generator (price, origin, header) were removed.
    """
    constraints = ConstraintSet()
    world = evm.EVMWorld(constraints)
    address = 0x222222222222222222222222222222222222200
    caller = 0x111111111111111111111111111111111111100
    value = 10000
    bytecode = ' '
    data = 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
    gas = 1000000
    new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
    # push size first so offset ends up on top of the stack
    new_vm._push(57896044618658097711785492504343953926634992332820282019728792003956564819952)
    new_vm._push(0)
    last_exception, last_returned = self._execute(new_vm)
    self.assertEqual(last_exception, 'OOG')
def test_SHA3_30(self):
    """SHA3 with a huge size (~2**255) and offset 1 aborts with out-of-gas.

    Dead locals produced by the test generator (price, origin, header) were removed.
    """
    constraints = ConstraintSet()
    world = evm.EVMWorld(constraints)
    address = 0x222222222222222222222222222222222222200
    caller = 0x111111111111111111111111111111111111100
    value = 10000
    bytecode = ' '
    data = 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
    gas = 1000000
    new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
    # push size first so offset ends up on top of the stack
    new_vm._push(57896044618658097711785492504343953926634992332820282019728792003956564819952)
    new_vm._push(1)
    last_exception, last_returned = self._execute(new_vm)
    self.assertEqual(last_exception, 'OOG')
def test_SHA3_31(self):
    """SHA3 with both size and offset huge (~2**255) aborts with out-of-gas.

    Dead locals produced by the test generator (price, origin, header) were removed.
    """
    constraints = ConstraintSet()
    world = evm.EVMWorld(constraints)
    address = 0x222222222222222222222222222222222222200
    caller = 0x111111111111111111111111111111111111100
    value = 10000
    bytecode = ' '
    data = 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
    gas = 1000000
    new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
    # push size first so offset ends up on top of the stack
    new_vm._push(57896044618658097711785492504343953926634992332820282019728792003956564819952)
    new_vm._push(57896044618658097711785492504343953926634992332820282019728792003956564819952)
    last_exception, last_returned = self._execute(new_vm)
    self.assertEqual(last_exception, 'OOG')
def test_SHA3_32(self):
    """SHA3 with a huge size (~2**255) and a huge offset (~3.6e75) aborts with out-of-gas.

    Dead locals produced by the test generator (price, origin, header) were removed.
    """
    constraints = ConstraintSet()
    world = evm.EVMWorld(constraints)
    address = 0x222222222222222222222222222222222222200
    caller = 0x111111111111111111111111111111111111100
    value = 10000
    bytecode = ' '
    data = 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
    gas = 1000000
    new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
    # push size first so offset ends up on top of the stack
    new_vm._push(57896044618658097711785492504343953926634992332820282019728792003956564819952)
    new_vm._push(3618502788666131106986593281521497120414687020801267626233049500247285301263)
    last_exception, last_returned = self._execute(new_vm)
    self.assertEqual(last_exception, 'OOG')
def test_SHA3_33(self):
    """SHA3 with a huge size (~2**255) and offset 16 aborts with out-of-gas.

    Dead locals produced by the test generator (price, origin, header) were removed.
    """
    constraints = ConstraintSet()
    world = evm.EVMWorld(constraints)
    address = 0x222222222222222222222222222222222222200
    caller = 0x111111111111111111111111111111111111100
    value = 10000
    bytecode = ' '
    data = 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
    gas = 1000000
    new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
    # push size first so offset ends up on top of the stack
    new_vm._push(57896044618658097711785492504343953926634992332820282019728792003956564819952)
    new_vm._push(16)
    last_exception, last_returned = self._execute(new_vm)
    self.assertEqual(last_exception, 'OOG')
def test_SHA3_34(self):
    """SHA3 with a huge size (~2**255) and offset 32 aborts with out-of-gas.

    Dead locals produced by the test generator (price, origin, header) were removed.
    """
    constraints = ConstraintSet()
    world = evm.EVMWorld(constraints)
    address = 0x222222222222222222222222222222222222200
    caller = 0x111111111111111111111111111111111111100
    value = 10000
    bytecode = ' '
    data = 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
    gas = 1000000
    new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
    # push size first so offset ends up on top of the stack
    new_vm._push(57896044618658097711785492504343953926634992332820282019728792003956564819952)
    new_vm._push(32)
    last_exception, last_returned = self._execute(new_vm)
    self.assertEqual(last_exception, 'OOG')
def test_SHA3_35(self):
    """SHA3 with a huge size (~2**255) and offset 48 aborts with out-of-gas.

    Dead locals produced by the test generator (price, origin, header) were removed.
    """
    constraints = ConstraintSet()
    world = evm.EVMWorld(constraints)
    address = 0x222222222222222222222222222222222222200
    caller = 0x111111111111111111111111111111111111100
    value = 10000
    bytecode = ' '
    data = 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
    gas = 1000000
    new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
    # push size first so offset ends up on top of the stack
    new_vm._push(57896044618658097711785492504343953926634992332820282019728792003956564819952)
    new_vm._push(48)
    last_exception, last_returned = self._execute(new_vm)
    self.assertEqual(last_exception, 'OOG')
def test_SHA3_36(self):
    """SHA3 with a huge size (~2**255) and a huge offset (~6.1e45) aborts with out-of-gas.

    Dead locals produced by the test generator (price, origin, header) were removed.
    """
    constraints = ConstraintSet()
    world = evm.EVMWorld(constraints)
    address = 0x222222222222222222222222222222222222200
    caller = 0x111111111111111111111111111111111111100
    value = 10000
    bytecode = ' '
    data = 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
    gas = 1000000
    new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
    # push size first so offset ends up on top of the stack
    new_vm._push(57896044618658097711785492504343953926634992332820282019728792003956564819952)
    new_vm._push(6089590155545428825848686802984512581899718912)
    last_exception, last_returned = self._execute(new_vm)
    self.assertEqual(last_exception, 'OOG')
def test_SHA3_37(self):
    """SHA3 with a huge size (~3.6e75) and offset 2**256-1 aborts with out-of-gas.

    Dead locals produced by the test generator (price, origin, header) were removed.
    """
    constraints = ConstraintSet()
    world = evm.EVMWorld(constraints)
    address = 0x222222222222222222222222222222222222200
    caller = 0x111111111111111111111111111111111111100
    value = 10000
    bytecode = ' '
    data = 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
    gas = 1000000
    new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
    # push size first so offset ends up on top of the stack
    new_vm._push(3618502788666131106986593281521497120414687020801267626233049500247285301263)
    new_vm._push(115792089237316195423570985008687907853269984665640564039457584007913129639935)
    last_exception, last_returned = self._execute(new_vm)
    self.assertEqual(last_exception, 'OOG')
def test_SHA3_38(self):
    """SHA3 with a huge size (~3.6e75) and offset 0 aborts with out-of-gas.

    Dead locals produced by the test generator (price, origin, header) were removed.
    """
    constraints = ConstraintSet()
    world = evm.EVMWorld(constraints)
    address = 0x222222222222222222222222222222222222200
    caller = 0x111111111111111111111111111111111111100
    value = 10000
    bytecode = ' '
    data = 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
    gas = 1000000
    new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
    # push size first so offset ends up on top of the stack
    new_vm._push(3618502788666131106986593281521497120414687020801267626233049500247285301263)
    new_vm._push(0)
    last_exception, last_returned = self._execute(new_vm)
    self.assertEqual(last_exception, 'OOG')
def test_SHA3_39(self):
    """SHA3 with a huge size (~3.6e75) and offset 1 aborts with out-of-gas.

    Dead locals produced by the test generator (price, origin, header) were removed.
    """
    constraints = ConstraintSet()
    world = evm.EVMWorld(constraints)
    address = 0x222222222222222222222222222222222222200
    caller = 0x111111111111111111111111111111111111100
    value = 10000
    bytecode = ' '
    data = 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
    gas = 1000000
    new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
    # push size first so offset ends up on top of the stack
    new_vm._push(3618502788666131106986593281521497120414687020801267626233049500247285301263)
    new_vm._push(1)
    last_exception, last_returned = self._execute(new_vm)
    self.assertEqual(last_exception, 'OOG')
def test_SHA3_40(self):
#Make the constraint store
constraints = ConstraintSet()
#make the ethereum world state
world = evm.EVMWorld(constraints)
address=0x222222222222222222222222222222222222200
| |
# Source repository: carderne/descarteslabs-python
import pytest
import unittest
import textwrap
from copy import deepcopy
from datetime import datetime
from enum import Enum
from ..catalog_base import CatalogObject
from ..attributes import (
Attribute,
Timestamp,
EnumAttribute,
utc,
MappingAttribute,
ListAttribute,
DocumentState,
Resolution,
File,
AttributeValidationError,
ExtraPropertiesAttribute,
)
from ..band import BandType
class CountToThree(str, Enum):
    """Small str-backed enum used as the EnumAttribute target in these tests."""
    ONE = "One"
    TWO = "Two"
    THREE = "Three"
class Nested(MappingAttribute):
    """Inner mapping exercising a plain, a timestamp, and an enum attribute."""
    foo = Attribute()  # generic attribute
    dt = Timestamp(mutable=False)  # timestamp that cannot be mutated after set
    en = EnumAttribute(CountToThree)  # value restricted to CountToThree members
class Mapping(MappingAttribute):
    """Outer mapping combining a plain attribute with a Nested sub-mapping."""
    bar = Attribute()
    nested = Nested()
class FakeCatalogObject(CatalogObject):
    """Minimal catalog object exposing mapping and list attributes for the tests."""
    mapping = Mapping()
    listmapping = ListAttribute(Mapping)
    listattribute = ListAttribute(Attribute)
class TestAttributes(unittest.TestCase):
def test_immutabletimestamp(self):
    """Readonly Timestamp: None passthrough, ISO-string parsing to UTC, ISO serialization."""
    date = Timestamp(readonly=True)
    assert date.deserialize(None) is None
    assert (
        date.deserialize("2019-02-01T00:00:00.0000Z", validate=False).tzinfo == utc
    )
    assert date.deserialize(
        "2019-02-01T00:00:00.0000Z", validate=False
    ) == datetime(2019, 2, 1, tzinfo=utc)
    date = Timestamp(readonly=True)
    # naive datetime inputs come back tagged with UTC
    assert date.deserialize(
        datetime(2013, 12, 31, 23, 59, 59), validate=False
    ) == datetime(2013, 12, 31, 23, 59, 59, tzinfo=utc)
    value = date.deserialize(datetime(2013, 12, 31, 23, 59, 59), validate=False)
    assert date.serialize(value) == "2013-12-31T23:59:59+00:00"
def test_mutable_timestamp(self):
    """Mutable Timestamp only deserializes to datetime when set with validate=False."""
    mutable_date = Timestamp()
    assert mutable_date.deserialize(None) is None
    assert (
        mutable_date.deserialize("2019-02-01T00:00:00.0000Z", validate=False).tzinfo
        == utc
    )
    class TimeObj(CatalogObject):
        date = Timestamp()
    # does not deserialize when unsaved
    obj = TimeObj(id="test-date", date="06/02/2019")
    assert obj.date == "06/02/2019"
    # does not deserialize when modified from unsaved
    obj.date = "Monday, June 2 2019"
    assert not isinstance(obj.date, datetime)
    assert obj.serialize()["date"] == "Monday, June 2 2019"
    # deserializes to datetime when validate=False
    obj._attribute_types["date"].__set__(
        obj, "2019-06-02T00:00:00.0000Z", validate=False
    )
    assert isinstance(obj.date, datetime)
    # does not deserialize when modified from saved
    obj.date = "Monday, June 2 2019"
    assert not isinstance(obj.date, datetime)
    obj.date = None
    assert obj.date is None
def test_datetime_invalid(self):
    """Deserializing a malformed timestamp string with validate=False must not raise."""
    attr = Timestamp(readonly=True)
    attr.deserialize("123439", validate=False)
def test_enum_attribute(self):
    """EnumAttribute round-trips valid enum string values unchanged."""
    attr = EnumAttribute(BandType)
    assert attr.deserialize("spectral") == "spectral"
    assert attr.serialize("spectral") == "spectral"
    # the str-backed enum member compares equal to its value
    assert BandType.SPECTRAL == "spectral"
def test_enum_attribute_invalid(self):
    """Deserializing a value outside the enum raises ValueError."""
    enum_attr = EnumAttribute(BandType)
    with pytest.raises(ValueError):
        enum_attr.deserialize("foobar")
def test_enum_attribute_new(self):
    """EnumAttribute.__set__ only enforces enum membership when validate=True."""
    owner = FakeCatalogObject(id="id")
    enum_attr = EnumAttribute(BandType)
    # attach the attribute to the owner manually so __set__ can be driven directly
    owner._attribute_types["something"] = enum_attr
    enum_attr._attribute_name = "something"
    # validate=False: an invalid value is accepted
    enum_attr.__set__(owner, "foobar", False)
    with pytest.raises(ValueError):
        enum_attr.__set__(owner, "foobar", True)
def test_mapping_attributes(self):
    """Nested mapping attributes: value access, identity preservation, repr, and kwargs-only init."""
    nested = Nested(foo="foo", dt="2019-02-01T00:00:00.0000Z", validate=False)
    mapping = Mapping(nested=nested)
    model_object = FakeCatalogObject(id="id", mapping=mapping)
    assert model_object.mapping.nested.foo == "foo"
    assert model_object.mapping.nested.dt == datetime(2019, 2, 1, tzinfo=utc)
    # the exact objects passed in are kept, not copied
    assert model_object.mapping is mapping
    assert model_object.mapping.nested is nested
    m_repr = repr(model_object.mapping)
    match_str = """\
Mapping:
  nested: Nested:
    dt: 2019-02-01 00:00:00+00:00
    foo: foo"""
    assert m_repr.strip("\n") == textwrap.dedent(match_str)
    # mappings reject positional arguments
    with pytest.raises(TypeError):
        Mapping("positionals not accepted")
def test_mapping_change_tracking(self):
    """Mutating a shared mapping marks the owning model modified; reassignment detaches the old one."""
    nested = Nested(foo="foo", dt="2019-02-01T00:00:00.0000Z")
    mapping = Mapping(nested=nested)
    model_object = FakeCatalogObject(id="id", mapping=mapping, _saved=True)
    assert not model_object.is_modified
    # changes to mapping objects not accessed from the model_object
    # affect model state
    nested.foo = "blah"
    assert model_object.is_modified
    assert model_object.mapping.nested.foo == "blah"
    # assigning a new attribute value to the model does propagate state changes
    new_mapping = Mapping(
        nested=Nested(foo="bar", dt=datetime(2019, 3, 1, tzinfo=utc))
    )
    model_object.mapping = new_mapping
    assert model_object.is_modified
    assert model_object.mapping.nested.foo == "bar"
    assert model_object.mapping.nested.dt == datetime(2019, 3, 1, tzinfo=utc)
    assert model_object.mapping is new_mapping
    # the replaced mapping no longer references the model
    assert len(mapping._model_objects) == 0
    def test_mapping_references(self):
        """Repeated attribute access returns the same mapping instance."""
        nested = Nested(foo="foo", dt="2019-02-01T00:00:00.0000Z")
        mapping = Mapping(nested=nested, bar="bar")
        model_object = FakeCatalogObject(id="id", mapping=mapping, _saved=True)
        assert not model_object.is_modified
        # once a model mapping attribute is accessed, the reference is reused
        mapping1 = model_object.mapping
        mapping2 = model_object.mapping
        assert mapping1 is mapping2
        assert mapping1.nested is mapping2.nested
        # changes propagate to all references
        assert mapping1.bar == "bar"
        assert mapping2.bar == "bar"
        mapping1.bar = "baz"
        assert mapping1.bar == "baz"
        assert mapping2.bar == "baz"
        assert model_object.mapping.bar == "baz"
def test_mapping_multiple_assignment(self):
nested = Nested(foo="foo", dt="2019-02-01T00:00:00.0000Z")
mapping = Mapping(nested=nested, bar="bar")
model_object1 = FakeCatalogObject(id="id", mapping=mapping, _saved=True)
model_object2 = FakeCatalogObject(id="id", mapping=mapping, _saved=True)
assert not model_object1.is_modified
assert not model_object2.is_modified
# changing 1 reference propagates to all referencing objects
model_object1.mapping.bar = "baz"
assert model_object1.is_modified
assert model_object2.is_modified
def test_mapping_nested_change_tracking(self):
nested = Nested(foo="foo", dt="2019-02-01T00:00:00.0000Z")
mapping = Mapping(nested=nested)
model_object = FakeCatalogObject(id="id", mapping=mapping, _saved=True)
assert not model_object.is_modified
# state changes at any level propagate the change back to the model
model_object.mapping.nested.foo = "baz"
assert model_object.mapping.nested.foo == "baz"
assert model_object.is_modified
assert "mapping" in model_object._modified
def test_mapping_serialization(self):
nested = Nested(foo="foo", dt="2019-02-01T00:00:00.0000Z", validate=False)
mapping = Mapping(nested=nested)
model_object = FakeCatalogObject(id="id", mapping=mapping)
serialized = model_object.serialize(modified_only=True)
assert serialized == {
"mapping": {"nested": {"foo": "foo", "dt": "2019-02-01T00:00:00+00:00"}}
}
assert model_object._attributes is not serialized
    def test_mapping_deserialization(self):
        """Validation of nested values depends on the data's origin (_saved)."""
        # Creation with valid enum should be fine
        model_object = FakeCatalogObject(id="id", mapping={"nested": {"en": "One"}})
        mapping = model_object.mapping
        nested = mapping.nested
        assert nested.en == "One"
        # Creation with invalid enum with values from server should be fine
        # (_saved=True marks the data as server-supplied, skipping validation)
        model_object = FakeCatalogObject(
            id="id", mapping={"nested": {"en": "Four"}}, _saved=True
        )
        mapping = model_object.mapping
        nested = mapping.nested
        assert nested.en == "Four"
        # Creation with invalid enum causes exception
        with pytest.raises(ValueError):
            FakeCatalogObject(id="id", mapping={"nested": {"en": "Four"}})
        # Creation with undefined attribute from server should be fine
        model_object = FakeCatalogObject(id="id", mapping={"baz": "qux"}, _saved=True)
        mapping = model_object.mapping
        # unknown server-side keys are silently dropped rather than stored
        assert "baz" not in mapping._attributes
        # Creation with undefined attribute causes exception
        with pytest.raises(AttributeError):
            FakeCatalogObject(id="id", mapping={"baz": "qux"})
    def test_mapping_equality(self):
        """Mappings compare by type and attribute values, recursively."""
        # differing mapping types are never equal
        assert Mapping() != Nested()
        assert Mapping() == Mapping()
        assert Mapping(bar="bar") == Mapping(bar="bar")
        # an attribute missing on either side breaks equality
        assert Mapping(bar="bar") != Mapping()
        assert Mapping() != Mapping(bar="bar")
        # differing values break equality
        assert Mapping(bar="bar1") != Mapping(bar="bar2")
        # nested mappings are compared recursively
        assert Mapping(bar="bar", nested=Nested(foo="foo")) == Mapping(
            bar="bar", nested=Nested(foo="foo")
        )
        assert Mapping(bar="bar", nested=Nested(foo="foo1")) != Mapping(
            bar="bar", nested=Nested(foo="foo2")
        )
    def test_mapping_hash(self):
        """Mappings are mutable and therefore unhashable."""
        with pytest.raises(TypeError):
            hash(Mapping())
def test_list_attributes(self):
nested1 = Nested(foo="zap", dt="2019-02-01T00:00:00.0000Z", validate=False)
nested2 = Nested(foo="zip", dt="2019-02-02T00:00:00.0000Z", validate=False)
mapping1 = Mapping(nested=nested1)
mapping2 = Mapping(nested=nested2)
model_object = FakeCatalogObject(
id="id", listmapping=[mapping1, mapping2], listattribute=[12]
)
assert model_object.listmapping[0].nested.foo == "zap"
assert model_object.listmapping[1].nested.foo == "zip"
assert model_object.listmapping[0].nested.dt == datetime(2019, 2, 1, tzinfo=utc)
assert model_object.listmapping[1].nested.dt == datetime(2019, 2, 2, tzinfo=utc)
assert model_object.listmapping is not [mapping1, mapping2]
assert model_object.listmapping[0] is mapping1
assert model_object.listattribute[0] == 12
m_repr = repr(model_object.listmapping)
match_str = """\
[Mapping:
nested: Nested:
dt: 2019-02-01 00:00:00+00:00
foo: zap, Mapping:
nested: Nested:
dt: 2019-02-02 00:00:00+00:00
foo: zip]"""
assert m_repr.strip("\n") == textwrap.dedent(match_str)
    def test_listattribute_change_tracking(self):
        """Items in a ListAttribute propagate modifications like scalar mappings."""
        nested1 = Nested(foo="zap", dt="2019-02-01T00:00:00.0000Z", validate=False)
        nested2 = Nested(foo="zip", dt="2019-02-02T00:00:00.0000Z", validate=False)
        mapping1 = Mapping(nested=nested1)
        mapping2 = Mapping(nested=nested2)
        model_object = FakeCatalogObject(
            id="id", listmapping=[mapping1, mapping2], _saved=True
        )
        assert not model_object.is_modified
        # references to already instantiated objects are carried forward
        assert model_object.listmapping[0] is mapping1
        assert model_object.listmapping[1] is mapping2
        # changes to references not accessed from attribute still propagate changes
        nested1.foo = "zop"
        assert model_object.is_modified
        assert model_object.listmapping[0].nested.foo == "zop"
        # assigning a new attribute value to the model does propagate state changes
        new_mapping = Mapping(
            nested=Nested(foo="meep", dt=datetime(2019, 3, 1, tzinfo=utc))
        )
        model_object.listmapping = [new_mapping]
        assert model_object.is_modified
        assert model_object.listmapping[0].nested.foo == "meep"
        assert model_object.listmapping[0].nested.dt == datetime(2019, 3, 1, tzinfo=utc)
    def test_listattribute_deserialization(self):
        """List items are validated element-by-element unless server-supplied."""
        # Creation with valid enum should be fine
        model_object = FakeCatalogObject(
            id="id",
            listmapping=[{"nested": {"en": "One"}}, {"nested": {"en": "Three"}}],
        )
        listmapping = model_object.listmapping
        nested1 = listmapping[0].nested
        assert nested1.en == "One"
        nested2 = listmapping[1].nested
        assert nested2.en == "Three"
        # Creation with invalid enum with values from server should be fine
        # (_saved=True marks the data as server-supplied, skipping validation)
        model_object = FakeCatalogObject(
            id="id", listmapping=[{"nested": {"en": "Four"}}], _saved=True
        )
        listmapping = model_object.listmapping
        nested = listmapping[0].nested
        assert nested.en == "Four"
        # Creation with invalid enum causes exception
        with pytest.raises(ValueError):
            FakeCatalogObject(id="id", listmapping=[{"nested": {"en": "Four"}}])
    def test_listattribute_container_methods(self):
        """ListAttribute supports list-style mutation, slicing and popping."""
        nested1 = Nested(foo="zap", dt="2019-02-01T00:00:00.0000Z", validate=False)
        nested2 = Nested(foo="zip", dt="2019-02-02T00:00:00.0000Z", validate=False)
        mapping1 = Mapping(nested=nested1)
        mapping2 = Mapping(nested=nested2)
        model_object1 = FakeCatalogObject(
            id="id1",
            listmapping=[mapping1, mapping2],
            listattribute=["hi"],
            _saved=True,
        )
        model_object2 = FakeCatalogObject(
            id="id2",
            listmapping=[mapping1, mapping2],
            listattribute=["hi"],
            _saved=True,
        )
        assert model_object1.listmapping == model_object2.listmapping
        assert model_object1.listattribute == model_object2.listattribute
        # append on one model must not leak into the other model's list
        model_object1.listmapping.append(Mapping(nested=nested1))
        model_object1.listattribute.append("hello")
        assert model_object1.listmapping != model_object2.listmapping
        assert model_object1.listattribute != model_object2.listattribute
        assert len(model_object1.listmapping) == 3
        assert len(model_object1.listattribute) == 2
        sliced_list = model_object1.listmapping[1:]
        assert len(sliced_list) == 2
        assert sliced_list[0] == mapping2
        # since model_object1 and model_object2 have different ListAttribute
        # instances, one should be modified, and the other not
        # they should still retain references to contained MappingAttributes though!
        assert model_object1.state == DocumentState.MODIFIED
        assert model_object2.state == DocumentState.SAVED
        assert model_object1.listmapping[0] is model_object2.listmapping[0]
        popped_attr = model_object1.listattribute.pop()
        popped_mapping = model_object1.listmapping.pop()
        assert popped_attr == "hello"
        assert popped_mapping == mapping1
        assert len(model_object1.listmapping) == 2
        assert len(model_object1.listattribute) == 1
def test_listattribute_delegate_methods(self):
nested1 = Nested(foo="zap", dt="2019-02-01T00:00:00.0000Z", validate=False)
nested2 = Nested(foo="zip", dt="2019-02-02T00:00:00.0000Z", validate=False)
mapping1 = Mapping(nested=nested1)
mapping2 = Mapping(nested=nested2)
model_object = FakeCatalogObject(
id="id1",
listmapping=[mapping1, mapping2],
listattribute=["hi", "bye"],
_saved=True,
)
# magigmethods
la = model_object.listattribute
map_la = model_object.listmapping
assert la + [2] == ["hi", "bye", 2]
assert "hi" in la
assert la * 2 == ["hi", "bye", "hi", "bye"]
assert list(iter(la)) == ["hi", "bye"]
assert list(reversed(la)) == ["bye", "hi"]
assert map_la + [dict(bar="baz")] == [mapping1, mapping2, Mapping(bar="baz")]
assert mapping1 in map_la
assert map_la * 2 == [mapping1, mapping2, mapping1, mapping2]
assert list(iter(map_la)) == [mapping1, mapping2]
assert list(reversed(map_la)) == | |
ID of the compute node.
:param str node_state: State of the compute node. Values are idle, running, preparing, unusable, leaving and preempted.
:param float port: SSH port number of the node.
:param str private_ip_address: Private IP address of the compute node.
:param str public_ip_address: Public IP address of the compute node.
:param str run_id: ID of the Experiment running on the node, if any else null.
"""
pulumi.set(__self__, "node_id", node_id)
pulumi.set(__self__, "node_state", node_state)
pulumi.set(__self__, "port", port)
pulumi.set(__self__, "private_ip_address", private_ip_address)
pulumi.set(__self__, "public_ip_address", public_ip_address)
pulumi.set(__self__, "run_id", run_id)
    @property
    @pulumi.getter(name="nodeId")
    def node_id(self) -> str:
        """
        ID of the compute node.
        """
        # Generated accessor mapping the camelCase wire key 'nodeId'.
        return pulumi.get(self, "node_id")
    @property
    @pulumi.getter(name="nodeState")
    def node_state(self) -> str:
        """
        State of the compute node. Values are idle, running, preparing, unusable, leaving and preempted.
        """
        # Generated accessor mapping the camelCase wire key 'nodeState'.
        return pulumi.get(self, "node_state")
    @property
    @pulumi.getter
    def port(self) -> float:
        """
        SSH port number of the node.
        """
        # NOTE: typed float by the API schema even though ports are integral.
        return pulumi.get(self, "port")
    @property
    @pulumi.getter(name="privateIpAddress")
    def private_ip_address(self) -> str:
        """
        Private IP address of the compute node.
        """
        # Generated accessor mapping the camelCase wire key 'privateIpAddress'.
        return pulumi.get(self, "private_ip_address")
    @property
    @pulumi.getter(name="publicIpAddress")
    def public_ip_address(self) -> str:
        """
        Public IP address of the compute node.
        """
        # Generated accessor mapping the camelCase wire key 'publicIpAddress'.
        return pulumi.get(self, "public_ip_address")
    @property
    @pulumi.getter(name="runId")
    def run_id(self) -> str:
        """
        ID of the Experiment running on the node, if any else null.
        """
        # Generated accessor mapping the camelCase wire key 'runId'.
        return pulumi.get(self, "run_id")
@pulumi.output_type
class AmlComputeResponse(dict):
    """
    An Azure Machine Learning compute.
    """
    # NOTE: Pulumi-generated output type. It behaves like a dict keyed by the
    # camelCase wire names, with snake_case property getters layered on top.
    @staticmethod
    def __key_warning(key: str):
        # Warn when callers index with a camelCase wire key instead of the
        # corresponding snake_case property getter.
        suggest = None
        if key == "computeType":
            suggest = "compute_type"
        elif key == "isAttachedCompute":
            suggest = "is_attached_compute"
        elif key == "provisioningErrors":
            suggest = "provisioning_errors"
        elif key == "provisioningState":
            suggest = "provisioning_state"
        elif key == "computeLocation":
            suggest = "compute_location"
        elif key == "resourceId":
            suggest = "resource_id"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in AmlComputeResponse. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        # Dict-style access with a deprecation-style warning for wire keys.
        AmlComputeResponse.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        # Dict .get() with the same wire-key warning as __getitem__.
        AmlComputeResponse.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 compute_type: str,
                 is_attached_compute: bool,
                 provisioning_errors: Sequence['outputs.MachineLearningServiceErrorResponse'],
                 provisioning_state: str,
                 compute_location: Optional[str] = None,
                 description: Optional[str] = None,
                 properties: Optional['outputs.AmlComputeResponseProperties'] = None,
                 resource_id: Optional[str] = None):
        """
        An Azure Machine Learning compute.
        :param str compute_type: The type of compute
               Expected value is 'AmlCompute'.
        :param bool is_attached_compute: Indicating whether the compute was provisioned by user and brought from outside if true, or machine learning service provisioned it if false.
        :param Sequence['MachineLearningServiceErrorResponse'] provisioning_errors: Errors during provisioning
        :param str provisioning_state: The provision state of the cluster. Valid values are Unknown, Updating, Provisioning, Succeeded, and Failed.
        :param str compute_location: Location for the underlying compute
        :param str description: The description of the Machine Learning compute.
        :param 'AmlComputeResponseProperties' properties: AML Compute properties
        :param str resource_id: ARM resource id of the underlying compute
        """
        # compute_type is a discriminator fixed to 'AmlCompute' regardless of
        # the argument (the parameter exists for signature compatibility).
        pulumi.set(__self__, "compute_type", 'AmlCompute')
        pulumi.set(__self__, "is_attached_compute", is_attached_compute)
        pulumi.set(__self__, "provisioning_errors", provisioning_errors)
        pulumi.set(__self__, "provisioning_state", provisioning_state)
        # Optional fields are only stored when explicitly provided.
        if compute_location is not None:
            pulumi.set(__self__, "compute_location", compute_location)
        if description is not None:
            pulumi.set(__self__, "description", description)
        if properties is not None:
            pulumi.set(__self__, "properties", properties)
        if resource_id is not None:
            pulumi.set(__self__, "resource_id", resource_id)

    @property
    @pulumi.getter(name="computeType")
    def compute_type(self) -> str:
        """
        The type of compute
        Expected value is 'AmlCompute'.
        """
        return pulumi.get(self, "compute_type")

    @property
    @pulumi.getter(name="isAttachedCompute")
    def is_attached_compute(self) -> bool:
        """
        Indicating whether the compute was provisioned by user and brought from outside if true, or machine learning service provisioned it if false.
        """
        return pulumi.get(self, "is_attached_compute")

    @property
    @pulumi.getter(name="provisioningErrors")
    def provisioning_errors(self) -> Sequence['outputs.MachineLearningServiceErrorResponse']:
        """
        Errors during provisioning
        """
        return pulumi.get(self, "provisioning_errors")

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> str:
        """
        The provision state of the cluster. Valid values are Unknown, Updating, Provisioning, Succeeded, and Failed.
        """
        return pulumi.get(self, "provisioning_state")

    @property
    @pulumi.getter(name="computeLocation")
    def compute_location(self) -> Optional[str]:
        """
        Location for the underlying compute
        """
        return pulumi.get(self, "compute_location")

    @property
    @pulumi.getter
    def description(self) -> Optional[str]:
        """
        The description of the Machine Learning compute.
        """
        return pulumi.get(self, "description")

    @property
    @pulumi.getter
    def properties(self) -> Optional['outputs.AmlComputeResponseProperties']:
        """
        AML Compute properties
        """
        return pulumi.get(self, "properties")

    @property
    @pulumi.getter(name="resourceId")
    def resource_id(self) -> Optional[str]:
        """
        ARM resource id of the underlying compute
        """
        return pulumi.get(self, "resource_id")
@pulumi.output_type
class AmlComputeResponseProperties(dict):
"""
AML Compute properties
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "allocationState":
suggest = "allocation_state"
elif key == "allocationStateTransitionTime":
suggest = "allocation_state_transition_time"
elif key == "currentNodeCount":
suggest = "current_node_count"
elif key == "nodeStateCounts":
suggest = "node_state_counts"
elif key == "targetNodeCount":
suggest = "target_node_count"
elif key == "enableNodePublicIp":
suggest = "enable_node_public_ip"
elif key == "isolatedNetwork":
suggest = "isolated_network"
elif key == "osType":
suggest = "os_type"
elif key == "remoteLoginPortPublicAccess":
suggest = "remote_login_port_public_access"
elif key == "scaleSettings":
suggest = "scale_settings"
elif key == "userAccountCredentials":
suggest = "user_account_credentials"
elif key == "virtualMachineImage":
suggest = "virtual_machine_image"
elif key == "vmPriority":
suggest = "vm_priority"
elif key == "vmSize":
suggest = "vm_size"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in AmlComputeResponseProperties. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
AmlComputeResponseProperties.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
AmlComputeResponseProperties.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
allocation_state: str,
allocation_state_transition_time: str,
current_node_count: int,
errors: Sequence['outputs.MachineLearningServiceErrorResponse'],
node_state_counts: 'outputs.NodeStateCountsResponse',
target_node_count: int,
enable_node_public_ip: Optional[bool] = None,
isolated_network: Optional[bool] = None,
os_type: Optional[str] = None,
remote_login_port_public_access: Optional[str] = None,
scale_settings: Optional['outputs.ScaleSettingsResponse'] = None,
subnet: Optional['outputs.ResourceIdResponse'] = None,
user_account_credentials: Optional['outputs.UserAccountCredentialsResponse'] = None,
virtual_machine_image: Optional['outputs.VirtualMachineImageResponse'] = None,
vm_priority: Optional[str] = None,
vm_size: Optional[str] = None):
"""
AML Compute properties
:param str allocation_state: Allocation state of the compute. Possible values are: steady - Indicates that the compute is not resizing. There are no changes to the number of compute nodes in the compute in progress. A compute enters this state when it is created and when no operations are being performed on the compute to change the number of compute nodes. resizing - Indicates that the compute is resizing; that is, compute nodes are being added to or removed from the compute.
:param str allocation_state_transition_time: The time at which the compute entered its current allocation state.
:param int current_node_count: The number of compute nodes currently assigned to the compute.
:param Sequence['MachineLearningServiceErrorResponse'] errors: Collection of errors encountered by various compute nodes during node setup.
:param 'NodeStateCountsResponse' node_state_counts: Counts of various node states on the compute.
:param int target_node_count: The target number of compute nodes for the compute. If the allocationState is resizing, this property denotes the target node count for the ongoing resize operation. If the allocationState is steady, this property denotes the target node count for the previous resize operation.
:param bool enable_node_public_ip: Enable or disable node public IP address provisioning. Possible values are: Possible values are: true - Indicates that the compute nodes will have public IPs provisioned. false - Indicates that the compute nodes will have a private endpoint and no public IPs.
:param bool isolated_network: Network is isolated or not
:param str os_type: Compute OS Type
:param str remote_login_port_public_access: State of the public SSH port. Possible values are: Disabled - Indicates that the public ssh port is closed on all nodes of the cluster. Enabled - Indicates that the public ssh port is open on all nodes of the cluster. NotSpecified - Indicates that the public ssh port is closed on all nodes of the cluster if VNet is defined, else is open all public nodes. It can be default only during cluster creation time, after creation it will be either enabled or disabled.
:param 'ScaleSettingsResponse' scale_settings: Scale settings for AML Compute
:param 'ResourceIdResponse' subnet: Virtual network subnet resource ID the compute nodes belong to.
:param 'UserAccountCredentialsResponse' user_account_credentials: Credentials for an administrator user account that will be created on each compute node.
:param 'VirtualMachineImageResponse' virtual_machine_image: Virtual Machine image for AML Compute - windows only
:param str vm_priority: Virtual Machine priority
:param str vm_size: Virtual Machine Size
"""
pulumi.set(__self__, "allocation_state", allocation_state)
pulumi.set(__self__, "allocation_state_transition_time", allocation_state_transition_time)
pulumi.set(__self__, "current_node_count", current_node_count)
pulumi.set(__self__, "errors", errors)
pulumi.set(__self__, "node_state_counts", node_state_counts)
pulumi.set(__self__, "target_node_count", target_node_count)
if enable_node_public_ip is None:
enable_node_public_ip = True
if enable_node_public_ip is not None:
pulumi.set(__self__, "enable_node_public_ip", enable_node_public_ip)
if isolated_network is not None:
pulumi.set(__self__, "isolated_network", isolated_network)
if os_type is None:
os_type = 'Linux'
if os_type is not None:
| |
<gh_stars>10-100
import json
import os
import ui
import requests
import xml.etree.cElementTree
import threading
import ui
import time
import math
import tarfile
import plistlib
import console
import sqlite3
import shutil
import copy
from urllib.parse import urlparse
from os.path import splitext, basename
from objc_util import ns, ObjCClass
from Managers import DBManager, TypeManager
from Utilities import LogThread
from distutils.version import LooseVersion
class Docset(object):
    """Lightweight record describing one docset entry in the browser UI."""

    def __init__(self):
        # displayName: human-readable title; downloaded: local presence flag.
        self.displayName, self.downloaded = '', False
class DocsetManager (object):
    def __init__(self, iconPath, typeIconPath, serverManager):
        """Set up docset folders, the feed catalogue and bookkeeping lists.

        :param iconPath: folder containing docset icons.
        :param typeIconPath: folder containing symbol-type icons.
        :param serverManager: supplies mirror servers for feed downloads.
        """
        self.typeManager = TypeManager.TypeManager(typeIconPath)
        # self.localServer = 'http://localhost/feeds/'
        # Optional local mirror override; None means use serverManager.
        self.localServer = None
        self.docsets = []
        self.downloading = []
        self.updateAvailable = []
        self.docsetFolder = 'Docsets/Standard'
        # Well-known paths inside a .docset bundle (Dash layout).
        self.plistPath = 'Contents/Info.plist'
        self.indexPath = 'Contents/Resources/docSet.dsidx'
        self.iconPath = iconPath
        self.typeIconPath = typeIconPath
        self.headers = {
            'User-Agent': 'PyDoc-Pythonista'
        }
        self.__createDocsetFolder()
        self.docsetFeeds = self.__getDocsetFeeds()
        self.serverManager = serverManager
        # Worker threads are kept referenced so they are not garbage collected.
        self.downloadThreads = []
        self.uiUpdateThreads = []
        self.workThreads = []
        self.lastDocsetGroup = None
        self.createInitialSearchIndexAllDocsets()
def __createDocsetFolder(self):
if not os.path.exists(self.docsetFolder):
os.mkdir(self.docsetFolder)
    def __getDocsetFeeds(self):
        """Load feeds.json and normalize each entry into a feed dict.

        Each dict carries name/feed/icon/url metadata plus an ``image``
        (ui.Image) ready for display in the docset list.
        """
        with open('feeds.json') as json_data:
            data = json.load(json_data)
        feeds = []
        for feed in data:
            f = {'name':self.__getDocsetName(feed['feed']),'detailString':'',
                 'feed':feed['feed'],
                 'iconName':feed['icon'],
                 'isCustom':False,
                 'feedUrl':'http://kapeli.com/feeds/'+feed['feed'],
                 'aliases':feed['aliases'],
                 'hasVersions':feed['hasVersions']}
            # SproutCore is not hosted on the Kapeli mirrors.
            if feed['feed'] == 'SproutCore.xml':
                f['isCustom'] = True
                f['feedUrl'] = 'http://docs.sproutcore.com/feeds/' + feed['feed']
            f['image'] = self.__getIconWithName(feed['icon'])
            feeds.append(f)
        return feeds
    def getOnlineVersions(self, d = None):
        """List the versions available online for a docset group.

        When *d* is None the previously shown group (self.lastDocsetGroup) is
        refreshed; otherwise *d* becomes the remembered group.  Returns docset
        dicts sorted newest-version-first, each with status 'online' or
        'installed'.
        """
        docset = d
        if d == None:
            docset = self.lastDocsetGroup
        else:
            self.lastDocsetGroup = d
        link = docset['feed']
        # SproutCore hosts its own single-version feed.
        # NOTE(review): this branch returns a single {'url', 'version'} dict,
        # unlike the list returned below -- looks copied from
        # __getDownloadLink; confirm callers handle it.
        if link == 'SproutCore.xml':
            data=requests.get('http://docs.sproutcore.com/feeds/' + link).text
            e = xml.etree.ElementTree.fromstring(data)
            version = e.findall('version')[0].text
            for atype in e.findall('url'):
                return {'url': atype.text, 'version':version}
        server = self.serverManager.getDownloadServer(self.localServer)
        data = requests.get(server.url+link).text
        e = xml.etree.ElementTree.fromstring(data)
        urlToUse = ''
        url = ''
        # Pick the mirror URL matching the chosen download server (or the
        # local override when self.localServer is set).
        for atype in e.findall('url'):
            if not self.localServer == None:
                url = self.localServer
                urlToUse = atype.text
            if atype.text.find(server.url) >= 0:
                urlToUse = atype.text
                url = server.url
        versions = e.findall('other-versions')[0].findall('version')
        ret = []
        downloaded = self.getDownloadedDocsets()
        # Track installed copies of this docset; any left over at the end are
        # versions no longer offered online and get appended as 'installed'.
        toCheck = []
        for down in downloaded:
            if down['name'] == docset['name']:
                toCheck.append(down)
        for v in versions:
            baseUrl = url
            onlineVersion = v.find('name').text
            disassembled = urlparse(urlToUse)
            filename, file_ext = splitext(basename(disassembled.path))
            if not baseUrl[-1] == '/':
                baseUrl = baseUrl + '/'
            # Dash mirror convention: zzz/versions/<name>/<version>/<name><ext>
            baseUrl = baseUrl + 'zzz/versions/' + filename + '/' + onlineVersion + '/' + filename + file_ext
            f = copy.copy(docset)
            f['path'] = None
            f['downloadUrl'] = baseUrl
            f['version'] = onlineVersion
            f['status'] = 'online'
            for d in downloaded:
                if d['name'] == f['name'] and d['version'] == f['version']:
                    f['status'] = 'installed'
                    f['path'] = d['path']
                    f['id'] = d['id']
                    toCheck.remove(d)
            ret.append(f)
        for d in toCheck:
            d['status'] = 'installed'
            ret.append(d)
        # NOTE(review): versions sort lexicographically as strings, not
        # numerically -- confirm acceptable for these version labels.
        return sorted(ret, key=lambda x: x['version'], reverse=True)
    def getAvailableDocsets(self):
        """Merge the online feed list with install/update/download state."""
        docsets = self.__getOnlineDocsets()
        # Overlay locally installed metadata onto the online entries.
        for d in self.__getDownloadedDocsets():
            for c in docsets:
                if c['name'] == d['name']:
                    c['status'] = 'installed'
                    c['path'] = d['path']
                    c['id'] = d['id']
                    c['version'] = d['version']
        for d in self.updateAvailable:
            for c in docsets:
                if c['name'] == d['name']:
                    c['status'] = 'Update Available'
        # In-flight downloads override both of the states set above.
        for d in self.__getDownloadingDocsets():
            for c in docsets:
                if c['name'] == d['name']:
                    c['version'] = d['version']
                    c['status'] = d['status']
                    try:
                        c['stats'] = d['stats']
                    except KeyError:
                        # Progress stats not populated yet by the worker.
                        c['stats'] = 'downloading'
        return docsets
    def getDownloadedDocsets(self):
        """Public wrapper around the private DB-backed installed-docset lookup."""
        return self.__getDownloadedDocsets()
    def __docsetFeedToDocset(self, feed):
        # Currently an identity mapping: feed dicts double as docset dicts.
        # Kept as a conversion seam in case the representations ever diverge.
        return feed
    def __getDownloadedDocsets(self):
        """Read installed 'standard' docsets from the local database.

        Returns dicts with name/id/path/image/version plus a hasVersions
        flag looked up from the feed catalogue.
        """
        dbManager = DBManager.DBManager()
        t = dbManager.InstalledDocsetsByType('standard')
        ds = []
        feeds = self.__getDocsetFeeds()
        for d in t:
            # Row layout by index: 0=id, 1=name, 2=relative path, 4=icon
            # name, 5=version -- presumably defined by DBManager's schema;
            # confirm there before reordering columns.
            aa = {}
            aa['name'] = d[1]
            aa['id'] = d[0]
            aa['path'] = os.path.join(os.path.abspath('.'),d[2])
            aa['image'] = self.__getIconWithName(d[4])
            aa['version'] = d[5]
            aa['hasVersions'] = False
            for f in feeds:
                if f['name'] == aa['name']:
                    aa['hasVersions'] = f['hasVersions']
            ds.append(aa)
        return ds
    def __getDownloadingDocsets(self):
        # Downloads in flight are tracked on the shared self.downloading list.
        return self.downloading
def __getOnlineDocsets(self):
feeds = self.__getDocsetFeeds()
onlineDocsets = []
for f in feeds:
obj = self.__docsetFeedToDocset(f)
obj['status'] = 'online'
onlineDocsets.append(obj)
return onlineDocsets
def __getDocsetName(self, feed):
name = feed.replace('.xml','')
name = name.replace('_',' ')
if name == 'NET Framework':
name = '.NET Framework'
return name
    def checkDocsetsForUpdates(self, docsets):
        """Flag installed, non-versioned docsets whose feed has a newer version."""
        console.show_activity('Checking for updates...')
        for d in docsets:
            # Versioned docsets are handled through getOnlineVersions instead.
            if not d['hasVersions'] and d['status'] == 'installed':
                console.show_activity('Checking ' + d['name'] + ' for update...')
                f = self.__getDownloadLink(d['feed'])
                # NOTE(review): LooseVersion comes from distutils, removed in
                # Python 3.12 -- consider packaging.version going forward.
                if LooseVersion(str(d['version']).replace('/','')) < LooseVersion(f['version'].replace('/','')):
                    d['status'] = 'Update Available'
                    d['version'] = f['version']
                    self.updateAvailable.append(d)
    def __getIconWithName(self, name):
        """Return a ui.Image for *name*, falling back to the generic icon."""
        imgPath = os.path.join(os.path.abspath('.'), self.iconPath, name+'.png')
        if not os.path.exists(imgPath):
            # Unknown icon names render the bundled 'Other' placeholder.
            imgPath = os.path.join(os.path.abspath('.'), self.iconPath, 'Other.png')
        return ui.Image.named(imgPath)
    def __checkDocsetCanDownload(self, docset):
        """Return True when *docset* is downloadable here.

        A handful of feeds cannot be fetched (removed upstream, or require
        desktop Dash/Xcode to generate); for those an explanatory alert is
        shown and False is returned.
        """
        cont = True
        feed = docset['feed']
        title = ''
        message = ''
        if feed == 'DOM.xml':
            cont = False
            title = 'DOM Documentation'
            message = 'There is no DOM docset. DOM documentation can be found in the JavaScript docset. Please install the JavaScript docset instead.'
        elif feed == 'RubyMotion.xml':
            cont = False
            title = 'RubyMotion Documentation'
            message = 'RubyMotion had to remove its API documentation due to legal reasons. Please contact the RubyMotion team for more details.\n\nIn the meantime, you can use the Apple API Reference docset instead.'
        elif feed == 'Apple_API_Reference.xml':
            cont = False
            title = 'Apple API Reference'
            message = 'To install the Apple API Reference docset you need to:\n\n1. Use Dash for macOS to install the Apple API Reference docset from Preferences > Downloads\n2. Go to Preferences > Docsets, right click the Apple API Reference docset and select \"Generate iOS Compatible Docset\"\n3. Transfer the resulting docset using the transfer function'
        elif feed == 'Apple_Guides_and_Sample_Code.xml':
            cont = False
            title = 'Apple Guides and Sample Code'
            message = 'To install the Apple Guides and Sample Code docset you need to:\n\n1. Download the docset in Xcode 8\'s Preferences > Components > Documentation\n2.Transfer the resulting docset using the transfer function'
        elif feed == 'OS_X.xml' or feed == 'macOS.xml' or feed == 'watchOS.xml' or feed == 'iOS.xml' or feed == 'tvOS.xml':
            cont = False
            title = 'Apple API Reference'
            name = docset['name']
            message = 'There is no '+name+' docset. The documentation for '+name+' can be found inside the Apple API Reference docset. \n\nTo install the Apple API Reference docset you need to:\n\n1. Use Dash for macOS to install the docset from Preferences > Downloads\n2. Go to Preferences > Docsets, right click the Apple API Reference docset and select \"Generate iOS-compatible Docset\"\n3. Transfer the resulting docset using the transfer function'
        if cont == False:
            # Modal alert; blocks until the user acknowledges.
            console.alert(title, message,'Ok', hide_cancel_button=True)
        return cont
    def downloadDocset(self, docset, action, refresh_main_view):
        """Kick off an asynchronous download of *docset*.

        *action* is a UI-refresh callback invoked on state changes;
        *refresh_main_view* is forwarded through to the install step.
        No-op when the docset is blocked or already downloading.
        """
        cont = self.__checkDocsetCanDownload(docset)
        if cont and not docset in self.downloading:
            self.downloading.append(docset)
            # A fresh download supersedes any pending update notice.
            removeSoon = []
            for d in self.updateAvailable:
                if d['name'] == docset['name']:
                    removeSoon.append(d)
            for d in removeSoon:
                self.updateAvailable.remove(d)
            docset['status'] = 'downloading'
            action()
            workThread = LogThread.LogThread(target=self.__determineUrlAndDownload, args=(docset,action,refresh_main_view,))
            self.workThreads.append(workThread)
            workThread.start()
    def __determineUrlAndDownload(self, docset, action, refresh_main_view):
        """Resolve the download URL, then spawn download and UI-poll threads."""
        docset['stats'] = 'getting download link'
        action()
        # Version-specific entries carry an explicit downloadUrl; otherwise
        # the latest URL/version pair is read from the feed.
        if not 'downloadUrl' in docset.keys():
            data = self.__getDownloadLink(docset['feed'])
            docset['version'] = data['version']
            downloadLink = data['url']
        else:
            downloadLink = docset['downloadUrl']
        downloadThread = LogThread.LogThread(target=self.downloadFile, args=(downloadLink,docset,refresh_main_view,))
        self.downloadThreads.append(downloadThread)
        downloadThread.start()
        # Second thread polls the download thread and refreshes the UI.
        updateThread = LogThread.LogThread(target=self.updateUi, args=(action,downloadThread,))
        self.uiUpdateThreads.append(updateThread)
        updateThread.start()
    def updateUi(self, action, t):
        """Call *action* every 0.5s while thread *t* runs, then once more.

        The trailing call ensures the UI reflects the terminal state.
        """
        while t.is_alive():
            action()
            time.sleep(0.5)
        action()
    def __getDownloadLink(self, link):
        """Return {'url', 'version'} for the feed file *link*.

        Special-cases the self-hosted SproutCore feed; otherwise asks the
        server manager for a mirror and picks the matching URL from the feed.
        """
        if link == 'SproutCore.xml':
            data=requests.get('http://docs.sproutcore.com/feeds/' + link).text
            e = xml.etree.ElementTree.fromstring(data)
            version = e.findall('version')[0].text
            for atype in e.findall('url'):
                return {'url': atype.text, 'version':version}
        server = self.serverManager.getDownloadServer(self.localServer)
        data = requests.get(server.url+link).text
        e = xml.etree.ElementTree.fromstring(data)
        version = e.findall('version')[0].text
        for atype in e.findall('url'):
            # A configured local mirror rewrites the URL to point at it.
            if not self.localServer == None:
                disassembled = urlparse(atype.text)
                filename, file_ext = splitext(basename(disassembled.path))
                url = self.localServer
                if not url[-1] == '/':
                    url = url + '/'
                url = url + filename + file_ext
                return {'url': url, 'version':version}
            if atype.text.find(server.url) >= 0:
                return {'url': atype.text, 'version':version}
        # NOTE(review): falls through returning None when no URL matches the
        # chosen server -- callers do not check for that; confirm intended.
    def downloadFile(self, url, docset, refresh_main_view):
        """Download the archive for *docset* and hand it to the installer."""
        local_filename = self.__downloadFile(url, docset)
        #self.__downloadFile(url+'.tarix', docset)
        docset['status'] = 'waiting for install'
        # NOTE(review): local_filename is None on a non-200 response, which
        # installDocset would then choke on -- confirm upstream guarantees.
        self.installDocset(local_filename, docset, refresh_main_view)
def __downloadFile(self, url, docset):
local_filename = self.docsetFolder+'/'+str(docset['version'].replace('/','_'))+url.split('/')[-1]
r = requests.get(url, headers = self.headers, stream=True)
ret = None
if r.status_code == 200:
ret = local_filename
total_length = r.headers.get('content-length')
dl = 0
last = 0
if os.path.exists(local_filename):
os.remove(local_filename)
with open(local_filename, 'wb') as f:
for chunk in r.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
dl += len(chunk)
f.write(chunk)
if not total_length == None:
done = 100 * dl / int(total_length)
docset['stats'] = str(round(done,2)) + '% ' + str(self.convertSize(dl)) + ' / '+ str(self.convertSize(float(total_length)))
else:
docset['stats'] = str(self.convertSize(dl))
r.close()
return ret
def installDocset(self, filename, docset, refresh_main_view):
    """Extract the downloaded archive, register it in the DB, then index it."""
    extract_location = os.path.join(self.docsetFolder, '_'+docset['name'].replace('/','_'), '_'+docset['version'].replace('/','_'))
    docset['status'] = 'Preparing to install: This might take a while.'
    tar = tarfile.open(filename, 'r:gz')
    # The archive's single top-level directory is the docset bundle itself.
    n = [name for name in tar.getnames() if '/' not in name][0]
    m = os.path.join(extract_location, n)
    # NOTE(review): extractall() without a member filter is vulnerable to
    # path traversal on a malicious archive — consider sanitizing members.
    # track_progress() passes members through while updating the status line.
    tar.extractall(path=extract_location, members = self.track_progress(tar, docset, len(tar.getmembers())))
    tar.close()
    os.remove(filename)
    dbManager = DBManager.DBManager()
    dbManager.DocsetInstalled(docset['name'], m, 'standard', docset['iconName'], docset['version'])
    self.indexDocset(docset, refresh_main_view, m)
def track_progress(self, members, docset, totalFiles):
    """Yield *members* unchanged while publishing install progress.

    Designed to be passed as the `members` argument of
    TarFile.extractall() so docset['status'] tracks extraction.
    """
    for count, member in enumerate(members, start=1):
        percent = 100 * count / totalFiles
        docset['status'] = 'installing: ' + str(round(percent,2)) + '% ' + str(count) + ' / '+ str(totalFiles)
        yield member
def indexDocset(self, docset, refresh_main_view, path):
    """Create or upgrade the docset's searchIndex table, then finish install.

    If no searchIndex table exists, one is built from the Apple/Core-Data
    token tables (ZTOKEN etc.).  Otherwise existing rows are migrated to
    the current canonical type names.  Finally a name index is created
    and postProcess() is invoked.
    """
    docset['status'] = 'indexing'
    indexPath = os.path.join(path, self.indexPath)
    conn = sqlite3.connect(indexPath)
    sql = 'SELECT count(*) FROM sqlite_master WHERE type = \'table\' AND name = \'searchIndex\''
    c = conn.execute(sql)
    data = c.fetchone()
    if int(data[0]) == 0:
        # No Dash-style index yet: create it from the Core Data token tables.
        sql = 'CREATE TABLE searchIndex(rowid INTEGER PRIMARY KEY, name TEXT, type TEXT, path TEXT)'
        c = conn.execute(sql)
        conn.commit()
        sql = 'SELECT f.ZPATH, m.ZANCHOR, t.ZTOKENNAME, ty.ZTYPENAME, t.rowid FROM ZTOKEN t, ZTOKENTYPE ty, ZFILEPATH f, ZTOKENMETAINFORMATION m WHERE ty.Z_PK = t.ZTOKENTYPE AND f.Z_PK = m.ZFILE AND m.ZTOKEN = t.Z_PK ORDER BY t.ZTOKENNAME'
        c = conn.execute(sql)
        data = c.fetchall()
        for t in data:
            # Map the raw token type name to our canonical type name.
            # (t[1], the ZANCHOR column, is fetched but not used here.)
            conn.execute("insert into searchIndex values (?, ?, ?, ?)", (t[4], t[2], self.typeManager.getTypeForName(t[3]).name, t[0] ))
        conn.commit()
    else:
        # searchIndex already present: rewrite any outdated type values.
        sql = 'SELECT rowid, type FROM searchIndex'
        c = conn.execute(sql)
        data = c.fetchall()
        for t in data:
            newType = self.typeManager.getTypeForName(t[1])
            if not newType == None and not newType.name == t[1]:
                conn.execute("UPDATE searchIndex SET type=(?) WHERE rowid = (?)", (newType.name, t[0] ))
        conn.commit()
    # Speed up name lookups.
    # NOTE(review): unlike createInitialSearchIndexAllDocsets this lacks
    # IF NOT EXISTS and will raise if the index already exists — verify.
    indexSql = 'CREATE INDEX ix_searchIndex_name ON searchIndex(name)'
    conn.execute(indexSql)
    conn.close()
    self.postProcess(docset, refresh_main_view)
def createInitialSearchIndexAllDocsets(self):
    """Ensure every downloaded docset's searchIndex has a name index."""
    for docset in self.getDownloadedDocsets():
        dbPath = os.path.join(docset['path'], self.indexPath)
        connection = sqlite3.connect(dbPath)
        connection.execute(
            'CREATE INDEX IF NOT EXISTS ix_searchIndex_name ON searchIndex(name)')
        connection.close()
def postProcess(self, docset, refresh_main_view):
    """Finalize installation: mark the docset installed, then refresh the UI."""
    # The status must be written before the view callback runs so the
    # refreshed UI reads the final state.
    docset['status'] = 'installed'
    refresh_main_view()
def convertSize(self, size):
    """Return *size* (bytes) as a human-readable string, e.g. '1.5 KB'.

    Uses base-1024 units; zero is special-cased as '0B'.
    """
    if (size == 0):
        return '0B'
    units = ("B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB")
    exponent = int(math.floor(math.log(size,1024)))
    scaled = round(size / math.pow(1024,exponent), 2)
    return str(scaled) + ' ' + units[exponent]
def getTypesForDocset(self, docset):
    """Return the distinct entry types present in *docset*'s search index.

    Each distinct `type` value is resolved through the type manager, in
    case-insensitive alphabetical order.
    """
    dbPath = os.path.join(docset['path'], self.indexPath)
    connection = sqlite3.connect(dbPath)
    rows = connection.execute(
        'SELECT type FROM searchIndex GROUP BY type ORDER BY type COLLATE NOCASE'
    ).fetchall()
    connection.close()
    return [self.typeManager.getTypeForName(row[0]) for row in rows]
def getIndexesbyTypeForDocset(self, docset, type):
    """Return all index entries of the given *type* from *docset*'s index.

    Each entry is a dict with keys 'type' (resolved type object), 'name'
    and 'path', ordered case-insensitively by name.  Type objects are
    resolved at most once per distinct type string.
    """
    dbPath = os.path.join(docset['path'], self.indexPath)
    connection = sqlite3.connect(dbPath)
    rows = connection.execute(
        'SELECT type, name, path FROM searchIndex WHERE type = (?) ORDER BY name COLLATE NOCASE',
        (type.name,)
    ).fetchall()
    connection.close()
    resolved = {}
    indexes = []
    for typeName, entryName, entryPath in rows:
        if typeName not in resolved:
            resolved[typeName] = self.typeManager.getTypeForName(typeName)
        indexes.append({'type': resolved[typeName], 'name': entryName, 'path': entryPath})
    return indexes
def getIndexesbyTypeAndNameForDocset(self, docset, typeName, name):
indexes = []
path = docset['path']
indexPath = os.path.join(path, self.indexPath)
conn = sqlite3.connect(indexPath)
sql = 'SELECT type, name, path FROM searchIndex WHERE | |
steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["backgroundcolor"]
@backgroundcolor.setter
def backgroundcolor(self, val):
    # Validation is performed by the plotly property machinery on assignment.
    self["backgroundcolor"] = val
# calendar
# --------
@property
def calendar(self):
    """
    Sets the calendar system to use for `range` and `tick0` if this
    is a date axis.  This does not set the calendar for interpreting
    data on this axis; that is specified in the trace or via the
    global `layout.calendar`.

    The 'calendar' property is an enumeration that may be specified
    as one of:
        ['gregorian', 'chinese', 'coptic', 'discworld',
         'ethiopian', 'hebrew', 'islamic', 'julian', 'mayan',
         'nanakshahi', 'nepali', 'persian', 'jalali', 'taiwan',
         'thai', 'ummalqura']

    Returns
    -------
    Any
    """
    return self["calendar"]
@calendar.setter
def calendar(self, val):
    # Validation is performed by the plotly property machinery on assignment.
    self["calendar"] = val
# categoryarray
# -------------
@property
def categoryarray(self):
    """
    Sets the order in which categories on this axis appear. Only
    has an effect if `categoryorder` is set to "array". Used with
    `categoryorder`.

    The 'categoryarray' property is an array that may be specified
    as a tuple, list, numpy array, or pandas Series.

    Returns
    -------
    numpy.ndarray
    """
    return self["categoryarray"]
@categoryarray.setter
def categoryarray(self, val):
    # Validation/coercion is performed by the plotly property machinery.
    self["categoryarray"] = val
# categoryarraysrc
# ----------------
@property
def categoryarraysrc(self):
    """
    Sets the source reference on Chart Studio Cloud for
    `categoryarray`.

    The 'categoryarraysrc' property must be specified as a string or
    as a plotly.grid_objs.Column object.

    Returns
    -------
    str
    """
    return self["categoryarraysrc"]
@categoryarraysrc.setter
def categoryarraysrc(self, val):
    # Validation is performed by the plotly property machinery on assignment.
    self["categoryarraysrc"] = val
# categoryorder
# -------------
@property
def categoryorder(self):
    """
    Specifies the ordering logic for the case of categorical
    variables. By default, plotly uses "trace", which keeps the
    order present in the supplied data.  *category ascending* /
    *category descending* sort alphanumerically by category name.
    "array" derives the ordering from `categoryarray`; categories
    not found there keep "trace" behaviour and follow the listed
    ones.  *total ascending* / *total descending* sort by the
    numerical order of the values, and the order can similarly be
    determined by the min, max, sum, mean or median of the values.

    The 'categoryorder' property is an enumeration that may be
    specified as one of:
        ['trace', 'category ascending', 'category descending',
         'array', 'total ascending', 'total descending',
         'min ascending', 'min descending', 'max ascending',
         'max descending', 'sum ascending', 'sum descending',
         'mean ascending', 'mean descending', 'median ascending',
         'median descending']

    Returns
    -------
    Any
    """
    return self["categoryorder"]
@categoryorder.setter
def categoryorder(self, val):
    # Validation is performed by the plotly property machinery on assignment.
    self["categoryorder"] = val
# color
# -----
@property
def color(self):
    """
    Sets default for all colors associated with this axis all at
    once: line, font, tick, and grid colors. Grid color is
    lightened by blending this with the plot background. Individual
    pieces can override this.

    The 'color' property is a color and may be specified as:
      - A hex string (e.g. '#ff0000')
      - An rgb/rgba string (e.g. 'rgb(255,0,0)')
      - An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
      - An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
      - A named CSS color (e.g. 'steelblue', 'rebeccapurple')

    Returns
    -------
    str
    """
    return self["color"]
@color.setter
def color(self, val):
    # Validation is performed by the plotly property machinery on assignment.
    self["color"] = val
# dtick
# -----
@property
def dtick(self):
    """
    Sets the step in-between ticks on this axis. Use with `tick0`.
    Must be a positive number, or special strings available to
    "log" and "date" axes.

    For "log" axes, ticks are set every 10^(n*dtick): dtick=1 gives
    1, 10, 100, ...; dtick=2 gives 1, 100, 10000, ...; and
    dtick=log_10(5) (0.69897000433) gives 1, 5, 25, 125, ....
    "L<f>" (f a positive number) gives ticks linearly spaced in
    value (but not position); e.g. `tick0`=0.1, `dtick`="L0.5" puts
    ticks at 0.1, 0.6, 1.1, 1.6, ....  "D1" (all digits) or "D2"
    (only 2 and 5) show powers of 10 plus small digits between;
    `tick0` is ignored for "D1" and "D2".

    For "date" axes, the value is in milliseconds (86400000.0 = one
    day), or "M<n>" with n a positive integer for ticks spaced by n
    months; e.g. `tick0`="2000-01-15" with `dtick`="M3" ticks the
    15th of every third month, and "M48" ticks every 4 years.

    The 'dtick' property accepts values of any type.

    Returns
    -------
    Any
    """
    return self["dtick"]
@dtick.setter
def dtick(self, val):
    # Validation is performed by the plotly property machinery on assignment.
    self["dtick"] = val
# exponentformat
# --------------
@property
def exponentformat(self):
    """
    Determines a formatting rule for the tick exponents. For
    example, consider the number 1,000,000,000. If "none", it
    appears as 1,000,000,000. If "e", 1e+9. If "E", 1E+9. If
    "power", 1x10^9 (with 9 in a super script). If "SI", 1G. If
    "B", 1B.

    The 'exponentformat' property is an enumeration that may be
    specified as one of:
        ['none', 'e', 'E', 'power', 'SI', 'B']

    Returns
    -------
    Any
    """
    return self["exponentformat"]
@exponentformat.setter
def exponentformat(self, val):
    # Validation is performed by the plotly property machinery on assignment.
    self["exponentformat"] = val
# gridcolor
# ---------
@property
def gridcolor(self):
    """
    Sets the color of the grid lines.

    The 'gridcolor' property is a color and may be specified as:
      - A hex string (e.g. '#ff0000')
      - An rgb/rgba string (e.g. 'rgb(255,0,0)')
      - An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
      - An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
      - A named CSS color (e.g. 'steelblue', 'rebeccapurple')

    Returns
    -------
    str
    """
    return self["gridcolor"]
@gridcolor.setter
def gridcolor(self, val):
    # Validation is performed by the plotly property machinery on assignment.
    self["gridcolor"] = val
# gridwidth
# ---------
@property
def gridwidth(self):
"""
Sets the width (in px) of the grid lines.
The 'gridwidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
| |
lbl='제도 용지'),
2600: SParentCategory(d=array([227]), lbl='프랑스'),
2601: SParentCategory(d=array([], dtype=int32), lbl='후미등'),
2602: SParentCategory(d=array([], dtype=int32), lbl='쿨스카프'),
2603: SParentCategory(d=array([], dtype=int32), lbl='워터파크'),
2604: SParentCategory(d=array([], dtype=int32), lbl='기타 홍삼/인삼가공'),
2605: SParentCategory(d=array([], dtype=int32), lbl='밸리/스포츠 댄스복'),
2606: SParentCategory(d=array([227, 362]), lbl='태국(방콕)'),
2607: SParentCategory(d=array([], dtype=int32), lbl='케미'),
2608: SParentCategory(d=array([], dtype=int32), lbl='시그널램프'),
2609: SParentCategory(d=array([], dtype=int32), lbl='제설용품'),
2610: SParentCategory(d=array([], dtype=int32), lbl='차량용 소화기'),
2611: SParentCategory(d=array([], dtype=int32), lbl='민물찌'),
2612: SParentCategory(d=array([], dtype=int32), lbl='쫄면'),
2613: SParentCategory(d=array([], dtype=int32), lbl='기타 퇴치용품'),
2614: SParentCategory(d=array([], dtype=int32), lbl='마우스피스'),
2615: SParentCategory(d=array([], dtype=int32), lbl='호주(기타)'),
2616: SParentCategory(d=array([], dtype=int32), lbl='과즙기'),
2617: SParentCategory(d=array([], dtype=int32), lbl='부품/용품'),
2618: SParentCategory(d=array([], dtype=int32), lbl='야광봉/응원용품'),
2619: SParentCategory(d=array([], dtype=int32), lbl='휴대용 정수기'),
2620: SParentCategory(d=array([], dtype=int32), lbl='천주교용품'),
2621: SParentCategory(d=array([], dtype=int32), lbl='면봉/귀이개'),
2622: SParentCategory(d=array([], dtype=int32), lbl='루어낚시줄'),
2623: SParentCategory(d=array([], dtype=int32), lbl='즉석카메라 액세서리'),
2624: SParentCategory(d=array([], dtype=int32), lbl='차량용 액자'),
2625: SParentCategory(d=array([], dtype=int32), lbl='카포트'),
2626: SParentCategory(d=array([366]), lbl='마늘/생강'),
2627: SParentCategory(d=array([], dtype=int32), lbl='뜨개질'),
2628: SParentCategory(d=array([], dtype=int32), lbl='커튼봉'),
2629: SParentCategory(d=array([], dtype=int32), lbl='엔진/터보튜닝'),
2630: SParentCategory(d=array([], dtype=int32), lbl='비타민/섬유음료'),
2631: SParentCategory(d=array([], dtype=int32), lbl='패션 마스크'),
2632: SParentCategory(d=array([353]), lbl='레몬/자몽'),
2633: SParentCategory(d=array([], dtype=int32), lbl='클라리넷'),
2634: SParentCategory(d=array([], dtype=int32), lbl='팬플릇'),
2635: SParentCategory(d=array([], dtype=int32), lbl='투명 위생마스크'),
2636: SParentCategory(d=array([], dtype=int32), lbl='기타 제도용품'),
2637: SParentCategory(d=array([], dtype=int32), lbl='자전거 로라(트레이너)'),
2638: SParentCategory(d=array([349]), lbl='나물'),
2639: SParentCategory(d=array([], dtype=int32), lbl='기타 피부관리기'),
2640: SParentCategory(d=array([], dtype=int32), lbl='묘목/조경수'),
2641: SParentCategory(d=array([], dtype=int32), lbl='차량 튜닝핸들'),
2642: SParentCategory(d=array([], dtype=int32), lbl='기타 과일'),
2643: SParentCategory(d=array([], dtype=int32), lbl='CD/DVD장'),
2644: SParentCategory(d=array([], dtype=int32), lbl='무스탕장갑'),
2645: SParentCategory(d=array([], dtype=int32), lbl='브라질'),
2646: SParentCategory(d=array([], dtype=int32), lbl='압축선반'),
2647: SParentCategory(d=array([], dtype=int32), lbl='뜰채'),
2648: SParentCategory(d=array([], dtype=int32), lbl='호주(멜버른)'),
2649: SParentCategory(d=array([], dtype=int32), lbl='중고 아이언'),
2650: SParentCategory(d=array([], dtype=int32), lbl='병원의류'),
2651: SParentCategory(d=array([], dtype=int32), lbl='제도판'),
2652: SParentCategory(d=array([], dtype=int32), lbl='비치가디건/니트'),
2653: SParentCategory(d=array([], dtype=int32), lbl='임산부 보호용품'),
2654: SParentCategory(d=array([], dtype=int32), lbl='커피 로스터기'),
2655: SParentCategory(d=array([], dtype=int32), lbl='트럼펫'),
2656: SParentCategory(d=array([], dtype=int32), lbl='목재/패널'),
2657: SParentCategory(d=array([], dtype=int32), lbl='풋패치'),
2658: SParentCategory(d=array([], dtype=int32), lbl='배구용품'),
2659: SParentCategory(d=array([], dtype=int32), lbl='너겟'),
2660: SParentCategory(d=array([], dtype=int32), lbl='바다찌'),
2661: SParentCategory(d=array([311, 334, 377]), lbl='조개'),
2662: SParentCategory(d=array([], dtype=int32), lbl='간호사신발'),
2663: SParentCategory(d=array([], dtype=int32), lbl='헤어소품'),
2664: SParentCategory(d=array([], dtype=int32), lbl='턱시도'),
2665: SParentCategory(d=array([], dtype=int32), lbl='낚시복'),
2666: SParentCategory(d=array([], dtype=int32), lbl='업소용 주방가전 용품'),
2667: SParentCategory(d=array([], dtype=int32), lbl='게장류'),
2668: SParentCategory(d=array([], dtype=int32), lbl='플라이릴'),
2669: SParentCategory(d=array([], dtype=int32), lbl='안경줄'),
2670: SParentCategory(d=array([], dtype=int32), lbl='수입소고기'),
2671: SParentCategory(d=array([299, 331]), lbl='모기향'),
2672: SParentCategory(d=array([], dtype=int32), lbl='진미채'),
2673: SParentCategory(d=array([], dtype=int32), lbl='패스트푸드'),
2674: SParentCategory(d=array([], dtype=int32), lbl='건망고'),
2675: SParentCategory(d=array([], dtype=int32), lbl='냉장고 정리선반'),
2676: SParentCategory(d=array([], dtype=int32), lbl='모터사이클 백미러'),
2677: SParentCategory(d=array([], dtype=int32), lbl='여성용 자전거'),
2678: SParentCategory(d=array([], dtype=int32), lbl='조명 부속품'),
2679: SParentCategory(d=array([], dtype=int32), lbl='바다낚시릴대'),
2680: SParentCategory(d=array([], dtype=int32), lbl='라텍스베개'),
2681: SParentCategory(d=array([], dtype=int32), lbl='비치타월'),
2682: SParentCategory(d=array([], dtype=int32), lbl='비닐봉투 정리함'),
2683: SParentCategory(d=array([], dtype=int32), lbl='기타건어물'),
2684: SParentCategory(d=array([], dtype=int32), lbl='체조용품'),
2685: SParentCategory(d=array([], dtype=int32), lbl='업소용 가구'),
2686: SParentCategory(d=array([], dtype=int32), lbl='방범창/경보기'),
2687: SParentCategory(d=array([], dtype=int32), lbl='싱크대'),
2688: SParentCategory(d=array([], dtype=int32), lbl='렌즈세척기'),
2689: SParentCategory(d=array([], dtype=int32), lbl='PS/2 연장케이블'),
2690: SParentCategory(d=array([], dtype=int32), lbl='저지방/무지방우유'),
2691: SParentCategory(d=array([], dtype=int32), lbl='토끼용품'),
2692: SParentCategory(d=array([], dtype=int32), lbl='비치원피스'),
2693: SParentCategory(d=array([], dtype=int32), lbl='한실/예단세트'),
2694: SParentCategory(d=array([], dtype=int32), lbl='코사지'),
2695: SParentCategory(d=array([], dtype=int32), lbl='페이스 롤러'),
2696: SParentCategory(d=array([], dtype=int32), lbl='에어혼'),
2697: SParentCategory(d=array([], dtype=int32), lbl='폭립'),
2698: SParentCategory(d=array([], dtype=int32), lbl='기타 레저'),
2699: SParentCategory(d=array([], dtype=int32), lbl='기타 필기용품'),
2700: SParentCategory(d=array([], dtype=int32), lbl='인삼/수삼/산삼'),
2701: SParentCategory(d=array([], dtype=int32), lbl='자전거 포스트'),
2702: SParentCategory(d=array([], dtype=int32), lbl='악기관리용품'),
2703: SParentCategory(d=array([], dtype=int32), lbl='스타킹'),
2704: SParentCategory(d=array([], dtype=int32), lbl='선스프레이'),
2705: SParentCategory(d=array([], dtype=int32), lbl='형광등/안정기'),
2706: SParentCategory(d=array([], dtype=int32), lbl='조립식 구조물'),
2707: SParentCategory(d=array([], dtype=int32), lbl='버터'),
2708: SParentCategory(d=array([], dtype=int32), lbl='초코펜/데코토핑'),
2709: SParentCategory(d=array([], dtype=int32), lbl='독일'),
2710: SParentCategory(d=array([], dtype=int32), lbl='중층낚시줄'),
2711: SParentCategory(d=array([], dtype=int32), lbl='땀패드'),
2712: SParentCategory(d=array([], dtype=int32), lbl='안전 휀스'),
2713: SParentCategory(d=array([], dtype=int32), lbl='침대커버세트'),
2714: SParentCategory(d=array([], dtype=int32), lbl='시멘트/용품'),
2715: SParentCategory(d=array([], dtype=int32), lbl='태클박스'),
2716: SParentCategory(d=array([], dtype=int32), lbl='기독교용품'),
2717: SParentCategory(d=array([], dtype=int32), lbl='각도기'),
2718: SParentCategory(d=array([], dtype=int32), lbl='베이스세트'),
2719: SParentCategory(d=array([], dtype=int32), lbl='터키'),
2720: SParentCategory(d=array([], dtype=int32), lbl='낚시모자'),
2721: SParentCategory(d=array([], dtype=int32), lbl='부력조절기'),
2722: SParentCategory(d=array([], dtype=int32), lbl='건새우'),
2723: SParentCategory(d=array([], dtype=int32), lbl='돈까스소스'),
2724: SParentCategory(d=array([], dtype=int32), lbl='클로렐라'),
2725: SParentCategory(d=array([], dtype=int32), lbl='정원/야외등'),
2726: SParentCategory(d=array([], dtype=int32), lbl='자전거 핸들바'),
2727: SParentCategory(d=array([], dtype=int32), lbl='태권도복'),
2728: SParentCategory(d=array([], dtype=int32), lbl='복합식 가습기'),
2729: SParentCategory(d=array([], dtype=int32), lbl='볼링공'),
2730: SParentCategory(d=array([], dtype=int32), lbl='솜/거즈'),
2731: SParentCategory(d=array([], dtype=int32), lbl='다시팩'),
2732: SParentCategory(d=array([], dtype=int32), lbl='슈트리/부츠키퍼'),
2733: SParentCategory(d=array([], dtype=int32), lbl='목검/가검'),
2734: SParentCategory(d=array([], dtype=int32), lbl='기타 데스크용품'),
2735: SParentCategory(d=array([], dtype=int32), lbl='상황버섯'),
2736: SParentCategory(d=array([], dtype=int32), lbl='셔츠'),
2737: SParentCategory(d=array([], dtype=int32), lbl='한복신발'),
2738: SParentCategory(d=array([], dtype=int32), lbl='영국'),
2739: SParentCategory(d=array([], dtype=int32), lbl='드럼'),
2740: SParentCategory(d=array([], dtype=int32), lbl='코코넛오일'),
2741: SParentCategory(d=array([], dtype=int32), lbl='기타보안용품'),
2742: SParentCategory(d=array([], dtype=int32), lbl='차량용 탈취제'),
2743: SParentCategory(d=array([], dtype=int32), lbl='변기 손잡이'),
2744: SParentCategory(d=array([], dtype=int32), lbl='랍스타'),
2745: SParentCategory(d=array([], dtype=int32), lbl='오리고기/훈제오리'),
2746: SParentCategory(d=array([], dtype=int32), lbl='바구니/선물세트'),
2747: SParentCategory(d=array([], dtype=int32), lbl='여드름압출기'),
2748: SParentCategory(d=array([], dtype=int32), lbl='쌈무'),
2749: SParentCategory(d=array([], dtype=int32), lbl='반건조고구마'),
2750: SParentCategory(d=array([], dtype=int32), lbl='파스'),
2751: SParentCategory(d=array([], dtype=int32), lbl='블루투스 헤드셋'),
2752: SParentCategory(d=array([], dtype=int32), lbl='카드게임'),
2753: SParentCategory(d=array([], dtype=int32), lbl='카세트/라디오 액세서리'),
2754: SParentCategory(d=array([], dtype=int32), lbl='낚시보조가방'),
2755: SParentCategory(d=array([], dtype=int32), lbl='악기스탠드'),
2756: SParentCategory(d=array([], dtype=int32), lbl='스노클링'),
2757: SParentCategory(d=array([], dtype=int32), lbl='버티컬'),
2758: SParentCategory(d=array([], dtype=int32), lbl='야외테이블/세트'),
2759: SParentCategory(d=array([], dtype=int32), lbl='아랍에미리트(두바이)'),
2760: SParentCategory(d=array([], dtype=int32), lbl='차량용 옷걸이'),
2761: SParentCategory(d=array([], dtype=int32), lbl='즉석국'),
2762: SParentCategory(d=array([], dtype=int32), lbl='초유'),
2763: SParentCategory(d=array([], dtype=int32), lbl='연비향상용품'),
2764: SParentCategory(d=array([], dtype=int32), lbl='모터싸이클 보호대'),
2765: SParentCategory(d=array([], dtype=int32), lbl='사이드스텝'),
2766: SParentCategory(d=array([], dtype=int32), lbl='면도경'),
2767: SParentCategory(d=array([], dtype=int32), lbl='얼굴축소기/밴드'),
2768: SParentCategory(d=array([], dtype=int32), lbl='스프링/완충기'),
2769: SParentCategory(d=array([], dtype=int32), lbl='저주파치료기'),
2770: SParentCategory(d=array([], dtype=int32), lbl='핸드벨'),
2771: SParentCategory(d=array([], dtype=int32), lbl='거위털/오리털이불솜'),
2772: SParentCategory(d=array([383]), lbl='고추/피망/파프리카'),
2773: SParentCategory(d=array([], dtype=int32), lbl='남성비치웨어'),
2774: SParentCategory(d=array([227]), lbl='태국(파타야)'),
2775: SParentCategory(d=array([], dtype=int32), lbl='렌즈세정액'),
2776: SParentCategory(d=array([], dtype=int32), lbl='서랍레일'),
2777: SParentCategory(d=array([], dtype=int32), lbl='황토매트'),
2778: SParentCategory(d=array([], dtype=int32), lbl='정전기방지 키홀더'),
2779: SParentCategory(d=array([], dtype=int32), lbl='샌프란시스코'),
2780: SParentCategory(d=array([], dtype=int32), lbl='가공란'),
2781: SParentCategory(d=array([], dtype=int32), lbl='한복 잡화'),
2782: SParentCategory(d=array([], dtype=int32), lbl='헤드업디스플레이'),
2783: SParentCategory(d=array([], dtype=int32), lbl='사무용 파티션'),
2784: SParentCategory(d=array([], dtype=int32), lbl='베지푸드'),
2785: SParentCategory(d=array([318, 359, 403]), lbl='카페/음료'),
2786: SParentCategory(d=array([], dtype=int32), lbl='새우/대하'),
2787: SParentCategory(d=array([], dtype=int32), lbl='출산용품 DIY'),
2788: SParentCategory(d=array([], dtype=int32), lbl='멜로디언'),
2789: SParentCategory(d=array([], dtype=int32), lbl='애견신발'),
2790: SParentCategory(d=array([], dtype=int32), lbl='난방기기 주변용품'),
2791: SParentCategory(d=array([], dtype=int32), lbl='차량용 CD바이저'),
2792: SParentCategory(d=array([], dtype=int32), lbl='수질측정기'),
2793: SParentCategory(d=array([], dtype=int32), lbl='모터사이클 가방'),
2794: SParentCategory(d=array([], dtype=int32), lbl='라이트 복원제'),
2795: SParentCategory(d=array([], dtype=int32), lbl='스키/보드 가방'),
2796: SParentCategory(d=array([], dtype=int32), lbl='세안도구'),
2797: SParentCategory(d=array([], dtype=int32), lbl='위생 덧신'),
2798: SParentCategory(d=array([227]), lbl='대만'),
2799: SParentCategory(d=array([], dtype=int32), lbl='초소형/스파이 캠'),
2800: SParentCategory(d=array([], dtype=int32), lbl='캐나다(토론토)'),
2801: SParentCategory(d=array([], dtype=int32), lbl='콩나물/새싹재배기'),
2802: SParentCategory(d=array([], dtype=int32), lbl='캐리어세트'),
2803: SParentCategory(d=array([], dtype=int32), lbl='모터사이클 잠금장치'),
2804: SParentCategory(d=array([], dtype=int32), lbl='현지투어/입장권'),
2805: SParentCategory(d=array([], dtype=int32), lbl='기타 용지'),
2806: SParentCategory(d=array([], dtype=int32), lbl='네온사인'),
2807: SParentCategory(d=array([], dtype=int32), lbl='신생아용 기저귀'),
2808: SParentCategory(d=array([], dtype=int32), lbl='모터사이클 하의'),
2809: SParentCategory(d=array([], dtype=int32), lbl='전자드럼'),
2810: SParentCategory(d=array([], dtype=int32), lbl='일본(기타)'),
2811: SParentCategory(d=array([], dtype=int32), lbl='찜기'),
2812: SParentCategory(d=array([], dtype=int32), lbl='방수/방풍용품'),
2813: SParentCategory(d=array([], dtype=int32), lbl='웨딩 베일'),
2814: SParentCategory(d=array([], dtype=int32), lbl='자전거/인라인 가방'),
2815: SParentCategory(d=array([], dtype=int32), lbl='유부'),
2816: SParentCategory(d=array([], dtype=int32), lbl='흙/자갈/모래'),
2817: SParentCategory(d=array([], dtype=int32), lbl='실내 분수대'),
2818: SParentCategory(d=array([], dtype=int32), lbl='초코우유/초코음료'),
2819: SParentCategory(d=array([], dtype=int32), lbl='유럽(기타)'),
2820: SParentCategory(d=array([], dtype=int32), lbl='밑밥주걱'),
2821: SParentCategory(d=array([], dtype=int32), lbl='보트'),
2822: SParentCategory(d=array([351, 393]), lbl='참외/메론/수박'),
2823: SParentCategory(d=array([], dtype=int32), lbl='보리차'),
2824: SParentCategory(d=array([], dtype=int32), lbl='방충복/방충모자'),
2825: SParentCategory(d=array([362]), lbl='괌'),
2826: SParentCategory(d=array([], dtype=int32), lbl='받침틀'),
2827: SParentCategory(d=array([], dtype=int32), lbl='옥돔'),
2828: SParentCategory(d=array([], dtype=int32), lbl='남미(기타)'),
2829: SParentCategory(d=array([], dtype=int32), lbl='배드민턴용품'),
2830: SParentCategory(d=array([], dtype=int32), lbl='스테인리스 아이스큐브'),
2831: SParentCategory(d=array([], dtype=int32), lbl='낚시조끼'),
2832: SParentCategory(d=array([], dtype=int32), lbl='자전거 허브'),
2833: SParentCategory(d=array([], dtype=int32), lbl='출산기념용품'),
2834: SParentCategory(d=array([], dtype=int32), lbl='골동품'),
2835: SParentCategory(d=array([], dtype=int32), lbl='때비누'),
2836: SParentCategory(d=array([], dtype=int32), lbl='치즈스틱'),
2837: SParentCategory(d=array([], dtype=int32), lbl='야구잡화'),
2838: SParentCategory(d=array([], dtype=int32), lbl='기타네일소품'),
2839: SParentCategory(d=array([], dtype=int32), lbl='등산화소품'),
2840: SParentCategory(d=array([], dtype=int32), lbl='파라핀용품'),
2841: SParentCategory(d=array([], dtype=int32), lbl='새우/오징어튀김'),
2842: SParentCategory(d=array([], dtype=int32), lbl='명태/동태'),
2843: SParentCategory(d=array([], dtype=int32), lbl='구강물티슈'),
2844: SParentCategory(d=array([], dtype=int32), lbl='다림풀'),
2845: SParentCategory(d=array([], dtype=int32), lbl='낚시신발'),
2846: SParentCategory(d=array([], dtype=int32), lbl='하우스자재'),
2847: SParentCategory(d=array([], dtype=int32), lbl='메이크업 박스'),
2848: SParentCategory(d=array([], dtype=int32), lbl='키위/참다래'),
2849: SParentCategory(d=array([], dtype=int32), lbl='가발 마네킹/스탠드'),
2850: SParentCategory(d=array([227]), lbl='홍콩'),
2851: SParentCategory(d=array([], dtype=int32), lbl='파이프머신/부속품'),
2852: SParentCategory(d=array([], dtype=int32), lbl='파충류용품'),
2853: SParentCategory(d=array([], dtype=int32), lbl='퐁듀용품'),
2854: SParentCategory(d=array([], dtype=int32), lbl='들기름'),
2855: SParentCategory(d=array([], dtype=int32), lbl='고도계'),
2856: SParentCategory(d=array([], dtype=int32), lbl='USB온열기'),
2857: SParentCategory(d=array([], dtype=int32), lbl='프린터 공유기'),
2858: SParentCategory(d=array([], dtype=int32), lbl='암워머'),
2859: SParentCategory(d=array([], dtype=int32), lbl='미용실'),
2860: SParentCategory(d=array([], dtype=int32), lbl='보드게임용품'),
2861: SParentCategory(d=array([], dtype=int32), lbl='차량용 접지'),
2862: SParentCategory(d=array([], dtype=int32), lbl='인테리어 몰딩'),
2863: SParentCategory(d=array([227]), lbl='중국(상해)'),
2864: SParentCategory(d=array([], dtype=int32), lbl='생크림/휘핑크림'),
2865: SParentCategory(d=array([], dtype=int32), lbl='낙지'),
2866: SParentCategory(d=array([], dtype=int32), lbl='냉장고 소모품'),
2867: SParentCategory(d=array([], dtype=int32), lbl='기타 건축자재'),
2868: SParentCategory(d=array([], dtype=int32), lbl='캐나다(기타)'),
2869: SParentCategory(d=array([], dtype=int32), lbl='유아동 귀걸이'),
2870: SParentCategory(d=array([], dtype=int32), lbl='바다루어대'),
2871: SParentCategory(d=array([], dtype=int32), lbl='젬베'),
2872: SParentCategory(d=array([], dtype=int32), lbl='모터사이클 오일'),
2873: SParentCategory(d=array([], dtype=int32), lbl='바다낚시세트'),
2874: SParentCategory(d=array([], dtype=int32), lbl='차량용 재떨이'),
2875: SParentCategory(d=array([], dtype=int32), lbl='차량용 가습기'),
2876: SParentCategory(d=array([], dtype=int32), lbl='짜장'),
2877: SParentCategory(d=array([], dtype=int32), lbl='마이크로화이바이불솜'),
2878: SParentCategory(d=array([], dtype=int32), lbl='수의/장례용품'),
2879: SParentCategory(d=array([], dtype=int32), lbl='휠체어'),
2880: SParentCategory(d=array([], dtype=int32), lbl='CD/DVD 복사기'),
2881: SParentCategory(d=array([], dtype=int32), lbl='기타 건강측정용품'),
2882: SParentCategory(d=array([], dtype=int32), lbl='멕시코'),
2883: SParentCategory(d=array([], dtype=int32), lbl='시동경보기'),
2884: SParentCategory(d=array([], dtype=int32), lbl='백일/돌잔치 용품'),
2885: SParentCategory(d=array([], dtype=int32), lbl='메트로놈'),
2886: SParentCategory(d=array([], dtype=int32), lbl='차량용 배선'),
2887: SParentCategory(d=array([], dtype=int32), lbl='간편 도시락'),
2888: SParentCategory(d=array([], dtype=int32), lbl='민물낚시바늘'),
2889: SParentCategory(d=array([], dtype=int32), lbl='상커버/부속품'),
2890: SParentCategory(d=array([], dtype=int32), lbl='연수기'),
2891: SParentCategory(d=array([], dtype=int32), lbl='모터사이클 신발'),
2892: SParentCategory(d=array([], dtype=int32), lbl='삼치'),
2893: SParentCategory(d=array([], dtype=int32), lbl='스텝스툴'),
2894: SParentCategory(d=array([], dtype=int32), lbl='중국(기타)'),
2895: SParentCategory(d=array([219, 227, 344]), lbl='태국(푸켓)'),
2896: SParentCategory(d=array([], dtype=int32), lbl='중층찌케이스'),
2897: SParentCategory(d=array([], dtype=int32), lbl='태블릿PC 충전기'),
2898: SParentCategory(d=array([], dtype=int32), lbl='메모리폼베개'),
2899: SParentCategory(d=array([], dtype=int32), lbl='선루프'),
2900: SParentCategory(d=array([], dtype=int32), lbl='빨래삶통'),
2901: SParentCategory(d=array([], | |
"""
Copyright (C) 2011 <NAME>
This file is part of QuantLib, a free-software/open-source library
for financial quantitative analysts and developers - http://quantlib.org/
QuantLib is free software: you can redistribute it and/or modify it
under the terms of the QuantLib license. You should have received a
copy of the license along with this program; if not, please email
<<EMAIL>>. The license is also available online at
<http://quantlib.org/license.shtml>.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the license for more details.
"""
from QuantLib import *
import unittest
class AssetSwapTest(unittest.TestCase):
    def setUp(self):
        """Build the market data and pricers shared by the asset-swap tests.

        Creates a relinkable flat 5% term structure (Actual/365 Fixed), a
        semiannual Euribor index forecasting off that curve, a 10Y
        EuriborSwapIsdaFixA swap index, and the Ibor/CMS coupon pricers.
        NOTE(review): statement order matters — QuantLib's global evaluation
        date and the handle link are set after the indexes are built; keep it.
        """
        # initial setup
        self.termStructure = RelinkableYieldTermStructureHandle()
        self.swapSettlementDays = 2
        self.faceAmount = 100.0
        self.fixedConvention = Unadjusted
        self.compounding = Continuous
        self.fixedFrequency = Annual
        self.floatingFrequency = Semiannual
        # Euribor index with semiannual tenor, forecasting off the
        # (still unlinked) relinkable term-structure handle.
        self.iborIndex = Euribor(Period(self.floatingFrequency), self.termStructure)
        self.calendar = self.iborIndex.fixingCalendar()
        # 10-year ISDA-fix swap index built on top of the Euribor index.
        self.swapIndex= SwapIndex("EuriborSwapIsdaFixA", Period(10,Years), self.swapSettlementDays,
                                  self.iborIndex.currency(), self.calendar,
                                  Period(self.fixedFrequency), self.fixedConvention,
                                  self.iborIndex.dayCounter(), self.iborIndex)
        # Asset-swap spreads used by the tests (zero and a non-null one).
        self.spread = 0.0
        self.nonnullspread = 0.003
        self.today = Date(24,April,2007)
        # Set the global evaluation date before any pricing happens.
        Settings.instance().evaluationDate = self.today
        # Flat 5% continuously-compounded curve on Actual/365 Fixed.
        self.termStructure.linkTo(FlatForward(self.today, 0.05, Actual365Fixed()))
        self.yieldCurve = FlatForward(self.today, 0.05, Actual365Fixed())
        self.pricer = BlackIborCouponPricer()
        # Flat 20% swaption volatility used by the CMS (Hagan) pricer.
        self.swaptionVolatilityStructure = SwaptionVolatilityStructureHandle(ConstantSwaptionVolatility(self.today, NullCalendar(),Following,
                                                                                                        0.2, Actual365Fixed()))
        self.meanReversionQuote = QuoteHandle(SimpleQuote(0.01))
        self.cmspricer = AnalyticHaganPricer(self.swaptionVolatilityStructure,
                                             GFunctionFactory.Standard,
                                             self.meanReversionQuote)
def testConsistency(self) :
"""Testing consistency between fair price and fair spread..."""
bondCalendar = TARGET()
settlementDays = 3
## Fixed Underlying bond (Isin: DE0001135275 DBR 4 01/04/37)
## maturity doesn't occur on a business day
bondSchedule = Schedule(Date(4,January,2005),
Date(4,January,2037),
Period(Annual), bondCalendar,
Unadjusted, Unadjusted,
DateGeneration.Backward, False)
bond = FixedRateBond(settlementDays, self.faceAmount,
bondSchedule,[0.04],
ActualActual(ActualActual.ISDA),
Following,
100.0, Date(4,January,2005))
payFixedRate = True
bondPrice = 95.0
isPar = True
parAssetSwap = AssetSwap(payFixedRate,
bond, bondPrice,
self.iborIndex, self.spread,
Schedule(),
self.iborIndex.dayCounter(),
isPar)
swapEngine = DiscountingSwapEngine(self.termStructure,
True,
bond.settlementDate(),
Settings.instance().evaluationDate)
parAssetSwap.setPricingEngine(swapEngine)
fairCleanPrice = parAssetSwap.fairCleanPrice()
fairSpread = parAssetSwap.fairSpread()
tolerance = 1.0e-13
assetSwap2 = AssetSwap(payFixedRate, bond, fairCleanPrice,
self.iborIndex, self.spread,
Schedule(),
self.iborIndex.dayCounter(),
isPar)
assetSwap2.setPricingEngine(swapEngine)
self.assertFalse(abs(assetSwap2.NPV())>tolerance,
"\npar asset swap fair clean price doesn't zero the NPV: "
+ "\n clean price: " + str(bondPrice)
+ "\n fair clean price: " + str(fairCleanPrice)
+ "\n NPV: " + str(assetSwap2.NPV())
+ "\n tolerance: " + str(tolerance))
self.assertFalse(abs(assetSwap2.fairCleanPrice() - fairCleanPrice)>tolerance,
"\npar asset swap fair clean price doesn't equal input "
+ "clean price at zero NPV: "
+ "\n input clean price: " + str(fairCleanPrice)
+ "\n fair clean price: " + str(assetSwap2.fairCleanPrice())
+ "\n NPV: " + str(assetSwap2.NPV())
+ "\n tolerance: " + str(tolerance))
self.assertFalse(abs(assetSwap2.fairSpread() - self.spread)>tolerance,
"\npar asset swap fair spread doesn't equal input spread "
+ "at zero NPV: "
+ "\n input spread: " + str(self.spread )
+ "\n fair spread: " + str(assetSwap2.fairSpread() )
+ "\n NPV: " + str(assetSwap2.NPV() )
+ "\n tolerance: " + str(tolerance))
assetSwap3 = AssetSwap(payFixedRate,
bond, bondPrice,
self.iborIndex, fairSpread,
Schedule(),
self.iborIndex.dayCounter(),
isPar)
assetSwap3.setPricingEngine(swapEngine)
self.assertFalse(abs(assetSwap3.NPV())>tolerance,
"\npar asset swap fair spread doesn't zero the NPV: "
+ "\n spread: " + str(self.spread)
+ "\n fair spread: " + str(fairSpread)
+ "\n NPV: " + str(assetSwap3.NPV())
+ "\n tolerance: " + str(tolerance))
self.assertFalse(abs(assetSwap3.fairCleanPrice() - bondPrice)>tolerance,
"\npar asset swap fair clean price doesn't equal input "
+ "clean price at zero NPV: "
+ "\n input clean price: " + str(bondPrice)
+ "\n fair clean price: " + str(assetSwap3.fairCleanPrice())
+ "\n NPV: " + str(assetSwap3.NPV())
+ "\n tolerance: " + str(tolerance))
self.assertFalse(abs(assetSwap3.fairSpread() - fairSpread)>tolerance,
"\npar asset swap fair spread doesn't equal input spread at"
+ " zero NPV: "
+ "\n input spread: " + str(fairSpread)
+ "\n fair spread: " + str(assetSwap3.fairSpread())
+ "\n NPV: " + str(assetSwap3.NPV())
+ "\n tolerance: " + str(tolerance))
## let's change the npv date
swapEngine = DiscountingSwapEngine(self.termStructure,
True,
bond.settlementDate(),
bond.settlementDate())
parAssetSwap.setPricingEngine(swapEngine)
## fair clean price and fair spread should not change
self.assertFalse(abs(parAssetSwap.fairCleanPrice() - fairCleanPrice)>tolerance,
"\npar asset swap fair clean price changed with NpvDate:"
+ "\n expected clean price: " + str(fairCleanPrice)
+ "\n fair clean price: " + str(parAssetSwap.fairCleanPrice())
+ "\n tolerance: " + str(tolerance))
self.assertFalse(abs(parAssetSwap.fairSpread() - fairSpread)>tolerance,
"\npar asset swap fair spread changed with NpvDate:"
+ "\n expected spread: " + str(fairSpread)
+ "\n fair spread: " + str(parAssetSwap.fairSpread())
+ "\n tolerance: " + str(tolerance))
assetSwap2 = AssetSwap(payFixedRate,
bond, fairCleanPrice,
self.iborIndex, self.spread,
Schedule(),
self.iborIndex.dayCounter(),
isPar)
assetSwap2.setPricingEngine(swapEngine)
self.assertFalse(abs(assetSwap2.NPV())>tolerance,
"\npar asset swap fair clean price doesn't zero the NPV: "
+ "\n clean price: " + str(bondPrice)
+ "\n fair clean price: " + str(fairCleanPrice)
+ "\n NPV: " + str(assetSwap2.NPV())
+ "\n tolerance: " + str(tolerance))
self.assertFalse(abs(assetSwap2.fairCleanPrice() - fairCleanPrice)>tolerance,
"\npar asset swap fair clean price doesn't equal input "
+ "clean price at zero NPV: "
+ "\n input clean price: " + str(fairCleanPrice)
+ "\n fair clean price: " + str(assetSwap2.fairCleanPrice())
+ "\n NPV: " + str(assetSwap2.NPV())
+ "\n tolerance: " + str(tolerance))
self.assertFalse(abs(assetSwap2.fairSpread() - self.spread)>tolerance,
"\npar asset swap fair spread doesn't equal input spread at zero NPV: "
+ "\n input spread: " + str(self.spread)
+ "\n fair spread: " + str(assetSwap2.fairSpread())
+ "\n NPV: " + str(assetSwap2.NPV())
+ "\n tolerance: " + str(tolerance))
assetSwap3 = AssetSwap(payFixedRate,
bond, bondPrice,
self.iborIndex, fairSpread,
Schedule(),
self.iborIndex.dayCounter(),
isPar)
assetSwap3.setPricingEngine(swapEngine)
self.assertFalse(abs(assetSwap3.NPV())>tolerance,
"\npar asset swap fair spread doesn't zero the NPV: "
+ "\n spread: " + str(self.spread)
+ "\n fair spread: " + str(fairSpread)
+ "\n NPV: " + str(assetSwap3.NPV())
+ "\n tolerance: " + str(tolerance))
self.assertFalse(abs(assetSwap3.fairCleanPrice() - bondPrice)>tolerance,
"\npar asset swap fair clean price doesn't equal input "
+ "clean price at zero NPV: "
+ "\n input clean price: " + str(bondPrice)
+ "\n fair clean price: " + str(assetSwap3.fairCleanPrice())
+ "\n NPV: " + str(assetSwap3.NPV())
+ "\n tolerance: " + str(tolerance))
self.assertFalse(abs(assetSwap3.fairSpread() - fairSpread)>tolerance,
"\npar asset swap fair spread doesn't equal input spread at zero NPV: "
+ "\n input spread: " + str(fairSpread)
+ "\n fair spread: " + str(assetSwap3.fairSpread())
+ "\n NPV: " + str(assetSwap3.NPV())
+ "\n tolerance: " + str(tolerance))
## now market asset swap
isPar = False
mktAssetSwap = AssetSwap (payFixedRate,
bond, bondPrice,
self.iborIndex, self.spread,
Schedule(),
self.iborIndex.dayCounter(),
isPar)
swapEngine = DiscountingSwapEngine(self.termStructure,
True,
bond.settlementDate(),
Settings.instance().evaluationDate)
mktAssetSwap.setPricingEngine(swapEngine)
fairCleanPrice = mktAssetSwap.fairCleanPrice()
fairSpread = mktAssetSwap.fairSpread()
assetSwap4 = AssetSwap (payFixedRate,
bond, fairCleanPrice,
self.iborIndex, self.spread,
Schedule(),
self.iborIndex.dayCounter(),
isPar)
assetSwap4.setPricingEngine(swapEngine)
self.assertFalse(abs(assetSwap4.NPV())>tolerance,
"\nmarket asset swap fair clean price doesn't zero the NPV: "
+ "\n clean price: " + str(bondPrice)
+ "\n fair clean price: " + str(fairCleanPrice)
+ "\n NPV: " + str(assetSwap4.NPV())
+ "\n tolerance: " + str(tolerance))
self.assertFalse(abs(assetSwap4.fairCleanPrice() - fairCleanPrice)>tolerance,
"\nmarket asset swap fair clean price doesn't equal input "
+ "clean price at zero NPV: "
+ "\n input clean price: " + str(fairCleanPrice)
+ "\n fair clean price: " + str(assetSwap4.fairCleanPrice())
+ "\n NPV: " + str(assetSwap4.NPV())
+ "\n tolerance: " + str(tolerance))
self.assertFalse(abs(assetSwap4.fairSpread() - self.spread)>tolerance,
"\nmarket asset swap fair spread doesn't equal input spread"
+ " at zero NPV: "
+ "\n input spread: " + str(self.spread)
+ "\n fair spread: " + str(assetSwap4.fairSpread())
+ "\n NPV: " + str(assetSwap4.NPV())
+ "\n tolerance: " + str(tolerance))
assetSwap5 = AssetSwap(payFixedRate,
bond, bondPrice,
self.iborIndex, fairSpread,
Schedule(),
self.iborIndex.dayCounter(),
isPar)
assetSwap5.setPricingEngine(swapEngine)
self.assertFalse(abs(assetSwap5.NPV())>tolerance,
"\nmarket asset swap fair spread doesn't zero the NPV: "
+ "\n spread: " + str(self.spread)
+ "\n fair spread: " + str(fairSpread)
+ "\n NPV: " + str(assetSwap5.NPV())
+ "\n tolerance: " + str(tolerance))
self.assertFalse(abs(assetSwap5.fairCleanPrice() - bondPrice)>tolerance,
"\nmarket asset swap fair clean price doesn't equal input "
+ "clean price at zero NPV: "
+ "\n input clean price: " + str(bondPrice)
+ "\n fair clean price: " + str(assetSwap5.fairCleanPrice())
+ "\n NPV: " + str(assetSwap5.NPV())
+ "\n tolerance: " + str(tolerance))
self.assertFalse(abs(assetSwap5.fairSpread() - fairSpread)>tolerance,
"\nmarket asset swap fair spread doesn't equal input spread at zero NPV: "
+ "\n input spread: " + str(fairSpread)
+ "\n fair spread: " + str(assetSwap5.fairSpread())
+ "\n NPV: " + str(assetSwap5.NPV())
+ "\n tolerance: " + str(tolerance))
## let's change the npv date
swapEngine = DiscountingSwapEngine(self.termStructure,
True,
bond.settlementDate(),
bond.settlementDate())
mktAssetSwap.setPricingEngine(swapEngine)
## fair clean | |
d_source + d_target - d_source_target
# if at least 2 interactions
# or previous point is a diffraction
if (len(tahe)<2) or (len(visited[-2])==1) or (len(visited[-1])==1):
ratio = 1.0
ratio2 = 1.0
else:
# Determine the origin of the cone
# either the transmitter (ilast =0)
# or the last diffraction point (ilast=udiff[-1] )
udiff = [ k for k in range(len(visited)) if len(visited[k])==1 ]
if udiff==[]:
ilast = 0
else:
ilast=udiff[-1]
#print(tahe)
pta0 = tahe[ilast][0] # tail first segment (last difraction)
phe0 = tahe[ilast][1] # head first segment
#
# TODO : it would be better to replace pta_ and phe_ with the intersection
# of the previous cone with tahe[-1]
#
pta_ = tahe[-1][0] # tail last segment
phe_ = tahe[-1][1] # head last segment
#
# Calculates the left and right vector of the cone
#
# vl left vector
# vr right vector
#
#
# Detect situations of connected segments
#
# [(60, 2, 8), (61, 8, 11), (15, 11), (61, 11, 8), (60 ,8, 2), (44, 2, 7)]
# if visited == [(60, 2, 8), (61, 8, 11), (15, 11), (61, 11, 8), (60 ,8, 2), (44, 2, 7)]:
# print '\n',visited
# import ipdb
# ipdb.set_trace()
connected = False
if (pta0==pta_).all():
apex = pta0
connected = True
v0 = phe0 - apex
v_ = phe_ - apex
elif (pta0==phe_).all():
apex = pta0
connected = True
v0 = phe0 - apex
v_ = pta_ - apex
elif (phe0==pta_).all():
apex = phe0
connected = True
v0 = pta0 - apex
v_ = phe_ - apex
elif (phe0==phe_).all():
apex = phe0
connected = True
v0 = pta0 - apex
v_ = pta_ - apex
if connected:
if ((np.linalg.norm(v0)==0) or (np.linalg.norm(v_)==0)):
logger.debug("pta0 : %g,%g", pta0[0], pta0[1])
logger.debug("pta_ : %g,%g", pta_[0], pta_[1])
logger.debug("phe0 : %g,%g", phe0[0], phe0[1])
logger.debug("phe_ : %g,%g", phe_[0], phe_[1])
logger.debug("v0 : %g,%g", v0[0], v0[1])
logger.debug("v_ : %g,%g", v_[0], v_[1])
#
# Does the cone is built from 2 connected segments or
# 2 unconnected segments
#
if not connected:
if not (geu.ccw(pta0,phe0,phe_) ^
geu.ccw(phe0,phe_,pta_) ):
vr = (pta0,phe_)
vl = (phe0,pta_)
else: # twisted case
vr = (pta0,pta_)
vl = (phe0,phe_)
# cone dot product
# print vr
# print vl
vr_n = (vr[1]-vr[0])/np.linalg.norm(vr[1]-vr[0])
vl_n = (vl[1]-vl[0])/np.linalg.norm(vl[1]-vl[0])
vrdotvl = np.dot(vr_n,vl_n)
# cone angle
angle_cone = np.arccos(np.maximum(np.minimum(vrdotvl,1.0),-1.0))
#angle_cone = np.arccos(vrdotvl)
# prepare lines and seg argument for intersection checking
if angle_cone!=0:
linel = (vl[0],vl[1]-vl[0])
liner = (vr[0],vr[1]-vr[0])
# from origin mirrored segment to be tested
seg = (th_mirror[0],th_mirror[1])
# apex calculation
a0u = np.dot(pta0,vr_n)
a0v = np.dot(pta0,vl_n)
b0u = np.dot(phe0,vr_n)
b0v = np.dot(phe0,vl_n)
#import warnings
#warnings.filterwarnings("error")
try:
kb = ((b0v-a0v)-vrdotvl*(b0u-a0u))/(vrdotvl*vrdotvl-1)
except:
pdb.set_trace()
apex = phe0 + kb*vl_n
else: # cone from connected segments
v0n = v0/np.linalg.norm(v0)
try:
v_n = v_/np.linalg.norm(v_)
except:
pdb.set_trace()
# import ipdb
# ipdb.set_trace()
sign = np.sign(np.cross(v_n,v0n))
if sign>0:
vr_n = -v0n
vl_n = v_n
else:
vr_n = v_n
vl_n = -v0n
vrdotvl = np.dot(vr_n,vl_n)
# cone angle
angle_cone = np.arccos(np.maximum(np.minimum(vrdotvl,1.0),-1.))
#
# the illuminating cone is defined
# the th_mirror to be tested with this cone are known
#
if ( (not np.isclose(angle_cone,0,atol=1e-6) )
and ( not np.isclose(angle_cone,np.pi)) ) :
#if self.cpt==16176:
# pdb.set_trace()
seg,ratio2 = geu.intersect_cone_seg((apex,vl_n),(apex,vr_n),(th_mirror[0],th_mirror[1]),bvis=False)
elif ( not np.isclose(angle_cone,0) ):
ratio2 = 1
else:
ratio2 = 0
#print ratio
if len(seg)==2:
th_mirror = np.vstack((seg[0],seg[1]))
else:
pass
al = np.arctan2(vl_n[1],vl_n[0])
ar = np.arctan2(vr_n[1],vr_n[0])
if np.allclose(th_mirror[0],apex) or np.allclose(th_mirror[1],apex):
ratio2 = 1.
# On connecte l'apex du cone courant aux extrémités du segment courant mirroré
# Dans certaines circonstances par example un cone emanant d'un point colinéaire
# avec le segment d'arrivé" (-4) (6,4) le point -4 est aligné avec le segment 6
# l'ouverture du cone est nul => arret. Cela pourrait être géré dans Gi en interdisant
# la visibilité (-4) (6,4)
# if angle_cone ==0:
# ratio = 0
# else:
# if np.allclose(th_mirror[0],apex) or np.allclose(th_mirror[1],apex):
# ratio = 1.
# else:
# wseg0 = th_mirror[0] - apex
# wseg1 = th_mirror[1] - apex
# mod_wseg0 = np.sqrt(np.sum(wseg0*wseg0,axis=0))
# mod_wseg1 = np.sqrt(np.sum(wseg1*wseg1,axis=0))
#
# if np.isclose(mod_wseg0,0):
# #bvisu = True
# #pdb.set_trace()#
# pass
# if np.isclose(mod_wseg1,0):
# #bvisu = True
# #pdb.set_trace()#
# pass
# #wseg0_n = wseg0/mod_wseg0
# #wseg1_n = wseg1/mod_wseg1
# wseg0_n = wseg0/np.linalg.norm(wseg0)
# wseg1_n = wseg1/np.linalg.norm(wseg1)
# aseg0 = np.arctan2(wseg0_n[1],wseg0_n[0])
# aseg1 = np.arctan2(wseg1_n[1],wseg1_n[0])
#
# # if al==aseg0 or al==aseg1 or ar==aseg0 or ar==aseg1:
# # ratio = 1
# #print "toto"
# # else:
# I = geu.angle_intersection2(al,ar,aseg0,aseg1)
# ratio = I/angle_cone
# #if ratio>=1:
# # pdb.set_trace()
#
# # if connected:
# # print "ratio :",ratio
#
#
# #if visited == [(104, 23, 17), (1, 17), (53, 17)]:
# if (bvisu):
# fig ,ax = self.L.showG('s',aw=1,labels=0)
# #
# # magenta : start of the cone
# # cyan :
# # yellow : last interaction
# #
# ax = geu.linet(ax,pta0,phe0,al=1,color='magenta',linewidth=3)
# ax = geu.linet(ax,pta_,phe_,al=1,color='cyan',linewidth=3)
# ax = geu.linet(ax,np.array(self.L.Gs.pos[nseg_points[0]]),np.array(self.L.Gs.pos[nseg_points[1]]),al=1,color='yellow',linewidth=4)
# # ax = geu.linet(ax,vr[0],vr[1],al=1,color='red',linewidth=3)
# # ax = geu.linet(ax,vl[0],vl[1],al=1,color='blue',linewidth=3)
# ax = geu.linet(ax,seg[0],seg[1],al=1,color='k',linewidth=3)
# ax = geu.linet(ax,th_mirror[0,:],th_mirror[1,:],al=1,color='green',linewidth=3)
# nx.draw_networkx_labels(self.L.Gi,
# self.L.Gi.pos,labels={x:str(x) for x in visited},
# ax=ax,fontsize=18)
# plt.title(str(visited)+' '+str(ratio))
# ax.plot(apex[0],apex[1],'or')
# plt.axis('auto')
# pdb.set_trace()
# #if visited == [(104, 23, 17), (1, 17), (53, 17), (108, 17, 18)]:
# # if visited == [(104, 23, 17), (1, 17), (53, 17)]:
# if (1==0):
# fig ,ax = self.L.showG('s',aw=1,labels=0)
# ax = geu.linet(ax,pta0,phe0,al=1,color='magenta',linewidth=3)
# ax = geu.linet(ax,pta_,phe_,al=1,color='cyan',linewidth=3)
#
# ax = geu.linet(ax,np.array(self.L.Gs.pos[pts[0]]),np.array(self.L.Gs.pos[pts[1]]),al=1,color='yellow',linewidth=4)
# ax = geu.linet(ax,vr[0],vr[1],al=1,color='red',linewidth=3)
# ax = geu.linet(ax,vl[0],vl[1],al=1,color='blue',linewidth=3)
# #ax = geu.linet(ax,seg[0],seg[1],al=1,color='k',linewidth=3)
# ax = geu.linet(ax,th[0,:],th[1,:],al=1,color='green',linewidth=3)
# plt.title(str(visited)+' '+str(ratio))
# ax.plot(apex[0],apex[1],'or')
# plt.axis('auto')
# plt.show()
#else:
# th = self.L.Gs.pos[nstr]
# th = np.array([th,th])
# ratio = 1
#print self.cpt,ratio,ratio2
#if (ratio>0.1) and (ratio2==0):
# pdb.set_trace()
#print d_excess,dist_excess_max
#if (ratio2 > self.threshold) and (d_excess<dist_excess_max):
if (ratio2 > self.threshold) and (d_excess<dist_excess_max):
#if (ratio > self.threshold):
#
# Update sequence of mirrored points
#
if nstr<0:
tahe.append(th)
else:
tahe.append(th_mirror)
#if (tahe[-1][0]==tahe[-1][1]).all():
# pdb.set_trace()
#
# Check if the target has been reached
# sequence is valid and last interaction is in the list of targets
#if (interaction in lit) or (interaction[-1]==self.target):
if (interaction in lit):
# idea here is to produce signature without any airwalls
# lawp_tmp is a mask where 0 mean no air wall and 1 = airwall
# anstr does not contains airwalls
# lawp_tmp = [0]+lawp
# lll = [x[0] for ix,x in enumerate(visited) if lawp_tmp[ix]==1]
# print([self.L.Gs.node[x]['name'] for x in lll])
#anstr = np.array([x[0] for ix,x in enumerate(visited)
# if ((lawp[ix]!=1) or (x[0] in self.L.name['AIR']) or (x in (lit+lis)))] )
#typ = np.array([len(x) for ix,x in enumerate(visited)
# if ((lawp[ix]!=1) or (x[0] in self.L.name['AIR']) or (x in (lit+lis)))] )
#sig = np.array([anstr,typ])
#sighash = hash(str(sig))
# if len(anstr) == 2:
# if (anstr == np.array([323,351])).all():
# import ipdb
# ipdb.set_trace()
anstr = np.array([x[0] for x in visited ])
typ = np.array([len(x) for x in visited])
sig = np.array([anstr,typ])
sighash = hash(str(sig))
if sighash not in lhash:
lhash.append(sighash)
try:
self[len(typ)] = np.vstack((self[len(typ)],sig))
self.ratio[len(typ)] = np.append(self.ratio[len(typ)],ratio)
except:
self[len(typ)] = np.vstack((sig))
self.ratio[len(typ)] = np.array([ratio])
# print ('added',visited)
cptsig +=1
if animation:
Nf = nx.draw_networkx_nodes(Gi,pos=Gi.pos,
nodelist=visited,labels={},
node_color='b',
node_size=40,
ax=ax,fig=fig)
Ef = nx.draw_networkx_edges(Gi,pos=Gi.pos,
edgelist=edge,labels={},
width=0.1,arrows=False,
ax=ax,fig=fig)
cpt=cpt+1
plt.savefig('./figure/' +str(us) +'_' + str(cpt) +'.png')
try:
ax.collections.remove(Nf)
except:
pass
try:
ax.collections.remove(Ef)
except:
pass
outint = Gi[visited[-2]][interaction]['output'].keys()
#
# proint not used
#
proint = Gi[visited[-2]][interaction]['output'].values()
nexti = [it for it in outint ]
stack.append(iter(nexti))
# 1590 ratio <= threshold
else:
if len(visited)>1:
if ((len(visited[-2])==2) or len(visited[-2])==1):
R.pop()
last = visited.pop()
lawp.pop()
# 1389 condR and condT and condD
else:
pass
# 1388 cond1 and cond2 and cond3
else:
# if at least 2 interactions
# and antepenultiem is a reflexion
if len(visited)>1:
if ((len(visited[-2])==2) or len(visited[-2])==1):
R.pop()
last = visited.pop()
#
# Poping
# tahe
# lawp
# stack
#if (tahe[-1][0]==tahe[-1][1]).all():
# pdb.set_trace()
tahe.pop()
try:
lawp.pop()
except:
pass
stack.pop()
#stack.pop()
def plot_cones(self,L,i=0,s=0,fig=[],ax=[],figsize=(10,10)):
""" display cones of an unfolded signature
Parameters
----------
L : Layout
i : int
the interaction block
s : int
the signature number in the block
fig :
ax :
figsize :
"""
if fig == []:
fig= plt.figure()
ax = fig.add_subplot(111)
elif ax ==[]:
ax = fig.add_subplot(111)
pta,phe = self.unfold(L,i=i,s=s)
# | |
"""Points-to / dataflow / cfg graph engine.
It can be used to run reaching-definition queries on a nested CFG graph
and to model path-specific visibility of nested data structures.
"""
import collections
import logging
from pytype import metrics
import pytype.utils
log = logging.getLogger(__name__)
_variable_size_metric = metrics.Distribution("variable_size")
# Across a sample of 19352 modules, for files which took more than 25 seconds,
# the largest variable was, on average, 157. For files below 25 seconds, it was
# 7. Additionally, for 99% of files, the largest variable was below 64, so we
# use that as the cutoff.
MAX_VAR_SIZE = 64
class Program(object):
  """Program instances describe program entities.

  This class ties together the CFG, the data flow graph (variables + bindings)
  as well as methods. We use this for issuing IDs: We need every CFG node to
  have a unique ID, and this class does the corresponding counting.

  Attributes:
    entrypoint: Entrypoint of the program, if it has one. (None otherwise)
    cfg_nodes: CFG nodes in use. Will be used for assigning node IDs.
    variables: Variables in use. Will be used for assigning variable IDs.
  """

  def __init__(self):
    """Initialize a new (initially empty) program."""
    self.entrypoint = None
    self.cfg_nodes = []
    self.next_variable_id = 0
    self.solver = None
    self.default_data = None

  def CreateSolver(self):
    """Lazily create the solver; reuse an existing one if still valid."""
    if self.solver is None:
      self.solver = Solver(self)

  def InvalidateSolver(self):
    """Drop the cached solver; call whenever the CFG topology changes."""
    self.solver = None

  def NewCFGNode(self, name=None, condition=None):
    """Start a new CFG node.

    Args:
      name: Optional name for the node (debugging only).
      condition: Optional binding that must hold to take this branch.
    Returns:
      The newly created CFGNode, already appended to cfg_nodes.
    """
    self.InvalidateSolver()
    cfg_node = CFGNode(self, name, len(self.cfg_nodes), condition)
    self.cfg_nodes.append(cfg_node)
    return cfg_node

  @property
  def variables(self):
    # Collected lazily from the bindings registered at each CFG node.
    return {b.variable for node in self.cfg_nodes for b in node.bindings}

  def NewVariable(self, bindings=None, source_set=None, where=None):
    """Create a new Variable.

    A Variable typically models a "union type", i.e., a disjunction of different
    possible types. This constructor assumes that all the bindings in this
    Variable have the same origin(s). If that's not the case, construct a
    variable with bindings=[] and origins=[] and then call AddBinding() to add
    the different bindings.

    Arguments:
      bindings: Optionally, a sequence of possible data items this variable can
        have.
      source_set: If we have bindings, the source_set they *all* depend on. An
        instance of SourceSet.
      where: If we have bindings, where in the CFG they're assigned.
    Returns:
      A Variable instance.
    """
    variable = Variable(self, self.next_variable_id)
    # BUG FIX: logging.Logger has no trace() method in the standard library,
    # so log.trace(...) raised AttributeError. Use debug() with lazy %-style
    # arguments instead of eager string formatting.
    log.debug("New variable v%d", self.next_variable_id)
    self.next_variable_id += 1
    if bindings is not None:
      assert source_set is not None and where is not None
      for data in bindings:
        binding = variable.AddBinding(data)
        binding.AddOrigin(where, source_set)
    return variable

  def MergeVariables(self, node, variables):
    """Create a combined Variable for a list of variables.

    The purpose of this function is to create a final result variable for
    functions that return a list of "temporary" variables. (E.g. function
    calls).

    Args:
      node: The current CFG node.
      variables: List of variables.
    Returns:
      A typegraph.Variable.
    """
    if not variables:
      return self.NewVariable()  # return empty var
    elif len(variables) == 1:
      v, = variables
      return v
    elif all(v is variables[0] for v in variables):
      # All entries are the same object; no merge needed.
      return variables[0]
    else:
      v = self.NewVariable()
      for r in variables:
        v.PasteVariable(r, node)
      return v
class CFGNode(object):
  """A node in the CFG.

  All assignments inside a single CFG node are unordered: if "x = x + 1" lives
  in one node, both bindings of x are visible from within that node.

  Attributes:
    program: The Program instance we belong to.
    id: Numerical node id.
    name: Name of this CFGNode, or None. For debugging.
    incoming: Other CFGNodes that are connected to this node.
    outgoing: CFGNodes we connect to.
    bindings: Bindings that are being assigned to Variables at this CFGNode.
    reachable_subset: A subset of the nodes reachable (going backwards) from
      this one.
    condition: None if no condition is set at this node; otherwise the binding
      that must be fulfilled to take the branch represented by this node.
  """

  __slots__ = ("program", "id", "name", "incoming", "outgoing", "bindings",
               "reachable_subset", "condition")

  def __init__(self, program, name, cfgnode_id, condition):
    """Initialize a new CFG node. Called from Program.NewCFGNode."""
    self.program = program
    self.id = cfgnode_id
    self.name = name
    self.incoming = set()
    self.outgoing = set()
    self.bindings = set()  # populated through RegisterBinding()
    self.reachable_subset = {self}
    self.condition = condition

  def ConnectNew(self, name=None, condition=None):
    """Create a fresh node and connect this node to it."""
    successor = self.program.NewCFGNode(name, condition)
    self.ConnectTo(successor)
    return successor

  def ConnectTo(self, cfg_node):
    """Connect this node to an existing node."""
    # Any topology change invalidates previously computed solver results.
    self.program.InvalidateSolver()
    self.outgoing.add(cfg_node)
    cfg_node.incoming.add(self)
    cfg_node.reachable_subset |= self.reachable_subset

  def CanHaveCombination(self, bindings):
    """Quick necessary (but not sufficient) check; see HasCombination."""
    unmet = set(bindings)
    visited = set()
    pending = [self]
    # TODO(kramm): Take blocked nodes into account, like in Bindings()?
    while pending and unmet:
      current = pending.pop()
      if current in visited:
        continue
      visited.add(current)
      # Any binding registered at this node is satisfiable along this walk.
      unmet.difference_update(current.bindings)
      pending.extend(current.incoming)
    return not unmet

  def HasCombination(self, bindings):
    """Query whether a combination is possible.

    Query whether its possible to have the given combination of bindings at
    this CFG node (I.e., whether they can all be assigned at the same time.)
    This will e.g. tell us whether a return binding is possible given a specific
    combination of argument bindings.

    Arguments:
      bindings: A list of Bindings.
    Returns:
      True if the combination is possible, False otherwise.
    """
    self.program.CreateSolver()
    solver = self.program.solver
    # Optimization: the joint query is the expensive part, so only run it
    # once every individual binding has been shown possible on its own.
    if not all(solver.Solve({b}, self) for b in bindings):
      return False
    return solver.Solve(bindings, self)

  def RegisterBinding(self, binding):
    """Record that the given binding is assigned at this node."""
    self.bindings.add(binding)

  def Label(self):
    """Return a string containing the node name and id."""
    return "<%d>%s" % (self.id, self.name)

  def __repr__(self):
    return "<cfgnode %d %s>" % (self.id, self.name)

  def AsciiTree(self, forward=False):
    """Draws an ascii tree, starting at this node.

    Args:
      forward: If True, draw the tree starting at this node. If False, draw
        the "reverse" tree that starts at the current node when following all
        edges in the reverse direction.
        The default is False, because during CFG construction, the current node
        typically doesn't have any outgoing nodes.
    Returns:
      A string.
    """
    neighbors = (lambda node: node.outgoing) if forward else (
        lambda node: node.incoming)
    return pytype.utils.ascii_tree(self, neighbors)
class SourceSet(frozenset):
  """A SourceSet is a combination of Bindings that was used to form a Binding.

  In this context, a "source" is a Binding that was used to create another
  Binding. E.g., for a statement like "z = a.x + y", a, a.x and y would be the
  Sources to create z, and would form a SourceSet.
  """
  # frozenset subclass: immutable and hashable, so SourceSets can themselves
  # be stored inside sets (see Origin.source_sets).
  __slots__ = ()
class Origin(collections.namedtuple("_", "where, source_sets")):
  """An "origin" explains how a binding was constructed.

  It pairs the CFG node where the assignment happened with the set of
  SourceSets that could have produced the binding.

  Attributes:
    where: The CFG node where this assignment happened.
    source_sets: Possible SourceSets used to construct the binding we belong
      to. A set of SourceSet instances.
  """
  __slots__ = ()

  def __new__(cls, where, source_sets=None):
    # A missing (or empty) source_sets argument becomes a fresh set.
    sets = source_sets or set()
    return super(Origin, cls).__new__(cls, where, sets)

  def AddSourceSet(self, source_set):
    """Record one more possible SourceSet for the owning binding."""
    self.source_sets.add(SourceSet(source_set))
class Binding(object):
"""A Binding assigns a binding to a (specific) variable.
Bindings will therefore be stored in a dictionary in the Variable class,
mapping strings to Binding instances.
Depending on context, a Binding might also be called a "Source" (if it's
used for creating another binding) or a "goal" (if we want to find a solution
for a path through the program that assigns it).
A binding has history ("origins"): It knows where the binding was
originally retrieved from, before being assigned to something else here.
Origins contain, through source_sets, "sources", which are other bindings.
"""
__slots__ = ("program", "variable", "origins", "data", "_cfgnode_to_origin")
def __init__(self, program, variable, data):
"""Initialize a new Binding. Usually called through Variable.AddBinding."""
self.program = program
self.variable = variable
self.origins = []
self.data = data
self._cfgnode_to_origin = {}
def IsVisible(self, viewpoint):
"""Can we "see" this binding from the current cfg node?
This will run a solver to determine whether there's a path through the
program that makes our variable have this binding at the given CFG node.
Arguments:
viewpoint: The CFG node at which this binding is possible / not possible.
Returns:
True if there is at least one path through the program
in which the binding was assigned (and not overwritten afterwards), and
all the bindings it depends on were | |
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 17 14:15:59 2020
Modified a lot between April 2020 and July 2021
@author: <NAME>
email: <EMAIL>
This code generates a single fluorescence peak for a given atomic species,
oven geometry, and oven temperature. The default atomic angular distribution
is the molecular flow limit from Olander. One can readily toggle this to a
cosine distribution (the limit of an infinite nozzle ratio) or add a new
angular distribution.
"""
import numpy as np
import matplotlib.pyplot as plt
from datetime import datetime
# --- user toggles (module-level defaults; main() accepts the same options) ---
# Atomic species: 'Yb' (ytterbium), 'Rb' (rubidium), 'Ca' (calcium), ...
# species = 'Yb'
# species = 'Ca'
species = 'Rb'
# Atomic angular distribution: 'molecularflow' (Olander molecular-flow limit)
# or 'cosine' (the limit of an infinite nozzle ratio).
angdist = 'molecularflow'
# angdist = 'cosine'
# Scan type: 'test' runs quickly; 'fullscan' sweeps the full laser range
# and can take hours.
scantype='test'
# scantype='fullscan'
# main program
def main(species='Rb',angdist='molecularflow',scantype='test'):
start_time = datetime.now()
print("\n ****************** \n simulate_ABF starting. Time is %s\n"%start_time)
single_column = 1
plot_dims,font_size = abf.load_plot_settings(single_column)
# excitation rate integral type. For a directed beam we use the general
# formula which includes an integral over the maxwell-boltzmann speed distribution.
integral = 'maxboltz'
# =============================================================================
# load the atom and apparatus values
# =============================================================================
(nu_o, mass, A_Einstein,tau_o,f_a, vp_coeff,nozzle_radius,
nozzle_to_intersxn,det_y,det_radius,nozzle_ratio,r_m,oven_temp,
laser_radius,laser_power) = abf.load_atom_data(species)
if angdist == 'cosine':
nozzle_ratio = np.infty
print('\n user has selected cosine ang. dist., updating nozzle ratio -> infinity')
# =============================================================================
# volume integral settings. Want the smallest max cube side length that
# integrates over all the nonzero stuff and the smallest number of integral
# steps while keeping the computation time reasonable. As a general rule
# 1mm or less microcubes are good, e.g. maxcube_side length = 30 mm and
# volume_int_steps = 31. Also choose an odd number of steps so that the center step
# is super close to the center of the fluorescence volume.
#
# Provide lengths in meters, frequencies in Hz
# =============================================================================
if scantype == 'fullscan':
# this is the full scan, takes hours
[volume_laser_axis_int_steps,max_cube_side_length_laser_axis,
max_cube_side_length_transverse_axis,
laser_scan_width,laser_freq_step_size]= [41,16e-3,16e-3,1000*1e6,5e6] # 12 hours for rubidium with collimated beam.
elif scantype == 'test':
# should run in like a minute
[volume_laser_axis_int_steps,max_cube_side_length_laser_axis,
max_cube_side_length_transverse_axis,
laser_scan_width,laser_freq_step_size]= [7,16e-3,16e-3,1000*1e6,50e6] # 12 hours for rubidium with collimated beam.
# =============================================================================
# begin atutomated laser scan settings. This section sets up the laser scan steps
# and microcube steps. These settings do not need to be modified unless special
# modifications of the microcube / laser is needed
# =============================================================================
laser_freq_wrt_res_min = -laser_scan_width/2. # Hz. Laser scan starting point
laser_freq_wrt_res_max = laser_scan_width/2. # Hz. Laser scan end point
float_steps = np.ceil((laser_freq_wrt_res_max-laser_freq_wrt_res_min)/laser_freq_step_size)
laser_steps = int(float_steps + np.mod(float_steps,2)+1)
laser_freq_wrt_res = np.linspace(laser_freq_wrt_res_min,laser_freq_wrt_res_max,laser_steps,axis=0)
laser_axis_range = max_cube_side_length_laser_axis
transverse_axis_range = max_cube_side_length_transverse_axis # m
ten_pct_of_scans = int(laser_steps/10.) # for tracking laser scan progress.
x_steps = volume_laser_axis_int_steps
microcube_side_length = laser_axis_range/x_steps
dvol = microcube_side_length**3
x_start = (-1.)*0.499*laser_axis_range
y_start = (-1.)*0.499*transverse_axis_range
x_stop = .501*laser_axis_range
y_stop = .501*transverse_axis_range
# make sure odd number of steps so we get close to the origin
yz_steps = len(np.arange(y_start,y_stop,microcube_side_length))
yz_steps_definitely_even = yz_steps + np.mod(yz_steps,2)
if yz_steps_definitely_even == yz_steps:
y_start = (-1.)*0.499*(transverse_axis_range+microcube_side_length)
y_stop = .501*(transverse_axis_range+microcube_side_length)
yz_steps = len(np.arange(y_start,y_stop,microcube_side_length))
z_start = y_start+nozzle_to_intersxn
z_stop = y_stop + nozzle_to_intersxn
print('steps along x, y, z: %i, %i, %i\ntotal elements: %i\nmicrocube side length: %.3e meters'%(
x_steps,yz_steps,yz_steps,x_steps*yz_steps**2,microcube_side_length)
)
# =============================================================================
# end automated settings
# =============================================================================
def generate_microcube_coordinates(x_start, x_stop, x_steps,
                                   y_start, y_stop, y_steps,
                                   z_start, z_stop, z_steps):
    """Return flat x, y, z coordinate arrays for a regular 3-D grid.

    Each axis is sampled with an inclusive, evenly spaced set of points
    (``x_steps`` points from ``x_start`` to ``x_stop``, etc.); the three
    returned 1-D arrays jointly enumerate every microcube position.
    """
    axis_x = np.linspace(x_start, x_stop, x_steps)
    axis_y = np.linspace(y_start, y_stop, y_steps)
    axis_z = np.linspace(z_start, z_stop, z_steps)
    # indexing='ij' reproduces np.mgrid's axis ordering exactly.
    grid_x, grid_y, grid_z = np.meshgrid(axis_x, axis_y, axis_z,
                                         indexing='ij')
    return grid_x.ravel(), grid_y.ravel(), grid_z.ravel()
def plot_the_alphas(z, cos_alpha):
    """Diagnostic plots of alpha, the angle between atom position and laser.

    Saves '<species>-abf-beam-angles.png' with three panels: the percent
    deviation of alpha from pi/2, cos(alpha), and alpha in degrees, all
    scattered against z.  NOTE: this relies on pyplot's implicit
    "current axes" state, so the statement order matters -- each
    plt.xlabel/plt.ylabel call applies to the most recently created
    subplot.  plot_dims, font_size and species come from the enclosing
    scope.
    """
    alpha = np.arccos(cos_alpha)
    alpha_degrees = alpha * 180. / np.pi
    # Deviation of alpha from pi/2, expressed as a percentage.
    alpha_dev_pitwo = (alpha - np.pi / 2) * 2 / np.pi * 100
    fig = plt.figure(figsize=np.multiply(plot_dims, 2))
    ax1 = plt.subplot2grid((2, 2), (0, 0),)
    ax1.scatter(z, alpha_dev_pitwo, marker='x', edgecolor='black', linewidth=0.15)
    ax1.tick_params(labelsize=font_size + 4)
    plt.ylabel(r'$\alpha - \pi/2$ (\%)', fontsize=font_size + 6)
    plt.xlabel(r'$z$\ (m)', fontsize=font_size + 6)
    ax2 = plt.subplot2grid((2, 2), (0, 1),)
    ax2.scatter(z, cos_alpha, marker='x', edgecolor='black', linewidth=0.15)
    ax2.tick_params(labelsize=font_size + 4)
    plt.ylabel(r'$\cos(\alpha)$', fontsize=font_size + 6)
    plt.xlabel(r'$z$\ (m)', fontsize=font_size + 6)
    ax3 = plt.subplot2grid((2, 2), (1, 1),)
    ax3.scatter(z, alpha_degrees, marker='x', edgecolor='black', linewidth=0.15)
    ax3.tick_params(labelsize=font_size + 4)
    plt.ylabel(r'$\alpha$\ (degrees)', fontsize=font_size + 6)
    plt.xlabel(r'$z$\ (m)', fontsize=font_size + 6)
    plt.tight_layout()
    plt.savefig(''.join((species, '-abf-beam-angles.png')))
    plt.close(fig)
def calculate_photon_atom_yield(jm, speed_z, pos, this_excitation_rate_everywhere, solid_angle):
    """Integrate the photon-per-atom yield (eta) over all microcubes.

    Sums, per microcube, the angular-distribution weight divided by the
    axial transit factor, scaled by the detector geometry factor, the
    local excitation rate and the local solid angle.  det_y, abc.a_det
    and dvol are taken from the enclosing scope.
    """
    # Keep the same left-to-right factor ordering as the original
    # expression so the floating-point result is bit-identical.
    flux_weight = jm / (speed_z * (pos ** 2))
    detector_geometry = det_y ** 2 / (abc.a_det)
    integrand = flux_weight * detector_geometry * this_excitation_rate_everywhere * solid_angle * dvol
    return np.sum(integrand)
def calc_pow_detector(speed_z, flux, this_excitation_rate_everywhere, solid_angle):
    """Integrate the optical power incident on the photodetector.

    Converts the local scattering into emitted photon power
    (h * nu_o per photon, per microcube dwell time ~ 1/speed_z) and
    weights by the detector solid angle.  dvol, abc.h_Planck and nu_o
    come from the enclosing scope.
    """
    # Factor ordering preserved for bit-identical float results.
    photon_energy_per_cube = dvol * abc.h_Planck * nu_o / speed_z
    local_scatter = flux / (4. * np.pi) * this_excitation_rate_everywhere * solid_angle
    return np.sum(photon_energy_per_cube * local_scatter)
print('---\nlaser scan from %.1e MHz to %.1e Mhz wrt nu_o \nstep size %.1e Mhz, %i steps total'%(
laser_freq_wrt_res_min*1e-6,laser_freq_wrt_res_max*1e-6,laser_freq_step_size*1e-6,laser_steps
))
print('\nfluorescence integral volume = %.3e m by %.3e m by %.3e m prism\n microcube side length = %.3e m'%(
laser_axis_range,transverse_axis_range,transverse_axis_range,microcube_side_length)
)
# most probable speed (m/s)
mp_speed = abf.most_probable_speed(oven_temp,mass)
# x, y, z arrays of all the microcube positions
x,y,z = generate_microcube_coordinates(x_start,x_stop,x_steps,y_start,y_stop,yz_steps,z_start,z_stop,yz_steps)
# scalar distance from origin
pos = np.sqrt(x**2 + y**2 + z**2)
# index of x=0 position (should be very close to 0)
xmid = int(np.ceil(len(x)/2))
# angle between beam axis and position we're integrating over
theta = np.arctan(np.sqrt((x**2+y**2))/(z))
cos_theta = z/pos
# angle between microcube vec and laser vec, defined to be along x
cos_alpha = (x/pos)
# angular distribution value. I've been using molecular flow limit,
# Erin will probably want to update with an intermediate flow
# distribution.
if angdist == 'cosine':
jm = cos_theta
else:
jm = abf.jm_dist_elwise(nozzle_ratio,theta)
# z component of most probable speed
speed_z = cos_theta*mp_speed
# axial position relative to laser axis
rho = np.sqrt(y**2+(z-nozzle_to_intersxn)**2)
fluorescence_center_index = np.argmin(np.sqrt(x**2 + y**2 + (z-nozzle_to_intersxn)**2))
# this function generates a few subplots of the angle between the atom
# velocity and laser axis, gives an idea of the effect on Doppler broadening
plot_the_alphas(z,cos_alpha)
# numerical integration of flat circular detector area
solid_angle = (
abf.solid_angle_circular_detector_elwise(det_radius,
det_y,nozzle_to_intersxn,x,y,z)
)
# the flux of atoms exiting the oven
flux = abf.atom_flux_elwise(oven_temp,jm,nozzle_radius,pos,mass,vp_coeff,speed_z)
# This is the start of the laser scan. The first value is outside a loop
# to initalize the arrays.
print('---\nstarting laser scan\n')
print('%.2e MHz , time %s'%(laser_freq_wrt_res[0]*1e-6,datetime.now()))
def the_laser_scan():
    """Step the laser across laser_freq_wrt_res and accumulate the results.

    Returns three 1-D arrays aligned with laser_freq_wrt_res:
    excitation_rate (sampled at the fluorescence-centre microcube), eta
    (photons emitted per atom) and pow_detector (power on the detector).
    All physics inputs are closed over from the enclosing scope.

    NOTE(review): the arrays grow via np.append inside the loop, which
    copies on every step (quadratic overall); acceptable for the scan
    sizes used here but a preallocated array would be faster.
    """
    # The first two frequency steps are evaluated outside the loop to
    # seed the accumulator arrays.
    excitation_rate_everywhere = np.append([abf.excitation_rate(laser_freq_wrt_res[0], nu_o, A_Einstein,
                                                                mass, oven_temp, cos_alpha, laser_power, rho, laser_radius, f_a)],
                                           [abf.excitation_rate(laser_freq_wrt_res[1], nu_o, A_Einstein,
                                                                mass, oven_temp, cos_alpha, laser_power, rho, laser_radius, f_a)], axis=0)
    # the excitation_rate
    transpose_excitation_rate_everywhere = np.transpose(excitation_rate_everywhere)
    # These are the excitation rates at the fluorescence origin, so they
    # should be maximum values.  NOTE(review): the leading [:] is a no-op
    # copy; this just selects the row at fluorescence_center_index.
    excitation_rate = transpose_excitation_rate_everywhere[:][fluorescence_center_index]
    # eta, the number of photons emitted per atom (one value per laser step)
    eta = np.sum(
        (jm / (speed_z * (pos ** 2))) * (det_y ** 2 / (abc.a_det)) * excitation_rate_everywhere * solid_angle * dvol,
        axis=1
    )
    # the power incident on the photodetector (one value per laser step)
    pow_detector = np.sum(dvol * abc.h_Planck * nu_o / speed_z * (
        flux / (4. * np.pi) * excitation_rate_everywhere * solid_angle),
        axis=1)
    scan_count = 1
    # The rest of the laser frequency steps are handled in a FOR loop.
    for this_laser_freq in laser_freq_wrt_res[2:]:
        scan_count = scan_count + 1
        # progress report roughly every 10% of the scan
        if np.mod(scan_count, ten_pct_of_scans) == 0:
            print('%.2e MHz , time %s' % (this_laser_freq * 1e-6, datetime.now()))
        # the excitation rate, also equal to fluorescence rate in the
        # weak pumping limit
        this_excitation_rate_everywhere = abf.excitation_rate(this_laser_freq,
                                                              nu_o, A_Einstein, mass, oven_temp, cos_alpha, laser_power, rho, laser_radius, f_a)
        # for the datafile, we want to see the peak excitation rate,
        # i.e. centre of the fluorescence region
        excitation_rate = np.append(excitation_rate,
                                    this_excitation_rate_everywhere[fluorescence_center_index])
        # photons per atom eta
        eta = np.append(eta, [calculate_photon_atom_yield(jm, speed_z, pos,
                                                          this_excitation_rate_everywhere,
                                                          solid_angle)], axis=0)
        # power incident on the detector
        pow_detector = np.append(pow_detector,
                                 [calc_pow_detector(speed_z, flux, this_excitation_rate_everywhere,
                                                    solid_angle)], axis=0)
    # END laser scan
    return excitation_rate, eta, pow_detector
excitation_rate,eta,pow_detector, = the_laser_scan()
# voltage on the photodetector
pd_voltage = abf.voltage_on_pd(pow_detector,r_m,abc.g)
# number density of the oven
o_num_density = abf.oven_number_density(vp_coeff,oven_temp)
# the rate of atoms exiting the oven
atom_exit_rate = abf.oven_atom_exit_rate(o_num_density,
speed_z,nozzle_radius)
max_atom_exit_rate = np.max(atom_exit_rate)
origin_solid_angle = solid_angle[xmid]
# =============================================================================
# Calculate how long program took.
# =============================================================================
end_time = datetime.now()
print("\n Finished calculations. Time is %s\n ****************** \n"%end_time)
total_time = end_time - start_time
total_time_days = total_time.days
total_time_minutes = float(total_time.seconds/60.)
total_time_seconds = float(total_time.seconds)
print("\n Calculations took %i days and %.1f minutes (%.1f seconds) to run.\n "%(
total_time_days,total_time_minutes,total_time_seconds))
save_timestamp = datetime.now().isoformat(timespec='seconds',sep='-').replace(":","")
# These are some useful plots that can be used to check that the
# calculated excitation rate makes sense.
fig, ax = plt.subplots(figsize=plot_dims)
plt.scatter(laser_freq_wrt_res*1e-6,excitation_rate,marker='o',facecolors='none',
edgecolor="red")
plt.title('%s abf sim excitation rate, $2r/L=$ %.3f'%(species,nozzle_ratio))
plt.ticklabel_format(axis='y', style='sci', scilimits=(0, 0))
plt.ylabel(r'$R$ (s$^{-1}$)')
plt.xlabel(r'laser freq $- \nu_0$ (MHz)')
plt.tight_layout()
plt.savefig(''.join((save_timestamp,'-',species,'-abfsim-R-exc.pdf')))
plt.close(fig)
fig, ax = plt.subplots(figsize=plot_dims)
plt.scatter(laser_freq_wrt_res*1e-6,pow_detector,marker='o',facecolors='none',
edgecolor="red")
plt.title('%s abf sim photodetector power, $2r/L=$ %.3f'%(species,nozzle_ratio))
plt.ticklabel_format(axis='y', style='sci', scilimits=(0, 0))
plt.ylabel(r'$P_{\gamma}$ (W)')
plt.xlabel(r'laser freq $- \nu_0$ (MHz)')
plt.tight_layout()
plt.savefig(''.join((save_timestamp,'-',species,'-abfsim-power.pdf')))
plt.close(fig)
fig, ax = plt.subplots(figsize=plot_dims)
plt.scatter(laser_freq_wrt_res*1e-6,eta,marker='o',facecolors='none',
edgecolor="red")
plt.title('%s abf sim photon-atom yield, $2r/L=$ %.3f'%(species,nozzle_ratio))
plt.ylabel(r'$\eta$ (photons / atom)')
plt.xlabel(r'laser freq $- \nu_0$ (MHz)')
plt.tight_layout()
plt.savefig(''.join((save_timestamp,'-',species,'-abfsim-eta-yield.pdf')))
plt.close(fig)
# Save tab-delimited arrays of important | |
(logging.level): Default setting is logging.INFO
Raises:
GPUdbException: If the value passed is not one of logging.INFO
or logging.DEBUG etc.
"""
try:
self._logger.setLevel(value)
except (ValueError, TypeError, Exception) as ex:
raise GPUdbException("Invalid log level: '{}'".format(str(ex)))
class _TableEvent(enum.Enum):
    """ Enum for table monitor event types.

    This is an internal enum used for two purposes:

    1. Generating an internal operation list by parsing the callbacks
       passed to the Client class. The operations used for creating the
       table monitors are INSERT, UPDATE and DELETE. The other two are
       used for callbacks related to dropped and altered table
       notifications.
    2. Creating the required table monitor of the right type by
       traversing the operation list.

    This is not meant to be used by the users of this API.
    """
    INSERT = 1
    """
    int: Indicates an INSERT event has occurred
    """
    UPDATE = 2
    """
    int: Indicates an UPDATE event has occurred
    """
    DELETE = 3
    """
    int: Indicates a DELETE event has occurred
    """
    TABLE_ALTERED = 4
    """
    int: Indicates a table has been altered
    """
    TABLE_DROPPED = 5
    """
    int: Indicates a table has been dropped
    """
# End Client class
class Options(object):
    """
    Encapsulates the various options used to create a table monitor. The
    class is initialized with sensible defaults which can be overridden by
    the users of the class. The following options are supported:

    * **inactivity_timeout**
        This option controls the time interval to set the timeout to
        determine when the program would do idle time processing like
        checking for the table existence, server HA failover, etc., if
        needed. It is specified in minutes as a float so that seconds can
        be accommodated as well. The default value is 20 minutes, which is
        converted internally to milliseconds.

    Example usage::

        options = GPUdbTableMonitor.Options(_dict=dict(
            inactivity_timeout=0.1
        ))
    """
    # Dict key under which the option is exposed (also used by as_json()).
    __inactivity_timeout = 'inactivity_timeout'

    # Default is 20 minutes, stored in milliseconds.
    __INACTIVITY_TIMEOUT_DEFAULT = 20 * 60 * 1000

    _supported_options = [
        __inactivity_timeout
    ]

    @staticmethod
    def default():
        """Create a default set of options for :class:`.Client`

        Returns:
            :class:`GPUdbTableMonitor.Options` instance
        """
        return GPUdbTableMonitor.Options()

    def __init__(self, _dict=None):
        """ Constructor for :class:`GPUdbTableMonitor.Options` class

        Parameters:
            _dict (dict)
                Optional dictionary with options already loaded. Value can
                be None; if it is None suitable sensible defaults will be
                set internally.

        Returns:
            A :class:`GPUdbTableMonitor.Options` object.

        Raises:
            GPUdbException: If ``_dict`` is not a dict, or contains an
                unsupported option key, or an option value fails the
                property setter's validation.
        """
        # Set default value: 20 minutes, in milliseconds.
        self._inactivity_timeout = self.__INACTIVITY_TIMEOUT_DEFAULT

        if _dict is None:
            return  # nothing to do

        if not isinstance(_dict, dict):
            raise GPUdbException(
                "Argument '_dict' must be a dict; given '%s'."
                % type(_dict))

        # Else, _dict is a dict; extract options from within it.
        # Check for invalid options first.
        unsupported_options = set(_dict.keys()).difference(
            self._supported_options)
        if unsupported_options:
            raise GPUdbException(
                "Invalid options: %s" % unsupported_options)

        # Extract and save each option; setattr routes through the
        # property setters, which validate the values.
        for (key, val) in _dict.items():
            setattr(self, key, val)
    # end __init__

    @property
    def inactivity_timeout(self):
        """This is the getter for the property `inactivity_timeout`.

        The option is specified in minutes as a float so that seconds can
        be accommodated. It indicates a timeout interval after which, if
        no notification is received from the server table monitors, the
        program will check whether everything is alright (e.g. whether the
        table is still there) and in the process will automatically
        trigger HA failover if needed.

        Returns:
            The timeout value stored internally in milliseconds (default:
            20 minutes converted to milliseconds).
        """
        return self._inactivity_timeout

    @inactivity_timeout.setter
    def inactivity_timeout(self, val):
        """This is the setter for the property `inactivity_timeout`.

        Parameters:
            val (float)
                The timeout in minutes; converted internally to float and
                stored in milliseconds.

        Raises:
            GPUdbException: If ``val`` is not numeric or is <= 0.
        """
        try:
            value = float(val)
        except (TypeError, ValueError):
            raise GPUdbException(
                "Property 'inactivity_timeout' must be numeric; "
                "given {}".format(str(type(val))))

        # Must be > 0
        if value <= 0:
            raise GPUdbException(
                "Property 'inactivity_timeout' must be "
                "greater than 0; given {}".format(str(value)))

        # BUG FIX: store the validated float, not the raw argument.
        # Previously the raw `val` was stored, so a numeric string such
        # as "2" would be stored via string repetition ("2" * 60 * 1000)
        # instead of as the number of milliseconds.
        self._inactivity_timeout = value * 60 * 1000

    def as_json(self):
        """Return the options as a JSON-compatible dict."""
        result = {}
        if self.__inactivity_timeout is not None:
            result[self.__inactivity_timeout] = self._inactivity_timeout
        return result
    # end as_json

    def as_dict(self):
        """Return the options as a dict """
        return self.as_json()
    # end as_dict
# End Options class
class Callback(object):
"""Use this class to indicate which type of table monitor is desired.
When the :class:`.Client` is constructed, a list of objects of this
class has to be supplied to the constructor of the class.
If the list of callbacks is empty or the list does not contain at least
one of the callbacks of types (:class:`.Callback.Type`)
``INSERT_DECODED``, ``INSERT_RAW``, ``DELETED``, or ``UPDATED``, no
table monitor would be created internally and the program would raise a
:class:`gpudb.GPUdbException` and exit. So, a list of objects of this
class is mandatory for the table monitor to function.
An example of using this class and passing on to the constructor of
:class:`.Client` is as follows::
class GPUdbTableMonitorExample(GPUdbTableMonitor.Client):
def __init__(self, db, table_name, options=None):
# Create the list of callbacks objects which are to be passed to the
# GPUdbTableMonitor.Client class constructor
# This example shows only two callbacks being created so
# only an insert type table monitor will be created. For other
# types callback objects could be created similarly to receive
# notifications about other events.
callbacks = [
GPUdbTableMonitor.Callback(GPUdbTableMonitor.Callback.Type.INSERT_RAW,
self.on_insert_raw,
self.on_error),
GPUdbTableMonitor.Callback(GPUdbTableMonitor.Callback.Type.INSERT_DECODED,
self.on_insert_decoded,
self.on_error,
GPUdbTableMonitor.Callback.InsertDecodedOptions( GPUdbTableMonitor.Callback.InsertDecodedOptions.DecodeFailureMode.ABORT ))
]
# Invoke the base class constructor and pass in the list of callback
# objects created earlier. This invocation is mandatory for the table
# monitor to be actually functional.
super(GPUdbTableMonitorExample, self).__init__(
db, table_name,
callbacks, options=options)
"""
def __init__(
self,
callback_type,
event_callback,
error_callback=None,
event_options=None,
):
"""
Constructor for this class.
Parameters:
callback_type (:class:`.Callback.Type`)
This indicates the type of the table monitor this callback
will be used for. It must be of the type
:class:`.Callback.Type` enum.
event_callback (method reference)
This is to be called for any event related to an operation
on a table like insert, update, delete etc. As soon as such
an event is observed this method will be called.
This method can have only one parameter. For each
table monitor event (`callback_type`), the parameter would
be different. The method name has got no significance as
long as the signature is as given below::
def method_name(param):
# param - Could be a (dict|bytes|int|str)
# depending on the :attr:`callback_type`
# Processing Code follows ....
The method thus defined does not return anything.
The following table describes the parameter types which
correspond to each of the `callback_type`:
== ================== ==============
NO callback_type Parameter Type
== ================== ==============
1 ``INSERT_DECODED`` ``dict``
2 ``INSERT_RAW`` ``bytes``
3 ``DELETED`` ``int``
4 ``UPDATED`` ``int``
5 ``TABLE_ALTERED`` ``str``
6 ``TABLE_DROPPED`` ``str``
== ================== ==============
error_callback (method reference)
Optional parameter.
This will be called in case of any operational error that
typically could manifest in the form of some exception
(:class:`gpudb.GPUdbException`).
The name of the method does not matter. It must have only
one argument of type ``str``. The argument to this method
will contain information related to the error that
occurred; often details about any exception that was
raised.
The signature of this method has to be::
def method_name(param):
# param - str
# code here ...
event_options (:class:`.Callback.Options`)
Optional parameter.
Options applicable to a specific callback type, e.g.,
insert, delete, update etc. Right now, the only option
applicable is for the callback handling insertion of records
where the record information is decoded and sent to the
callback by the table monitor.
"""
if isinstance(callback_type, GPUdbTableMonitor.Callback.Type):
self.__type = callback_type
else:
raise GPUdbException(
"Argument type must be of type "
"Callback.Type enum ...")
if not self.__check_whether_function(error_callback):
raise GPUdbException("'error_callback' passed in is not a "
"valid method reference")
if not self.__check_whether_function(event_callback):
raise GPUdbException("'event_callback' passed in is not a "
"valid method reference")
self.__event_callback = event_callback
self.__error_callback = error_callback
if event_options is not None and not isinstance(event_options, GPUdbTableMonitor.Callback.Options):
raise GPUdbException("event_options must be of type class 'Options'"
" or a | |
1))
Turbine.servcall(self, dae)
def init1(self, dae):
    """Initialize WTG4DC states and algebraic variables.

    Computes an initial rotor speed and pitch angle from the dispatched
    power, then solves per-unit a 4x4 nonlinear system for
    (vsd, vsq, isd, isq) with Newton-Raphson, and finally writes the
    results into the DAE arrays and removes the DC generator.
    """
    self.servcall(dae)
    mva = self.system.mva
    self.p0 = mul(self.p0, 1)
    # remember the initial DC voltage difference for the controller offset
    self.v120 = self.v12
    self.toMb = div(mva, self.Sn)  # to machine base
    self.toSb = self.Sn / mva  # to system base
    rs = matrix(self.rs)
    xd = matrix(self.xd)
    xq = matrix(self.xq)
    psip = matrix(self.psip)
    Pg = matrix(self.p0)
    # rotor speed: 1 pu at/above rated power, 0.5 pu at zero/negative
    # power, and a linear interpolation in between (elementwise masks)
    omega = 1 * (ageb(mva * Pg, self.Sn)) + \
        mul(0.5 + 0.5 * mul(Pg, self.toMb),
            aandb(agtb(Pg, 0), altb(mva * Pg, self.Sn))) + \
        0.5 * (aleb(mva * Pg, 0))
    # pitch angle from the speed deviation (rounded to 1e-3), floored at 0
    theta = mul(self.Kp, mround(1000 * (omega - 1)) / 1000)
    theta = mmax(theta, 0)
    # variables to initialize iteratively: vsd, vsq, isd, isq
    vsd = matrix(0.8, (self.n, 1))
    vsq = matrix(0.6, (self.n, 1))
    isd = matrix(self.p0 / 2)
    isq = matrix(self.p0 / 2)
    for i in range(self.n):
        # Newton-Raphson on x = [vsd, vsq, isd, isq] for unit i
        x = matrix([vsd[i], vsq[i], isd[i], isq[i]])
        mis = ones(4, 1)
        jac = sparse(matrix(0, (4, 4), 'd'))
        # NOTE: `iter` shadows the builtin; kept as-is here
        iter = 0
        while (max(abs(mis))) > self.system.tds.config.tol:
            if iter > 40:
                logger.error(
                    'Initialization of WTG4DC <{}> failed.'.format(
                        self.name[i]))
                break
            # residuals: power balance, torque/power relation, and the
            # d- and q-axis stator voltage equations
            mis[0] = x[0] * x[2] + x[1] * x[3] - Pg[i]
            mis[1] = omega[i] * x[3] * (psip[i] - xd[i] * x[2]) - Pg[i]
            mis[2] = -x[0] - rs[i] * x[2] + omega[i] * xq[i] * x[3]
            mis[3] = x[1] + rs[i] * x[3] + omega[i] * xd[i] * x[2] - \
                omega[i] * psip[i]
            # analytic Jacobian of the residuals above
            jac[0, 0] = x[2]
            jac[0, 1] = x[3]
            jac[0, 2] = x[0]
            jac[0, 3] = x[1]
            jac[1, 2] = omega[i] * x[3] * (-xd[i])
            jac[1, 3] = omega[i] * (psip[i] + (-xd[i]) * x[2])
            jac[2, 0] = -1
            jac[2, 2] = -rs[i]
            jac[2, 3] = omega[i] * xq[i]
            jac[3, 1] = 1
            jac[3, 2] = omega[i] * xd[i]
            jac[3, 3] = rs[i]
            # linsolve overwrites `mis` with the Newton step in place
            linsolve(jac, mis)
            x -= mis
            iter += 1
        vsd[i] = x[0]
        vsq[i] = x[1]
        isd[i] = x[2]
        isq[i] = x[3]
    # write the converged values into the DAE vectors
    dae.y[self.isd] = isd
    dae.y[self.vsd] = vsd
    dae.y[self.vsq] = vsq
    dae.x[self.isq] = isq
    dae.x[self.omega_m] = mul(self.u0, omega)
    dae.x[self.theta_p] = mul(self.u0, theta)
    dae.y[self.pwa] = mmax(mmin(2 * dae.x[self.omega_m] - 1, 1), 0)
    # cache the initial stator powers and electrical torque
    self.ps0 = mul(vsd, isd) + mul(vsq, isq)
    self.qs0 = mul(vsq, isd) - mul(vsd, isq)
    self.te0 = mul(isq, psip + mul(xq - xd, isd))
    dae.y[self.te] = self.te0
    dae.y[self.ps] = self.ps0
    MPPT.init1(self, dae)
    Turbine.init1(self, dae)
    # the WTG replaces the static DC generator used for power flow
    self.system.rmgen(self.dcgen)
def gcall(self, dae):
    """Assemble the algebraic residual equations g(x, y).

    Covers reactive power balance, the d/q stator voltage equations,
    stator power, electrical torque, and the DC-side power injections
    at nodes v1/v2.
    """
    Turbine.gcall(self, dae)
    MPPT.gcall(self, dae)
    # reactive power balance: qs0 = vsq*isd - vsd*isq
    dae.g[self.isd] = -self.qs0 + mul(dae.y[self.isd],
                                      dae.y[self.vsq]) - mul(
        dae.x[self.isq], dae.y[self.vsd])
    # d-axis stator voltage equation
    dae.g[self.vsd] = -dae.y[self.vsd] - mul(
        dae.y[self.isd], self.rs) + mul(dae.x[self.isq],
                                        dae.x[self.omega_m], self.xq)
    # q-axis stator voltage equation
    dae.g[self.vsq] = -dae.y[self.vsq] - mul(
        dae.x[self.isq], self.rs) - mul(
        dae.x[self.omega_m],
        -self.psip + mul(dae.y[self.isd], self.xd))
    # stator active power ps = vsd*isd + vsq*isq
    dae.g[self.ps] = -dae.y[self.ps] + mul(
        dae.y[self.isd], dae.y[self.vsd]) + mul(dae.x[self.isq],
                                                dae.y[self.vsq])
    # electrical torque te = isq*(psip + (xq - xd)*isd)
    dae.g[self.te] = -dae.y[self.te] + mul(
        dae.x[self.isq],
        self.psip + mul(dae.y[self.isd], self.xq - self.xd))
    # DC current injections +-ps/(v1 - v2) at the two DC nodes
    dae.g += spmatrix(
        -mul(dae.y[self.ps], div(1, dae.y[self.v1] - dae.y[self.v2])),
        self.v1, [0] * self.n, (dae.m, 1), 'd')
    dae.g += spmatrix(
        mul(dae.y[self.ps], div(1, dae.y[self.v1] - dae.y[self.v2])),
        self.v2, [0] * self.n, (dae.m, 1), 'd')
def fcall(self, dae):
    """Assemble the differential equations f(x, y).

    Defines the rotor speed (swing) equation and the q-axis current
    control equation with DC-voltage and frequency-derivative feedback.
    """
    # NOTE(review): this calls Turbine.gcall, not Turbine.fcall -- looks
    # like a possible copy-paste slip; confirm against the Turbine API.
    Turbine.gcall(self, dae)
    # swing equation: d(omega_m)/dt = (pw/omega_m - te) / (2H)
    dae.f[self.omega_m] = mul(
        0.5, div(1, self.H),
        -dae.y[self.te] + mul(dae.y[self.pw], div(1, dae.x[self.omega_m])))
    # dae.f[self.isq] = mul(
    #     div(1, self.Teq), -dae.x[self.isq] + mul(
    #         self.toSb, dae.y[self.pwa] - mul(self.Kdc, self.v12),
    #         div(1, dae.x[self.omega_m]),
    #         div(1, self.psip - mul(dae.y[self.isd], self.xd))))
    # q-axis current lag: the reference combines the MPPT power order
    # with COI frequency-derivative (Kcoi, Ki) and DC-voltage-deviation
    # (Kdc, relative to v120) feedback terms
    dae.f[self.isq] = mul(
        div(1, self.Teq), -dae.x[self.isq] + mul(
            self.toSb, div(1, dae.x[self.omega_m]),
            div(1, self.psip - mul(dae.y[self.isd], self.xd)),
            dae.y[self.pwa] - mul(self.Kcoi, dae.y[self.dwdt_coi]) - mul(
                self.Kdc,
                (dae.y[self.v1] - dae.y[self.v2]) - self.v120) - mul(
                self.Ki, dae.y[self.dwdt])))
@property
def v12(self):
    """DC voltage difference across the converter, ``y[v1] - y[v2]``."""
    y_vals = self.system.dae.y
    return y_vals[self.v1] - y_vals[self.v2]
def gycall(self, dae):
    """Assemble the state-dependent Jacobian entries dG/dy.

    Each add_jac(Gy, value, row, col) call is the partial derivative of
    the residual at `row` (see gcall) with respect to the algebraic
    variable at `col`.
    """
    Turbine.gycall(self, dae)
    MPPT.gycall(self, dae)
    # reactive power balance row
    dae.add_jac(Gy, -dae.x[self.isq], self.isd, self.vsd)
    dae.add_jac(Gy, dae.y[self.vsq], self.isd, self.isd)
    dae.add_jac(Gy, dae.y[self.isd], self.isd, self.vsq)
    # q-axis voltage equation row
    dae.add_jac(Gy, -mul(dae.x[self.omega_m], self.xd), self.vsq, self.isd)
    # stator power row
    dae.add_jac(Gy, dae.y[self.isd], self.ps, self.vsd)
    dae.add_jac(Gy, dae.y[self.vsd], self.ps, self.isd)
    dae.add_jac(Gy, dae.x[self.isq], self.ps, self.vsq)
    # torque row
    dae.add_jac(Gy, mul(dae.x[self.isq], self.xq - self.xd), self.te,
                self.isd)
    # DC injection rows: derivatives of +-ps/(v1 - v2) w.r.t. ps, v1, v2
    dae.add_jac(Gy, -div(1, dae.y[self.v1] - dae.y[self.v2]), self.v1,
                self.ps)
    dae.add_jac(
        Gy, -mul(dae.y[self.ps], (dae.y[self.v1] - dae.y[self.v2])**-2),
        self.v1, self.v2)
    dae.add_jac(Gy,
                mul(dae.y[self.ps], (dae.y[self.v1] - dae.y[self.v2])**-2),
                self.v1, self.v1)
    dae.add_jac(Gy, div(1, dae.y[self.v1] - dae.y[self.v2]), self.v2,
                self.ps)
    dae.add_jac(Gy,
                mul(dae.y[self.ps], (dae.y[self.v1] - dae.y[self.v2])**-2),
                self.v2, self.v2)
    dae.add_jac(
        Gy, -mul(dae.y[self.ps], (dae.y[self.v1] - dae.y[self.v2])**-2),
        self.v2, self.v1)
def fxcall(self, dae):
    """Assemble the state-dependent Jacobian entries dG/dx, dF/dx, dF/dy.

    Each add_jac(M, value, row, col) is a partial derivative of the
    equation at `row` (gcall residuals for Gx; fcall derivatives for
    Fx/Fy) with respect to the variable at `col`.
    """
    # NOTE(review): calls Turbine.jac0 here (fxcall would be the
    # symmetric choice) -- confirm against the Turbine API.
    Turbine.jac0(self, dae)
    # dG/dx entries for the algebraic residuals
    dae.add_jac(Gx, -dae.y[self.vsd], self.isd, self.isq)
    dae.add_jac(Gx, mul(dae.x[self.isq], self.xq), self.vsd, self.omega_m)
    dae.add_jac(Gx, mul(dae.x[self.omega_m], self.xq), self.vsd, self.isq)
    dae.add_jac(Gx, self.psip - mul(dae.y[self.isd], self.xd), self.vsq,
                self.omega_m)
    dae.add_jac(Gx, dae.y[self.vsq], self.ps, self.isq)
    dae.add_jac(Gx, self.psip + mul(dae.y[self.isd], self.xq - self.xd),
                self.te, self.isq)
    # swing equation: d(f_omega)/d(omega_m) = -pw/(2H omega_m^2)
    dae.add_jac(
        Fx,
        mul(-0.5, dae.y[self.pw], div(1, self.H), (dae.x[self.omega_m])
            ** -2), self.omega_m, self.omega_m)
    # isq equation: sensitivity to omega_m (chain rule on 1/omega_m)
    dae.add_jac(
        Fx, -mul(
            div(1, self.Teq), (dae.x[self.omega_m])**-2,
            div(1, self.psip - mul(dae.y[self.isd], self.xd)),
            dae.y[self.pwa] - mul(self.Kcoi, dae.y[self.dwdt_coi]) - mul(
                self.Kdc, dae.y[self.v1] - dae.y[self.v2]) - mul(
                self.Ki, dae.y[self.dwdt])), self.isq, self.omega_m)
    # swing equation: d(f_omega)/d(pw)
    dae.add_jac(Fy, mul(0.5, div(1, self.H), div(1, dae.x[self.omega_m])),
                self.omega_m, self.pw)
    # isq equation: sensitivities to v2, dwdt, pwa, v1
    dae.add_jac(
        Fy,
        mul(self.Kdc, div(1, self.Teq), div(1, dae.x[self.omega_m]),
            div(1, self.psip - mul(dae.y[self.isd], self.xd))), self.isq,
        self.v2)
    dae.add_jac(
        Fy, -mul(self.Ki, div(1, self.Teq), div(1, dae.x[self.omega_m]),
                 div(1, self.psip - mul(dae.y[self.isd], self.xd))),
        self.isq, self.dwdt)
    dae.add_jac(
        Fy,
        mul(
            div(1, self.Teq), div(1, dae.x[self.omega_m]),
            div(1, self.psip - mul(dae.y[self.isd], self.xd))), self.isq,
        self.pwa)
    dae.add_jac(
        Fy, -mul(self.Kdc, div(1, self.Teq), div(1, dae.x[self.omega_m]),
                 div(1, self.psip - mul(dae.y[self.isd], self.xd))),
        self.isq, self.v1)
    # isq equation: sensitivity to isd via the flux denominator
    dae.add_jac(
        Fy,
        mul(
            self.xd, div(1, self.Teq), div(1, dae.x[self.omega_m]),
            (self.psip - mul(dae.y[self.isd], self.xd))**-2,
            dae.y[self.pwa] - mul(self.Kcoi, dae.y[self.dwdt_coi]) - mul(
                self.Kdc, dae.y[self.v1] - dae.y[self.v2]) - mul(
                self.Ki, dae.y[self.dwdt])), self.isq, self.isd)
    # isq equation: sensitivity to the COI frequency derivative signal
    dae.add_jac(
        Fy, -mul(self.Kcoi, div(1, self.Teq), div(1, dae.x[self.omega_m]),
                 div(1, self.psip - mul(dae.y[self.isd], self.xd))),
        self.isq, self.dwdt_coi)
def jac0(self, dae):
    """Assemble the constant Jacobian entries (evaluated once)."""
    Turbine.jac0(self, dae)
    MPPT.jac0(self, dae)
    # constant dG/dy entries from the gcall residuals
    dae.add_jac(Gy0, -1, self.vsd, self.vsd)
    dae.add_jac(Gy0, -self.rs, self.vsd, self.isd)
    dae.add_jac(Gy0, -1, self.vsq, self.vsq)
    dae.add_jac(Gy0, -1, self.ps, self.ps)
    dae.add_jac(Gy0, -1, self.te, self.te)
    dae.add_jac(Gx0, -self.rs, self.vsq, self.isq)
    dae.add_jac(Fx0, -div(1, self.Teq), self.isq, self.isq)
    dae.add_jac(Fy0, mul(-0.5, div(1, self.H)), self.omega_m, self.te)
    # small diagonal perturbations -- presumably to keep the Jacobian
    # nonsingular when devices are offline; confirm intent
    dae.add_jac(Gy0, 1e-6, self.isd, self.isd)
    dae.add_jac(Gy0, 1e-6, self.vsd, self.vsd)
    dae.add_jac(Gy0, 1e-6, self.vsq, self.vsq)
    dae.add_jac(Gy0, 1e-6, self.ps, self.ps)
    dae.add_jac(Gy0, 1e-6, self.te, self.te)
class WTG3(ModelBase):
"""Wind turbine type III"""
def __init__(self, system, name):
    """Declare the WTG3 model: variables, parameters, defaults, metadata."""
    super().__init__(system, name)
    self._group = 'WTG'
    self._name = 'WTG3'
    # algebraic variable names
    self._algebs.extend([
        'isd', 'isq', 'vrd', 'vrq', 'vsd', 'vsq', 'vref', 'pwa', 'pw',
        'cp', 'lamb', 'ilamb'
    ])
    # LaTeX display names for states and algebraic variables
    self._fnamex.extend(['\\theta_p', '\\omega_m', 'I_{r, d}', 'I_{r, q}'])
    self._fnamey.extend([
        'I_{s, d}', 'I_{s, q}', 'V_{r, d}', 'V_{r, q}', 'V_{s, d}',
        'V_{s, q}', 'V_{ref}', 'P_{\\omega a}', 'P_w', 'c_p', '\\lambda',
        '\\frac{1}{\\lambda}', '\\omega_{ref}'
    ])
    self._mandatory.extend(['bus', 'gen', 'wind'])
    self._params.extend([
        'fn', 'Kp', 'nblade', 'ngen', 'npole', 'R', 'Tp', 'Ts', 'ngb', 'H',
        'rr', 'rs', 'xr', 'xs', 'xmu', 'Te', 'KV', 'pmax', 'pmin', 'qmax',
        'qmin', 'gammap', 'gammaq'
    ])
    # parameters converted between system/machine power bases
    self._powers.extend(['H', 'pmax', 'pmin', 'qmax', 'qmin'])
    # values computed at init time rather than read from data
    self._service.extend([
        'u0', 'vref0', 'irq_min', 'ird_min', 'phi', 'fn', 'ird_max', 'x0',
        'irq_max', 'pi', 'irq_off', 'mva_mega', 'x1'
    ])
    self._states.extend(['theta_p', 'omega_m', 'ird', 'irq'])
    self._times.extend(['Tp', 'Te'])
    # impedance-type parameters (converted on the machine base)
    self._z.extend(['rs', 'xs', 'rr', 'xr', 'xmu'])
    self._ac.update({'bus': ['a', 'v']})
    # default parameter values
    self._data.update({
        'fn': 60,
        'rs': 0.01,
        'xmu': 3,
        'R': 35,
        'ngb': 0.011235,
        'gammap': 1,
        'npole': 4,
        'qmin': -0.6,
        'KV': 10,
        'xr': 0.08,
        'Te': 0.01,
        'pmin': 0,
        'Ts': 1,
        'Sn': 40,
        'wind': 0,
        'gen': 0,
        'rr': 0.01,
        'pmax': 1.0,
        'gammaq': 1,
        'Kp': 10,
        'xs': 0.1,
        'H': 2,
        'Tp': 3.0,
        'qmax': 0.6,
        'nblade': 3,
        'bus': 0,
        'ngen': 40,
        'u': 1
    })
    # human-readable parameter descriptions
    self._descr.update({
        'fn': 'Base frequency',
        'rs': 'Stator resistance',
        'xmu': 'Magnetizing reactance',
        'R': 'Rotor radius',
        'pmax': 'Maximum active power',
        'gammap': 'Active power generation ratio',
        'npole': 'Number of poles',
        'qmin': 'Minimum reactive power',
        'KV': 'Voltage control gain',
        'xr': 'Rotor reactance',
        'Te': 'Power control time constant',
        'pmin': 'Minimum reactive power',
        'Ts': 'Speed control time constant',
        'wind': 'Wind time series idx',
        'gen': 'Static generator idx',
        'rr': 'Rotor resistance',
        'ngb': 'Gear box ratio',
        'gammaq': 'Reactive power generation ratio',
        'Kp': 'Pitch control gain',
        'xs': 'Stator reactance',
        'H': 'Machine rotor and turbine inertia constant',
        'Tp': 'Pitch control time constant',
        'qmax': 'Maximum active power',
        'nblade': 'Number of blades',
        'bus': 'Bus idx',
        'ngen': 'Number of generators'
    })
    # parameter units
    self._units.update({
        'fn': 'Hz',
        'rs': 'pu',
        'xmu': 'pu',
        'rr': 'pu',
        'R': 'm',
        'pmax': 'pu',
        'qmin': 'pu',
        'Kp': 'pu',
        'xs': 'pu',
        'qmax': 'pu',
        'H': 'MWs/MVA',
        'Tp': 's',
        'KV': 'pu',
        'Te': 's',
        'xr': 'pu',
        'pmin': 'pu'
    })
    # which model callbacks this class implements
    self.calls.update({
        'init1': True,
        'gycall': True,
        'fxcall': True,
        'fcall': True,
        'gcall': True,
        'jac0': True
    })
    self._init()
def servcall(self, dae):
self.copy_data_ext('StaticGen', 'u', 'ugen', self.gen)
self.copy_data_ext('Bus', 'Pg', 'p0', self.bus)
self.copy_data_ext('Bus', 'Qg', 'q0', self.bus)
self.copy_data_ext('Wind', 'vw', 'vw', self.wind)
self.copy_data_ext('Wind', 'rho', 'rho', self.wind)
self.copy_data_ext('Wind', 'Vwn', 'Vwn', self.wind)
self.vref0 = dae.y[self.v]
self.x0 = self.xmu + self.xs
self.x1 = | |
<reponame>lygztq/gomoku
# coding=utf-8
from __future__ import print_function
import numpy as np
class Board(object):
"""Board class for training.
Attributes:
width: The width of board.
height: The height of board.
numberToWin: How many stones need on a line to win
"""
kPlayerWhite = 0
kPlayerBlack = 1
kEmpty = -1
kStoneChar = {
1: '@',
0: 'O',
-1: '+'
}
def __init__(self, **kwargs):
"""
@param kwargs: The dictionary of args
width: The width of board
height: The height of board
numberToWin: How many stones need on a line to win
"""
self.__width = int(kwargs.get('width', 15))
self.__height = int(kwargs.get('height', 15))
self.numberToWin = int(kwargs.get('numberToWin', 5))
# states: board states stored as dictionary
# key: moves as location on the board
# value: player as pieces type
self.initBoard()
def initBoard(self, start_player=None):
# Do some check
if start_player == None:
start_player = Board.kPlayerBlack
if self.__width < self.numberToWin or self.__height < self.numberToWin:
raise Exception("Board width({}) or height({}) can not be less than {}".format(
self.__width, self.__height, self.numberToWin))
self.__current_player = start_player
self.availables = list(
range(self.__width * self.__height)) # Valid moves
self.moved = [] # Moves that already have stone on it
self.states = {}
self.__last_move = None # Last position
def isValidMove(self, move):
if not isinstance(move, int):
return False
if move < 0 or move >= self.__width * self.__height:
return False
if move in self.moved:
return False
return True
def moveToLocation(self, move):
"""
Two different types of index: move and location
move = h_index * width + w_index
location = [h_index, w_index]
NOTE: If input move is None or invalid, this method will return None
"""
if move is None:
return None
if move < 0 or move >= self.__width * self.__height:
return None
h_index = move // self.__width
w_index = move % self.__width
return [h_index, w_index]
def locationToMove(self, location):
if location is None:
return None
if len(location) != 2:
return None
h_index, w_index = location
move = h_index * self.__width + w_index
if move < 0 or move >= self.__width * self.__height:
return None
return move
def __changePlayer(self):
self.__current_player = 1 - self.__current_player
def play(self, move):
if move in self.moved:
return False
self.states[move] = self.__current_player
self.availables.remove(move)
self.moved.append(move)
self.__changePlayer()
self.__last_move = move
return True
def undo(self):
if not self.moved:
return False
del self.states[self.__last_move]
self.availables.append(self.__last_move)
self.moved = self.moved[:-1]
self.__changePlayer()
if self.moved:
self.__last_move = self.moved[-1]
else:
self.__last_move = None
return True
    def currentState(self):
        """
        Return the board state from the perspective of the current player.
        state shape: 4 * height * width
        board_state[0]: current board state with only current player's stones
        board_state[1]: current board state with only opponent's stones
        board_state[2]: only one stone, indicating the last move (the opponent made this move).
        board_state[3]: indicates the player to play, 0 for white, 1 for black
        """
        board_state = np.zeros((4, self.__height, self.__width))
        if self.states:  # if self.states is not empty
            # zip(*items) separates the dict into parallel (moves, players)
            # tuples; np.array turns them into a 2 x n integer array that is
            # unpacked row-wise.
            moves, players = np.array(list(zip(*self.states.items())))
            curr_moves = moves[players == self.__current_player]
            oppo_moves = moves[players != self.__current_player]
            # Fancy-indexed scatter: set (move // width, move % width) to 1
            # on the respective plane.
            board_state[0][curr_moves // self.__width,
                           curr_moves % self.__width] = 1
            board_state[1][oppo_moves // self.__width,
                           oppo_moves % self.__width] = 1
            # states is non-empty, so __last_move is a valid flat index here.
            board_state[2][self.__last_move // self.__width,
                           self.__last_move % self.__width] = 1
        # Plane 3 is constant over the board: all ones iff black is to move.
        if self.__current_player == Board.kPlayerBlack:
            board_state[3] += 1
        return board_state
def fastGetWinner(self):
"""
If the game is plain sailing, i.e. the only operation is play stone and remove stone from board,
then the last move will end the game, and only the last move can determine the winner.
"""
if len(self.moved) < 2*self.numberToWin-1: # No player has put numberToWin stones on the board
return None
# Horizontal
for m in self.moved[::-1][:2]:
last_player = self.states[m]
# Here we try to find a interval whose elements are have same color with last_player, the interval is [)
left_bd = self.__last_move
right_bd = self.__last_move + 1
while self.states.get(left_bd-1, Board.kEmpty) == last_player and left_bd % self.__width != 0:
left_bd -= 1
while self.states.get(right_bd, Board.kEmpty) == last_player and right_bd % self.__width != 0:
right_bd += 1
if (right_bd - left_bd) >= self.numberToWin:
return last_player
# Vertical
left_bd = self.__last_move
right_bd = self.__last_move + self.__width
count = 1
while self.states.get(left_bd - self.__width, Board.kEmpty) == last_player:
left_bd -= self.__width
count += 1
while self.states.get(right_bd, Board.kEmpty) == last_player:
right_bd += self.__width
count += 1
if count >= self.numberToWin:
return last_player
# main diagonal
left_bd = self.__last_move
right_bd = self.__last_move + self.__width + 1
count = 1
while self.states.get(left_bd - 1 - self.__width, Board.kEmpty) == last_player and left_bd % self.__width != 0:
left_bd -= (self.__width + 1)
count += 1
while self.states.get(right_bd, Board.kEmpty) == last_player and right_bd % self.__width != 0:
right_bd += (self.__width + 1)
count += 1
if count >= self.numberToWin:
return last_player
# deputy diagonal
left_bd = self.__last_move
right_bd = self.__last_move + self.__width - 1
count = 1
while self.states.get(left_bd + 1 - self.__width, Board.kEmpty) == last_player and left_bd % self.__width != self.__width - 1:
left_bd -= (self.__width - 1)
count += 1
while self.states.get(right_bd, Board.kEmpty) == last_player and right_bd % self.__width != self.__width - 1:
right_bd += (self.__width - 1)
count += 1
if count >= self.numberToWin:
return last_player
return None
    def getWinner(self):
        """
        Scan every stone on the board and determine the winner.
        Returns the winning player, or None when there is no winner yet.
        """
        if len(self.moved) < 2*self.numberToWin-1:  # Too few stones for any player to have numberToWin in a row
            return None
        for m in self.moved[::-1]:
            curr_player = self.states[m]
            # Horizontal: grow a half-open interval [left_bd, right_bd) of
            # same-colored stones around m.  The `% width` tests stop the
            # scan from wrapping across row boundaries.
            left_bd = m
            right_bd = m + 1
            while self.states.get(left_bd-1, Board.kEmpty) == curr_player and left_bd % self.__width != 0:
                left_bd -= 1
            while self.states.get(right_bd, Board.kEmpty) == curr_player and right_bd % self.__width != 0:
                right_bd += 1
            if (right_bd - left_bd) >= self.numberToWin:
                return curr_player
            # Vertical: step by a full row width; off-board keys are simply
            # absent from `states`, so .get() returns kEmpty and stops.
            left_bd = m
            right_bd = m + self.__width
            count = 1
            while self.states.get(left_bd - self.__width, Board.kEmpty) == curr_player:
                left_bd -= self.__width
                count += 1
            while self.states.get(right_bd, Board.kEmpty) == curr_player:
                right_bd += self.__width
                count += 1
            if count >= self.numberToWin:
                return curr_player
            # Main diagonal (top-left to bottom-right), step width+1.
            left_bd = m
            right_bd = m + self.__width + 1
            count = 1
            while self.states.get(left_bd - 1 - self.__width, Board.kEmpty) == curr_player and left_bd % self.__width != 0:
                left_bd -= (self.__width + 1)
                count += 1
            while self.states.get(right_bd, Board.kEmpty) == curr_player and right_bd % self.__width != 0:
                right_bd += (self.__width + 1)
                count += 1
            if count >= self.numberToWin:
                return curr_player
            # Anti-diagonal (top-right to bottom-left), step width-1.
            left_bd = m
            right_bd = m + self.__width - 1
            count = 1
            while self.states.get(left_bd + 1 - self.__width, Board.kEmpty) == curr_player and left_bd % self.__width != self.__width - 1:
                left_bd -= (self.__width - 1)
                count += 1
            while self.states.get(right_bd, Board.kEmpty) == curr_player and right_bd % self.__width != self.__width - 1:
                right_bd += (self.__width - 1)
                count += 1
            if count >= self.numberToWin:
                return curr_player
        return None
def gameEnd(self):
"""
Check whether the game is terminal. Return a boolean value and the winner.
If no winner, return None.
"""
winner = self.fastGetWinner()
if winner is not None:
return True, winner
elif not self.availables:
return True, None
else:
return False, None
def printBoard(self):
print("Current turn: [{}]".format(Board.kStoneChar[self.__current_player]))
curr_state = np.zeros([self.__width, self.__height],
dtype=np.int) + Board.kEmpty
if self.__last_move:
last_h_idx, last_w_idx = self.moveToLocation(self.__last_move)
else:
last_h_idx, last_w_idx = -1, -1
if self.states:
moves, players = np.array(list(zip(*self.states.items())))
black_moves = moves[players == Board.kPlayerBlack]
white_moves = moves[players == Board.kPlayerWhite]
curr_state[black_moves // self.__width, black_moves %
self.__width] = Board.kPlayerBlack
curr_state[white_moves // self.__width, white_moves %
self.__width] = Board.kPlayerWhite
for w in range(self.__width):
print("{0:8d}".format(w), end='')
print('\n\n')
for h in range(self.__height):
print("{0:4d}".format(h), end='')
for w in range(self.__width):
if curr_state[h, w] == Board.kEmpty:
print("+".center(8), end='')
elif curr_state[h, w] == Board.kPlayerBlack:
if h == last_h_idx and w == last_w_idx:
print("[@]".center(8), end='')
else:
print("@".center(8), end='')
else:
if h == last_h_idx and w == last_w_idx:
print("[O]".center(8), end='')
else:
print("O".center(8), end='')
print("{0:4d}".format(h), end='\n\n\n')
for w in range(self.__width):
print("{0:8d}".format(w), end='')
print('\n')
    @property
    def current_player(self):
        """Index of the player to move next (0 = white, 1 = black)."""
        return self.__current_player
    @property
    def last_move_location(self):
        """[h, w] of the most recent move, or None before any move."""
        return self.moveToLocation(self.__last_move)
    @property
    def last_move(self):
        """Flat index of the most recent move, or None before any move."""
        return self.__last_move
    @property
    def width(self):
        """Board width in columns."""
        return self.__width
    @property
    def height(self):
        """Board height in rows."""
        return self.__height
@property
def is_empty(self):
| |
code += \
"\tsw $a0 0($sp)\n" + \
"\taddiu $sp $sp -4\n"
code += ctx.getChild(2).accept(self)
code += \
"\tmove $t2 $a0\n" \
"\tlw $t1 4($sp)\n" \
"\taddiu $sp $sp 4\n"\
"\tla $a0 bool_const1\n" \
"\tla $a1 bool_const0\n"
if self.CurrentType == "String" or self.CurrentType == "Int" or self.CurrentType == "Bool":
code += "\tjal equality_test\n"
else:
code += \
"\tsub $t3 $t2 $t1\n"\
"\tmovn $a0 $a1 $t3\n"
self.CurrentType = "Bool"
return code
# Visit a parse tree produced by COOL#true.
def visitTrue(self, ctx: COOL.TrueContext):
self.CurrentType = "Bool"
return "\tla $a0 bool_const1\n"
    # Visit a parse tree produced by COOL#lessEqual.
    def visitLessEqual(self, ctx: COOL.LessEqualContext):
        """Emit MIPS code evaluating `left <= right` into a fresh Bool object."""
        # Evaluate the left operand and push its result object on the stack.
        code = ctx.getChild(0).accept(self)
        code += \
            "\tsw $a0 0($sp)\n" + \
            "\taddiu $sp $sp -4\n"
        # Evaluate the right operand; its result object is left in $a0.
        code += ctx.getChild(2).accept(self)
        # $t1 = left value, $t2 = right value (boxed value lives at offset 12
        # of the object).  slt gives t3 = (right < left), so
        # t5 = 1 - t3 == (left <= right); the bit is stored into a fresh copy
        # of bool_const0 and the stack slot is popped.
        code += \
            "\tjal Object.copy\n" \
            "\tlw $t0 4($sp)\n" \
            "\tlw $t1 12($t0)\n" \
            "\tlw $t2 12($a0)\n" \
            "\tslt $t3 $t2 $t1\n" \
            "\tnegu $t4 $t3\n" \
            "\taddiu $t5 $t4 1\n" \
            "\tla $a0 bool_const0\n" \
            "\tjal Object.copy\n" \
            "\tsw $t5 12($a0)\n" \
            "\taddiu $sp $sp 4\n"
        self.CurrentType = "Bool"
        return code
    # Visit a parse tree produced by COOL#methodCall.
    def visitMethodCall(self, ctx: COOL.MethodCallContext):
        """Emit MIPS code for a dispatch expression.

        Handles both dynamic dispatch (``obj.m(args)``) and static dispatch
        (``obj@Class.m(args)``).  Arguments are pushed on the stack, then the
        receiver is evaluated and the method is invoked through a dispatch
        table.
        """
        # With no static-dispatch annotation the children are:
        # obj '.' method '(' [arg (',' arg)*] ')'  -> 5 children minimum.
        length = 5
        methodPos = 2
        objCode = ctx.getChild(0).accept(self)
        code = ""
        # SELF_TYPE resolves to the class currently being compiled.
        if self.CurrentType == "SELF_TYPE":
            self.CurrentType = self.CurrentClass.Name
        classId = self.CurrentType
        if ctx.getChild(1).symbol.text == "@":
            # Static dispatch: 'obj @ Type . method (...)' shifts the method
            # token two positions to the right.
            length += 2
            methodPos += 2
            classId = ctx.getChild(2).symbol.text
        count = methodPos + 2
        if len(ctx.children) > length:
            # There is at least one argument; evaluate each (arguments are
            # separated by ',' tokens, hence the step of 2) and push it.
            length = len(ctx.children)
            while length != count:
                param = (ctx.getChild(count).accept(self) or "")
                code += param + \
                    f"\tsw $a0 0($sp)\n" \
                    f"\taddiu $sp $sp -4\n"
                count = count + 2
        # Index of the method inside the class's dispatch table.
        methodIdx = list(self.TypeTable[classId].TagMethods).index(ctx.getChild(methodPos).symbol.text)
        # Record the call's static return type for the enclosing expression.
        self.CurrentType = self.searchMethodInfo(classId, ctx.getChild(methodPos).symbol.text)
        if ctx.getChild(1).symbol.text == "@":
            # Static dispatch: load the named class's table directly.
            code += objCode + \
                f"\tla $t1 {classId}_dispTab\n" \
                f"\tlw $t1 {methodIdx * 4}($t1)\n" \
                "\tjalr $t1\n"
        else:
            # Dynamic dispatch: the dispatch table pointer is stored at
            # offset 8 of the receiver object.
            code += objCode + \
                f"\tlw $t1 8($a0)\n" \
                f"\tlw $t1 {methodIdx * 4}($t1)\n" \
                "\tjalr $t1\n"
        return code
# MIPS data segment with the runtime's diagnostic strings.
# NOTE: this is a normal (non-raw) Python string, so "\\n" emits the two
# characters '\' 'n' for the assembler's own escape handling.  A bare "\n"
# would inject a real newline into the .asciiz literal and break assembly —
# that was the bug in _term_msg, fixed below.
RuntimeMessages = """
#
# Messages for the Runtime
#
.data
_abort_msg: .asciiz "Abort called from class "
_colon_msg: .asciiz ":"
_dispatch_msg: .asciiz ": Dispatch to void.\\n"
_cabort_msg: .asciiz "No match in case statement for Class "
_cabort_msg2: .asciiz "Match on void in case statement.\\n"
_nl: .asciiz "\\n"
_term_msg: .asciiz "\\nCOOL program successfully executed\\n"
_sabort_msg1: .asciiz "Index to substr is negative\\n"
_sabort_msg2: .asciiz "Index to substr is too big\\n"
_sabort_msg3: .asciiz "Length to substr too long\\n"
_sabort_msg4: .asciiz "Length to substr is negative\\n"
_sabort_msg: .asciiz "Execution aborted.\\n"
_objcopy_msg: .asciiz "Object.copy: Invalid object size.\\n"
_gc_abort_msg: .asciiz "GC bug!\\n"
#
# Messages for the GenGC garabge collector
#
_GenGC_INITERROR: .asciiz "GenGC: Unable to initialize the garbage collector.\\n"
_GenGC_COLLECT: .asciiz "Garbage collecting ...\\n"
_GenGC_Major: .asciiz "Major ...\\n"
_GenGC_Minor: .asciiz "Minor ...\\n"
#_GenGC_COLLECT: .asciiz ""
_GenGC_MINORERROR: .asciiz "GenGC: Error during minor garbage collection.\\n"
_GenGC_MAJORERROR: .asciiz "GenGC: Error during major garbage collection.\\n"
#
# Messages for the NoGC garabge collector
#
_NoGC_COLLECT: .asciiz "Increasing heap...\\n"
#_NoGC_COLLECT: .asciiz ""
"""
# Memory-manager bootstrap for the NoGC allocator: the runtime reads the
# _MemMgr_INITIALIZER / _MemMgr_COLLECTOR / _MemMgr_TEST words to select
# its init and collect entry points.
MemManagerNoGCInit = """
.data
.align 2
.globl _MemMgr_INITIALIZER
_MemMgr_INITIALIZER:
.word _NoGC_Init
.globl _MemMgr_COLLECTOR
_MemMgr_COLLECTOR:
.word _NoGC_Collect
.globl _MemMgr_TEST
_MemMgr_TEST:
.word 0
"""
# Same bootstrap record, wired to the generational (GenGC) collector instead.
MemManagerGenGCInit = """
.data
.align 2
.globl _MemMgr_INITIALIZER
_MemMgr_INITIALIZER:
.word _GenGC_Init
.globl _MemMgr_COLLECTOR
_MemMgr_COLLECTOR:
.word _GenGC_Collect
.globl _MemMgr_TEST
_MemMgr_TEST:
.word 0
"""
RuntimeCode = """
#
# Define some constants
#
obj_eyecatch=-4 # Unique id to verify any object
obj_tag=0
obj_size=4
obj_disp=8
obj_attr=12
int_slot=12
bool_slot=12
str_size=12 # This is a pointer to an Int object!!!
str_field=16 # The beginning of the ascii sequence
str_maxsize=1026 # the maximum string length
#
# The REG mask tells the garbage collector which register(s) it
# should automatically update on a garbage collection. Note that
# this is (ANDed) with the ARU mask before the garbage collector
# reads it. Only the registers specified in the garbage collector's
# ARU mask can be automatically updated.
#
# BITS----------------------------
# 3 2 1 0
# 10987654321098765432109876543210
# --------------------------------
#
# 00000000011111110000000000000000 <- initial Register (REG) mask
# +--++--++--++--++--++--++--++--+ $s0-$s6
# 0 0 7 F 0 0 0 0 ($16-$22)
#
MemMgr_REG_MASK=0x007F0000
.text
.globl main
main:
li $v0 9
move $a0 $zero
syscall # sbrk
move $a0 $sp # initialize the garbage collector
li $a1 MemMgr_REG_MASK
move $a2 $v0
jal _MemMgr_Init # sets $gp and $s7 (limit)
la $a0 Main_protObj # create the Main object
jal Object.copy # Call copy
addiu $sp $sp -4
sw $a0 4($sp) # save the Main object on the stack
move $s0 $a0 # set $s0 to point to self
jal Main_init # initialize the Main object
jal Main.main # Invoke main method
addiu $sp $sp 4 # restore the stack
la $a0 _term_msg # show terminal message
li $v0 4
syscall
li $v0 10
syscall # syscall 10 (exit)
#
# Polymorphic equality testing function:
# Two objects are equal if they are
# - identical (pointer equality, inlined in code)
# - have same tag and are of type BOOL,STRING,INT and contain the
# same data
#
# INPUT: The two objects are passed in $t1 and $t2
# OUTPUT: Initial value of $a0, if the objects are equal
# Initial value of $a1, otherwise
#
# The tags for Int,Bool,String are found in the global locations
# _int_tag, _bool_tag, _string_tag, which are initialized by the
# data part of the generated code. This removes a consistency problem
# between this file and the generated code.
#
.globl equality_test
equality_test: # ops in $t1 $t2
# true in A0, false in A1
# assume $t1, $t2 are not equal
beq $t1 $zero _eq_false # $t2 can't also be void
beq $t2 $zero _eq_false # $t1 can't also be void
lw $v0 obj_tag($t1) # get tags
lw $v1 obj_tag($t2)
bne $v1 $v0 _eq_false # compare tags
lw $a2 _int_tag # load int tag
beq $v1 $a2 _eq_int # Integers
lw $a2 _bool_tag # load bool tag
beq $v1 $a2 _eq_int # Booleans
lw $a2 _string_tag # load string tag
bne $v1 $a2 _eq_false # Not a primitive type
_eq_str: # handle strings
lw $v0, str_size($t1) # get string size objs
lw $v1, str_size($t2)
lw $v0, int_slot($v0) # get string sizes
lw $v1, int_slot($v1)
bne $v1 $v0 _eq_false
beqz $v1 _eq_true # 0 length strings are equal
add $t1 str_field # Point to start of string
add $t2 str_field
move $t0 $v0 # Keep string length as counter
_eq_l1:
lbu $v0,0($t1) # get char
add $t1 1
lbu $v1,0($t2)
add $t2 1
bne $v1 $v0 _eq_false
addiu $t0 $t0 -1 # Decrement counter
bnez $t0 _eq_l1
b _eq_true # end of strings
_eq_int: # handles booleans and ints
lw $v0,int_slot($t1) # load values
lw $v1,int_slot($t2)
bne $v1 $v0 _eq_false
_eq_true:
jr $ra # return true
_eq_false:
move $a0 $a1 # move false into accumulator
jr $ra
#
# _dispatch_abort
#
# filename in $a0
# line number in $t1
#
# Prints error message and exits.
# Called on dispatch to void.
#
.globl _dispatch_abort
_dispatch_abort:
sw $t1 0($sp) # save line number
addiu $sp $sp -4
addiu $a0 $a0 str_field # adjust to beginning of string
li $v0 4
syscall # print file name
la $a0 _colon_msg
li $v0 4
syscall # print ":"
lw $a0 4($sp) #
li $v0 1
syscall # print line number
li $v0 4
la $a0 _dispatch_msg
syscall # print dispatch-to-void message
li $v0 10
syscall # exit
#
# _case_abort2
#
# filename in $a0
# line number in $t1
#
# Prints error message and exits.
# Called on case on void.
#
.globl _case_abort2
_case_abort2:
sw $t1 0($sp) # save line number
addiu $sp $sp -4
addiu $a0 $a0 str_field # adjust to beginning of string
li $v0 4
syscall # print file name
la $a0 _colon_msg
li $v0 4
syscall # print ":"
lw $a0 4($sp) #
li $v0 1
syscall # print line number
li $v0 4
la $a0 _cabort_msg2
syscall # print case-on-void message
li $v0 10
syscall # exit
#
#
# _case_abort
# Is called when a case statement has no match
#
# INPUT: $a0 contains the object on which the case was
# performed
#
# Does not return!
#
.globl _case_abort
_case_abort: # $a0 contains case expression obj.
move $s0 $a0 # save the expression object
la $a0 _cabort_msg
li $v0 4
syscall # print_str
la $t1 class_nameTab
lw $v0 obj_tag($s0) # Get object tag
sll $v0 $v0 2 # *4
addu $t1 $t1 $v0
lw $t1 0($t1) # Load class name string obj.
addiu $a0 $t1 str_field # Adjust to beginning of str
li $v0 4 # print_str
syscall
la $a0 _nl
li $v0 4 # print_str
syscall
li $v0 10
syscall # Exit
#
# Copy method
#
# Copies an object and returns a pointer to a new object in
# the heap. Note that to increase performance, the stack frame
# is not set up unless it is absolutely needed. As a result,
# the frame is setup just before the call to "_MemMgr_Alloc" and
# is destroyed just after it. The increase in performance
# occurs becuase the calls to "_MemMgr_Alloc" happen very
# infrequently when the heap needs to be garbage collected.
#
# INPUT: $a0: object to be copied to free space in heap
#
# OUTPUT: $a0: points to the newly created copy.
#
# Registers modified:
# $t0, $t1, $t2, $v0, $v1, $a0, $a1, $a2, $gp, $s7
#
.globl Object.copy
Object.copy:
addiu $sp $sp -8 # create stack frame
sw $ra 8($sp)
sw $a0 4($sp)
jal _MemMgr_Test # test GC area
lw $a0 4($sp) # get object size
lw $a0 obj_size($a0)
blez $a0 _objcopy_error # check for invalid size
sll $a0 $a0 2 # convert words to bytes
addiu $a0 $a0 4 # account for eyecatcher
jal _MemMgr_Alloc # allocate storage
addiu $a1 $a0 4 # pointer to new object
lw $a0 4($sp) # the self object
lw $ra 8($sp) # restore return address
addiu $sp $sp 8 # remove frame
lw $t0 obj_size($a0) # get size of object
sll $t0 $t0 2 # convert words to bytes
b _objcopy_allocated # get on with the copy
# A faster version of Object.copy, for internal use (does not call
# _MemMgr_Test, and if possible not _MemMgr_Alloc)
_quick_copy:
lw $t0 obj_size($a0) # get size of object to copy
blez $t0 _objcopy_error # check for invalid size
sll $t0 $t0 2 # convert words to bytes
addiu $t1 $t0 4 # account for eyecatcher
add $gp $gp $t1 # allocate memory
sub $a1 $gp $t0 # pointer to new object
blt $gp $s7 _objcopy_allocated # check allocation
_objcopy_allocate:
sub $gp $a1 4 # restore the original $gp
addiu $sp $sp -8 # frame size
sw $ra 8($sp) # save return address
sw $a0 4($sp) # save self
move $a0 $t1 # put bytes to allocate in $a0
jal _MemMgr_Alloc # allocate storage
addiu $a1 $a0 4 # | |
<reponame>gist-ailab/bop_toolkit<gh_stars>100-1000
# Author: <NAME> (<EMAIL>)
# Tsinghua University
# Adapted based on the glumpy version: "./renderer_py.py"
"""A Python Vispy based renderer."""
import os
os.environ["PYOPENGL_PLATFORM"] = "egl"
import numpy as np
import vispy
from vispy import app, gloo
import OpenGL.GL as gl
from bop_toolkit_lib import inout
from bop_toolkit_lib import misc
from bop_toolkit_lib import renderer
# app backends: glfw, pyglet, egl
# gl backends: gl2, pyopengl2, gl+
app_backend = "egl"
gl_backend = "gl2"  # "pyopengl2" # speed: 'gl+' < 'gl2' < 'pyopengl2'
# NOTE(review): backend selection presumably must happen before any Canvas is
# created — confirm against the vispy.use documentation.
vispy.use(app=app_backend, gl=gl_backend)
print("vispy uses app: {}, gl: {}".format(app_backend, gl_backend))
# RGB vertex shader.  Transforms vertices to clip space and forwards color,
# texture coordinate, eye-space position, light vector and normal to the
# fragment stage.
_rgb_vertex_code = """
uniform mat4 u_mv;
uniform mat4 u_nm;
uniform mat4 u_mvp;
uniform vec3 u_light_eye_pos;
attribute vec3 a_position;
attribute vec3 a_normal;
attribute vec3 a_color;
attribute vec2 a_texcoord;
varying vec3 v_color;
varying vec2 v_texcoord;
varying vec3 v_eye_pos;
varying vec3 v_L;
varying vec3 v_normal;
void main() {
gl_Position = u_mvp * vec4(a_position, 1.0);
v_color = a_color;
v_texcoord = a_texcoord;
// The following points/vectors are expressed in the eye coordinates.
v_eye_pos = (u_mv * vec4(a_position, 1.0)).xyz; // Vertex.
v_L = normalize(u_light_eye_pos - v_eye_pos); // Vector to the light.
v_normal = normalize(u_nm * vec4(a_normal, 1.0)).xyz; // Normal vector.
}
"""
# RGB fragment shader - flat shading.  The face normal is reconstructed from
# screen-space derivatives of the eye-space position.
_rgb_fragment_flat_code = """
uniform float u_light_ambient_w;
uniform sampler2D u_texture;
uniform int u_use_texture;
varying vec3 v_color;
varying vec2 v_texcoord;
varying vec3 v_eye_pos;
varying vec3 v_L;
void main() {
// Face normal in eye coords.
vec3 f_normal = normalize(cross(dFdx(v_eye_pos), dFdy(v_eye_pos)));
float light_diffuse_w = max(dot(normalize(v_L), normalize(f_normal)), 0.0);
float light_w = u_light_ambient_w + light_diffuse_w;
if(light_w > 1.0) light_w = 1.0;
if(bool(u_use_texture)) {
gl_FragColor = vec4(light_w * texture2D(u_texture, v_texcoord));
}
else {
gl_FragColor = vec4(light_w * v_color, 1.0);
}
}
"""
# RGB fragment shader - Phong shading (per-fragment interpolated normal).
_rgb_fragment_phong_code = """
uniform float u_light_ambient_w;
uniform sampler2D u_texture;
uniform int u_use_texture;
varying vec3 v_color;
varying vec2 v_texcoord;
varying vec3 v_eye_pos;
varying vec3 v_L;
varying vec3 v_normal;
void main() {
float light_diffuse_w = max(dot(normalize(v_L), normalize(v_normal)), 0.0);
float light_w = u_light_ambient_w + light_diffuse_w;
if(light_w > 1.0) light_w = 1.0;
if(bool(u_use_texture)) {
gl_FragColor = vec4(light_w * texture2D(u_texture, v_texcoord));
}
else {
gl_FragColor = vec4(light_w * v_color, 1.0);
}
}
"""
# Depth vertex shader.
# Ref: https://github.com/julienr/vertex_visibility/blob/master/depth.py
#
# Getting the depth from the depth buffer in OpenGL is doable, see here:
# http://web.archive.org/web/20130416194336/http://olivers.posterous.com/linear-depth-in-glsl-for-real
# http://web.archive.org/web/20130426093607/http://www.songho.ca/opengl/gl_projectionmatrix.html
# http://stackoverflow.com/a/6657284/116067
# but it is difficult to achieve high precision, as explained in this article:
# http://dev.theomader.com/depth-precision/
#
# Once the vertex is in the view coordinates (view * model * v), its depth is
# simply the Z axis. Hence, instead of reading from the depth buffer and undoing
# the projection matrix, we store the Z coord of each vertex in the color
# buffer. OpenGL allows for float32 color buffer components.
_depth_vertex_code = """
uniform mat4 u_mv;
uniform mat4 u_mvp;
attribute vec3 a_position;
attribute vec3 a_color;
varying float v_eye_depth;
void main() {
gl_Position = u_mvp * vec4(a_position, 1.0);
vec3 v_eye_pos = (u_mv * vec4(a_position, 1.0)).xyz; // In eye coords.
// OpenGL Z axis goes out of the screen, so depths are negative
v_eye_depth = -v_eye_pos.z;
}
"""
# Depth fragment shader: writes the linear eye-space depth into the red
# channel of the float color buffer.
_depth_fragment_code = """
varying float v_eye_depth;
void main() {
gl_FragColor = vec4(v_eye_depth, 0.0, 0.0, 1.0);
}
"""
# Functions to calculate transformation matrices.
# Note that OpenGL expects the matrices to be saved column-wise.
# (Ref: http://www.songho.ca/opengl/gl_transform.html)
def _calc_model_view(model, view):
"""Calculates the model-view matrix.
:param model: 4x4 ndarray with the model matrix.
:param view: 4x4 ndarray with the view matrix.
:return: 4x4 ndarray with the model-view matrix.
"""
return np.dot(model, view)
def _calc_model_view_proj(model, view, proj):
"""Calculates the model-view-projection matrix.
:param model: 4x4 ndarray with the model matrix.
:param view: 4x4 ndarray with the view matrix.
:param proj: 4x4 ndarray with the projection matrix.
:return: 4x4 ndarray with the model-view-projection matrix.
"""
return np.dot(np.dot(model, view), proj)
def _calc_normal_matrix(model, view):
"""Calculates the normal matrix.
Ref: http://www.songho.ca/opengl/gl_normaltransform.html
:param model: 4x4 ndarray with the model matrix.
:param view: 4x4 ndarray with the view matrix.
:return: 4x4 ndarray with the normal matrix.
"""
return np.linalg.inv(np.dot(model, view)).T
def _calc_calib_proj(K, x0, y0, w, h, nc, fc, window_coords="y_down"):
"""Conversion of Hartley-Zisserman intrinsic matrix to OpenGL proj. matrix.
Ref:
1) https://strawlab.org/2011/11/05/augmented-reality-with-OpenGL
2) https://github.com/strawlab/opengl-hz/blob/master/src/calib_test_utils.py
:param K: 3x3 ndarray with the intrinsic camera matrix.
:param x0 The X coordinate of the camera image origin (typically 0).
:param y0: The Y coordinate of the camera image origin (typically 0).
:param w: Image width.
:param h: Image height.
:param nc: Near clipping plane.
:param fc: Far clipping plane.
:param window_coords: 'y_up' or 'y_down'.
:return: 4x4 ndarray with the OpenGL projection matrix.
"""
depth = float(fc - nc)
q = -(fc + nc) / depth
qn = -2 * (fc * nc) / depth
# Draw our images upside down, so that all the pixel-based coordinate
# systems are the same.
if window_coords == "y_up":
proj = np.array(
[
[2 * K[0, 0] / w, -2 * K[0, 1] / w, (-2 * K[0, 2] + w + 2 * x0) / w, 0],
[0, -2 * K[1, 1] / h, (-2 * K[1, 2] + h + 2 * y0) / h, 0],
[0, 0, q, qn], # Sets near and far planes (glPerspective).
[0, 0, -1, 0],
]
)
# Draw the images upright and modify the projection matrix so that OpenGL
# will generate window coords that compensate for the flipped image coords.
else:
assert window_coords == "y_down"
proj = np.array(
[
[2 * K[0, 0] / w, -2 * K[0, 1] / w, (-2 * K[0, 2] + w + 2 * x0) / w, 0],
[0, 2 * K[1, 1] / h, (2 * K[1, 2] - h + 2 * y0) / h, 0],
[0, 0, q, qn], # Sets near and far planes (glPerspective).
[0, 0, -1, 0],
]
)
return proj.T
def singleton(cls):
    """Class decorator caching a single instance per decorated class.

    The first call's arguments construct the instance; later calls return
    the cached instance and silently ignore their arguments (intentional
    here: the renderer reuses one GL context).

    Fix: the inner function previously hard-coded the Renderer constructor
    signature (width, height, mode, shading, bg_color); forwarding
    ``*args, **kwargs`` decouples the decorator from any particular
    ``__init__`` while remaining backward compatible.
    """
    instances = {}

    def get_instance(*args, **kwargs):
        if cls not in instances:
            instances[cls] = cls(*args, **kwargs)
        return instances[cls]

    return get_instance
@singleton # Don't throw GL context into trash when having more than one Renderer instance
class RendererVispy(renderer.Renderer, app.Canvas):
"""A Python based renderer."""
    def __init__(self, width, height, mode="rgb+depth", shading="phong", bg_color=(0.0, 0.0, 0.0, 0.0)):
        """Constructor.
        :param width: Width of the rendered image.
        :param height: Height of the rendered image.
        :param mode: Rendering mode ('rgb+depth', 'rgb', 'depth').
        :param shading: Type of shading ('flat', 'phong').
        :param bg_color: Color of the background (R, G, B, A).
        """
        # Initialize both bases explicitly; Canvas is created hidden since
        # rendering goes to an offscreen FBO, not a window.
        renderer.Renderer.__init__(self, width=width, height=height)
        app.Canvas.__init__(self, show=False, size=(width, height))
        self.mode = mode
        self.shading = shading
        self.bg_color = bg_color
        # yz flip: opencv to opengl (OpenCV camera looks down +Z with +Y
        # down; negating rows 1 and 2 converts the pose convention).
        pose_cv_to_gl = np.eye(4, dtype=np.float32)
        pose_cv_to_gl[1, 1], pose_cv_to_gl[2, 2] = -1, -1
        self.pose_cv_to_gl = pose_cv_to_gl
        # Indicators whether to render RGB and/or depth image.
        self.render_rgb = self.mode in ["rgb", "rgb+depth"]
        self.render_depth = self.mode in ["depth", "rgb+depth"]
        # Structures to store object models and related info, keyed by obj_id.
        self.models = {}
        self.model_bbox_corners = {}
        self.model_textures = {}
        # Rendered images (filled by the render calls).
        self.rgb = None
        self.depth = None
        # Per-object vertex and index buffer.
        self.vertex_buffers = {}
        self.index_buffers = {}
        # Per-object OpenGL programs for rendering of RGB and depth images.
        self.rgb_programs = {}
        self.depth_programs = {}
        # The frame buffer object used as the offscreen render target.
        rgb_buf = gloo.Texture2D(shape=(self.height, self.width, 3))
        depth_buf = gloo.RenderBuffer(shape=(self.height, self.width))
        self.fbo = gloo.FrameBuffer(color=rgb_buf, depth=depth_buf)
        # Activate the created frame buffer object so subsequent draws hit it.
        self.fbo.activate()
def add_object(self, obj_id, model_path, **kwargs):
"""See base class."""
# Color of the object model (the original color saved with the object model
# will be used if None).
surf_color = None
if "surf_color" in kwargs:
surf_color = kwargs["surf_color"]
# Load the object model.
model = inout.load_ply(model_path)
self.models[obj_id] = model
# Calculate the 3D bounding box of the model (will be used to set the near
# and far clipping plane).
bb = misc.calc_3d_bbox(model["pts"][:, 0], model["pts"][:, 1], model["pts"][:, 2])
self.model_bbox_corners[obj_id] = np.array(
[
[bb[0], bb[1], bb[2]],
[bb[0], bb[1], bb[2] + bb[5]],
[bb[0], bb[1] + bb[4], bb[2]],
[bb[0], bb[1] + bb[4], bb[2] + bb[5]],
[bb[0] + bb[3], bb[1], bb[2]],
[bb[0] + bb[3], bb[1], bb[2] + bb[5]],
[bb[0] + bb[3], bb[1] + bb[4], bb[2]],
[bb[0] + bb[3], bb[1] + bb[4], bb[2] + bb[5]],
]
)
# Set texture/color of vertices.
self.model_textures[obj_id] = None
# Use the specified uniform surface color.
if surf_color is not None:
colors = np.tile(list(surf_color) + [1.0], [model["pts"].shape[0], 1])
# Set UV texture coordinates to dummy values.
texture_uv = np.zeros((model["pts"].shape[0], 2), np.float32)
# Use the model texture.
elif "texture_file" in self.models[obj_id].keys():
model_texture_path = os.path.join(os.path.dirname(model_path), self.models[obj_id]["texture_file"])
model_texture = | |
and isHalted not found in library. Not supported.\n')
# Bind the error-reporting entry points of the MSP430 shared library and
# declare their C signatures for ctypes.
MSP430_Error_Number = MSP430mspgcc.MSP430_Error_Number
MSP430_Error_Number.argtypes = []
MSP430_Error_Number.restype = ctypes.c_long
MSP430_Error_String = MSP430mspgcc.MSP430_Error_String
MSP430_Error_String.argtypes = [ctypes.c_long]
MSP430_Error_String.restype = ctypes.c_char_p
# Optional entry points: older/other library builds may not export these,
# so probe with getattr semantics and fall back gracefully.
try:
    MSP430_Secure = MSP430mspgcc.MSP430_Secure
    MSP430_Secure.argtypes = []
    MSP430_Secure.restype = ctypes.c_int
except AttributeError:
    # mspgcc lib does not have this function; install a stub that raises.
    if verbose > 1:
        sys.stderr.write('MSP430_Secure not found in library. Not supported.\n')
    def MSP430_Secure():
        raise NotImplementedError("this function is not supported with this MSP430 library")
try:
    MSP430_readMAB = MSP430mspgcc.MSP430_readMAB
    MSP430_readMAB.argtypes = []
    MSP430_readMAB.restype = ctypes.c_int
except AttributeError:
    pass
# C callback type used for progress reporting: void f(WORD count, WORD total)
messagecallback = ctypes.CFUNCTYPE(ctypes.c_int, ctypes.c_ushort, ctypes.c_ushort) # void f(WORD count, WORD total)
class MSP430Library(object):
    """Implementation of the ``_parjtag`` module in Python with the help of
    ctypes.

    Every method is a thin wrapper around one or more calls into the
    backend JTAG library (TI or mspgcc); a backend status other than
    STATUS_OK is turned into an IOError carrying the backend's own error
    text.
    """
    def open(self, port = None):
        """Initialize the backend library and bind it to *port*.

        *port* defaults to "1" on win32 and "/dev/parport0" elsewhere.
        Raises IOError when initialization fails or when the FET firmware
        and library versions are incompatible.
        """
        version = ctypes.c_long(0)
        if port is None:
            if sys.platform == 'win32':
                port = "1"
            else:
                port = "/dev/parport0"
        # TI backend on win32: normalize the port name to upper case.
        if backend == CTYPES_TI and sys.platform == 'win32':
            port = port.upper()
        status = MSP430_Initialize(port, ctypes.byref(version))
        if status != STATUS_OK:
            raise IOError("Could not initialize the library (port: %s)" % port)
        if verbose:
            sys.stderr.write('backend library version: %d\n' % (version.value,))
        if version.value == -3:
            # warn if firmware and MSP430.dll are incompatible
            sys.stderr.write('ERROR: FET Firmware not compatible with MSP430 library!\n')
            sys.stderr.write('       Consider using --fet-update.\n')
            raise IOError('FET firmware error: FET:V2, DLL:V3 missmatch')
        elif version.value < 0:
            # warn if firmware and MSP430.dll are incompatible
            sys.stderr.write('WARNING: FET Firmware not compatible with MSP430 library!\n')
            sys.stderr.write('         Consider using --fet-update.\n')
        if backend == CTYPES_TI:
            # The TI library needs to be told which physical interface to use.
            if interface == 'spy-bi-wire':
                status = MSP430_Configure(INTERFACE_MODE, SPYBIWIRE_IF)
                if status != STATUS_OK:
                    raise IOError("Could not configure the library: %s (device not spi-bi-wire capable?)" % MSP430_Error_String(MSP430_Error_Number()))
            elif interface == 'spy-bi-wire-jtag':
                status = MSP430_Configure(INTERFACE_MODE, SPYBIWIREJTAG_IF)
                if status != STATUS_OK:
                    raise IOError("Could not configure the library: %s (device not spi-bi-wire capable?)" % MSP430_Error_String(MSP430_Error_Number()))
            else:
                # try to use auto detection
                status = MSP430_Configure(INTERFACE_MODE, AUTOMATIC_IF)
                if status != STATUS_OK:
                    # fallback to 4 wire mode
                    status = MSP430_Configure(INTERFACE_MODE, JTAG_IF)
                    if status != STATUS_OK:
                        raise IOError("Could not configure the library: %s (spy-bi-wire device/connection?)" % MSP430_Error_String(MSP430_Error_Number()))
        else:
            if interface != 'JTAG':
                raise ValueError("interface != 'JTAG' is not supported with this backend")
    def connect(self,):
        """Enable JTAG and connect to target. This stops it.
        This function must be called before using any other JTAG function,
        or the other functions will yield unpredictable data."""
        # Supply target voltage; the argument is presumably millivolt
        # (3000 -> 3.0 V) — confirm against the backend's API docs.
        MSP430_VCC(3000)
        status = MSP430_Open()
        if status != STATUS_OK:
            raise IOError("Can't open interface: %s" % MSP430_Error_String(MSP430_Error_Number()))
        #~ status = MSP430_Configure(VERIFICATION_MODE, TRUE)
        #~ if status != STATUS_OK:
        #~     raise IOError("Could not configure the library: %s" % MSP430_Error_String(MSP430_Error_Number()))
        if backend == CTYPES_TI:
            # switch off the RAM preserve mode, to speed up operations
            # it also makes the behaviour closer to mspgcc the library
            status = MSP430_Configure(RAM_PRESERVE_MODE, FALSE)
            if status != STATUS_OK:
                raise IOError("Could not configure the library: %s" % MSP430_Error_String(MSP430_Error_Number()))
    def release(self):
        """Release the target, disable JTAG lines.
        Subsequent access to the JTAG yields wrong data, until
        connect() is called again.
        The execution is started wherever the PC stays. Don't use this
        function after Flash operations or memverify. The PC was modified
        and points to an unpredictable location. Use reset() before calling
        this function."""
        status = MSP430_Close(TRUE)
        if status != STATUS_OK:
            raise IOError("Could not close the library: %s" % MSP430_Error_String(MSP430_Error_Number()))
    def reset(self, execute = 0, release = 0, resets = ALL_RESETS):
        """Reset the device, optionally start execution and/or release JTAG."""
        status = MSP430_Reset(resets, execute, release)
        if status != STATUS_OK:
            raise IOError("Could not reset target (no connection?): %s" % MSP430_Error_String(MSP430_Error_Number()))
    def memread(self, address, size):
        """Read 'size' bytes starting at the specified address.
        The return value is a bytearray with the (binary) data.
        It is possible to read peripherals, RAM as well as Flash."""
        if size < 0: raise ValueError("Size must not be negative")
        # ctypes-allocated C buffer the backend fills in place.
        buffer = (ctypes.c_uint8*size)();
        status = MSP430_Memory(address, buffer, size, READ)
        if status == STATUS_OK:
            return bytearray([x for x in buffer])
        else:
            raise IOError("Could not read target memory: %s" % MSP430_Error_String(MSP430_Error_Number()))
    def memwrite(self, address, buffer):
        """Write *buffer* (a bytes-like sequence of byte values) to target
        memory starting at *address*.
        It is possible to write peripherals, RAM as well as Flash.
        Flash must be erased before writing it with memerase()."""
        if backend == CTYPES_TI:
            # we want to be able to write the locked segments
            status = MSP430_Configure(LOCKED_FLASH_ACCESS, 1)
            if status != STATUS_OK:
                raise IOError("Could not configure the library: %s" % MSP430_Error_String(MSP430_Error_Number()))
        size = len(buffer)
        c_buffer = (ctypes.c_uint8*(size+2))(); # just to be sure + 2 (shouldn't be needed though)
        for i in range(size): c_buffer[i] = buffer[i]
        status = MSP430_Memory(address, c_buffer, size, WRITE)
        if status != STATUS_OK:
            raise IOError("Could not write target memory: %s" % MSP430_Error_String(MSP430_Error_Number()))
    def memverify(self, address, buffer):
        """Verify device memory against *buffer* (must have even length)
        using PSA analysis. Returns True when the memory matches."""
        size = len(buffer)
        if size & 1:
            raise ValueError("Buffer must have an even length")
        status = MSP430_VerifyMem(address, size, buffer)
        return (status == STATUS_OK)
    def memerase(self, type=ERASE_ALL, address=0xfffe, length=2):
        """Erase the Flash.
        Valid modes are:
            ERASE_SEGMENT = 0
            ERASE_MAIN = 1
            ERASE_ALL = 2
        The default address and length is fine for mass and main erase.
        To erase a single segment ERASE_SEGMENT and an address within that
        segment must be specified. The length can be chosen larger than
        one segment to erase a consecutive block of segments.
        The erased segments are checked for erasure using PSA analysis."""
        if backend == CTYPES_TI:
            # we want to be able to write the locked segments
            status = MSP430_Configure(LOCKED_FLASH_ACCESS, type != ERASE_ALL)
            if status != STATUS_OK:
                raise IOError("Could not configure the library: %s" % MSP430_Error_String(MSP430_Error_Number()))
        status = MSP430_Erase(type, address, length)
        if status != STATUS_OK:
            raise IOError("Could not erase the Flash: %s" % MSP430_Error_String(MSP430_Error_Number()))
    def funclet(self, code, timeout=1000):
        """Download a 'funclet' contained in the string 'code' to the target
        and execute it. This function waits until the code stops on a "jmp $"
        or a timeout.
        Please refer to the 'funclet' documentation for the contents of the
        code string.
        return the runtime in seconds"""
        runtime = ctypes.c_ulong()
        size = len(code)
        if size & 1:
            raise ValueError("data must be of even size")
        status = MSP430_FuncletWait(code, size, 1, timeout, ctypes.byref(runtime))
        if status != STATUS_OK:
            raise IOError("Could not execute code: %s" % MSP430_Error_String(MSP430_Error_Number()))
        return runtime.value
    def configure(self, mode, value = 0):
        """Configure the MSP430 driver."""
        status = MSP430_Configure(mode, value)
        if status != STATUS_OK:
            raise IOError("Could not change mode: %s" % MSP430_Error_String(MSP430_Error_Number()))
    def regread(self, regnum):
        """returns register value"""
        value = ctypes.c_long()
        status = MSP430_ReadRegister(regnum, ctypes.byref(value))
        if status != STATUS_OK:
            raise IOError("Could not read register: %s" % MSP430_Error_String(MSP430_Error_Number()))
        return value.value
    def regwrite(self, regnum, value):
        """write value to register"""
        status = MSP430_WriteRegister(regnum, value);
        if status != STATUS_OK:
            raise IOError("Could not write register: %s" % MSP430_Error_String(MSP430_Error_Number()))
    def set_flash_callback(self, function):
        """The 'function' is called with (count, total) as arguments
        while the flash is written. Only supported with the mspgcc backend."""
        if backend == CTYPES_MSPGCC:
            # keep a reference so the ctypes callback is not garbage collected
            self._callback = messagecallback(function)
            #~ MSP430_Configure(FLASH_CALLBACK, ctypes.addressof(self._callback))
            # hack following, close your eyes ;-)...
            # temporarily swap argtypes so the callback can be passed through
            # the generic Configure(long, long) entry point.
            argtypes = MSP430_Configure.argtypes
            MSP430_Configure.argtypes = [ctypes.c_long, messagecallback]
            MSP430_Configure(FLASH_CALLBACK, self._callback)
            MSP430_Configure.argtypes = argtypes
        else:
            raise JTAGException("callbacks are not supported with other libraries than mspgcc's")
    def isHalted(self):
        """Check if cpu is stuck on an address."""
        value = MSP430_isHalted()
        return value
    def secure(self):
        """burn JTAG security fuse.
        Note: this is not reversible. use with care.
        Note: not supported by all JTAG adapters.
        """
        status = MSP430_Secure()
        if status != STATUS_OK:
            raise IOError("Could not secure device: %s" % MSP430_Error_String(MSP430_Error_Number()))
# Module-level singleton that stands in for the historic C extension module
# "_parjtag"; the JTAG wrapper class below delegates to it.
_parjtag = MSP430Library()
# print the used backend
if verbose:
    sys.stderr.write("JTAG backend: %s (%s)\n" % (backend, backend_info))
#~ if backend == CTYPES_MSPGCC:
#~     _parjtag.configure(DEBUG_OPTION, verbose)
class JTAG(object):
"""\
Wrap the MSP430Library object.
The action* methods all do output messages on stderr and they take their
settings and data from the object and not as parameters.
"""
    def __init__(self):
        """Set default options; callers assign data/verbosity before use."""
        # NOTE(review): attribute name keeps the misspelling "showprogess";
        # presumably read elsewhere in this file — rename only with a sweep.
        self.showprogess = 0
        # payload for the action* methods; presumably a memory image — confirm
        self.data = None
        self.verbose = 1
# ---------- direct use API ---------------
def open(self, lpt=None):
"""Initialize and open port."""
if backend is None: init_backend()
if lpt is None:
_parjtag.open()
else:
_parjtag.open(lpt)
def connect(self):
"""Connect to device."""
_parjtag.connect()
def close(self):
"""Release device from JTAG."""
if _parjtag | |
('do not identified phase')
#TODO: change if cable type has 0 R1
if lineund.LineCableID in ['CableWith0R1','Cable2With0R1']:
Line = Line + ' R1=1.00E-07 R0=0 X1=0 X0=0 B1=0 B0=0 length=1'
else:
Line = Line + ' geometry='+ lineund.DeviceNumber
Line = Line + ' length='+ lineund.Length
Line = Line + ' units='+'m'
Line = Line + ' basefreq='+'60'
DSSLinesUnder.append(Line)
csvline = Switchline+ "," +lineund.DeviceNumber+ "," + Sectionlist[linebusindex].FromNodeID.replace(".","_") + "," + Sectionlist[linebusindex].ToNodeID.replace(".","_") +","+Sectionlist[linebusindex].Phase+","+lineund.DeviceNumber+","+lineund.Length
CSVLinesUnder.append(csvline)
return DSSLinesUnder, CSVLinesUnder
def convert_transformercodes(Transformerlist):
DSSTransformerscodes = []
CSVTransformers = ['Code ID, phases, R, X, Vprimary, Vsecondary, KVA']
for transformercus in Transformerlist:
Line = 'New XfmrCode.'+transformercus.ID
if transformercus.Type in ['1']:
Line = Line + ' Phases='+'1'
else:
Line = Line + ' Phases='+'3'
Z1 = float(transformercus.Z1) * float (transformercus.KVA) / 100
Z0 = float(transformercus.Z0) * float (transformercus.KVA) / 100
XR = float(transformercus.XR)
XR0 = float(transformercus.XR0)
R1 = Z1 / math.sqrt(1 + XR * XR)
R0 = Z0 / math.sqrt(1 + XR0 * XR0)
X1 = Z1 / math.sqrt(1 + 1 / (XR * XR))
X0 = Z0 / math.sqrt(1 + 1 / (XR0 * XR0))
complex0 = complex(R0, X0)
complex1 = complex(R1, X1)
matrix = np.matrix(
[[complex0, 0, 0], [0, complex1, 0], [0, 0, complex1]]
)
a = 1 * cmath.exp(2 * math.pi * 1j / 3)
T = np.matrix([[1., 1., 1.], [1., a * a, a], [1., a, a * a]])
T_inv = T.I
Zabc = T * matrix * T_inv
Z_perc = ((Zabc.item((0, 0))) / float (transformercus.KVA)) * 100
R_perc = Z_perc.real/2
x12 = Z_perc.imag
Line = Line + ' Windings='+'2' #both cases
Line = Line + ' Wdg='+'1'
if transformercus.Type in ['1']:
Line = Line + ' kV='+transformercus.KVLLprim#'7.2'
else:
Line = Line + ' kV='+transformercus.KVLLprim#'12.47'
Line = Line + ' kVA='+transformercus.KVA
Line = Line + ' %R='+str(R_perc)
Line = Line + ' Wdg='+'2'
if transformercus.Type in ['1']:
Line = Line + ' kV='+transformercus.KVLLsec#'0.207'
else:
Line = Line + ' kV='+transformercus.KVLLsec#'0.480'
Line = Line + ' kVA='+transformercus.KVA
Line = Line + ' %R='+str(R_perc)
Line = Line + ' XHL='+str(x12)
Line = Line + ' %NoLoadLoss='+str((float(transformercus.NoLoadLosses)/(float(transformercus.KVA)))*100)
DSSTransformerscodes.append(Line)
csvtransformers = transformercus.ID + "," + transformercus.Type+ "," + str(2* R_perc) + "," + str(x12)+ "," +transformercus.KVLLprim+ "," +transformercus.KVLLsec+ "," +transformercus.KVA
CSVTransformers.append(csvtransformers)
return DSSTransformerscodes,CSVTransformers
def convert_transformer(Transformerlist, TransformerSettinglist, Sectionlist):
    """Translate CYME transformer settings into OpenDSS ``New Transformer``
    definitions plus a CSV summary.

    Only TransformerSettinglist and Sectionlist are consulted;
    Transformerlist is accepted but unused here (interface symmetry with
    the other converters). Returns (dss_lines, csv_rows); csv_rows starts
    with a header row.
    """
    DSSTransformers = []
    CSVTransformers2 = ['Code ID, Bus 1, Bus 2, Connection']
    for trafos in TransformerSettinglist:
        Line = 'New Transformer.'+trafos.DeviceNumber
        Line = Line + ' XfmrCode='+trafos.EqID
        Line = Line + ' Wdg='+'1'
        # Linear scan keeps the LAST matching section (no early break).
        trafoprimary = trafos.SectionID
        trafoprimaryindex = -1
        for i in range(len(Sectionlist)):
            if trafoprimary == Sectionlist[i].SectionID:
                trafoprimaryindex = i
        if trafoprimaryindex == -1 :
            # NOTE(review): after this warning the index -1 silently selects
            # the last section in the list below.
            print('Trafo bus primary not found')
        Line = Line + ' Bus='+Sectionlist[trafoprimaryindex].FromNodeID.replace(".","_")
        # Append the OpenDSS node suffix matching the section phase code.
        if Sectionlist[trafoprimaryindex].Phase in ['A']:
            Line = Line +'.1.0'
        elif Sectionlist[trafoprimaryindex].Phase in ['B']:
            Line = Line +'.2.0'
        elif Sectionlist[trafoprimaryindex].Phase in ['C']:
            Line = Line +'.3.0'
        elif Sectionlist[trafoprimaryindex].Phase in ['ABC']:
            Line = Line +'.1.2.3'
        else:
            print ('do not identified phase')
        # Tap values come in percent; OpenDSS expects per-unit.
        Line = Line + ' Tap='+str(float(trafos.PrimTap)/100)
        # Connection code mapping: '6' -> wye primary / delta secondary,
        # '0' -> wye/wye. Presumably CYME connection codes — verify against
        # the CYME data dictionary.
        if trafos.Conn == '6':
            Line = Line +' Conn=wye'
        elif trafos.Conn == '0':
            Line = Line +' Conn=wye'
        else:
            print ('connection is different')
        Line = Line + ' Wdg='+'2'
        Line = Line + ' Bus='+Sectionlist[trafoprimaryindex].ToNodeID.replace(".","_")
        if Sectionlist[trafoprimaryindex].Phase in ['A']:
            Line = Line +'.1.0'
        elif Sectionlist[trafoprimaryindex].Phase in ['B']:
            Line = Line +'.2.0'
        elif Sectionlist[trafoprimaryindex].Phase in ['C']:
            Line = Line +'.3.0'
        elif Sectionlist[trafoprimaryindex].Phase in ['ABC']:
            Line = Line +'.1.2.3'
        else:
            print ('do not identified phase')
        Line = Line + ' Tap='+str(float(trafos.SecondaryTap)/100)
        if trafos.Conn == '6':
            Line = Line +' Conn=delta'
        elif trafos.Conn == '0':
            Line = Line +' Conn=wye'
        else:
            print ('connection is different')
        Line = Line + ' core=shell'
        Line = Line + ' Basefreq='+'60'
        DSSTransformers.append(Line)
        csvtransformers2 = trafos.DeviceNumber + "," + Sectionlist[trafoprimaryindex].FromNodeID.replace(".","_")+ "," +Sectionlist[trafoprimaryindex].ToNodeID.replace(".","_")+ "," + trafos.Conn
        CSVTransformers2.append(csvtransformers2)
    return DSSTransformers, CSVTransformers2
def convert_load(CustomerClasslist, Loadslist, CustomerLoadslist, LoadModelInformationlist, LoadEquivalentlist, Sectionlist, Transformerlist, TransformerSettinglist):
    """Translate CYME customer loads into OpenDSS ``New Load`` definitions.

    Only CustomerLoadslist and Sectionlist are consulted; the remaining
    parameters are accepted for interface compatibility. Records whose
    LoadModelID differs from '1' or whose Value1 is the literal string
    "0.000000" are skipped. Returns (dss_lines, csv_rows); csv_rows starts
    with a header row.
    """
    DSSLoads = []
    CSVLoads = ['Customer Number, Bus, Phase, Active Power [kW], Reactive Power [kVar], PF, Connection, ValueType']
    LoadModel = '1'
    #TODO: see what loadmodel is proper.
    for Loadcust in CustomerLoadslist:
        if Loadcust.LoadModelID == LoadModel:
            Line = 'New Load.'+Loadcust.CustomerNumber+'_'+Loadcust.LoadModelID
        else:
            continue
        # Single-letter phase codes are 1-phase loads; everything else 3-phase.
        if Loadcust.LoadPhase in ['A','B','C']:
            Line = Line + ' Phases='+'1'
        else:
            Line = Line + ' Phases='+'3'
        # Linear scan keeps the LAST matching section (no early break).
        loadsection = Loadcust.SectionID
        loadsectionindex = -1
        for i in range(len(Sectionlist)):
            if loadsection == Sectionlist[i].SectionID:
                loadsectionindex = i
        if loadsectionindex == -1 :
            # NOTE(review): index -1 then silently selects the last section.
            print('Load section not found')
        Line = Line + ' Bus1='+Sectionlist[loadsectionindex].FromNodeID.replace(".","_")
        if Loadcust.LoadPhase in ['A']:
            Line = Line +'.1.0'
        elif Loadcust.LoadPhase in ['B']:
            Line = Line +'.2.0'
        elif Loadcust.LoadPhase in ['C']:
            Line = Line +'.3.0'
        elif Loadcust.LoadPhase in ['ABC']:
            Line = Line +'.1.2.3'
        else:
            print ('do not identified phase')
        # Skip zero-power records (string compare against the CYME export).
        if (Loadcust.Value1) == "0.000000":
            continue
        # ValueType '2': Value2 is a percent power factor; '0': Value2 is kVAr.
        if Loadcust.ValueType == '2':
            Line = Line + ' kW='+Loadcust.Value1
            Line = Line + ' pf='+str(float(Loadcust.Value2)/100)
        elif Loadcust.ValueType == '0':
            Line = Line + ' kW='+Loadcust.Value1
            Line = Line + ' kVAr='+Loadcust.Value2
        else:
            print('A load with ValueType = {}'.format(Loadcust.ValueType))
        # Hard-coded secondary voltages: 0.240 kV split phase, 0.208 kV 3-ph.
        if Loadcust.LoadPhase in ['A','B','C']:
            Line = Line + ' kV='+'0.240'
        else:
            Line = Line + ' kV='+'0.208'
        Line = Line + ' Basefreq='+'60'
        Line = Line + ' Model='+'1'
        Line = Line + ' Vminpu='+'0.95'
        Line = Line + ' Vmaxpu='+'1.05'
        DSSLoads.append(Line)
        # NOTE(review): the CSV "PF" column is always Value2/100, which is
        # only a power factor when ValueType == '2' (for '0' it is kVAr/100).
        csvline = Loadcust.CustomerNumber+","+ Sectionlist[loadsectionindex].FromNodeID.replace(".","_")+","+Loadcust.LoadPhase+","+Loadcust.Value1+","+Loadcust.Value2+","+str(float(Loadcust.Value2)/100)+","+"wye"+","+Loadcust.ValueType
        CSVLoads.append(csvline)
    return DSSLoads,CSVLoads
def convert_generators(Sectionlist, Electronicconvertergeneratorlist, Electronicconvertergeneratorsettinglist, Converterlist, Convertercontrolsettinglist, Longtermdynamicscurveextlist, Dggenerationmodellist, Controlleddevicelist):
    """Translate CYME electronic-converter generators (PV) into OpenDSS
    ``New generator`` definitions.

    Only Sectionlist, Electronicconvertergeneratorsettinglist and
    Dggenerationmodellist are consulted; the other parameters are accepted
    for interface compatibility. Returns (dss_lines, csv_rows); csv_rows
    starts with a header row.
    """
    DSSGenerators = []
    CSVPVs = ['Name, Bus, Phase, Voltage Level, Power Rating [kVA], PF']
    for genpv in Electronicconvertergeneratorsettinglist:
        Line = 'New generator.'+genpv.DeviceNumber
        # Linear scan keeps the LAST matching section (no early break).
        pvsection = genpv.SectionID
        pvsectionindex = -1
        for i in range(len(Sectionlist)):
            if pvsection == Sectionlist[i].SectionID:
                pvsectionindex = i
        if pvsectionindex == -1 :
            print('PV section not found')
        Line = Line + ' Bus1='+Sectionlist[pvsectionindex].FromNodeID.replace(".","_")
        if genpv.EqPhase in ['A']:
            Line = Line +'.1.0'
            Line = Line +' phases=1'
        elif genpv.EqPhase in ['B']:
            Line = Line +'.2.0'
            Line = Line +' phases=1'
        elif genpv.EqPhase in ['C']:
            Line = Line +'.3.0'
            Line = Line +' phases=1'
        elif genpv.EqPhase in ['ABC']:
            Line = Line +'.1.2.3'
            Line = Line +' phases=3'
        else:
            print ('do not identified PV phase')
        # First try the 'DEFAULT' generation model for this device; if its
        # ActiveGeneration is >= 100 fall back to the alternate model below.
        pvrating = genpv.DeviceNumber
        pvratingindex = -1
        for i in range(len(Dggenerationmodellist)):
            if pvrating == Dggenerationmodellist[i].DeviceNumber and Dggenerationmodellist[i].LoadModelName == 'DEFAULT':
                pvratingindex = i
        if pvratingindex == -1 :
            print('PV rating not found')
        if float(Dggenerationmodellist[pvratingindex].ActiveGeneration) < 100:
            Line = Line + ' kW='+Dggenerationmodellist[pvratingindex].ActiveGeneration
            # PF of exactly 1.0 is emitted verbatim; otherwise it is a
            # percentage and converted to per-unit.
            if float(Dggenerationmodellist[pvratingindex].PowerFactor) == 1.000000:
                Line = Line + ' pf='+Dggenerationmodellist[pvratingindex].PowerFactor
            else:
                Line = Line + ' pf='+str(float(Dggenerationmodellist[pvratingindex].PowerFactor)/100)
        else:
            # Fallback lookup against the literal model name 'LoadModelName'.
            pvratingindex = -1
            for i in range(len(Dggenerationmodellist)):
                if pvrating == Dggenerationmodellist[i].DeviceNumber and Dggenerationmodellist[i].LoadModelName == 'LoadModelName':
                    #TODO: figure the proper load model out
                    pvratingindex = i
            if pvratingindex == -1 :
                print('PV rating not found')
            if float(Dggenerationmodellist[pvratingindex].ActiveGeneration) < 100:
                Line = Line + ' kW='+Dggenerationmodellist[pvratingindex].ActiveGeneration
                if float(Dggenerationmodellist[pvratingindex].PowerFactor) == 1.000000:
                    Line = Line + ' pf='+Dggenerationmodellist[pvratingindex].PowerFactor
                else:
                    Line = Line + ' pf='+str(float(Dggenerationmodellist[pvratingindex].PowerFactor)/100)
            else:
                # Device has no usable generation model; emit nothing for it.
                #print ('generator {} is not used'.format(genpv.DeviceNumber))
                continue
        # NOTE(review): only single-phase units get a kV value; three-phase
        # units just print a warning and the generator line has no kV.
        if genpv.EqPhase in ['A','B','C']:
            Line = Line + ' kV=0.240'
        else:
            print('three phase PV found')
        Line = Line + ' Basefreq='+'60'
        Line = Line + ' Model='+'7'
        Line = Line + ' Vminpu='+'0.95'
        Line = Line + ' Vmaxpu='+'1.05'
        DSSGenerators.append(Line)
        csvline = genpv.DeviceNumber+','+Sectionlist[pvsectionindex].FromNodeID+','+genpv.EqPhase+','+'7.2'+','+Dggenerationmodellist[pvratingindex].ActiveGeneration+','+'1.0'
        CSVPVs.append(csvline)
    return DSSGenerators,CSVPVs
def convert_capacitors(ShuntCapacitorSettinglist, CapacitorExtltdlist, Sectionlist):
    """Translate CYME shunt-capacitor settings into OpenDSS ``New Capacitor``
    definitions plus a CSV summary.

    Only ShuntCapacitorSettinglist and Sectionlist are consulted;
    CapacitorExtltdlist is accepted for interface compatibility.
    Returns (dss_lines, csv_rows); csv_rows starts with a header row.
    """
    dss_lines = []
    csv_rows = ['Rating (kVAR), Bus, Phases, Configuration']
    # Section phase code -> OpenDSS node suffix plus phase count.
    node_suffix = {
        'A': '.1.0 Phases=1',
        'B': '.2.0 Phases=1',
        'C': '.3.0 Phases=1',
        'ABC': '.1.2.3 Phases=3',
    }
    for cap in ShuntCapacitorSettinglist:
        # Locate the (last) section record carrying this capacitor.
        idx = -1
        for pos, section in enumerate(Sectionlist):
            if section.SectionID == cap.SectionID:
                idx = pos
        if idx == -1:
            print('Capacitor section not found')
        line = 'New Capacitor.' + cap.DeviceNumber
        if cap.Location == '2':
            line += ' Bus1=' + Sectionlist[idx].FromNodeID.replace(".", "_")
        else:
            print('Capacitor location is not 2/To bus. Please check and update the node.')
        phase = Sectionlist[idx].Phase
        if phase in node_suffix:
            line += node_suffix[phase]
        else:
            print('do not identified phase')
        kvar = str(float(cap.SwitchedKVARA) + float(cap.SwitchedKVARB) + float(cap.SwitchedKVARC))
        line += ' kVAr=' + kvar
        line += ' kV=12.47'
        line += ' Conn=' + ('wye' if cap.Connection == 'Y' else 'delta')
        line += ' Basefreq=60'
        dss_lines.append(line)
        csv_rows.append(','.join([
            kvar,
            Sectionlist[idx].FromNodeID.replace(".", "_"),
            Sectionlist[idx].Phase,
            cap.Connection,
        ]))
    return dss_lines, csv_rows
def convert_protection(Switchlist, Breakerlist, Fuselist, SwitchSettinglist, BreakerSettinglist, FuseSettinglist, OvercurrentRelayInstrumentlist, CurrentTransformerInstrumentlist,OverheadByphaseSettinglist,UndergroundlineSettinglist, Sectionlist):
DSSProtecction = []
DSSSwitchcontrol = []
switchlist = []
CSVSW = ['Device Number, Location - Overhead, Location - Underhead, SW Location, Phase, Type, Cabinate/Transformer Location ']
for protectssw in SwitchSettinglist:
switch = protectssw.SectionID
swindexover = -1
swindexunder = -1
for i in range(len(OverheadByphaseSettinglist)):
if switch == OverheadByphaseSettinglist[i].SectionID:
swindexover = i
if swindexover == -1 :
for i in range(len(UndergroundlineSettinglist)):
if switch == UndergroundlineSettinglist[i].SectionID:
swindexunder = i
if swindexunder == -1 :
print("no switch")
else:
if UndergroundlineSettinglist[swindexunder].DeviceNumber in switchlist:
Line = "Edit Line."
else:
Line = "New Line."
Line = Line +UndergroundlineSettinglist[swindexunder].DeviceNumber
switchlist.append(UndergroundlineSettinglist[swindexunder].DeviceNumber)
SWLine = 'New SwtControl.'+UndergroundlineSettinglist[swindexunder].DeviceNumber
SWLine = SWLine + ' SwitchedObj=Line.'+UndergroundlineSettinglist[swindexunder].DeviceNumber
else:
if OverheadByphaseSettinglist[swindexover].DeviceNumber in switchlist:
Line = "Edit Line."
else:
Line = "New Line."
Line = Line +OverheadByphaseSettinglist[swindexover].DeviceNumber
switchlist.append(OverheadByphaseSettinglist[swindexover].DeviceNumber)
SWLine = 'New SwtControl.'+OverheadByphaseSettinglist[swindexover].DeviceNumber
SWLine = SWLine + ' SwitchedObj=Line.'+OverheadByphaseSettinglist[swindexover].DeviceNumber
Line = Line + ' switch=yes'
SWLine = SWLine + ' SwitchedTerm=1'
if protectssw.NStatus == '1':
print('Switch {} is open'.format(protectssw.SectionID))
SWLine = SWLine + ' Normal=Open Action=Open'
else:
SWLine = SWLine + ' Normal=Close Action=Close'
SWLine = SWLine + ' Delay=0'
DSSProtecction.append(Line)
DSSSwitchcontrol.append(SWLine)
csvsw = protectssw.DeviceNumber + "," + OverheadByphaseSettinglist[swindexover].DeviceNumber+ "," +UndergroundlineSettinglist[swindexunder].DeviceNumber + "," + str(swindexover)
swsectionindex = -1
for i in range(len(Sectionlist)):
if protectssw.SectionID == Sectionlist[i].SectionID:
swsectionindex = i
if swsectionindex == -1 :
print('Switch section not found for {}'.format(switch))
csvsw = csvsw + "," + Sectionlist[swsectionindex].Phase
CabinateNum=''
SwType='-1'
#TODO: switchtype and cabinate number does not work.
csvsw = csvsw + "," + SwType + "," + CabinateNum
CSVSW.append(csvsw)
CSVBR = ['Device Number, Location - Overhead, Location - Underhead, BR Location ']
for protectsbr in BreakerSettinglist:
breaker = protectsbr.SectionID
brindexover = -1
brindexunder = -1
for i in range(len(OverheadByphaseSettinglist)):
if breaker == OverheadByphaseSettinglist[i].SectionID:
brindexover = i
if brindexover == -1 :
for i in range(len(UndergroundlineSettinglist)):
if breaker == UndergroundlineSettinglist[i].SectionID:
brindexunder = i
if brindexunder == -1 :
print("no switch")
else:
if UndergroundlineSettinglist[brindexunder].DeviceNumber in switchlist:
Line = "Edit Line."
else:
Line = "New Line."
Line = Line +UndergroundlineSettinglist[brindexunder].DeviceNumber
switchlist.append(UndergroundlineSettinglist[brindexunder].DeviceNumber)
else:
if OverheadByphaseSettinglist[brindexover].DeviceNumber in switchlist:
Line = "Edit Line."
else:
Line | |
# Copyright 2021 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from connector import channel
from google3.cloud.graphite.mmv2.services.google.container import cluster_pb2
from google3.cloud.graphite.mmv2.services.google.container import cluster_pb2_grpc
from typing import List
class Cluster(object):
    def __init__(
        self,
        name: str = None,
        description: str = None,
        initial_node_count: int = None,
        master_auth: dict = None,
        logging_service: str = None,
        monitoring_service: str = None,
        network: str = None,
        cluster_ipv4_cidr: str = None,
        addons_config: dict = None,
        subnetwork: str = None,
        node_pools: list = None,
        locations: list = None,
        enable_kubernetes_alpha: bool = None,
        resource_labels: dict = None,
        label_fingerprint: str = None,
        legacy_abac: dict = None,
        network_policy: dict = None,
        ip_allocation_policy: dict = None,
        master_authorized_networks_config: dict = None,
        binary_authorization: dict = None,
        autoscaling: dict = None,
        network_config: dict = None,
        maintenance_policy: dict = None,
        default_max_pods_constraint: dict = None,
        resource_usage_export_config: dict = None,
        authenticator_groups_config: dict = None,
        private_cluster_config: dict = None,
        database_encryption: dict = None,
        vertical_pod_autoscaling: dict = None,
        shielded_nodes: dict = None,
        endpoint: str = None,
        master_version: str = None,
        create_time: str = None,
        status: str = None,
        status_message: str = None,
        node_ipv4_cidr_size: int = None,
        services_ipv4_cidr: str = None,
        expire_time: str = None,
        location: str = None,
        enable_tpu: bool = None,
        tpu_ipv4_cidr_block: str = None,
        conditions: list = None,
        autopilot: dict = None,
        project: str = None,
        node_config: dict = None,
        release_channel: dict = None,
        workload_identity_config: dict = None,
        notification_config: dict = None,
        confidential_nodes: dict = None,
        self_link: str = None,
        zone: str = None,
        initial_cluster_version: str = None,
        current_master_version: str = None,
        current_node_version: str = None,
        instance_group_urls: list = None,
        current_node_count: int = None,
        id: str = None,
        service_account_file: str = "",
    ):
        """Initialize a GKE Cluster resource wrapper and open the DCL channel.

        NOTE(review): several accepted parameters (endpoint, create_time,
        status, status_message, node_ipv4_cidr_size, services_ipv4_cidr,
        expire_time, tpu_ipv4_cidr_block, self_link, zone,
        current_master_version, current_node_version, instance_group_urls
        is stored but current_node_count and id are not) are never assigned
        to the instance — presumably output-only fields in this generated
        code; confirm against the code generator before relying on them.
        """
        # Ensure the gRPC channel to the DCL service is ready before any RPC.
        channel.initialize()
        self.name = name
        self.description = description
        self.initial_node_count = initial_node_count
        self.master_auth = master_auth
        self.logging_service = logging_service
        self.monitoring_service = monitoring_service
        self.network = network
        self.cluster_ipv4_cidr = cluster_ipv4_cidr
        self.addons_config = addons_config
        self.subnetwork = subnetwork
        self.node_pools = node_pools
        self.locations = locations
        self.enable_kubernetes_alpha = enable_kubernetes_alpha
        self.resource_labels = resource_labels
        self.label_fingerprint = label_fingerprint
        self.legacy_abac = legacy_abac
        self.network_policy = network_policy
        self.ip_allocation_policy = ip_allocation_policy
        self.master_authorized_networks_config = master_authorized_networks_config
        self.binary_authorization = binary_authorization
        self.autoscaling = autoscaling
        self.network_config = network_config
        self.maintenance_policy = maintenance_policy
        self.default_max_pods_constraint = default_max_pods_constraint
        self.resource_usage_export_config = resource_usage_export_config
        self.authenticator_groups_config = authenticator_groups_config
        self.private_cluster_config = private_cluster_config
        self.database_encryption = database_encryption
        self.vertical_pod_autoscaling = vertical_pod_autoscaling
        self.shielded_nodes = shielded_nodes
        self.master_version = master_version
        self.location = location
        self.enable_tpu = enable_tpu
        self.conditions = conditions
        self.autopilot = autopilot
        self.project = project
        self.node_config = node_config
        self.release_channel = release_channel
        self.workload_identity_config = workload_identity_config
        self.notification_config = notification_config
        self.confidential_nodes = confidential_nodes
        self.initial_cluster_version = initial_cluster_version
        self.instance_group_urls = instance_group_urls
        self.service_account_file = service_account_file
    def apply(self):
        """Create or update this GKE cluster via the ContainerClusterService
        Apply RPC, then refresh every attribute of this object from the
        resource state returned by the service.

        NOTE(review): this method body is machine-generated (DCL-style);
        edit the generator, not this file, for structural changes.
        """
        stub = cluster_pb2_grpc.ContainerClusterServiceStub(channel.Channel())
        request = cluster_pb2.ApplyContainerClusterRequest()
        # Populate the request resource. Scalar fields are copied only when
        # their proto form is truthy; message fields are either CopyFrom'd or
        # explicitly cleared so the server can distinguish "unset" from an
        # empty message.
        if Primitive.to_proto(self.name):
            request.resource.name = Primitive.to_proto(self.name)
        if Primitive.to_proto(self.description):
            request.resource.description = Primitive.to_proto(self.description)
        if Primitive.to_proto(self.initial_node_count):
            request.resource.initial_node_count = Primitive.to_proto(
                self.initial_node_count
            )
        if ClusterMasterAuth.to_proto(self.master_auth):
            request.resource.master_auth.CopyFrom(
                ClusterMasterAuth.to_proto(self.master_auth)
            )
        else:
            request.resource.ClearField("master_auth")
        if Primitive.to_proto(self.logging_service):
            request.resource.logging_service = Primitive.to_proto(self.logging_service)
        if Primitive.to_proto(self.monitoring_service):
            request.resource.monitoring_service = Primitive.to_proto(
                self.monitoring_service
            )
        if Primitive.to_proto(self.network):
            request.resource.network = Primitive.to_proto(self.network)
        if Primitive.to_proto(self.cluster_ipv4_cidr):
            request.resource.cluster_ipv4_cidr = Primitive.to_proto(
                self.cluster_ipv4_cidr
            )
        if ClusterAddonsConfig.to_proto(self.addons_config):
            request.resource.addons_config.CopyFrom(
                ClusterAddonsConfig.to_proto(self.addons_config)
            )
        else:
            request.resource.ClearField("addons_config")
        if Primitive.to_proto(self.subnetwork):
            request.resource.subnetwork = Primitive.to_proto(self.subnetwork)
        if ClusterNodePoolsArray.to_proto(self.node_pools):
            request.resource.node_pools.extend(
                ClusterNodePoolsArray.to_proto(self.node_pools)
            )
        if Primitive.to_proto(self.locations):
            request.resource.locations.extend(Primitive.to_proto(self.locations))
        if Primitive.to_proto(self.enable_kubernetes_alpha):
            request.resource.enable_kubernetes_alpha = Primitive.to_proto(
                self.enable_kubernetes_alpha
            )
        if Primitive.to_proto(self.resource_labels):
            request.resource.resource_labels = Primitive.to_proto(self.resource_labels)
        if Primitive.to_proto(self.label_fingerprint):
            request.resource.label_fingerprint = Primitive.to_proto(
                self.label_fingerprint
            )
        if ClusterLegacyAbac.to_proto(self.legacy_abac):
            request.resource.legacy_abac.CopyFrom(
                ClusterLegacyAbac.to_proto(self.legacy_abac)
            )
        else:
            request.resource.ClearField("legacy_abac")
        if ClusterNetworkPolicy.to_proto(self.network_policy):
            request.resource.network_policy.CopyFrom(
                ClusterNetworkPolicy.to_proto(self.network_policy)
            )
        else:
            request.resource.ClearField("network_policy")
        if ClusterIPAllocationPolicy.to_proto(self.ip_allocation_policy):
            request.resource.ip_allocation_policy.CopyFrom(
                ClusterIPAllocationPolicy.to_proto(self.ip_allocation_policy)
            )
        else:
            request.resource.ClearField("ip_allocation_policy")
        if ClusterMasterAuthorizedNetworksConfig.to_proto(
            self.master_authorized_networks_config
        ):
            request.resource.master_authorized_networks_config.CopyFrom(
                ClusterMasterAuthorizedNetworksConfig.to_proto(
                    self.master_authorized_networks_config
                )
            )
        else:
            request.resource.ClearField("master_authorized_networks_config")
        if ClusterBinaryAuthorization.to_proto(self.binary_authorization):
            request.resource.binary_authorization.CopyFrom(
                ClusterBinaryAuthorization.to_proto(self.binary_authorization)
            )
        else:
            request.resource.ClearField("binary_authorization")
        if ClusterAutoscaling.to_proto(self.autoscaling):
            request.resource.autoscaling.CopyFrom(
                ClusterAutoscaling.to_proto(self.autoscaling)
            )
        else:
            request.resource.ClearField("autoscaling")
        if ClusterNetworkConfig.to_proto(self.network_config):
            request.resource.network_config.CopyFrom(
                ClusterNetworkConfig.to_proto(self.network_config)
            )
        else:
            request.resource.ClearField("network_config")
        if ClusterMaintenancePolicy.to_proto(self.maintenance_policy):
            request.resource.maintenance_policy.CopyFrom(
                ClusterMaintenancePolicy.to_proto(self.maintenance_policy)
            )
        else:
            request.resource.ClearField("maintenance_policy")
        if ClusterDefaultMaxPodsConstraint.to_proto(self.default_max_pods_constraint):
            request.resource.default_max_pods_constraint.CopyFrom(
                ClusterDefaultMaxPodsConstraint.to_proto(
                    self.default_max_pods_constraint
                )
            )
        else:
            request.resource.ClearField("default_max_pods_constraint")
        if ClusterResourceUsageExportConfig.to_proto(self.resource_usage_export_config):
            request.resource.resource_usage_export_config.CopyFrom(
                ClusterResourceUsageExportConfig.to_proto(
                    self.resource_usage_export_config
                )
            )
        else:
            request.resource.ClearField("resource_usage_export_config")
        if ClusterAuthenticatorGroupsConfig.to_proto(self.authenticator_groups_config):
            request.resource.authenticator_groups_config.CopyFrom(
                ClusterAuthenticatorGroupsConfig.to_proto(
                    self.authenticator_groups_config
                )
            )
        else:
            request.resource.ClearField("authenticator_groups_config")
        if ClusterPrivateClusterConfig.to_proto(self.private_cluster_config):
            request.resource.private_cluster_config.CopyFrom(
                ClusterPrivateClusterConfig.to_proto(self.private_cluster_config)
            )
        else:
            request.resource.ClearField("private_cluster_config")
        if ClusterDatabaseEncryption.to_proto(self.database_encryption):
            request.resource.database_encryption.CopyFrom(
                ClusterDatabaseEncryption.to_proto(self.database_encryption)
            )
        else:
            request.resource.ClearField("database_encryption")
        if ClusterVerticalPodAutoscaling.to_proto(self.vertical_pod_autoscaling):
            request.resource.vertical_pod_autoscaling.CopyFrom(
                ClusterVerticalPodAutoscaling.to_proto(self.vertical_pod_autoscaling)
            )
        else:
            request.resource.ClearField("vertical_pod_autoscaling")
        if ClusterShieldedNodes.to_proto(self.shielded_nodes):
            request.resource.shielded_nodes.CopyFrom(
                ClusterShieldedNodes.to_proto(self.shielded_nodes)
            )
        else:
            request.resource.ClearField("shielded_nodes")
        if Primitive.to_proto(self.master_version):
            request.resource.master_version = Primitive.to_proto(self.master_version)
        if Primitive.to_proto(self.location):
            request.resource.location = Primitive.to_proto(self.location)
        if Primitive.to_proto(self.enable_tpu):
            request.resource.enable_tpu = Primitive.to_proto(self.enable_tpu)
        if ClusterConditionsArray.to_proto(self.conditions):
            request.resource.conditions.extend(
                ClusterConditionsArray.to_proto(self.conditions)
            )
        if ClusterAutopilot.to_proto(self.autopilot):
            request.resource.autopilot.CopyFrom(
                ClusterAutopilot.to_proto(self.autopilot)
            )
        else:
            request.resource.ClearField("autopilot")
        if Primitive.to_proto(self.project):
            request.resource.project = Primitive.to_proto(self.project)
        if ClusterNodeConfig.to_proto(self.node_config):
            request.resource.node_config.CopyFrom(
                ClusterNodeConfig.to_proto(self.node_config)
            )
        else:
            request.resource.ClearField("node_config")
        if ClusterReleaseChannel.to_proto(self.release_channel):
            request.resource.release_channel.CopyFrom(
                ClusterReleaseChannel.to_proto(self.release_channel)
            )
        else:
            request.resource.ClearField("release_channel")
        if ClusterWorkloadIdentityConfig.to_proto(self.workload_identity_config):
            request.resource.workload_identity_config.CopyFrom(
                ClusterWorkloadIdentityConfig.to_proto(self.workload_identity_config)
            )
        else:
            request.resource.ClearField("workload_identity_config")
        if ClusterNotificationConfig.to_proto(self.notification_config):
            request.resource.notification_config.CopyFrom(
                ClusterNotificationConfig.to_proto(self.notification_config)
            )
        else:
            request.resource.ClearField("notification_config")
        if ClusterConfidentialNodes.to_proto(self.confidential_nodes):
            request.resource.confidential_nodes.CopyFrom(
                ClusterConfidentialNodes.to_proto(self.confidential_nodes)
            )
        else:
            request.resource.ClearField("confidential_nodes")
        if Primitive.to_proto(self.initial_cluster_version):
            request.resource.initial_cluster_version = Primitive.to_proto(
                self.initial_cluster_version
            )
        if Primitive.to_proto(self.instance_group_urls):
            request.resource.instance_group_urls.extend(
                Primitive.to_proto(self.instance_group_urls)
            )
        request.service_account_file = self.service_account_file
        response = stub.ApplyContainerCluster(request)
        # Mirror the applied resource back onto this object, including
        # output-only fields (endpoint, status, timestamps, zone, id, ...).
        self.name = Primitive.from_proto(response.name)
        self.description = Primitive.from_proto(response.description)
        self.initial_node_count = Primitive.from_proto(response.initial_node_count)
        self.master_auth = ClusterMasterAuth.from_proto(response.master_auth)
        self.logging_service = Primitive.from_proto(response.logging_service)
        self.monitoring_service = Primitive.from_proto(response.monitoring_service)
        self.network = Primitive.from_proto(response.network)
        self.cluster_ipv4_cidr = Primitive.from_proto(response.cluster_ipv4_cidr)
        self.addons_config = ClusterAddonsConfig.from_proto(response.addons_config)
        self.subnetwork = Primitive.from_proto(response.subnetwork)
        self.node_pools = ClusterNodePoolsArray.from_proto(response.node_pools)
        self.locations = Primitive.from_proto(response.locations)
        self.enable_kubernetes_alpha = Primitive.from_proto(
            response.enable_kubernetes_alpha
        )
        self.resource_labels = Primitive.from_proto(response.resource_labels)
        self.label_fingerprint = Primitive.from_proto(response.label_fingerprint)
        self.legacy_abac = ClusterLegacyAbac.from_proto(response.legacy_abac)
        self.network_policy = ClusterNetworkPolicy.from_proto(response.network_policy)
        self.ip_allocation_policy = ClusterIPAllocationPolicy.from_proto(
            response.ip_allocation_policy
        )
        self.master_authorized_networks_config = ClusterMasterAuthorizedNetworksConfig.from_proto(
            response.master_authorized_networks_config
        )
        self.binary_authorization = ClusterBinaryAuthorization.from_proto(
            response.binary_authorization
        )
        self.autoscaling = ClusterAutoscaling.from_proto(response.autoscaling)
        self.network_config = ClusterNetworkConfig.from_proto(response.network_config)
        self.maintenance_policy = ClusterMaintenancePolicy.from_proto(
            response.maintenance_policy
        )
        self.default_max_pods_constraint = ClusterDefaultMaxPodsConstraint.from_proto(
            response.default_max_pods_constraint
        )
        self.resource_usage_export_config = ClusterResourceUsageExportConfig.from_proto(
            response.resource_usage_export_config
        )
        self.authenticator_groups_config = ClusterAuthenticatorGroupsConfig.from_proto(
            response.authenticator_groups_config
        )
        self.private_cluster_config = ClusterPrivateClusterConfig.from_proto(
            response.private_cluster_config
        )
        self.database_encryption = ClusterDatabaseEncryption.from_proto(
            response.database_encryption
        )
        self.vertical_pod_autoscaling = ClusterVerticalPodAutoscaling.from_proto(
            response.vertical_pod_autoscaling
        )
        self.shielded_nodes = ClusterShieldedNodes.from_proto(response.shielded_nodes)
        self.endpoint = Primitive.from_proto(response.endpoint)
        self.master_version = Primitive.from_proto(response.master_version)
        self.create_time = Primitive.from_proto(response.create_time)
        self.status = Primitive.from_proto(response.status)
        self.status_message = Primitive.from_proto(response.status_message)
        self.node_ipv4_cidr_size = Primitive.from_proto(response.node_ipv4_cidr_size)
        self.services_ipv4_cidr = Primitive.from_proto(response.services_ipv4_cidr)
        self.expire_time = Primitive.from_proto(response.expire_time)
        self.location = Primitive.from_proto(response.location)
        self.enable_tpu = Primitive.from_proto(response.enable_tpu)
        self.tpu_ipv4_cidr_block = Primitive.from_proto(response.tpu_ipv4_cidr_block)
        self.conditions = ClusterConditionsArray.from_proto(response.conditions)
        self.autopilot = ClusterAutopilot.from_proto(response.autopilot)
        self.project = Primitive.from_proto(response.project)
        self.node_config = ClusterNodeConfig.from_proto(response.node_config)
        self.release_channel = ClusterReleaseChannel.from_proto(
            response.release_channel
        )
        self.workload_identity_config = ClusterWorkloadIdentityConfig.from_proto(
            response.workload_identity_config
        )
        self.notification_config = ClusterNotificationConfig.from_proto(
            response.notification_config
        )
        self.confidential_nodes = ClusterConfidentialNodes.from_proto(
            response.confidential_nodes
        )
        self.self_link = Primitive.from_proto(response.self_link)
        self.zone = Primitive.from_proto(response.zone)
        self.initial_cluster_version = Primitive.from_proto(
            response.initial_cluster_version
        )
        self.current_master_version = Primitive.from_proto(
            response.current_master_version
        )
        self.current_node_version = Primitive.from_proto(response.current_node_version)
        self.instance_group_urls = Primitive.from_proto(response.instance_group_urls)
        self.current_node_count = Primitive.from_proto(response.current_node_count)
        self.id = Primitive.from_proto(response.id)
    def delete(self):
        """Delete this cluster via the ContainerClusterService Delete RPC.

        The request carries the full desired-state resource (same
        field-marshalling logic as apply()); the service identifies the
        cluster to delete from it. The RPC response is not used.

        NOTE(review): this method body is machine-generated (DCL-style);
        the resource-building block intentionally duplicates apply()'s.
        """
        stub = cluster_pb2_grpc.ContainerClusterServiceStub(channel.Channel())
        request = cluster_pb2.DeleteContainerClusterRequest()
        request.service_account_file = self.service_account_file
        # Scalar fields are set only when truthy; message fields are either
        # CopyFrom'd or explicitly cleared.
        if Primitive.to_proto(self.name):
            request.resource.name = Primitive.to_proto(self.name)
        if Primitive.to_proto(self.description):
            request.resource.description = Primitive.to_proto(self.description)
        if Primitive.to_proto(self.initial_node_count):
            request.resource.initial_node_count = Primitive.to_proto(
                self.initial_node_count
            )
        if ClusterMasterAuth.to_proto(self.master_auth):
            request.resource.master_auth.CopyFrom(
                ClusterMasterAuth.to_proto(self.master_auth)
            )
        else:
            request.resource.ClearField("master_auth")
        if Primitive.to_proto(self.logging_service):
            request.resource.logging_service = Primitive.to_proto(self.logging_service)
        if Primitive.to_proto(self.monitoring_service):
            request.resource.monitoring_service = Primitive.to_proto(
                self.monitoring_service
            )
        if Primitive.to_proto(self.network):
            request.resource.network = Primitive.to_proto(self.network)
        if Primitive.to_proto(self.cluster_ipv4_cidr):
            request.resource.cluster_ipv4_cidr = Primitive.to_proto(
                self.cluster_ipv4_cidr
            )
        if ClusterAddonsConfig.to_proto(self.addons_config):
            request.resource.addons_config.CopyFrom(
                ClusterAddonsConfig.to_proto(self.addons_config)
            )
        else:
            request.resource.ClearField("addons_config")
        if Primitive.to_proto(self.subnetwork):
            request.resource.subnetwork = Primitive.to_proto(self.subnetwork)
        if ClusterNodePoolsArray.to_proto(self.node_pools):
            request.resource.node_pools.extend(
                ClusterNodePoolsArray.to_proto(self.node_pools)
            )
        if Primitive.to_proto(self.locations):
            request.resource.locations.extend(Primitive.to_proto(self.locations))
        if Primitive.to_proto(self.enable_kubernetes_alpha):
            request.resource.enable_kubernetes_alpha = Primitive.to_proto(
                self.enable_kubernetes_alpha
            )
        if Primitive.to_proto(self.resource_labels):
            request.resource.resource_labels = Primitive.to_proto(self.resource_labels)
        if Primitive.to_proto(self.label_fingerprint):
            request.resource.label_fingerprint = Primitive.to_proto(
                self.label_fingerprint
            )
        if ClusterLegacyAbac.to_proto(self.legacy_abac):
            request.resource.legacy_abac.CopyFrom(
                ClusterLegacyAbac.to_proto(self.legacy_abac)
            )
        else:
            request.resource.ClearField("legacy_abac")
        if ClusterNetworkPolicy.to_proto(self.network_policy):
            request.resource.network_policy.CopyFrom(
                ClusterNetworkPolicy.to_proto(self.network_policy)
            )
        else:
            request.resource.ClearField("network_policy")
        if ClusterIPAllocationPolicy.to_proto(self.ip_allocation_policy):
            request.resource.ip_allocation_policy.CopyFrom(
                ClusterIPAllocationPolicy.to_proto(self.ip_allocation_policy)
            )
        else:
            request.resource.ClearField("ip_allocation_policy")
        if ClusterMasterAuthorizedNetworksConfig.to_proto(
            self.master_authorized_networks_config
        ):
            request.resource.master_authorized_networks_config.CopyFrom(
                ClusterMasterAuthorizedNetworksConfig.to_proto(
                    self.master_authorized_networks_config
                )
            )
        else:
            request.resource.ClearField("master_authorized_networks_config")
        if ClusterBinaryAuthorization.to_proto(self.binary_authorization):
            request.resource.binary_authorization.CopyFrom(
                ClusterBinaryAuthorization.to_proto(self.binary_authorization)
            )
        else:
            request.resource.ClearField("binary_authorization")
        if ClusterAutoscaling.to_proto(self.autoscaling):
            request.resource.autoscaling.CopyFrom(
                ClusterAutoscaling.to_proto(self.autoscaling)
            )
        else:
            request.resource.ClearField("autoscaling")
        if ClusterNetworkConfig.to_proto(self.network_config):
            request.resource.network_config.CopyFrom(
                ClusterNetworkConfig.to_proto(self.network_config)
            )
        else:
            request.resource.ClearField("network_config")
        if ClusterMaintenancePolicy.to_proto(self.maintenance_policy):
            request.resource.maintenance_policy.CopyFrom(
                ClusterMaintenancePolicy.to_proto(self.maintenance_policy)
            )
        else:
            request.resource.ClearField("maintenance_policy")
        if ClusterDefaultMaxPodsConstraint.to_proto(self.default_max_pods_constraint):
            request.resource.default_max_pods_constraint.CopyFrom(
                ClusterDefaultMaxPodsConstraint.to_proto(
                    self.default_max_pods_constraint
                )
            )
        else:
            request.resource.ClearField("default_max_pods_constraint")
        if ClusterResourceUsageExportConfig.to_proto(self.resource_usage_export_config):
            request.resource.resource_usage_export_config.CopyFrom(
                ClusterResourceUsageExportConfig.to_proto(
                    self.resource_usage_export_config
                )
            )
        else:
            request.resource.ClearField("resource_usage_export_config")
        if ClusterAuthenticatorGroupsConfig.to_proto(self.authenticator_groups_config):
            request.resource.authenticator_groups_config.CopyFrom(
                ClusterAuthenticatorGroupsConfig.to_proto(
                    self.authenticator_groups_config
                )
            )
        else:
            request.resource.ClearField("authenticator_groups_config")
        if ClusterPrivateClusterConfig.to_proto(self.private_cluster_config):
            request.resource.private_cluster_config.CopyFrom(
                ClusterPrivateClusterConfig.to_proto(self.private_cluster_config)
            )
        else:
            request.resource.ClearField("private_cluster_config")
        if ClusterDatabaseEncryption.to_proto(self.database_encryption):
            request.resource.database_encryption.CopyFrom(
                ClusterDatabaseEncryption.to_proto(self.database_encryption)
            )
        else:
            request.resource.ClearField("database_encryption")
        if ClusterVerticalPodAutoscaling.to_proto(self.vertical_pod_autoscaling):
            request.resource.vertical_pod_autoscaling.CopyFrom(
                ClusterVerticalPodAutoscaling.to_proto(self.vertical_pod_autoscaling)
            )
        else:
            request.resource.ClearField("vertical_pod_autoscaling")
        if ClusterShieldedNodes.to_proto(self.shielded_nodes):
            request.resource.shielded_nodes.CopyFrom(
                ClusterShieldedNodes.to_proto(self.shielded_nodes)
            )
        else:
            request.resource.ClearField("shielded_nodes")
        if Primitive.to_proto(self.master_version):
            request.resource.master_version = Primitive.to_proto(self.master_version)
        if Primitive.to_proto(self.location):
            request.resource.location = Primitive.to_proto(self.location)
        if Primitive.to_proto(self.enable_tpu):
            request.resource.enable_tpu = Primitive.to_proto(self.enable_tpu)
        if ClusterConditionsArray.to_proto(self.conditions):
            request.resource.conditions.extend(
                ClusterConditionsArray.to_proto(self.conditions)
            )
        if ClusterAutopilot.to_proto(self.autopilot):
            request.resource.autopilot.CopyFrom(
                ClusterAutopilot.to_proto(self.autopilot)
            )
        else:
            request.resource.ClearField("autopilot")
        if Primitive.to_proto(self.project):
            request.resource.project = Primitive.to_proto(self.project)
        if ClusterNodeConfig.to_proto(self.node_config):
            request.resource.node_config.CopyFrom(
                ClusterNodeConfig.to_proto(self.node_config)
            )
        else:
            request.resource.ClearField("node_config")
        if ClusterReleaseChannel.to_proto(self.release_channel):
            request.resource.release_channel.CopyFrom(
                ClusterReleaseChannel.to_proto(self.release_channel)
            )
        else:
            request.resource.ClearField("release_channel")
        if ClusterWorkloadIdentityConfig.to_proto(self.workload_identity_config):
            request.resource.workload_identity_config.CopyFrom(
                ClusterWorkloadIdentityConfig.to_proto(self.workload_identity_config)
            )
        else:
            request.resource.ClearField("workload_identity_config")
        if ClusterNotificationConfig.to_proto(self.notification_config):
            request.resource.notification_config.CopyFrom(
                ClusterNotificationConfig.to_proto(self.notification_config)
            )
        else:
            request.resource.ClearField("notification_config")
        if ClusterConfidentialNodes.to_proto(self.confidential_nodes):
            request.resource.confidential_nodes.CopyFrom(
                ClusterConfidentialNodes.to_proto(self.confidential_nodes)
            )
        else:
            request.resource.ClearField("confidential_nodes")
        if Primitive.to_proto(self.initial_cluster_version):
            request.resource.initial_cluster_version = Primitive.to_proto(
                self.initial_cluster_version
            )
        if Primitive.to_proto(self.instance_group_urls):
            request.resource.instance_group_urls.extend(
                Primitive.to_proto(self.instance_group_urls)
            )
        # Response is intentionally unused; a failed delete surfaces as a
        # gRPC exception from the stub call.
        response = stub.DeleteContainerCluster(request)
@classmethod
def list(self, project, location, service_account_file=""):
stub = cluster_pb2_grpc.ContainerClusterServiceStub(channel.Channel())
request = cluster_pb2.ListContainerClusterRequest()
request.service_account_file = | |
2, 2, 2), 348),
Sample(False, (3, 2, 1, 2, 3, 1), 349),
Sample(False, (3, 2, 1, 2, 3, 2), 350),
Sample(False, (3, 2, 1, 2, 4, 1), 351),
Sample(False, (3, 2, 1, 2, 4, 2), 352),
Sample(True, (3, 2, 1, 3, 1, 1), 353),
Sample(True, (3, 2, 1, 3, 1, 2), 354),
Sample(False, (3, 2, 1, 3, 2, 1), 355),
Sample(False, (3, 2, 1, 3, 2, 2), 356),
Sample(False, (3, 2, 1, 3, 3, 1), 357),
Sample(False, (3, 2, 1, 3, 3, 2), 358),
Sample(False, (3, 2, 1, 3, 4, 1), 359),
Sample(False, (3, 2, 1, 3, 4, 2), 360),
Sample(True, (3, 2, 2, 1, 1, 1), 361),
Sample(True, (3, 2, 2, 1, 1, 2), 362),
Sample(False, (3, 2, 2, 1, 2, 1), 363),
Sample(False, (3, 2, 2, 1, 2, 2), 364),
Sample(False, (3, 2, 2, 1, 3, 1), 365),
Sample(False, (3, 2, 2, 1, 3, 2), 366),
Sample(False, (3, 2, 2, 1, 4, 1), 367),
Sample(False, (3, 2, 2, 1, 4, 2), 368),
Sample(True, (3, 2, 2, 2, 1, 1), 369),
Sample(True, (3, 2, 2, 2, 1, 2), 370),
Sample(False, (3, 2, 2, 2, 2, 1), 371),
Sample(False, (3, 2, 2, 2, 2, 2), 372),
Sample(False, (3, 2, 2, 2, 3, 1), 373),
Sample(False, (3, 2, 2, 2, 3, 2), 374),
Sample(False, (3, 2, 2, 2, 4, 1), 375),
Sample(False, (3, 2, 2, 2, 4, 2), 376),
Sample(True, (3, 2, 2, 3, 1, 1), 377),
Sample(True, (3, 2, 2, 3, 1, 2), 378),
Sample(False, (3, 2, 2, 3, 2, 1), 379),
Sample(False, (3, 2, 2, 3, 2, 2), 380),
Sample(False, (3, 2, 2, 3, 3, 1), 381),
Sample(False, (3, 2, 2, 3, 3, 2), 382),
Sample(False, (3, 2, 2, 3, 4, 1), 383),
Sample(False, (3, 2, 2, 3, 4, 2), 384),
Sample(True, (3, 3, 1, 1, 1, 1), 385),
Sample(True, (3, 3, 1, 1, 1, 2), 386),
Sample(True, (3, 3, 1, 1, 2, 1), 387),
Sample(True, (3, 3, 1, 1, 2, 2), 388),
Sample(True, (3, 3, 1, 1, 3, 1), 389),
Sample(True, (3, 3, 1, 1, 3, 2), 390),
Sample(True, (3, 3, 1, 1, 4, 1), 391),
Sample(True, (3, 3, 1, 1, 4, 2), 392),
Sample(True, (3, 3, 1, 2, 1, 1), 393),
Sample(True, (3, 3, 1, 2, 1, 2), 394),
Sample(True, (3, 3, 1, 2, 2, 1), 395),
Sample(True, (3, 3, 1, 2, 2, 2), 396),
Sample(True, (3, 3, 1, 2, 3, 1), 397),
Sample(True, (3, 3, 1, 2, 3, 2), 398),
Sample(True, (3, 3, 1, 2, 4, 1), 399),
Sample(True, (3, 3, 1, 2, 4, 2), 400),
Sample(True, (3, 3, 1, 3, 1, 1), 401),
Sample(True, (3, 3, 1, 3, 1, 2), 402),
Sample(True, (3, 3, 1, 3, 2, 1), 403),
Sample(True, (3, 3, 1, 3, 2, 2), 404),
Sample(True, (3, 3, 1, 3, 3, 1), 405),
Sample(True, (3, 3, 1, 3, 3, 2), 406),
Sample(True, (3, 3, 1, 3, 4, 1), 407),
Sample(True, (3, 3, 1, 3, 4, 2), 408),
Sample(True, (3, 3, 2, 1, 1, 1), 409),
Sample(True, (3, 3, 2, 1, 1, 2), 410),
Sample(True, (3, 3, 2, 1, 2, 1), 411),
Sample(True, (3, 3, 2, 1, 2, 2), 412),
Sample(True, (3, 3, 2, 1, 3, 1), 413),
Sample(True, (3, 3, 2, 1, 3, 2), 414),
Sample(True, (3, 3, 2, 1, 4, 1), 415),
Sample(True, (3, 3, 2, 1, 4, 2), 416),
Sample(True, (3, 3, 2, 2, 1, 1), 417),
Sample(True, (3, 3, 2, 2, 1, 2), 418),
Sample(True, (3, 3, 2, 2, 2, 1), 419),
Sample(True, (3, 3, 2, 2, 2, 2), 420),
Sample(True, (3, 3, 2, 2, 3, 1), 421),
Sample(True, (3, 3, 2, 2, 3, 2), 422),
Sample(True, (3, 3, 2, 2, 4, 1), 423),
Sample(True, (3, 3, 2, 2, 4, 2), 424),
Sample(True, (3, 3, 2, 3, 1, 1), 425),
Sample(True, (3, 3, 2, 3, 1, 2), 426),
Sample(True, (3, 3, 2, 3, 2, 1), 427),
Sample(True, (3, 3, 2, 3, 2, 2), 428),
Sample(True, (3, 3, 2, 3, 3, 1), 429),
Sample(True, (3, 3, 2, 3, 3, 2), 430),
Sample(True, (3, 3, 2, 3, 4, 1), 431),
Sample(True, (3, 3, 2, 3, 4, 2), 432))
monk2 = (
Sample(False, (1, 1, 1, 1, 2, 2), 4),
Sample(False, (1, 1, 1, 1, 4, 1), 7),
Sample(False, (1, 1, 1, 2, 1, 1), 9),
Sample(False, (1, 1, 1, 2, 1, 2), 10),
Sample(False, (1, 1, 1, 2, 2, 1), 11),
Sample(False, (1, 1, 1, 2, 3, 1), 13),
Sample(False, (1, 1, 1, 2, 4, 1), 15),
Sample(False, (1, 1, 1, 3, 2, 1), 19),
Sample(False, (1, 1, 1, 3, 4, 1), 23),
Sample(False, (1, 1, 2, 1, 1, 1), 25),
Sample(False, (1, 1, 2, 1, 1, 2), 26),
Sample(False, (1, 1, 2, 2, 3, 1), 37),
Sample(False, (1, 1, 2, 2, 4, 1), 39),
Sample(True, (1, 1, 2, 2, 4, 2), 40),
Sample(False, (1, 1, 2, 3, 1, 2), 42),
Sample(True, (1, 1, 2, 3, 2, 2), 44),
Sample(False, (1, 2, 1, 1, 1, 2), 50),
Sample(False, (1, 2, 1, 2, 1, 2), 58),
Sample(True, (1, 2, 1, 2, 2, 2), 60),
Sample(False, (1, 2, 1, 2, 3, 1), 61),
Sample(True, (1, 2, 1, 2, 3, 2), 62),
Sample(False, (1, 2, 1, 2, 4, 1), 63),
Sample(False, (1, 2, 1, 3, 1, 1), 65),
Sample(False, (1, 2, 1, 3, 1, 2), 66),
Sample(True, (1, 2, 1, 3, 2, 2), 68),
Sample(False, (1, 2, 1, 3, 3, 1), 69),
Sample(True, (1, 2, 1, 3, 3, 2), 70),
Sample(False, (1, 2, 1, 3, 4, 1), 71),
Sample(True, (1, 2, 1, 3, 4, 2), 72),
Sample(False, (1, 2, 2, 1, 2, 1), 75),
Sample(False, (1, 2, 2, 1, 4, 1), 79),
Sample(True, (1, 2, 2, 2, 3, 1), 85),
Sample(True, (1, 2, 2, 2, 4, 1), 87),
Sample(False, (1, 2, 2, 3, 1, 1), 89),
Sample(True, (1, 2, 2, 3, 1, 2), 90),
Sample(True, (1, 2, 2, 3, 3, 1), 93),
Sample(False, (1, 2, 2, 3, 3, 2), 94),
Sample(True, (1, 2, 2, 3, 4, 1), 95),
Sample(False, (1, 2, 2, 3, 4, 2), 96),
Sample(False, (1, 3, 1, 1, 1, 2), 98),
Sample(False, (1, 3, 1, 1, 2, 2), 100),
Sample(False, (1, 3, 1, 1, 3, 1), 101),
Sample(False, (1, 3, 1, 1, 3, 2), 102),
Sample(False, (1, 3, 1, 2, 2, 1), 107),
Sample(True, (1, 3, 1, 2, 2, 2), 108),
Sample(True, (1, 3, 1, 2, 3, 2), 110),
Sample(False, (1, 3, 1, 2, 4, 1), 111),
Sample(True, (1, 3, 1, 3, 2, 2), 116),
Sample(False, (1, 3, 1, 3, 3, 1), 117),
Sample(True, (1, 3, 1, 3, 4, 2), 120),
Sample(False, (1, 3, 2, 1, 3, 1), 125),
Sample(True, (1, 3, 2, 1, 3, 2), 126),
Sample(False, (1, 3, 2, 1, 4, 1), 127),
Sample(True, (1, 3, 2, 2, 1, 2), 130),
Sample(False, (1, 3, 2, 2, 3, 2), 134),
Sample(False, (1, 3, 2, 2, 4, 2), 136),
Sample(True, (1, 3, 2, 3, 2, 1), 139),
Sample(False, (2, 1, 1, 1, 1, 1), 145),
Sample(False, (2, 1, 1, 1, 2, 2), 148),
Sample(False, (2, 1, 1, 1, 3, 1), 149),
Sample(True, (2, 1, 1, 2, 2, 2), 156),
Sample(False, (2, 1, 1, 3, 1, 2), 162),
Sample(True, (2, 1, 1, 3, 2, 2), 164),
Sample(True, (2, 1, 1, 3, 3, 2), 166),
Sample(False, (2, 1, 1, 3, 4, 1), 167),
Sample(False, (2, 1, 2, 1, 1, 1), 169),
Sample(True, (2, 1, 2, 1, 2, 2), 172),
Sample(False, (2, 1, 2, 1, 4, 1), 175),
Sample(True, (2, 1, 2, 2, 2, 1), 179),
Sample(False, (2, 1, 2, 2, 4, 2), 184),
Sample(False, (2, 1, 2, 3, 1, 1), 185),
Sample(True, (2, 1, 2, 3, 1, 2), 186),
Sample(False, (2, 1, 2, 3, 2, 2), 188),
Sample(False, (2, 1, 2, 3, 3, 2), 190),
Sample(False, (2, 1, 2, 3, 4, 2), 192),
Sample(False, (2, | |
<reponame>jlovering/ChallengerParser<gh_stars>0
import unittest
import logging
import sys
import re
import testCaseSoT
import ChallengerParser as parser
import ChallengerGrammar
import tatsu
logging.basicConfig(stream=sys.stderr, level=logging.INFO)
class DayTest():
    """Mixin with the shared parse-and-compare harness for the Day* cases.

    Subclasses (which also derive from unittest.TestCase) must provide
    ``self.definition`` and ``self.infile`` in their setUp().
    """

    def deepCompare(self, struct1, struct2):
        """Recursively compare two possibly-nested structures element-wise.

        Returns True when every zipped pair matches. NOTE: zip() truncates
        to the shorter input, so extra trailing elements at the top level
        are not detected here; nested length mismatches are.
        """
        truthy = []
        for (s1, s2) in zip(struct1, struct2):
            #logging.debug("%s %s" % (s1, s2))
            try:
                if len(s1) == len(s2):
                    if len(s1) == 1:
                        truthy.append(s1 == s2)
                    else:
                        truthy.append(self.deepCompare(s1, s2))
                else:
                    truthy.append(False)
            # Was a bare `except:`, which also swallowed recursion errors
            # and KeyboardInterrupt. Only TypeError (len() of an unsized
            # value such as an int) is the expected fall-through.
            except TypeError:
                truthy.append(s1 == s2)
        #logging.debug(truthy)
        return all(truthy)

    def testParse(self):
        """Parse self.infile with self.definition and compare the result
        against the recorded source-of-truth structure in testCaseSoT."""
        par = parser.Input(self.infile, self.definition)
        outData = par.parse()
        logging.debug(outData)
        # eval() on a constructed attribute path is test-only convenience
        # over trusted, in-repo names ("testCaseSoT.<ClassName>").
        SoT = eval("testCaseSoT.%s" % type(self).__name__.replace("_Strings",""))
        logging.debug(SoT)
        assert self.deepCompare(SoT, outData)

    def tearDown(self):
        # Close the fixture file opened by the subclass's setUp().
        self.infile.close()
class Day1Test(DayTest, unittest.TestCase):
    """Day 1: a flat list of integers, one per line."""

    def setUp(self):
        definition = parser.InputDefinition()
        int_line = parser.LiteralBlock(int)
        definition.addBuilder(parser.ListBuilder(int_line, ""))
        self.definition = definition
        self.infile = open("testfiles/day1-testInput", "r")
class Day1Test_Strings(DayTest, unittest.TestCase):
    # Same input as Day1Test, but the definition comes from the string-based
    # mini-language: [[ ... ]] is a list builder, #int# a LiteralBlock(int).
    def setUp(self):
        self.definition = parser.InputDefinition()
        self.definition.buildersFromStr('''[[
        #int#
        ]]''')
        self.infile = open("testfiles/day1-testInput", "r")
class Day2Test(DayTest, unittest.TestCase):
    """Day 2: one password policy + password per line."""

    def setUp(self):
        # Three space-separated blocks per line: the "lo-hi" range, the
        # policy letter (trailing ':' stripped), and the password itself.
        line_blocks = [
            parser.ListBlock(int, '-'),
            parser.LiteralBlock(lambda e: str(e)[:-1]),
            parser.LiteralBlock(str),
        ]
        self.definition = parser.InputDefinition()
        self.definition.addBuilder(
            parser.ListBuilder(
                parser.MultiBlockLine(line_blocks, ' '),
                parser.EMPTYLINE,
            )
        )
        self.infile = open("testfiles/day2-testInput", "r")
class Day2Test_Strings(DayTest, unittest.TestCase):
    # String-spec twin of Day2Test's explicit builders.
    def setUp(self):
        self.definition = parser.InputDefinition()
        # Register the helper under a name so the spec string can call it
        # (strips the trailing ':' from the policy letter).
        self.definition.addFunction('Day2Test_Strings_custom', lambda s: s[:-1])
        self.definition.buildersFromStr('''[[
        ([int '-'] #Day2Test_Strings_custom# #str# ' ')
        ]]''')
        self.infile = open("testfiles/day2-testInput", "r")
class Day3Test(DayTest, unittest.TestCase):
    """Day 3: each line becomes a list of its characters."""

    def setUp(self):
        self.definition = parser.InputDefinition()
        char_row = parser.ListBlock(str, None)
        self.definition.addBuilder(parser.ListBuilder(char_row, parser.EMPTYLINE))
        self.infile = open("testfiles/day3-testInput", "r")
class Day3Test_Strings(DayTest, unittest.TestCase):
    # String-spec twin of Day3Test.
    def setUp(self):
        # One InputDefinition is enough: the original assigned it twice in a
        # row, creating and immediately discarding an instance.
        self.definition = parser.InputDefinition()
        self.definition.buildersFromStr('''[[
        [str None]
        ]]''')
        self.infile = open("testfiles/day3-testInput", "r")
class Day4Test(DayTest, unittest.TestCase):
    """Day 4: blank-line separated records of space-separated key:value pairs,
    where one record may span several consecutive lines."""

    def setUp(self):
        fields = parser.HashLineBlock(
            parser.HashPairBlock(str, str, ':'),
            ' ',
        )
        record = parser.MultiLineSpanBuilder(fields, ' ', parser.EMPTYLINE)
        self.definition = parser.InputDefinition()
        self.definition.addBuilder(parser.ListBuilder(record, parser.EMPTYLINE))
        self.infile = open("testfiles/day4-testInput", "r")
class Day4Test_Strings(DayTest, unittest.TestCase):
    # String-spec twin of Day4Test: {{ ... }} spans multi-line records,
    # {*str str ':' ' '} hashes space-separated "key:value" pairs.
    def setUp(self):
        self.definition = parser.InputDefinition()
        self.definition.buildersFromStr('''[[
        {{
        {*str str ':' ' '}
        }}
        ]]''')
        self.infile = open("testfiles/day4-testInput", "r")
class Day5Test(DayTest, unittest.TestCase):
    """Day 5: boarding passes decoded to integer seat ids."""

    def setUp(self):
        def seat_id(code):
            # Boarding passes are binary: B/R -> 1, F/L -> 0.
            return int(parser.tr(code, 'BFRL', '1010'), 2)

        self.definition = parser.InputDefinition()
        self.definition.addBuilder(parser.ListBuilder(parser.LiteralBlock(seat_id), ""))
        self.infile = open("testfiles/day5-testInput", "r")
class Day5Test_Strings(DayTest, unittest.TestCase):
    # String-spec twin of Day5Test.
    def setUp(self):
        self.definition = parser.InputDefinition()
        # Registered under a unique name so the spec can reference it:
        # decodes a boarding pass (B/R -> 1, F/L -> 0) to its seat id.
        self.definition.addFunction('Day5Test_Strings_custom', lambda v: int(parser.tr(v, 'BFRL', '1010'), 2))
        self.definition.buildersFromStr('''[[
        #Day5Test_Strings_custom#
        ]]''')
        self.infile = open("testfiles/day5-testInput", "r")
class Day6Test(DayTest, unittest.TestCase):
    """Day 6: blank-line separated groups; each group is a list of
    per-person character sets."""

    def setUp(self):
        person = parser.SetBlock(str, parser.NODELIM)
        group = parser.ListBuilder(person, parser.EMPTYLINE)
        self.definition = parser.InputDefinition()
        self.definition.addBuilder(parser.ListBuilder(group, parser.EMPTYLINE))
        self.infile = open("testfiles/day6-testInput", "r")
class Day6Test_Strings(DayTest, unittest.TestCase):
    # String-spec twin of Day6Test: nested [[ ]] builds list-of-lists,
    # [<str None] is a character SetBlock.
    def setUp(self):
        self.definition = parser.InputDefinition()
        self.definition.buildersFromStr('''[[
        [[
        [<str None]
        ]]
        ]]''')
        self.infile = open("testfiles/day6-testInput", "r")
class Day7Test(DayTest, unittest.TestCase):
    """Day 7: "X bags contain N color bags, ..." rules hashed by outer bag."""

    def setUp(self):
        def bagParse(b):
            # "no other bags" means the bag is empty -> None.
            if b == " no other bags.":
                return None
            contents = {}
            for item in b.split(','):
                m = re.match(r"[\s]*(\d+) (.+) bag[s]{0,1}", item)
                contents[m.group(2)] = int(m.group(1))
            return contents

        self.definition = parser.InputDefinition()
        self.definition.addBuilder(
            parser.HashBuilder(
                parser.HashPairBlock(str, bagParse, "bags contain"),
                "",
            ),
        )
        self.infile = open("testfiles/day7-testInput", "r")
class Day7Test_Strings(DayTest, unittest.TestCase):
    # String-spec twin of Day7Test.
    def setUp(self):
        def bagParse(b):
            # "no other bags" means the bag is empty -> None.
            if b == " no other bags.":
                return None
            else:
                sR = {}
                for l in b.split(','):
                    bM = re.match(r"[\s]*(\d+) (.+) bag[s]{0,1}", l)
                    sR[bM.group(2)] = int(bM.group(1))
                return sR
        self.definition = parser.InputDefinition()
        # Register the parser so the {str ... "bags contain"} spec can use it.
        self.definition.addFunction('Day7Test_Strings_custom', bagParse)
        self.definition.buildersFromStr('''{{
        {str Day7Test_Strings_custom "bags contain"}
        }}''')
        self.infile = open("testfiles/day7-testInput", "r")
class Day8Test(DayTest, unittest.TestCase):
    """Day 8 input: one "<op> <arg>" instruction per line."""

    def setUp(self):
        instruction = parser.MultiBlockLine(
            [parser.LiteralBlock(str), parser.LiteralBlock(int)],
            parser.SPACE)
        self.definition = parser.InputDefinition()
        self.definition.addBuilder(parser.ListBuilder(instruction, parser.EMPTYLINE))
        self.infile = open("testfiles/day8-testInput", "r")
class Day8Test_Strings(DayTest, unittest.TestCase):
    """Day 8 input parsed via the string-DSL builder definition.

    Equivalent to Day8Test: a list of (str opcode, int argument)
    pairs split on a single space.
    """

    def setUp(self):
        self.definition = parser.InputDefinition()
        self.definition.buildersFromStr('''[[
(#str# #int# ' ')
]]''')
        self.infile = open("testfiles/day8-testInput", "r")
class Day13Test(DayTest, unittest.TestCase):
    """Day 13: a single int line followed by a comma-separated bus list."""

    def setUp(self):
        def busParser(token):
            # "x" marks an unconstrained slot in the schedule.
            return None if token == "x" else int(token)

        self.definition = parser.InputDefinition()
        self.definition.addBuilder(parser.SingleLineBuilder(parser.LiteralBlock(int)))
        self.definition.addBuilder(parser.SingleLineBuilder(parser.ListBlock(busParser, ',')))
        self.infile = open("testfiles/day13-testInput", "r")
class Day13Test_Strings(DayTest, unittest.TestCase):
    """Day 13 input parsed via the string-DSL builder definition."""

    def setUp(self):
        def busParser(token):
            # "x" marks an unconstrained slot in the schedule.
            return None if token == "x" else int(token)

        self.definition = parser.InputDefinition()
        self.definition.addFunction('Day13Test_Strings_custom', busParser)
        self.definition.buildersFromStr('''#int#
[Day13Test_Strings_custom ',']''')
        self.infile = open("testfiles/day13-testInput", "r")
class Day14Test(DayTest, unittest.TestCase):
    """Day 14: a mask line followed by "mem[addr] = value" assignments."""

    def setUp(self):
        def memKeyParse(b):
            # Bug fix: the quantifier now sits inside the capture group
            # (r"mem\[(\d+)\]" instead of r"mem\[(\d)+\]"), so multi-digit
            # addresses return all of their digits rather than only the
            # last one. Single-digit addresses behave as before.
            mP = re.match(r"mem\[(\d+)\]", b)
            return mP.group(1)

        self.definition = parser.InputDefinition()
        # First line: keep only the raw mask string after " = ".
        self.definition.addBuilder(
            parser.SingleLineBuilder(
                parser.LiteralBlock(lambda d: d.split(' = ')[1])))
        # Remaining lines: (address, int value) pairs split on " = ".
        self.definition.addBuilder(
            parser.ListBuilder(
                parser.MultiBlockLine(
                    [parser.LiteralBlock(memKeyParse), parser.LiteralBlock(int)],
                    ' = '),
                parser.EMPTYLINE))
        self.infile = open("testfiles/day14-testInput", "r")
class Day14Test_Strings(DayTest, unittest.TestCase):
    """Day 14 input parsed via the string-DSL builder definition."""

    def setUp(self):
        def memKeyParse(b):
            # Bug fix: quantifier moved inside the capture group so that
            # multi-digit addresses keep all digits (was r"mem\[(\d)+\]",
            # which captured only the final digit).
            mP = re.match(r"mem\[(\d+)\]", b)
            return mP.group(1)

        self.definition = parser.InputDefinition()
        self.definition.addFunction('Day14Test_Strings_custom', memKeyParse)
        self.definition.addFunction('Day14Test_Strings_custom1', lambda d: d.split(' = ')[1])
        self.definition.buildersFromStr('''#Day14Test_Strings_custom1#
[[
(#Day14Test_Strings_custom# #int# " = ")
]]''')
        self.infile = open("testfiles/day14-testInput", "r")
class Day16Test(DayTest, unittest.TestCase):
    """Day 16: field rules, then two header-prefixed ticket sections."""

    def setUp(self):
        def int_range():
            # "lo-hi" parsed into a pair of ints; a fresh instance per use,
            # matching the two separate instances of the original code.
            return parser.MultiBlockLine(
                [parser.LiteralBlock(int), parser.LiteralBlock(int)], "-")

        rule_value = parser.MultiBlockLine([int_range(), int_range()], " or ")
        self.definition = parser.InputDefinition()
        # Section 1: "name: range or range" rules into a hash.
        self.definition.addBuilder(
            parser.HashBuilder(
                parser.HashPairBlock(str, rule_value, ":"),
                parser.EMPTYLINE))
        # Section 2: an unparsed header line, then one comma-separated int line.
        self.definition.addBuilder(
            parser.MultiBuilderBuilder(
                [parser.SingleLineBuilder(parser.LiteralNoParse()),
                 parser.SingleLineBuilder(parser.ListBlock(int, ','))],
                parser.EMPTYLINE))
        # Section 3: an unparsed header line, then many comma-separated int lines.
        self.definition.addBuilder(
            parser.MultiBuilderBuilder(
                [parser.SingleLineBuilder(parser.LiteralNoParse()),
                 parser.ListBuilder(parser.ListBlock(int, ','), parser.EMPTYLINE)],
                parser.EMPTYLINE))
        self.infile = open("testfiles/day16-testInput", "r")
class Day16Test_Strings(DayTest, unittest.TestCase):
    """Day 16 input parsed via the string-DSL builder definition.

    Equivalent to Day16Test: a hash of field rules, then the
    "your ticket:" section, then the "nearby tickets:" section.
    """

    def setUp(self):
        self.definition = parser.InputDefinition()
        self.definition.buildersFromStr('''{{
{str ([int '-'] [int '-'] ' or ') ':'}
}}
((
#"your ticket:"#
[int ',']
))
((
#"nearby tickets:"#
[[
[int ',']
]]
))''')
        self.infile = open("testfiles/day16-testInput", "r")
class Day19Test(DayTest, unittest.TestCase):
    """Day 19: numbered rules ("n: alternatives") then message lines."""

    def setUp(self):
        # The first alternative is either a space-separated list of rule ids
        # or a single literal taken from index 1 of the token (presumably the
        # character between quotes -- confirm against the input format).
        first_branch = parser.OrBlock([
            parser.ListBlock(int, parser.SPACE),
            parser.LiteralBlock(lambda s: s[1]),
        ])
        rule_body = parser.MultiBlockLine(
            [first_branch, parser.ListBlock(int, parser.SPACE)], ' | ')
        self.definition = parser.InputDefinition()
        self.definition.addBuilder(
            parser.HashBuilder(
                parser.HashPairBlock(int, rule_body, ": "),
                parser.EMPTYLINE))
        self.definition.addBuilder(
            parser.ListBuilder(parser.ListBlock(str, None), parser.EMPTYLINE))
        self.infile = open("testfiles/day19-testInput", "r")
class Day19Test_Strings(DayTest, unittest.TestCase):
    """Day 19 input parsed via the string-DSL builder definition.

    Equivalent to Day19Test: a hash of numbered rules followed by a
    list of message lines; the custom function returns the character
    at index 1 of a token (presumably a quoted literal -- confirm).
    """

    def setUp(self):
        self.definition = parser.InputDefinition()
        self.definition.addFunction('Day19Test_Strings_custom', lambda s: s[1])
        self.definition.buildersFromStr('''{{
{int ([int ' '] or #Day19Test_Strings_custom# [int ' '] ' | ') ': '}
}}
[[
[str None]
]]''')
        self.infile = open("testfiles/day19-testInput", "r")
class Day20Test(DayTest, unittest.TestCase):
    """Day 20: "Tile NNNN:" headers each followed by a grid of lines."""

    def setUp(self):
        header = parser.SingleLineBuilder(
            parser.MultiBlockLine(
                [parser.LiteralNoParse("Tile"),
                 # Drop the token's last character (presumably ':') before
                 # parsing the tile id.
                 parser.LiteralBlock(lambda s: int(s[:-1]))],
                parser.SPACE))
        grid = parser.ListBuilder(parser.ListBlock(str, None), parser.EMPTYLINE)
        self.definition = parser.InputDefinition()
        self.definition.addBuilder(
            parser.MultiBuilderBuilder([header, grid], parser.EMPTYLINE))
        self.infile = open("testfiles/day20-testInput", "r")
class Day20Test_Strings(DayTest, unittest.TestCase):
    """Day 20 input parsed via the string-DSL builder definition.

    Equivalent to Day20Test; the custom function parses the tile id by
    dropping the token's last character (presumably a trailing ':').
    """

    def setUp(self):
        self.definition = parser.InputDefinition()
        self.definition.addFunction('Day20Test_Strings_custom', lambda s: int(s[:-1]))
        self.definition.buildersFromStr('''[[
((
(#"Tile"# #Day20Test_Strings_custom# ' ')
[[
[str None]
]]
))
]]''')
        self.infile = open("testfiles/day20-testInput", "r")
class Day21Test(DayTest, unittest.TestCase):
    """
    The raw input is irregular, so the parser can only return a list
    structure; further handling is needed by the consumer.
    """

    def setUp(self):
        allergens = parser.EncapsulatedLine(
            # Trim the last character (presumably the closing ')') before
            # splitting the comma-separated allergen list.
            lambda s: s[:-1],
            parser.ListBlock(str, ', '))
        line = parser.MultiBlockLine(
            [parser.ListBlock(str, ' '), allergens],
            ' (contains ')
        self.definition = parser.InputDefinition()
        self.definition.addBuilder(parser.ListBuilder(line, parser.EMPTYLINE))
        self.infile = open("testfiles/day21-testInput", "r")
class Day21Test_Strings(DayTest, unittest.TestCase):
    """Day 21 input parsed via the string-DSL builder definition.

    Equivalent to Day21Test; the custom function trims the last
    character of the allergen section (presumably the closing ')').
    """

    def setUp(self):
        self.definition = parser.InputDefinition()
        self.definition.addFunction('Day21Test_Strings_custom', lambda s: s[:-1])
        self.definition.buildersFromStr('''[[
([str ' '] >[str ', '] Day21Test_Strings_custom< ' (contains ')
]]''')
        self.infile = open("testfiles/day21-testInput", "r")
class Day21ATest(DayTest, unittest.TestCase):
    """
    Variant of Day21Test that aggregates results through parser callbacks;
    the raw input is irregular, so extra handling happens during parsing.
    """

    def setUp(self):
        self.composedSetMap = {}
        self.composedKeysCount = {}

        def composeSetMap(h):
            # Intersect each key's set with what earlier lines produced;
            # first sighting stores the set as-is.
            for k in h:
                seen = self.composedSetMap.get(k)
                self.composedSetMap[k] = h[k] if seen is None else seen.intersection(h[k])
            return h

        def composeKeyCount(l):
            # Running occurrence count across all parsed lines.
            for v in l:
                self.composedKeysCount[v] = self.composedKeysCount.get(v, 0) + 1
            return l

        key_block = parser.EncapsulatedLine(
            lambda s: s[:-1],
            parser.ListBlock(str, ', ', composeKeyCount))
        pair = parser.HashPairBlock(
            key_block,
            parser.SetBlock(str, ' '),
            ' (contains ', reverse=True, distribute=True, callback=composeSetMap)
        self.definition = parser.InputDefinition()
        self.definition.addBuilder(parser.ListBuilder(pair, parser.EMPTYLINE))
        self.infile = open("testfiles/day21-testInput", "r")

    def testParse(self):
        super().testParse()
        assert self.composedSetMap == testCaseSoT.Day21ATest_compSet
        assert self.composedKeysCount == testCaseSoT.Day21ATest_compKeyC
class Day21ATest_Strings(DayTest, unittest.TestCase):
    """String-DSL variant of Day21ATest; aggregates via registered callbacks."""

    def setUp(self):
        self.composedSetMap = {}
        self.composedKeysCount = {}

        def composeSetMap(h):
            # Intersect each key's set with what earlier lines produced;
            # first sighting stores the set as-is.
            for k in h:
                seen = self.composedSetMap.get(k)
                self.composedSetMap[k] = h[k] if seen is None else seen.intersection(h[k])
            return h

        def composeKeyCount(l):
            # Running occurrence count across all parsed lines.
            for v in l:
                self.composedKeysCount[v] = self.composedKeysCount.get(v, 0) + 1
            return l

        self.definition = parser.InputDefinition()
        self.definition.addFunction('Day21Test_Strings_custom', lambda s: s[:-1])
        self.definition.addFunction('Day21ATest_composeSetMap', composeSetMap)
        self.definition.addFunction('Day21ATest_composeKeyCount', composeKeyCount)
        self.definition.buildersFromStr('''[[
{< rev [<str ' '] >[str ', ' / Day21ATest_composeKeyCount] Day21Test_Strings_custom< ' (contains ' / Day21ATest_composeSetMap }
]]''')
        self.infile = open("testfiles/day21-testInput", "r")

    def testParse(self):
        super().testParse()
        assert self.composedSetMap == testCaseSoT.Day21ATest_compSet
        assert self.composedKeysCount == testCaseSoT.Day21ATest_compKeyC
class Day22Test(DayTest, unittest.TestCase):
    """Day 22: repeated sections of a header line then one int per line."""

    def setUp(self):
        deck = parser.MultiBuilderBuilder(
            [parser.SingleLineBuilder(parser.LiteralNoParse()),
             parser.ListBuilder(parser.LiteralBlock(int), parser.EMPTYLINE)],
            parser.EMPTYLINE)
        self.definition = parser.InputDefinition()
        self.definition.addBuilder(deck)
        self.infile = open("testfiles/day22-testInput", "r")
class Day22Test_Strings(DayTest, unittest.TestCase):
    """Day 22 input parsed via the string-DSL builder definition.

    Equivalent to Day22Test: repeated sections of an unparsed header
    line followed by one int per line.
    """

    def setUp(self):
        self.definition = parser.InputDefinition()
        self.definition.buildersFromStr('''((
##
[[
#int#
]]
))''')
        self.infile = open("testfiles/day22-testInput", "r")
class Day24Test(DayTest, unittest.TestCase):
def setUp(self):
def isDir(d):
directions = ['ne','e','se','sw','w','nw']
if d in directions:
return parser.GACCEPT
else:
return parser.GCONTINUE
self.definition = parser.InputDefinition()
self.definition.addBuilder( \
parser.ListBuilder( \
parser.ListElementMunch(isDir, str, None), \
parser.EMPTYLINE) \
)
self.infile = open("testfiles/day24-testInput", | |
# Copyright 2018 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Unit tests for the Simulator plugin
"""
import pytest
import math
import pennylane as qml
import numpy as np
from pennylane_cirq import SimulatorDevice
from pennylane_cirq.simulator_device import z_eigs
import cirq
class TestHelperFunctions:
    """Test the helper functions needed for SimulatorDevice."""

    # fmt: off
    @pytest.mark.parametrize("num_wires,expected", [
        (1, [1, -1]),
        (2, [1, -1, -1, 1]),
        (3, [1, -1, -1, 1, -1, 1, 1, -1]),
    ])
    # fmt: on
    def test_z_eigs(self, num_wires, expected):
        """z_eigs(n) yields the eigenvalues of an n-fold tensor product
        of Pauli Z operators."""
        assert np.array_equal(z_eigs(num_wires), expected)
class TestDeviceIntegration:
    """Tests that the SimulatorDevice integrates well with PennyLane"""

    def test_device_loading(self):
        """Tests that the cirq.simulator device is properly loaded"""
        dev = qml.device("cirq.simulator", wires=2)
        # Identity and name first, then the default configuration values.
        assert isinstance(dev, SimulatorDevice)
        assert dev.short_name == "cirq.simulator"
        assert dev.num_wires == 2
        assert dev.shots == 1000
@pytest.fixture(scope="function")
def simulator_device_1_wire(shots, analytic):
    """Yield a fresh single-wire SimulatorDevice for each test."""
    device = SimulatorDevice(1, shots=shots, analytic=analytic)
    yield device
@pytest.fixture(scope="function")
def simulator_device_2_wires(shots, analytic):
    """Yield a fresh two-wire SimulatorDevice for each test."""
    device = SimulatorDevice(2, shots=shots, analytic=analytic)
    yield device
@pytest.fixture(scope="function")
def simulator_device_3_wires(shots, analytic):
    """Yield a fresh three-wire SimulatorDevice for each test."""
    device = SimulatorDevice(3, shots=shots, analytic=analytic)
    yield device
@pytest.mark.parametrize("shots,analytic", [(100, True)])
class TestInternalLogic:
    """Test internal logic of the SimulatorDevice class."""

    def test_probability_error(self, simulator_device_1_wire):
        """probability() must raise a DeviceError when the internal
        state has not been set."""
        device = simulator_device_1_wire
        device.state = None
        with pytest.raises(
            qml.DeviceError,
            match="Probability can not be computed because the internal state is None.",
        ):
            device.probability()
@pytest.mark.parametrize("shots,analytic", [(100, True)])
class TestApply:
"""Tests that gates are correctly applied"""
# fmt: off
@pytest.mark.parametrize("name,input,expected_output", [
("PauliX", [1, 0], np.array([0, 1])),
("PauliX", [1 / math.sqrt(2), 1 / math.sqrt(2)], [1 / math.sqrt(2), 1 / math.sqrt(2)]),
("PauliY", [1, 0], [0, 1j]),
("PauliY", [1 / math.sqrt(2), 1 / math.sqrt(2)], [-1j / math.sqrt(2), 1j / math.sqrt(2)]),
("PauliZ", [1, 0], [1, 0]),
("PauliZ", [1 / math.sqrt(2), 1 / math.sqrt(2)], [1 / math.sqrt(2), -1 / math.sqrt(2)]),
("Hadamard", [1, 0], [1 / math.sqrt(2), 1 / math.sqrt(2)]),
("Hadamard", [1 / math.sqrt(2), -1 / math.sqrt(2)], [0, 1]),
])
# fmt: on
def test_apply_operation_single_wire_no_parameters(
self, simulator_device_1_wire, tol, name, input, expected_output
):
"""Tests that applying an operation yields the expected output state for single wire
operations that have no parameters."""
simulator_device_1_wire._obs_queue = []
simulator_device_1_wire.pre_apply()
simulator_device_1_wire.apply(name, wires=[0], par=[])
simulator_device_1_wire.initial_state = np.array(input, dtype=np.complex64)
simulator_device_1_wire.pre_measure()
assert np.allclose(
simulator_device_1_wire.state, np.array(expected_output), **tol
)
# fmt: off
@pytest.mark.parametrize("name,input,expected_output", [
("CNOT", [1, 0, 0, 0], [1, 0, 0, 0]),
("CNOT", [0, 0, 1, 0], [0, 0, 0, 1]),
("CNOT", [1 / math.sqrt(2), 0, 0, 1 / math.sqrt(2)], [1 / math.sqrt(2), 0, 1 / math.sqrt(2), 0]),
("SWAP", [1, 0, 0, 0], [1, 0, 0, 0]),
("SWAP", [0, 0, 1, 0], [0, 1, 0, 0]),
("SWAP", [1 / math.sqrt(2), 0, -1 / math.sqrt(2), 0], [1 / math.sqrt(2), -1 / math.sqrt(2), 0, 0]),
("CZ", [1, 0, 0, 0], [1, 0, 0, 0]),
("CZ", [0, 0, 0, 1], [0, 0, 0, -1]),
("CZ", [1 / math.sqrt(2), 0, 0, -1 / math.sqrt(2)], [1 / math.sqrt(2), 0, 0, 1 / math.sqrt(2)]),
])
# fmt: on
def test_apply_operation_two_wires_no_parameters(
self, simulator_device_2_wires, tol, name, input, expected_output
):
"""Tests that applying an operation yields the expected output state for two wire
operations that have no parameters."""
simulator_device_2_wires._obs_queue = []
simulator_device_2_wires.pre_apply()
simulator_device_2_wires.apply(name, wires=[0, 1], par=[])
simulator_device_2_wires.initial_state = np.array(input, dtype=np.complex64)
simulator_device_2_wires.pre_measure()
assert np.allclose(
simulator_device_2_wires.state, np.array(expected_output), **tol
)
# fmt: off
@pytest.mark.parametrize("name,expected_output,par", [
("BasisState", [0, 0, 1, 0], [[1, 0]]),
("BasisState", [0, 0, 1, 0], [[1, 0]]),
("BasisState", [0, 0, 0, 1], [[1, 1]]),
("QubitStateVector", [0, 0, 1, 0], [[0, 0, 1, 0]]),
("QubitStateVector", [0, 0, 1, 0], [[0, 0, 1, 0]]),
("QubitStateVector", [0, 0, 0, 1], [[0, 0, 0, 1]]),
("QubitStateVector", [1 / math.sqrt(3), 0, 1 / math.sqrt(3), 1 / math.sqrt(3)], [[1 / math.sqrt(3), 0, 1 / math.sqrt(3), 1 / math.sqrt(3)]]),
("QubitStateVector", [1 / math.sqrt(3), 0, -1 / math.sqrt(3), 1 / math.sqrt(3)], [[1 / math.sqrt(3), 0, -1 / math.sqrt(3), 1 / math.sqrt(3)]]),
])
# fmt: on
def test_apply_operation_state_preparation(
self, simulator_device_2_wires, tol, name, expected_output, par
):
"""Tests that applying an operation yields the expected output state for single wire
operations that have no parameters."""
simulator_device_2_wires._obs_queue = []
simulator_device_2_wires.pre_apply()
simulator_device_2_wires.apply(name, wires=[0, 1], par=par)
simulator_device_2_wires.pre_measure()
assert np.allclose(
simulator_device_2_wires.state, np.array(expected_output), **tol
)
# fmt: off
@pytest.mark.parametrize("name,input,expected_output,par", [
("PhaseShift", [1, 0], [1, 0], [math.pi / 2]),
("PhaseShift", [0, 1], [0, 1j], [math.pi / 2]),
("PhaseShift", [1 / math.sqrt(2), 1 / math.sqrt(2)], [1 / math.sqrt(2), 1 / 2 + 1j / 2], [math.pi / 4]),
("RX", [1, 0], [1 / math.sqrt(2), -1j * 1 / math.sqrt(2)], [math.pi / 2]),
("RX", [1, 0], [0, -1j], [math.pi]),
("RX", [1 / math.sqrt(2), 1 / math.sqrt(2)], [1 / 2 - 1j / 2, 1 / 2 - 1j / 2], [math.pi / 2]),
("RY", [1, 0], [1 / math.sqrt(2), 1 / math.sqrt(2)], [math.pi / 2]),
("RY", [1, 0], [0, 1], [math.pi]),
("RY", [1 / math.sqrt(2), 1 / math.sqrt(2)], [0, 1], [math.pi / 2]),
("RZ", [1, 0], [1 / math.sqrt(2) - 1j / math.sqrt(2), 0], [math.pi / 2]),
("RZ", [0, 1], [0, 1j], [math.pi]),
("RZ", [1 / math.sqrt(2), 1 / math.sqrt(2)], [1 / 2 - 1j / 2, 1 / 2 + 1j / 2], [math.pi / 2]),
("Rot", [1, 0], [1 / math.sqrt(2) - 1j / math.sqrt(2), 0], [math.pi / 2, 0, 0]),
("Rot", [1, 0], [1 / math.sqrt(2), 1 / math.sqrt(2)], [0, math.pi / 2, 0]),
("Rot", [1 / math.sqrt(2), 1 / math.sqrt(2)], [1 / 2 - 1j / 2, 1 / 2 + 1j / 2], [0, 0, math.pi / 2]),
("Rot", [1, 0], [-1j / math.sqrt(2), -1 / math.sqrt(2)], [math.pi / 2, -math.pi / 2, math.pi / 2]),
("Rot", [1 / math.sqrt(2), 1 / math.sqrt(2)], [1 / 2 + 1j / 2, -1 / 2 + 1j / 2], [-math.pi / 2, math.pi, math.pi]),
("QubitUnitary", [1, 0], [1j / math.sqrt(2), 1j / math.sqrt(2)], [
np.array([
[1j / math.sqrt(2), 1j / math.sqrt(2)],
[1j / math.sqrt(2), -1j / math.sqrt(2)]
])
]),
("QubitUnitary", [0, 1], [1j / math.sqrt(2), -1j / math.sqrt(2)], [
np.array([
[1j / math.sqrt(2), 1j / math.sqrt(2)],
[1j / math.sqrt(2), -1j / math.sqrt(2)]
])
]),
("QubitUnitary", [1 / math.sqrt(2), -1 / math.sqrt(2)], [0, 1j], [
np.array([
[1j / math.sqrt(2), 1j / math.sqrt(2)],
[1j / math.sqrt(2), -1j / math.sqrt(2)]
])
]),
])
# fmt: on
def test_apply_operation_single_wire_with_parameters(
self, simulator_device_1_wire, tol, name, input, expected_output, par
):
"""Tests that applying an operation yields the expected output state for single wire
operations that have no parameters."""
simulator_device_1_wire._obs_queue = []
simulator_device_1_wire.pre_apply()
simulator_device_1_wire.apply(name, wires=[0], par=par)
simulator_device_1_wire.initial_state = np.array(input, dtype=np.complex64)
simulator_device_1_wire.pre_measure()
assert np.allclose(
simulator_device_1_wire.state, np.array(expected_output), **tol
)
# fmt: off
@pytest.mark.parametrize("name,input,expected_output,par", [
("CRX", [0, 1, 0, 0], [0, 1, 0, 0], [math.pi / 2]),
("CRX", [0, 0, 0, 1], [0, 0, -1j, 0], [math.pi]),
("CRX", [0, 1 / math.sqrt(2), 1 / math.sqrt(2), 0], [0, 1 / math.sqrt(2), 1 / 2, -1j / 2], [math.pi / 2]),
("CRY", [0, 0, 0, 1], [0, 0, -1 / math.sqrt(2), 1 / math.sqrt(2)], [math.pi / 2]),
("CRY", [0, 0, 0, 1], [0, 0, -1, 0], [math.pi]),
("CRY", [1 / math.sqrt(2), 1 / math.sqrt(2), 0, 0], [1 / math.sqrt(2), 1 / math.sqrt(2), 0, 0], [math.pi / 2]),
("CRZ", [0, 0, 0, 1], [0, 0, 0, 1 / math.sqrt(2) + 1j / math.sqrt(2)], [math.pi / 2]),
("CRZ", [0, 0, 0, 1], [0, 0, 0, 1j], [math.pi]),
("CRZ", [1 / math.sqrt(2), 1 / math.sqrt(2), 0, 0], [1 / math.sqrt(2), 1 / math.sqrt(2), 0, 0], [math.pi / 2]),
("CRot", [0, 0, 0, 1], [0, 0, 0, 1 / math.sqrt(2) + 1j / math.sqrt(2)], [math.pi / 2, 0, 0]),
("CRot", [0, 0, 0, 1], [0, 0, -1 | |
# Copyright © 2019 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests to assure the Search Model.
Test-Suite to ensure that the Search Model is working as expected.
"""
from http import HTTPStatus
import copy
import pytest
from ppr_api.models import SearchRequest, search_utils
from ppr_api.models.search_request import CHARACTER_SET_UNSUPPORTED
from ppr_api.models.utils import now_ts_offset, format_ts
from ppr_api.exceptions import BusinessException
# Valid test search criteria
AIRCRAFT_DOT_AC_JSON = {
    'type': 'AIRCRAFT_DOT',
    'criteria': {
        'value': 'cfyxw'
    },
    'clientReferenceId': 'T-SQ-AC-1'
}
AIRCRAFT_DOT_AF_JSON = {
    'type': 'AIRCRAFT_DOT',
    'criteria': {
        'value': 'af16031'
    },
    'clientReferenceId': 'T-SQ-AF-1'
}
MHR_NUMBER_JSON = {
    'type': 'MHR_NUMBER',
    'criteria': {
        'value': '220000'
    },
    'clientReferenceId': 'T-SQ-MH-1'
}
REGISTRATION_NUMBER_JSON = {
    'type': 'REGISTRATION_NUMBER',
    'criteria': {
        'value': 'test0001'
    },
    'clientReferenceId': 'T-SQ-RG-3'
}
# NOTE(review): amendment and change searches are performed by the
# registration number of the amendment/change document, hence the
# REGISTRATION_NUMBER type here -- confirm against the search API.
AMENDMENT_NUMBER_JSON = {
    'type': 'REGISTRATION_NUMBER',
    'criteria': {
        'value': 'TEST0007'
    },
    'clientReferenceId': 'T-SQ-RG-3'
}
CHANGE_NUMBER_JSON = {
    'type': 'REGISTRATION_NUMBER',
    'criteria': {
        'value': 'TEST0008'
    },
    'clientReferenceId': 'T-SQ-RG-3'
}
SERIAL_NUMBER_JSON = {
    'type': 'SERIAL_NUMBER',
    'criteria': {
        'value': 'ju622994'
    },
    'clientReferenceId': 'T-SQ-SS-1'
}
INDIVIDUAL_DEBTOR_JSON = {
    'type': 'INDIVIDUAL_DEBTOR',
    'criteria': {
        'debtorName': {
            'last': 'Debtor',
            'first': '<NAME>'
        }
    },
    'clientReferenceId': 'T-SQ-IS-1'
}
BUSINESS_DEBTOR_JSON = {
    'type': 'BUSINESS_DEBTOR',
    'criteria': {
        'debtorName': {
            'business': 'TEST BUS 2 DEBTOR'
        }
    },
    'clientReferenceId': 'T-SQ-DB-1'
}
# Invalid combination of search criteria
# (each pairs a search type with criteria that belong to a different type)
RG_INVALID_JSON = {
    'type': 'REGISTRATION_NUMBER',
    'criteria': {
        'debtorName': {
            'business': 'BROWN AUTOMOTIVE LTD.'
        }
    }
}
MH_INVALID_JSON = {
    'type': 'MHR_NUMBER',
    'criteria': {
        'debtorName': {
            'business': 'BROWN AUTOMOTIVE LTD.'
        }
    }
}
AC_INVALID_JSON = {
    'type': 'AIRCRAFT_DOT',
    'criteria': {
        'debtorName': {
            'business': 'BROWN AUTOMOTIVE LTD.'
        }
    }
}
SS_INVALID_JSON = {
    'type': 'SERIAL_NUMBER',
    'criteria': {
        'debtorName': {
            'business': 'BROWN AUTOMOTIVE LTD.'
        }
    }
}
IS_INVALID_JSON = {
    'type': 'INDIVIDUAL_DEBTOR',
    'criteria': {
        'debtorName': {
            'business': 'BROWN AUTOMOTIVE LTD.'
        }
    }
}
BS_INVALID_JSON = {
    'type': 'BUSINESS_DEBTOR',
    'criteria': {
        'debtorName': {
            'last': 'Smith',
            'first': 'John'
        }
    }
}
# Discharged financing statement criteria
BS_DISCHARGED_JSON = {
    'type': 'BUSINESS_DEBTOR',
    'criteria': {
        'debtorName': {
            'business': 'ZZZZZ99'
        }
    },
    'clientReferenceId': 'T-SQ-DB-4'
}
AC_DISCHARGED_JSON = {
    'type': 'AIRCRAFT_DOT',
    'criteria': {
        'value': 'zzzzz999999'
    },
    'clientReferenceId': 'T-SQ-AC-4'
}
SS_DISCHARGED_JSON = {
    'type': 'SERIAL_NUMBER',
    'criteria': {
        'value': 'zzzzz999999'
    },
    'clientReferenceId': 'T-SQ-SS-4'
}
MH_DISCHARGED_JSON = {
    'type': 'MHR_NUMBER',
    'criteria': {
        'value': '399999'
    },
    'clientReferenceId': 'T-SQ-MH-4'
}
RG_DISCHARGED_JSON = {
    'type': 'REGISTRATION_NUMBER',
    'criteria': {
        'value': 'TEST0014'
    },
    'clientReferenceId': 'T-SQ-RG-7'
}
IS_DISCHARGED_JSON = {
    'type': 'INDIVIDUAL_DEBTOR',
    'criteria': {
        'debtorName': {
            'last': 'TEST IND DEBTOR',
            'first': 'zzzzz99'
        }
    },
    'clientReferenceId': 'T-SQ-IS-3'
}
# Expired financing statement criteria
BS_EXPIRED_JSON = {
    'type': 'BUSINESS_DEBTOR',
    'criteria': {
        'debtorName': {
            'business': 'XXXXX99'
        }
    },
    'clientReferenceId': 'T-SQ-DB-4'
}
AC_EXPIRED_JSON = {
    'type': 'AIRCRAFT_DOT',
    'criteria': {
        'value': 'xxxxx999999'
    },
    'clientReferenceId': 'T-SQ-AC-4'
}
SS_EXPIRED_JSON = {
    'type': 'SERIAL_NUMBER',
    'criteria': {
        'value': 'XXXXX999999'
    },
    'clientReferenceId': 'T-SQ-SS-4'
}
MH_EXPIRED_JSON = {
    'type': 'MHR_NUMBER',
    'criteria': {
        'value': '299999'
    },
    'clientReferenceId': 'T-SQ-MH-4'
}
RG_EXPIRED_JSON = {
    'type': 'REGISTRATION_NUMBER',
    'criteria': {
        'value': 'TEST0013'
    },
    'clientReferenceId': 'T-SQ-RG-7'
}
IS_EXPIRED_JSON = {
    'type': 'INDIVIDUAL_DEBTOR',
    'criteria': {
        'debtorName': {
            'last': 'TEST IND DEBTOR',
            'first': 'XXXXX99'
        }
    },
    'clientReferenceId': 'T-SQ-IS-3'
}
# Test valid criteria with no results.
BS_NONE_JSON = {
    'type': 'BUSINESS_DEBTOR',
    'criteria': {
        'debtorName': {
            'business': 'XZXZXZXZ'
        }
    },
    'clientReferenceId': 'T-SQ-DB-4'
}
AC_NONE_JSON = {
    'type': 'AIRCRAFT_DOT',
    'criteria': {
        'value': 'TESTXXXX'
    },
    'clientReferenceId': 'T-SQ-AC-4'
}
SS_NONE_JSON = {
    'type': 'SERIAL_NUMBER',
    'criteria': {
        'value': 'TESTXXXX'
    },
    'clientReferenceId': 'T-SQ-SS-4'
}
MH_NONE_JSON = {
    'type': 'MHR_NUMBER',
    'criteria': {
        'value': '999999'
    },
    'clientReferenceId': 'T-SQ-MH-4'
}
RG_NONE_JSON = {
    'type': 'REGISTRATION_NUMBER',
    'criteria': {
        'value': 'TESTXXXX'
    },
    'clientReferenceId': 'T-SQ-RG-7'
}
IS_NONE_JSON = {
    'type': 'INDIVIDUAL_DEBTOR',
    'criteria': {
        'debtorName': {
            'last': 'TEST IND DEBTOR',
            'first': 'XZXZXZXZ'
        }
    },
    'clientReferenceId': 'T-SQ-IS-3'
}
# Names below contain characters outside the supported character set.
IS_INVALID_NAME_JSON = {
    'type': 'INDIVIDUAL_DEBTOR',
    'criteria': {
        'debtorName': {
            'first': 'FN répertoire',
            'middle': 'MN répertoire',
            'last': 'LN répertoire'
        }
    },
    'clientReferenceId': 'T-SQ-IS-3'
}
BS_INVALID_NAME_JSON = {
    'type': 'BUSINESS_DEBTOR',
    'criteria': {
        'debtorName': {
            'business': '\U0001d5c4\U0001d5c6/\U0001d5c1'
        }
    },
    'clientReferenceId': 'T-SQ-DB-4'
}
# testdata pattern is ({search type}, {JSON data})
TEST_VALID_DATA = [
    ('AC', AIRCRAFT_DOT_AC_JSON),
    ('AF', AIRCRAFT_DOT_AF_JSON),
    ('AM', AMENDMENT_NUMBER_JSON),
    ('CH', CHANGE_NUMBER_JSON),
    ('RG', REGISTRATION_NUMBER_JSON),
    ('MH', MHR_NUMBER_JSON),
    ('IS', INDIVIDUAL_DEBTOR_JSON),
    ('BS', BUSINESS_DEBTOR_JSON),
    ('SS', SERIAL_NUMBER_JSON)
]
# testdata pattern is ({search type}, {JSON data})
TEST_NONE_DATA = [
    ('RG', RG_NONE_JSON),
    ('MH', MH_NONE_JSON),
    ('AC', AC_NONE_JSON),
    ('SS', SS_NONE_JSON),
    ('IS', IS_NONE_JSON),
    ('BS', BS_NONE_JSON)
]
# testdata pattern is ({search type}, {JSON data})
TEST_INVALID_DATA = [
    ('RG', RG_INVALID_JSON),
    ('MH', MH_INVALID_JSON),
    ('AC', AC_INVALID_JSON),
    ('SS', SS_INVALID_JSON),
    ('IS', IS_INVALID_JSON),
    ('BS', BS_INVALID_JSON)
]
# testdata pattern is ({search type}, {JSON data}, {expected # of results})
TEST_VALID_DATA_COUNT = [
    ('SS', SERIAL_NUMBER_JSON, 5),
    ('IS', INDIVIDUAL_DEBTOR_JSON, 3),
    ('BS', BUSINESS_DEBTOR_JSON, 2)
]
# testdata pattern is ({search type}, {JSON data}, {excluded match criteria})
TEST_DISCHARGED_DATA = [
    ('RG', RG_DISCHARGED_JSON, 'TEST0014'),
    # Bug fix: the AC and MH rows previously reused SS_DISCHARGED_JSON, so
    # the AIRCRAFT_DOT and MHR_NUMBER discharged-search paths were never
    # exercised and AC_DISCHARGED_JSON / MH_DISCHARGED_JSON were dead code.
    ('AC', AC_DISCHARGED_JSON, 'ZZZZZ999999'),
    ('MH', MH_DISCHARGED_JSON, 'ZZZZZ999999'),
    ('SS', SS_DISCHARGED_JSON, 'ZZZZZ999999'),
    ('IS', IS_DISCHARGED_JSON, 'ZZZZZ99'),
    ('BS', BS_DISCHARGED_JSON, 'ZZZZZ99')
]
# testdata pattern is ({search type}, {JSON data}, {excluded match criteria})
TEST_EXPIRED_DATA = [
    ('RG', RG_EXPIRED_JSON, 'TEST0013'),
    # Bug fix: the AC and MH rows previously reused SS_EXPIRED_JSON, so the
    # AIRCRAFT_DOT and MHR_NUMBER expired-search paths were never exercised
    # and AC_EXPIRED_JSON / MH_EXPIRED_JSON were dead code.
    ('AC', AC_EXPIRED_JSON, 'XXXXX999999'),
    ('MH', MH_EXPIRED_JSON, 'XXXXX999999'),
    ('SS', SS_EXPIRED_JSON, 'XXXXX999999'),
    ('IS', IS_EXPIRED_JSON, 'XXXXX99'),
    ('BS', BS_EXPIRED_JSON, 'XXXXX99')
]
# testdata pattern is ({description}, {reg number})
TEST_REGISTRATION_TYPES = [
    ('Financing Statement', 'TEST0001'),
    ('Amendment', 'TEST0007'),
    ('Change', 'TEST0008'),
    ('Discharge', 'TEST00D4'),
    ('Renewal', 'TEST00R5')
]
# testdata pattern is ({description}, {JSON data}, {valid}, {expected error message})
TEST_DEBTOR_NAME_DATA = [
    ('Valid ind names', IS_NONE_JSON, True, None),
    ('Valid bus name', BS_NONE_JSON, True, None),
    ('Invalid bus name', BS_INVALID_NAME_JSON, False,
     CHARACTER_SET_UNSUPPORTED.format('\U0001d5c4\U0001d5c6/\U0001d5c1')),
    ('Invalid ind first name', IS_INVALID_NAME_JSON, False,
     CHARACTER_SET_UNSUPPORTED.format('FN répertoire')),
    ('Invalid ind middle name', IS_INVALID_NAME_JSON, False,
     CHARACTER_SET_UNSUPPORTED.format('MN répertoire')),
    ('Invalid ind last name', IS_INVALID_NAME_JSON, False,
     CHARACTER_SET_UNSUPPORTED.format('LN répertoire'))
]
# testdata pattern is ({mhr_number}, {expected_num})
# All variants should normalize to the zero-padded six-digit form.
TEST_MHR_NUMBER_DATA = [
    ('005794', '005794'),
    (' 5794 ', '005794'),
    ('5794', '005794'),
    ('05794', '005794')
]
def test_search_no_account(session):
    """Assert that a search query with no account id returns the expected result."""
    criteria = {
        'type': 'REGISTRATION_NUMBER',
        'criteria': {
            'value': 'TEST0001'
        },
        'clientReferenceId': 'T-SQ-RG-4'
    }
    search = SearchRequest.create_from_json(criteria, None)
    search.search()
    assert search.id
    assert search.search_response
@pytest.mark.parametrize('search_type,json_data', TEST_VALID_DATA)
def test_search_valid(session, search_type, json_data):
    """Assert that a valid search returns the expected search type result."""
    query = SearchRequest.create_from_json(json_data, 'PS12345', 'UNIT_TEST')
    query.search()
    assert not query.updated_selection
    result = query.json
    assert query.id
    assert query.search_response
    assert query.account_id == 'PS12345'
    assert query.user_id == 'UNIT_TEST'
    # Envelope properties common to every search type.
    for prop in ('searchId', 'searchQuery', 'searchDateTime',
                 'totalResultsSize', 'maxResultsSize', 'returnedResultsSize'):
        assert result[prop]
    assert len(result['results']) >= 1
    first = result['results'][0]
    assert first['baseRegistrationNumber']
    assert first['createDateTime']
    assert first['matchType'] == 'EXACT'
    assert first['registrationType']
    if search_type == 'BS':
        assert first['debtor']
        assert first['debtor']['businessName'] == 'TEST BUS 2 DEBTOR'
    elif search_type == 'IS':
        debtor = first['debtor']
        assert debtor
        assert debtor['personName']
        assert debtor['personName']['last'] == 'DEBTOR'
        assert debtor['personName']['first'] == '<NAME>'
        if first['baseRegistrationNumber'] == 'TEST0004':
            assert debtor['birthDate']
    elif search_type == 'AM':
        assert first['baseRegistrationNumber'] == 'TEST0001'
        assert first['registrationNumber'] == 'TEST0007'
    elif search_type == 'CH':
        assert first['baseRegistrationNumber'] == 'TEST0001'
        assert first['registrationNumber'] == 'TEST0008'
    elif search_type == 'RG':
        assert first['baseRegistrationNumber'] == 'TEST0001'
    else:
        # Serial-number style searches return vehicle collateral details.
        collateral = first['vehicleCollateral']
        assert collateral
        assert collateral['year']
        assert collateral['make']
        assert collateral['serialNumber']
        if search_type != 'MH':
            assert collateral['model']
        if search_type == 'AF':
            assert collateral['type'] == 'AF'
            assert collateral['serialNumber'] == 'AF16031'
        elif search_type == 'AC':
            assert collateral['type'] == 'AC'
            assert collateral['serialNumber'] == 'CFYXW'
        elif search_type == 'MH':
            assert collateral['manufacturedHomeRegistrationNumber'] == '220000'
@pytest.mark.parametrize('search_type,json_data', TEST_NONE_DATA)
def test_search_no_results(session, search_type, json_data):
    """Assert that a search query with no results returns the expected result."""
    query = SearchRequest.create_from_json(json_data, None)
    query.search()
    assert query.id
    assert query.returned_results_size == 0
    assert not query.search_response
@pytest.mark.parametrize('search_type,json_data,excluded_match', TEST_EXPIRED_DATA)
def test_search_expired(session, search_type, json_data, excluded_match):
    """Assert that an expired financing statement is excluded from the search results."""
    query = SearchRequest.create_from_json(json_data, None)
    query.search()
    result = query.json
    assert result['searchId']
    if search_type == 'RG':
        # A registration number search on an expired statement finds nothing.
        assert not query.search_response
        assert query.returned_results_size == 0
        return
    if 'results' in result:
        for match in result['results']:
            if search_type == 'BS':
                assert match['debtor']['businessName'] != excluded_match
            elif search_type == 'IS':
                assert match['debtor']['personName']['first'] != excluded_match
            else:
                assert match['vehicleCollateral']['serialNumber'] != excluded_match
@pytest.mark.parametrize('search_type,json_data,excluded_match', TEST_DISCHARGED_DATA)
def test_search_discharged(session, search_type, json_data, excluded_match):
    """Assert that a discharged financing statement is excluded from the search results."""
    query = SearchRequest.create_from_json(json_data, None)
    query.search()
    result = query.json
    assert result['searchId']
    if search_type == 'RG':
        # A registration number search on a discharged statement finds nothing.
        assert not query.search_response
        assert query.returned_results_size == 0
        return
    if 'results' in result:
        for match in result['results']:
            if search_type == 'BS':
                assert match['debtor']['businessName'] != excluded_match
            elif search_type == 'IS':
                assert match['debtor']['personName']['first'] != excluded_match
            else:
                assert match['vehicleCollateral']['serialNumber'] != excluded_match
@pytest.mark.parametrize('desc,reg_num', TEST_REGISTRATION_TYPES)
def test_registration_types(session, desc, reg_num):
"""Assert that a reg num searches on different registations returns the expected result."""
# setup
json_data = {
'type': 'REGISTRATION_NUMBER',
'criteria': {
'value': reg_num
}
}
query = SearchRequest.create_from_json(json_data, 'PS12345', 'UNIT_TEST')
query.search()
result = query.json
# print(result)
assert query.id
assert query.search_response
assert query.account_id == 'PS12345'
assert query.user_id == 'UNIT_TEST'
assert result['searchId']
assert result['searchQuery']
assert result['searchDateTime']
assert result['totalResultsSize'] == 1
assert result['maxResultsSize']
assert result['returnedResultsSize'] == 1
assert | |
0x10ff12.
Call to 0x10ff12 returns 'OK' as the result of timeout change.
:param timeout: power timeout in range (0x0001, 0xfff0)
:type timeout: int
:param wait: wait for response
:type wait: bool
"""
timeout = max(min(0xfff0, timeout), 0x0001)
strtimeout = '{0:0{1}X}'.format(timeout, 4)
if wait:
return self.askPrinter(bytes.fromhex('10ff12' + strtimeout))
else:
self.tellPrinter(bytes.fromhex('10ff12' + strtimeout))
def setConcentration(self, cons, wait=False):
    """
    Sets the printing concentration (heating intensity) using the 0x10ff1000 opcode.

    Allowed values are 0, 1, 2, which represent light, medium and hard heating.
    Values outside that range are clamped and non-integer values are truncated,
    so every input maps to a valid level.  (The previous implementation left the
    opcode empty for fractional values strictly between 0 and 2, e.g. 1.5, and
    then sent an empty payload to the printer.)

    :param cons: concentration (0, 1, 2)
    :type cons: int
    :param wait: wait for response
    :type wait: bool
    """
    # Clamp to the supported range; int() makes fractional input well-defined.
    level = min(2, max(0, int(cons)))
    opcode = '10ff1000{0:02X}'.format(level)
    if wait:
        return self.askPrinter(bytes.fromhex(opcode))
    else:
        self.tellPrinter(bytes.fromhex(opcode))
def reset(self):
    """
    Performs the reset operation (its exact purpose is still unknown) that is
    required before streaming binary image bytes to the printer.

    The opcode is 0x10fffe01 followed by twelve zero bytes.  It has to be sent
    after connecting to the printer and before any other printing operation.
    """
    payload = '10fffe01' + '00' * 12
    self.tellPrinter(bytes.fromhex(payload))
def printBreak(self, size=0x40):
    """
    Asks the printer to print a vertical break of the given height using the
    0x1b4a opcode.

    :param size: break height in pixels, clamped to the range (0x01, 0xff)
    :type size: int
    """
    clamped = max(0x01, min(0xff, size))
    self.tellPrinter(bytes.fromhex('1b4a{0:02X}'.format(clamped)))
def writeASCII(self, text='\n', wait=False):
    """
    Deprecated.  Sends a raw ASCII string to the printer.

    The printer accumulates received characters in an internal buffer (of
    getRowCharacters() size) and prints the buffer when it sees a '\n' or the
    buffer overflows.  Only plain ASCII without control codes (0x00-0x20, 0xFF)
    is expected.  Do not call this while the printer is in byte-stream printing
    mode or while it expects arguments for an opcode.  Strings containing
    consecutive '\n' characters may freeze the printer, so printASCII() is the
    recommended alternative.

    :param text: string containing ASCII characters
    :type text: str
    :param wait: wait for response
    :type wait: bool
    """
    payload = bytes(text, 'ascii')
    if wait:
        return self.askPrinter(payload)
    self.tellPrinter(payload)
def printlnASCII(self, text='\n', delay=0.25):
    """
    Write a raw ASCII string to the printer, println-style.

    The printer accumulates received characters in an internal buffer (of
    getRowCharacters() size) and prints the buffer when it sees a '\n' or the
    buffer overflows.  Non-ASCII and control characters (0x00-0x20 except
    '\n', and 0x7F and above) are stripped before sending.  Empty lines
    (repeated '\n') are replaced with printBreak(30), matching the height of
    the text, to avoid freezing the printer.  Long lines are sliced into
    pieces of getRowCharacters() size with a delay between pieces.

    This function acts as println: it flushes any text previously stored in
    the printASCII() buffer (self.printBuffer) and always terminates with a
    newline.

    :param text: string containing ASCII characters
    :type text: str
    :param delay: delay between sending each line
    :type delay: float
    """
    # Remove non-ASCII & control (except \n)
    text = ''.join([i for i in text if (31 < ord(i) or ord(i) == 10) and ord(i) < 127])
    ## Remove last '\n' to avoid it's duplication
    #if len(text) > 0 and text[-1] == '\n':
    #    text = text[:-1]
    #
    # Check for empty and print out newline
    # Prepend whatever printASCII() left buffered, so nothing is lost.
    text = self.printBuffer + text
    if len(text) == 0:
        self.printBreak(30)
        time.sleep(delay)
        return
    lines = text.split('\n')
    # Buffer is consumed now; clear it before sending.
    self.printBuffer = ''
    for l in lines:
        # Replace every empty line with break matching newline height
        if len(l) == 0:
            self.printBreak(30)
            time.sleep(delay)
        else:
            # Split to lines
            parts = [l[i:i+self.getRowCharacters()] for i in range(0, len(l), self.getRowCharacters())]
            for i, p in enumerate(parts):
                self.tellPrinter(bytes(p, 'ascii'))
                # NOTE(review): the delay is skipped after the first part —
                # presumably intentional pacing; confirm against hardware.
                if i != 0:
                    time.sleep(delay)
    # Push last line from the buffer
    self.tellPrinter(bytes('\n', 'ascii'))
    time.sleep(delay)
def printASCII(self, text='\n', delay=0.25):
    """
    Write a raw ASCII string to the printer, buffering any incomplete last line.

    The printer accumulates received characters in an internal buffer (of
    getRowCharacters() size) and prints the buffer when it sees a '\n' or the
    buffer overflows.  Non-ASCII and control characters (0x00-0x20 except
    '\n', and 0x7F and above) are stripped before sending.  Empty lines
    (repeated '\n') are replaced with printBreak(30), matching the height of
    the text, to avoid freezing the printer.  Long lines are sliced into
    pieces of getRowCharacters() size with a delay between pieces.

    Unlike printlnASCII(), if the text does not end with '\n' the tail is kept
    in self.printBuffer and sent by the next call (or by flushASCII()).

    :param text: string containing ASCII characters
    :type text: str
    :param delay: delay between sending each line
    :type delay: float
    """
    # Remove non-ASCII & control (except \n)
    text = ''.join([i for i in text if (31 < ord(i) or ord(i) == 10) and ord(i) < 127])
    # Check for empty and print out newline
    # Prepend the buffered tail from a previous call.
    text = self.printBuffer + text
    self.printBuffer = ''
    if len(text) == 0:
        return
    # Remember whether the caller terminated the text with a newline.
    endLineBreak = text[-1] == '\n'
    # Remove last '\n' to avoid it's duplication
    if len(text) > 0 and text[-1] == '\n':
        if len(text) == 1:
            # Text was only '\n': emit a break matching a newline's height.
            self.printBreak(30)
            time.sleep(delay)
            return
        text = text[:-1]
    lines = text.split('\n')
    for i, l in enumerate(lines):
        # Replace every empty line with break matching newline height
        if len(l) == 0:
            self.printBreak(30)
            time.sleep(delay)
        else:
            # Split to lines
            # NOTE(review): the comprehension variable `i` shadows the outer
            # loop index; harmless in Python 3 (comprehensions have their own
            # scope) but easy to misread.
            parts = [l[i:i+self.getRowCharacters()] for i in range(0, len(l), self.getRowCharacters())]
            for j, p in enumerate(parts):
                # If this is the last part of the text and it ends with '\n', push it
                if j == len(parts)-1:
                    if i == len(lines)-1:
                        if endLineBreak:
                            # Last part of last line, '\n'-terminated: send it.
                            self.tellPrinter(bytes(p, 'ascii'))
                            time.sleep(delay)
                            self.tellPrinter(bytes('\n', 'ascii'))
                            time.sleep(delay)
                        else:
                            # Not newline-terminated: keep the tail buffered.
                            self.printBuffer = p
                            # Push out the string that is a full row
                            if len(p) == self.getRowCharacters():
                                self.tellPrinter(bytes(p, 'ascii'))
                                time.sleep(delay)
                                self.tellPrinter(bytes('\n', 'ascii'))
                                time.sleep(delay)
                                self.printBuffer = ''
                    else:
                        # Last part of a non-final line: flush with a newline.
                        self.tellPrinter(bytes(p, 'ascii'))
                        time.sleep(delay)
                        self.tellPrinter(bytes('\n', 'ascii'))
                        time.sleep(delay)
                else:
                    # Intermediate full-row part of a long line.
                    self.tellPrinter(bytes(p, 'ascii'))
                    if j != 0:
                        time.sleep(delay)
def flushASCII(self, delay=0.25):
    """
    Prints out the tail buffered by printASCII(), followed by a newline.

    :param delay: delay between sending each line
    :type delay: float
    """
    if not self.printBuffer:
        return
    self.tellPrinter(bytes(self.printBuffer, 'ascii'))
    time.sleep(delay)
    self.tellPrinter(bytes('\n', 'ascii'))
    time.sleep(delay)
    # Clear only after a successful send.
    self.printBuffer = ''
def printRow(self, rowbytes):
    """
    Sends a single row of pixel data to the printer.

    Invokes the printer's image / byte-stream printing mode and prints one
    row.  ``rowbytes`` is expected to match getRowBytes() for this printer
    model; shorter input is zero-padded on the right and longer input is
    truncated.

    :param rowbytes: bytes array of size getRowBytes() representing a single row
    :type rowbytes: bytes
    """
    width = self.getRowBytes()
    # Normalise the row to exactly the expected width (truncate, then pad).
    rowbytes = rowbytes[:width].ljust(width, b'\x00')
    self.reset()
    # Announce an incoming single-row bitmap; the opcode depends on the model.
    if self.printerType == PrinterType.A6:
        self.tellPrinter(bytes.fromhex('1d76300030000100'))
    else:
        self.tellPrinter(bytes.fromhex('1d76300048000100'))
    self.tellPrinter(rowbytes)
    # We're done here
def printImageRowBytesList(self, imagebytes, delay=0.01):
"""
Performs printing of the Image bytes. Image width expected to match getRowBytes(), in other
case it will be cut or pad | |
<gh_stars>10-100
"""Planar segway example."""
from matplotlib.pyplot import figure, grid, legend, plot, show, subplot, suptitle, title
from numpy import arange, array, concatenate, cos, identity, linspace, ones, sin, tanh, tile, zeros
from numpy.random import uniform
from scipy.io import loadmat, savemat
from sys import argv
from ..controllers import CombinedController, PDController, QPController, SaturationController
from ..learning import evaluator, KerasTrainer, sigmoid_weighting, SimulationHandler
from ..lyapunov_functions import RESQuadraticControlLyapunovFunction
from ..outputs import PDOutput, RoboticSystemOutput
from ..systems import AffineControlSystem
# NOTE(review): file name taken from the first command-line argument; its use
# is not visible in this part of the module — presumably a save/load target.
filename = argv[1]
class SegwaySystem(AffineControlSystem):
"""Planar Segway system. State is [x, theta, x_dot, theta_dot], where x is
the position of the Segway base in m, x_dot is the velocity in m / sec,
theta is the angle of the frame in rad clockwise from upright, and
theta_dot is the angular rate in rad / sec. The input is [u], where u is
positive or negative percent of maximum motor voltage.
Attributes:
x_ddot drift component, f3: float * float * float -> float
theta_ddot drift component, f4: float * float * float -> float
x_ddot actuation component, g3: float -> float
theta_ddot actuation component, g4: float -> float
"""
def __init__(self, m_b=44.798, m_w=2.485, J_w=0.055936595310797, a_2=-0.023227187592750, c_2=0.166845864363019, B_2=2.899458828344427, R= 0.086985141514373, K=0.141344665167821, r=0.195, g=9.81, f_d=0.076067344020759, f_v=0.002862586216301, V_nom=57):
"""Initialize a SegwaySystem object.
Inputs:
Mass of frame (kg), m_b: float
Mass of one wheel (kg), m_w: float
Inertia of wheel (kg*m^2), J_w: float
x position of frame (m), a_2: float
z position of frame (m), c_2: float
yy inertia of frame (kg*m^2), B_2: float
Electrical resistance of motors (Ohm), R: float
Torque constant of motors (N*m/A), K: float
Radius of wheels (m), r: float
Gravity constant (m/s^2), g: float
Dry friction coefficient (N*m), f_d: float
Viscous friction coefficient (N*m*s), f_v: float
Nominal battery voltage (V), V_nom: float
"""
self.f_3 = lambda x_dot, theta, theta_dot: (1/2) * R ** (-1) * (4 * \
B_2 * J_w + 4 * a_2 ** 2 * J_w * m_b + 4 * c_2 ** 2 * J_w * m_b + 2 * \
B_2 * m_b * r ** 2 + a_2 ** 2 * m_b ** 2 * r ** 2 + c_2 ** 2 * m_b ** \
2 * r ** 2 + 4 * B_2 * m_w * r ** 2 + 4 * a_2 ** 2 * m_b * m_w * r ** \
2 + 4 * c_2 ** 2 * m_b * m_w * r ** 2 + (a_2 ** 2 + (-1) * c_2 ** 2) * \
m_b ** 2 * r ** 2 * cos(2 * theta) + 2 * a_2 * c_2 * m_b ** 2 * r ** 2 \
* sin(2 * theta)) ** (-1) * (800 * B_2 * K ** 2 * theta_dot * r + 800 \
* a_2 ** 2 * K ** 2 * m_b * theta_dot * r + 800 * c_2 ** 2 * K ** 2 * \
m_b * theta_dot * r + 800 * B_2 * f_v * theta_dot * r * R + 800 * a_2 \
** 2 * f_v * m_b * theta_dot * r * R + 800 * c_2 ** 2 * f_v * m_b * \
theta_dot * r * R + (-800) * B_2 * K ** 2 * x_dot + (-800) * a_2 ** 2 \
* K ** 2 * m_b * x_dot + (-800) * c_2 ** 2 * K ** 2 * m_b * x_dot + \
(-800) * B_2 * f_v * R * x_dot + (-800) * a_2 ** 2 * f_v * m_b * R * \
x_dot + (-800) * c_2 ** 2 * f_v * m_b * R * x_dot + 80 * c_2 * K ** 2 \
* m_b * theta_dot * r ** 2 * cos(theta) + 80 * c_2 * f_v * m_b * \
theta_dot * r ** 2 * R * cos(theta) + 4 * a_2 * B_2 * m_b * theta_dot \
** 2 * r ** 2 * R * cos(theta) + 4 * a_2 ** 3 * m_b ** 2 * theta_dot \
** 2 * r ** 2 * R * cos(theta) + 4 * a_2 * c_2 ** 2 * m_b ** 2 * \
theta_dot ** 2 * r ** 2 * R * cos(theta) + (-80) * c_2 * K ** 2 * m_b \
* r * x_dot * cos(theta) + (-80) * c_2 * f_v * m_b * r * R * x_dot * \
cos(theta) + (-4) * a_2 * c_2 * g * m_b ** 2 * r ** 2 * R * cos(2 * \
theta) + (-80) * a_2 * K ** 2 * m_b * theta_dot * r ** 2 * sin(theta) \
+ (-80) * a_2 * f_v * m_b * theta_dot * r ** 2 * R * sin(theta) + 4 * \
B_2 * c_2 * m_b * theta_dot ** 2 * r ** 2 * R * sin(theta) + 4 * a_2 \
** 2 * c_2 * m_b ** 2 * theta_dot ** 2 * r ** 2 * R * sin(theta) + 4 * \
c_2 ** 3 * m_b ** 2 * theta_dot ** 2 * r ** 2 * R * sin(theta) + 80 * \
a_2 * K ** 2 * m_b * r * x_dot * sin(theta) + 80 * a_2 * f_v * m_b * r \
* R * x_dot * sin(theta) + 2 * a_2 ** 2 * g * m_b ** 2 * r ** 2 * R * \
sin(2 * theta) + (-2) * c_2 ** 2 * g * m_b ** 2 * r ** 2 * R * sin(2 * \
theta) + 4 * f_d * r * R * (10 * (B_2 + (a_2 ** 2 + c_2 ** 2) * m_b) + \
c_2 * m_b * r * cos(theta) + (-1) * a_2 * m_b * r * sin(theta)) * \
tanh(500 * r ** (-1) * (2 * theta_dot * r + (-2) * x_dot)) + (-4) * \
f_d * r * R * (10 * (B_2 + (a_2 ** 2 + c_2 ** 2) * m_b) + c_2 * m_b * \
r * cos(theta) + (-1) * a_2 * m_b * r * sin(theta)) * tanh(500 * r ** \
(-1) * ((-2) * theta_dot * r + 2 * x_dot)))
self.f_4 = lambda x_dot, theta, theta_dot: r ** (-1) * R ** (-1) * (4 * \
B_2 * J_w + 4 * a_2 ** 2 * J_w * m_b + 4 * c_2 ** 2 * J_w * m_b + 2 *
B_2 * m_b * r ** 2 + a_2 ** 2 * m_b ** 2 * r ** 2 + c_2 ** 2 * m_b ** \
2 * r ** 2 + 4 * B_2 * m_w * r ** 2 + 4 * a_2 ** 2 * m_b * m_w * r ** \
2 + 4 * c_2 ** 2 * m_b * m_w * r ** 2 + (a_2 ** 2 + (-1) * c_2 ** 2) * \
m_b ** 2 * r ** 2 * cos(2 * theta) + 2 * a_2 * c_2 * m_b ** 2 * r ** \
2 * sin(2 * theta)) ** (-1) * ((-80) * J_w * K ** 2 * theta_dot * r + \
(-40) * K ** 2 * m_b * theta_dot * r ** 3 + (-80) * K ** 2 * m_w * \
theta_dot * r ** 3 + (-80) * f_v * J_w * theta_dot * r * R + (-40) * \
| |
<reponame>secuof/binaryanalysis
#!/usr/bin/python
## Binary Analysis Tool
## Copyright 2011-2016 <NAME> for Tjaldur Software Governance Solutions
## Licensed under Apache 2.0, see LICENSE file for details
'''
This module contains methods that should be run before any of the other
scans.
Most of these methods are to verify the type of a file, so it can be tagged
and subsequently be ignored by other scans. For example, if it can already be
determined that an entire file is a GIF file, it can safely be ignored by a
method that only applies to file systems.
Tagging files reduces false positives (especially ones caused by LZMA
unpacking), and in many cases also speeds up the process, because it is clear
very early in the process which files can be ignored.
The methods here are conservative: not all files that could be tagged will be
tagged. Since tagging is just an optimisation this does not really matter: the
files will be scanned and tagged properly later on, but more time might be
spent, plus there might be false positives (mostly LZMA).
'''
import sys, os, subprocess, os.path, shutil, stat, struct, zlib, binascii
import tempfile, re, magic, hashlib, HTMLParser, math
import fsmagic, extractor, javacheck, elfcheck
## method to search for all the markers in magicscans
## Although it is in this method it is actually not a pre-run scan, so perhaps
## it should be moved to bruteforcescan.py instead.
## This method returns a tuple with three results:
## * offsets :: a dictionary with offsets per marker
## * offsettokeys :: a dictionary that maps an offset to a marker
## * isascii :: a flag to indicate that the data found was ASCII
## data only or not
def genericMarkerSearch(filename, magicscans, optmagicscans, offset=0, length=0, debug=False):
    '''
    Search a file for all magic markers listed in magicscans/optmagicscans.

    The file is read in 2,000,000 byte chunks with a 50 byte overlap between
    consecutive chunks so that markers spanning a chunk boundary are not
    missed (this assumes no marker is 50 bytes or longer).

    Returns a tuple with three results:
    * offsets :: a dictionary mapping each marker name to a sorted list of
      offsets where it was found
    * offsettokeys :: a dictionary mapping an offset to the list of markers
      found at that offset
    * isascii :: flag indicating whether the scanned data was ASCII only
    '''
    datafile = open(filename, 'rb')
    databuffer = []
    ## dictionary with offsets per marker
    offsets = {}
    ## mapping of offset to keys
    offsettokeys = {}
    ## flag that indicates if the data is ASCII
    isascii = True
    datafile.seek(offset)
    ## length == 0 means "scan to the end of the file"
    if length == 0:
        databuffer = datafile.read(2000000)
    else:
        databuffer = datafile.read(length)
    marker_keys = magicscans + optmagicscans
    bufkeys = []
    for key in marker_keys:
        ## use a set to have automatic deduplication. Each offset
        ## should be in the list only once.
        offsets[key] = set()
        if not key in fsmagic.fsmagic:
            continue
        bufkeys.append((key,fsmagic.fsmagic[key]))
    ## don't read the file if there are no keys to process
    if bufkeys == []:
        datafile.close()
        return (offsets, offsettokeys, isascii)
    ## second handle on the same file, used for the extra sanity-check reads
    ## (jpeg/compress/ttf) without disturbing the chunked scan position.
    datafile2 = open(filename, 'rb')
    while databuffer != '':
        if isascii:
            if not extractor.isPrintables(databuffer):
                isascii = False
        for bkey in bufkeys:
            (key, bufkey) = bkey
            if not bufkey in databuffer:
                continue
            res = databuffer.find(bufkey)
            while res != -1:
                ## hardcode a few checks to avoid possibly passing
                ## around many offsets to many methods
                if key == 'jpeg':
                    ## JPEG: the marker must be followed by another 0xff byte
                    datafile2.seek(offset+res+2)
                    checkkey = datafile2.read(1)
                    if len(checkkey) == 1:
                        if checkkey == '\xff':
                            offsets[key].add(offset + res)
                elif key == 'compress':
                    ## compress(1): low 5 bits of the third byte encode the
                    ## number of compression bits, valid range 9..16
                    datafile2.seek(offset+res+2)
                    compressdata = datafile2.read(1)
                    if len(compressdata) == 1:
                        compressbits = ord(compressdata) & 0x1f
                        if compressbits >= 9 and compressbits <= 16:
                            offsets[key].add(offset + res)
                elif key == 'ttf':
                    ## TrueType: numTables must be non-zero and searchRange
                    ## must equal 2**(floor(log2(numTables)) + 4)
                    datafile2.seek(offset+res+4)
                    fontbytes = datafile2.read(2)
                    if len(fontbytes) == 2:
                        numberoftables = struct.unpack('>H', fontbytes)[0]
                        if numberoftables != 0:
                            ## followed by searchrange
                            fontbytes = datafile2.read(2)
                            if len(fontbytes) == 2:
                                searchrange = struct.unpack('>H', fontbytes)[0]
                                ## sanity check, see specification
                                if pow(2, int(math.log(numberoftables, 2)+4)) == searchrange:
                                    offsets[key].add(offset + res)
                else:
                    offsets[key].add(offset + res)
                res = databuffer.find(bufkey, res+1)
        ## a caller-specified length means only one chunk is scanned
        if length != 0:
            break
        ## move the offset 1999950
        datafile.seek(offset + 1999950)
        ## read 2000000 bytes with a 50 bytes overlap with the previous
        ## read so we don't miss any pattern. This needs to be updated
        ## as soon as patterns >= 50 are used.
        databuffer = datafile.read(2000000)
        if len(databuffer) >= 50:
            offset = offset + 1999950
        else:
            offset = offset + len(databuffer)
    datafile2.close()
    datafile.close()
    ## NOTE(review): the loop below reuses the name `offset` for iteration,
    ## clobbering the parameter; harmless here because scanning is finished.
    for key in marker_keys:
        offsets[key] = list(offsets[key])
        ## offsets are expected to be sorted.
        offsets[key].sort()
        for offset in offsets[key]:
            if offset in offsettokeys:
                offsettokeys[offset].append(key)
            else:
                offsettokeys[offset] = [key]
    return (offsets, offsettokeys, isascii)
## Verify a file is an XML file using xmllint.
## Actually this *could* be done with xml.dom.minidom (although some parser settings should be set
## to deal with unresolved entities) to avoid launching another process
def searchXML(filename, cursor, conn, tempdir=None, tags=[], offsets={}, scanenv={}, debug=False, unpacktempdir=None, filehashes=None):
    '''
    Verify whether a file is a well-formed XML document.

    A cheap local check first requires the file (after an optional UTF-8 byte
    order mark) to start with optional whitespace followed by a '<?xml'
    declaration; only then is xmllint run (with network access disabled) to
    validate the whole document.

    Returns a list of new tags: ["xml"] on success, [] otherwise.
    '''
    newtags = []
    datafile = open(filename, 'rb')
    ## first check if the file starts with a byte order mark for a UTF-8 file
    ## https://en.wikipedia.org/wiki/Byte_order_mark
    offset = 0
    bommarks = datafile.read(3)
    if bommarks == '\xef\xbb\xbf':
        offset = 3
    datafile.seek(offset)
    firstchar = datafile.read(1)
    ## xmllint expects a file to start either with whitespace,
    ## or a < character.
    while True:
        ## NOTE: at end of file read(1) returns '' which is not in the list,
        ## so the loop terminates with "not XML".
        if firstchar not in ['\n', '\r', '\t', ' ', '\v', '<']:
            datafile.close()
            return newtags
        if firstchar == '<':
            databytes = datafile.read(4)
            datafile.close()
            ## NOTE(review): bare except presumably guards against short reads
            ## or decoding issues in .lower(); a narrower except would be safer.
            try:
                if databytes.lower() != '?xml':
                    return newtags
                break
            except:
                return newtags
        else:
            firstchar = datafile.read(1)
    ## run xmllint for the real validation; --nonet keeps it offline
    p = subprocess.Popen(['xmllint','--noout', "--nonet", filename], stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True)
    (stanout, stanerr) = p.communicate()
    if p.returncode == 0:
        newtags.append("xml")
    return newtags
## Verify a file only contains text. This depends on the settings of the
## Python installation.
## The default encoding in Python 2 is 'ascii'. It cannot be guaranteed
## that it has been set by the user to another encoding.
## Since other encodings also contain ASCII it should not be much of an issue.
##
## Interesting link with background info:
## * http://fedoraproject.org/wiki/Features/PythonEncodingUsesSystemLocale
def verifyText(filename, cursor, conn, tempdir=None, tags=[], offsets={}, scanenv={}, debug=False, unpacktempdir=None, filehashes=None):
    '''
    Verify whether a file contains only printable text.

    The file is scanned in 100,000 byte chunks; as soon as a chunk contains a
    non-printable character the file is tagged "binary", otherwise it is
    tagged "text" and "ascii".  What counts as printable depends on the
    encoding settings of the Python installation (default is 'ascii').
    '''
    scanfile = open(filename, 'rb')
    position = 0
    scanfile.seek(position)
    chunk = scanfile.read(100000)
    while chunk != '':
        if not extractor.isPrintables(chunk):
            scanfile.close()
            return ["binary"]
        ## advance by 100,000 bytes and read the next chunk
        scanfile.seek(position + 100000)
        chunk = scanfile.read(100000)
        position = position + len(chunk)
    scanfile.close()
    return ["text", "ascii"]
## verify WAV files
## http://www-mmsp.ece.mcgill.ca/Documents/AudioFormats/WAVE/WAVE.html
## https://sites.google.com/site/musicgapi/technical-documents/wav-file-format
def verifyWav(filename, cursor, conn, tempdir=None, tags=[], offsets={}, scanenv={}, debug=False, unpacktempdir=None, filehashes=None):
    '''
    Verify WAV audio files (RIFF containers with a 'WAVE' fourcc).

    http://www-mmsp.ece.mcgill.ca/Documents/AudioFormats/WAVE/WAVE.html
    https://sites.google.com/site/musicgapi/technical-documents/wav-file-format
    '''
    ## chunk ids observed in the wild; 'LGWV' and 'bext' seem to be extensions
    wavchunks = ['fmt ', 'fact', 'data', 'cue ', 'list', 'plst', 'labl',
                 'ltxt', 'note', 'smpl', 'inst', 'bext', 'LGWV']
    newtags = verifyRiff(filename, wavchunks, 'WAVE', tempdir, tags, offsets, scanenv, debug, unpacktempdir)
    if newtags != []:
        newtags = newtags + ['wav', 'audio']
    return newtags
## verify WebP files
## https://developers.google.com/speed/webp/docs/riff_container
def verifyWebP(filename, cursor, conn, tempdir=None, tags=[], offsets={}, scanenv={}, debug=False, unpacktempdir=None, filehashes=None):
    '''
    Verify WebP image files (RIFF containers with a 'WEBP' fourcc).

    https://developers.google.com/speed/webp/docs/riff_container
    '''
    webpchunks = ['VP8 ', 'VP8L', 'VP8X', 'ANIM', 'ANMF', 'ALPH', 'ICCP',
                  'EXIF', 'XMP ']
    newtags = verifyRiff(filename, webpchunks, 'WEBP', tempdir, tags, offsets, scanenv, debug, unpacktempdir)
    if newtags != []:
        newtags = newtags + ['webp', 'graphics']
    return newtags
## generic method to verify RIFF files, such as WebP or WAV
def verifyRiff(filename, validchunks, fourcc, tempdir=None, tags=[], offsets={}, scanenv={}, debug=False, unpacktempdir=None):
    '''
    Generic verifier for RIFF containers, used by verifyWav and verifyWebP.

    Requires a 'RIFF' marker at offset 0, a declared size matching the actual
    file size, the expected fourcc, and a clean walk over the chunk list
    (4 byte id from validchunks, 4 byte little-endian length, payload).

    Returns ['riff'] when the file verifies, [] otherwise.
    '''
    if "text" in tags or "compressed" in tags or "audio" in tags or "graphics" in tags:
        return []
    if 'riff' not in offsets:
        return []
    if 0 not in offsets['riff']:
        return []
    filesize = os.stat(filename).st_size
    ## a valid header is at least 12 bytes: 'RIFF' + size + fourcc
    if filesize < 12:
        return []
    rifffile = open(filename, 'rb')
    try:
        rifffile.seek(4)
        ## declared size covers everything after the first 8 header bytes
        declaredsize = struct.unpack('<I', rifffile.read(4))[0]
        if declaredsize + 8 != filesize:
            return []
        if rifffile.read(4) != fourcc:
            return []
        ## walk the chunk list until the end of the file
        while rifffile.tell() != filesize:
            chunkid = rifffile.read(4)
            if len(chunkid) != 4:
                return []
            if chunkid not in validchunks:
                return []
            lengthbytes = rifffile.read(4)
            if len(lengthbytes) != 4:
                return []
            chunklength = struct.unpack('<I', lengthbytes)[0]
            nextchunk = rifffile.tell() + chunklength
            if nextchunk > filesize:
                return []
            rifffile.seek(nextchunk)
    finally:
        rifffile.close()
    return ['riff']
## generic method to verify AIFF and AIFF-C. This is very similar to RIFF
## but has different endianness and several other restrictions.
## https://en.wikipedia.org/wiki/Audio_Interchange_File_Format
def verifyAIFF(filename, cursor, conn, tempdir=None, tags=[], offsets={}, scanenv={}, debug=False, unpacktempdir=None, filehashes=None):
    '''
    Verify AIFF and AIFF-C audio files.

    The layout is very similar to RIFF but big endian, and the mandatory
    'COMM' and 'SSND' chunks must be present.

    https://en.wikipedia.org/wiki/Audio_Interchange_File_Format

    Returns ['aiff', 'audio'] when the file verifies, [] otherwise.
    '''
    newtags = []
    ## skip files already classified by an earlier scan
    if "text" in tags or "compressed" in tags or "audio" in tags or "graphics" in tags:
        return newtags
    if not 'aiff' in offsets:
        return newtags
    if not 0 in offsets['aiff']:
        return newtags
    filesize = os.stat(filename).st_size
    ## there should at least be a valid header
    if filesize < 12:
        return newtags
    bigendian = True
    aifffile = open(filename, 'rb')
    aifffile.seek(4)
    ## size of bytes following the size field. This field
    ## should be filesize - 8 if the whole file is AIFF
    formsizebytes = aifffile.read(4)
    formsize = struct.unpack('>I', formsizebytes)[0]
    if formsize + 8 != filesize:
        aifffile.close()
        return newtags
    ## then check if it is little endian or big endian
    ## this is not used, but could be useful in the future
    endianbytes = aifffile.read(4)
    if endianbytes == 'AIFC':
        bigendian = False
    elif endianbytes == 'AIFF':
        bigendian = True
    else:
        aifffile.close()
        return newtags
    ## then depending on the file format different
    ## content will follow.
    ## There are different chunks that can follow eachother
    ## in the file.
    ## Walk all chunks (4 byte id, 4 byte big-endian length, payload),
    ## recording the ids so mandatory chunks can be checked afterwards.
    seenchunkids = set()
    while aifffile.tell() != filesize:
        chunkheaderbytes = aifffile.read(4)
        if len(chunkheaderbytes) != 4:
            aifffile.close()
            return newtags
        seenchunkids.add(chunkheaderbytes)
        ## then read the size of the chunk
        chunksizebytes = aifffile.read(4)
        if len(chunksizebytes) != 4:
            aifffile.close()
            return newtags
        chunksizebytes = struct.unpack('>I', chunksizebytes)[0]
        curoffset = aifffile.tell()
        ## a chunk may not extend past the end of the file
        if curoffset + chunksizebytes > filesize:
            aifffile.close()
            return newtags
        aifffile.seek(curoffset + chunksizebytes)
    aifffile.close()
    ## AIFF files have two mandatory chunks
    if not ('COMM' in seenchunkids and 'SSND' in seenchunkids):
        return newtags
    newtags.append('aiff')
    newtags.append('audio')
    return newtags
## Verify if this is an Android resources file. These files can be found in
## Android APK archives and are always called "resources.arsc".
## There are various valid types of resource files, which are documented here:
##
## https://android.googlesource.com/platform/frameworks/base.git/+/d24b8183b93e781080b2c16c487e60d51c12da31/include/utils/ResourceTypes.h
##
## | |
<reponame>derNarr/synchronicity
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from __future__ import division
import math
import random
from psychopy import visual, core
from helper import create_run_trial, create_show_text
class BaseStim(object):
    """
    Base stimulus for moving dots.

    Encapsulates the bookkeeping (window, timing, frame counting) shared by
    all stimuli.

    Abstract Class
    --------------
    Needs to implement:

    * self.objects : list
    * self.speed : int / float
    * self._move_objects(self) : function
    """
    def __init__(self, win, name, fps=60, start_pos=(0, 0)):
        """
        Parameters
        ----------
        win : psychopy.visual.Window
        name : str
        fps : int
        start_pos : (int, int)
        """
        self.win = win
        self.name = name
        self.fps = fps
        self.frame_counter = 0
        self.start_time = core.getTime()
        self.start_pos = start_pos

    @property
    def flip_duration(self):
        """
        Duration of a single flip in seconds.
        """
        return 1. / self.fps

    @property
    def time(self):
        """
        Seconds elapsed since the stimulus was created.
        """
        return core.getTime() - self.start_time

    def draw(self):
        """
        Moves and draws all objects, then increases the frame counter.
        """
        self._move_objects()
        for stim_object in self.objects:
            stim_object.draw()
        self.frame_counter += 1

    def _move_objects(self):
        """
        Moves the objects; must be implemented by the child class.
        """
        raise NotImplementedError()

    def __str__(self):
        parts = (str(self.__class__.__name__), str(self.name),
                 "time%.2f" % self.start_time, "speed%i" % int(self.speed))
        return "_".join(parts)
class HorizontalMovement(BaseStim):
    """
    Moves a single circle from left to right.
    """
    def __init__(self, win, speed=100, *args, **kwargs):
        """
        Parameters
        ----------
        win : psychopy.visual.Window
        speed : speed of the stimulus in pixel per second
        """
        self.speed = speed
        self.objects = (visual.Circle(win, radius=20, fillColor=(1, 1, 1),
                                      units="pix"), )
        super(HorizontalMovement, self).__init__(win, *args, **kwargs)

    def _move_objects(self):
        elapsed = self.time
        x_0, y_0 = self.start_pos
        # Constant-velocity motion along the x axis.
        self.objects[0].pos = (self.speed * elapsed + x_0, y_0)
class CircleMovement(BaseStim):
    """
    Moves one circle along a big circular path. Round and round.
    """
    def __init__(self, win, radius=200, speed=300, *args, **kwargs):
        """
        Parameters
        ----------
        win : psychopy.visual.Window
        radius : radius of the big circle
        speed : speed of the stimulus in pixel per second
        """
        self.radius = radius
        self.speed = speed
        self.objects = (visual.Circle(win, radius=20, fillColor=(1, 1, 1),
                                      units="pix"), )
        super(CircleMovement, self).__init__(win, *args, **kwargs)

    @property
    def angle_per_sec(self):
        """Angular speed in rad per second."""
        return self.speed / self.radius

    def _move_objects(self):
        x_0, y_0 = self.start_pos
        phi = self.angle_per_sec * self.time
        # The path is centred one radius to the left of start_pos, so the
        # stimulus begins exactly at start_pos (phi == 0).
        xx = self.radius * math.cos(phi) + x_0 - self.radius
        yy = self.radius * math.sin(phi) + y_0
        self.objects[0].pos = (xx, yy)
class RandomBlink(BaseStim):
    """
    A circle that blinks at random positions over the display.
    """
    def __init__(self, win, random_positions, blink_every_frame=20, *args,
                 **kwargs):
        """
        Parameters
        ----------
        win : psychopy.visual.Window
        random_positions : sequence of pairs
        blink_every_frame : number of frames a blink should occur
        """
        self.random_positions = random_positions
        self.blink_every_frame = blink_every_frame
        self.objects = (visual.Circle(win, radius=20, fillColor=(1, 1, 1),
                                      units="pix"), )
        super(RandomBlink, self).__init__(win, *args, **kwargs)

    def _move_objects(self):
        # NOTE: start_pos is intentionally unused here.
        positions = self.random_positions
        idx = int(self.time // self.flip_duration // self.blink_every_frame)
        if idx >= len(positions):
            # Hold the final position once the sequence is exhausted.
            idx = len(positions) - 1
        xx, yy = positions[idx]
        self.objects[0].pos = (xx, yy)

    @staticmethod
    def generate_random_positions(radius=150, size=24, start_pos=(0, 0),
                                  win_size=(1300, 700)):
        """
        Generate ``size`` positions, each ``radius`` pixels from the previous
        one in a random direction, reflected at the window borders.

        Parameters
        ----------
        radius : blink radius in pixel
        size : number of random positions to generate
        start_pos : (int, int)
        win_size : (int, int)
        """
        half_w = win_size[0] / 2
        half_h = win_size[1] / 2
        xx, yy = start_pos
        random_positions = [start_pos, ]
        for _ in range(size):
            angle = random.random() * 2 * math.pi
            dx = radius * math.cos(angle)
            dy = radius * math.sin(angle)
            # Reflect the step when it would leave the window.
            xx = xx + dx if -half_w <= xx + dx <= half_w else xx - dx
            yy = yy + dy if -half_h <= yy + dy <= half_h else yy - dy
            random_positions.append((xx, yy))
        return random_positions
class RandomWalk(BaseStim):
    """
    A circle that follows a precomputed random walk over the display.
    """
    def __init__(self, win, random_positions, *args, **kwargs):
        """
        Parameters
        ----------
        win : psychopy.visual.Window
        random_positions : list of pairs
        """
        self.random_positions = random_positions
        self.objects = (visual.Circle(win, radius=20, fillColor=(1, 1, 1),
                                      units="pix"), )
        super(RandomWalk, self).__init__(win, *args, **kwargs)

    def _move_objects(self):
        # NOTE: start_pos is intentionally unused here.
        positions = self.random_positions
        idx = int(self.time // self.flip_duration)
        if idx >= len(positions):
            # Stay at the final position once the walk is exhausted.
            idx = len(positions) - 1
        xx, yy = positions[idx]
        self.objects[0].pos = (xx, yy)

    @staticmethod
    def generate_random_positions(speed=800, dangle=90/180*math.pi, size=480,
                                  start_pos=(0, 0), win_size=(1300, 700),
                                  flip_duration=1/60):
        """
        Generate a random walk with constant step length whose direction
        changes by a bounded random amount every flip and which is reflected
        at the window borders.

        Parameters
        ----------
        speed : int
            speed in pixel per second
        dangle : float
            range of an angle in rad that changes the current angle every flip
        size : int
            number of random positions to generate
        start_pos : (int, int)
        win_size : (int, int)
        flip_duration : float
            in seconds
        """
        half_w = win_size[0] / 2
        half_h = win_size[1] / 2
        # Distance covered during one flip.
        step = speed * flip_duration
        xx, yy = start_pos
        random_positions = [start_pos, ]
        angle = random.random() * 2 * math.pi
        for _ in range(size):
            angle += random.random() * dangle - dangle / 2
            dx = step * math.cos(angle)
            dy = step * math.sin(angle)
            # Reflect at the vertical window borders.
            if -half_w <= xx + dx <= half_w:
                xx += dx
            else:
                angle += math.pi / 2
                xx -= dx
            # Reflect at the horizontal window borders.
            if -half_h <= yy + dy <= half_h:
                yy += dy
            else:
                angle += math.pi / 2
                yy -= dy
            random_positions.append((xx, yy))
        return random_positions
class MoveStopMove(BaseStim):
    """
    Circle that moves left to right, pausing for a while at a set time.
    """

    def __init__(self, win, speed=100, stop_t=1.0, stop_dur=1.0, *args,
                 **kwargs):
        """
        .. warning::
            This stimulus only works for left to right movement, i. e.
            positive speeds.

        Parameters
        ----------
        win : psychopy.visual.Window
        speed : int
            Speed of the stimulus in pixels per second.
        stop_t : float
            Time at which the circle stops.
        stop_dur : float
            How long the circle stays stopped.
        """
        self.stop_t = stop_t
        self.stop_dur = stop_dur
        self.speed = speed
        self.objects = (visual.Circle(win, radius=20, fillColor=(1, 1, 1),
                                      units="pix"), )
        super(MoveStopMove, self).__init__(win, *args, **kwargs)

    def _move_objects(self):
        """Position the circle; effective time is frozen during the stop."""
        start_x, start_y = self.start_pos
        effective_t = self.time
        if self.stop_t < effective_t <= self.stop_t + self.stop_dur:
            # Inside the pause window: hold the circle still.
            effective_t = self.stop_t
        elif effective_t > self.stop_t + self.stop_dur:
            # After the pause: subtract it so motion resumes seamlessly.
            effective_t -= self.stop_dur
        self.objects[0].pos = (self.speed * effective_t + start_x, start_y)
class MoveExplodeMove(BaseStim):
    """
    Circle that moves left to right and explodes into a ring at a set time.
    """

    def __init__(self, win, speed=300, explode_t=0, explode_dur=1.0,
                 radius=150, *args, **kwargs):
        """
        .. warning::
            This stimulus only works for left to right movement, i. e.
            positive speeds.

        Parameters
        ----------
        win : psychopy.visual.Window
        speed : int
            Speed of the stimulus in pixels per second.
        explode_t : float
            Time at which the explosion is triggered.
        explode_dur : float
            How long the explosion lasts.
        radius : float
            Radius of the explosion ring in pixels.
        """
        self.explode_t = explode_t
        self.explode_dur = explode_dur
        self.speed = speed
        self.radius = radius
        # Normal state: one large circle.
        self.circles = (visual.Circle(win, radius=20, fillColor=(1, 1, 1),
                                      units="pix"), )
        # Explosion state: six small fragments arranged on a ring.
        self.explosion = tuple(
            visual.Circle(win, radius=10, fillColor=(1, 1, 1), units="pix")
            for _ in range(6))
        super(MoveExplodeMove, self).__init__(win, *args, **kwargs)

    def _trajectory(self, tt):
        """Return the (x, y) position of the unexploded circle at time tt."""
        start_x, start_y = self.start_pos
        return (self.speed * tt + start_x, start_y)

    def _move_objects(self):
        """Show either the moving circle or the frozen explosion ring."""
        effective_t = self.time
        if self.explode_t < effective_t <= self.explode_t + self.explode_dur:
            # During the explosion: freeze time and show the fragments.
            effective_t = self.explode_t
            self.objects = self.explosion
            n_fragments = len(self.explosion)
            for ii, fragment in enumerate(self.objects):
                center_x, center_y = self._trajectory(effective_t)
                theta = 2 * math.pi * ii / n_fragments
                fragment.pos = (center_x + self.radius * math.cos(theta),
                                center_y + self.radius * math.sin(theta))
            return
        elif effective_t > self.explode_t + self.explode_dur:
            # After the explosion: subtract its duration and restore the circle.
            effective_t -= self.explode_dur
            self.objects = self.circles
        self.objects[0].pos = self._trajectory(effective_t)
if __name__ == "__main__":
win = visual.Window(size=(1280, 720))
text_stim = visual.TextStim(win)
fixation_cross = visual.TextStim(win, '+', height=25, units='pix')
eye_tracker = type("EyeTracker", (object,), {})() # creates an instance of an empty class
eye_tracker.sendImageMessage = lambda x: None
def calibrate():
print("calibration done")
show = create_show_text(win, text_stim, calibrate)
run_trial = create_run_trial(win, eye_tracker, calibrate, fixation_cross)
hori = HorizontalMovement(win, name="hori300", speed=800, start_pos=(-380, 0))
cm = CircleMovement(win, name="cm", radius=200)
msm = MoveStopMove(win, name="msm300", speed=300, stop_dur=1.0,
start_pos=(-600, 0))
mem = MoveExplodeMove(win, name="mem300", speed=300, start_pos=(-600, 0))
rw = RandomWalk(win,
RandomWalk.generate_random_positions(win_size=(1280, 720),
speed=400),
name="rw300")
rb = RandomBlink(win,
RandomBlink.generate_random_positions(win_size=(800, 600),
radius=200),
name="rb200",
blink_every_frame=60)
show(u"Schön, dass Sie | |
# coding=utf-8
from __future__ import division
from search_module.python_from_feng.se.config import root
import pickle
import time
import jieba, math
import jieba.posseg as pseg
class Subject:
    """
    Per-subject search index for one subject code: smoothed language-model
    word index, LDA word/teacher topic weights, and PageRank scores, all
    loaded from pickled files under ``root/<code>/k<k>``.
    """

    def __init__(self, sub, id_name):
        """
        Parameters
        ----------
        sub : dict
            Subject descriptor, e.g. ``{"code": '0828', "k": 36}``, where
            ``k`` is the LDA topic count the index was built with.
        id_name : dict
            Teacher metadata keyed by teacher id.
        """
        self.sub = sub
        self.id_name = id_name
        code = self.sub['code']
        k = self.sub['k']
        self.path = root + '/' + code + '/k' + str(k)
        # Inverted word index:
        # {word: {teacher_id: word count / total words, ...,
        #         'col_fre': collection frequency of the word}}
        self.lmindex = self._load_pickle('/wordIndex')
        # word -> topic weights, e.g. {'催化剂': {0: 0.104, 1: 0.0, ...}, ...}
        self.ldaword = self._load_pickle('/wordToTopic')
        # teacher -> topic weights: {teacher_id: {topic_id: p, ...}, ...}
        self.ldaexp = self._load_pickle('/teacherTopic')
        # PageRank score per teacher: {teacher_id: value, ...}
        self.pagerank = self._load_pickle('/teacherRank')
        # words associated with each teacher
        self.teacher_word = self._load_pickle('/teacherWord')
        # smoothing coefficient for the language model
        self.cal = 0.9

    def _load_pickle(self, name):
        """Unpickle ``self.path + name``, closing the file deterministically.

        The original code used ``pickle.load(open(...))`` six times, leaking
        every file handle; a context manager fixes that.
        """
        with open(self.path + name, 'rb') as fh:
            return pickle.load(fh)

    def cal_lda_one_word(self, word, teacher_id):
        """
        Score teachers for a single word through the LDA topic model.

        Note: the original author marked this method as unused ("没有用").

        :param word: one query word
        :param teacher_id: optional collection used to filter candidates
        :return: {teacher_id: lda score, ...}; empty when the word is unknown
        """
        ld = self.ldaword.get(word)
        # sort: topic weights > 1.0e-06, in descending order of weight
        sort = {}
        res = {}
        if ld is not None:
            if teacher_id is not None:
                # NOTE(review): ld's keys are topic ids, so filtering them
                # against a set of teacher ids looks suspect — confirm.
                ld = {k: ld[k] for k in ld if k in teacher_id}
            # Sort topics by weight, descending, and drop negligible ones.
            sortld = sorted(ld.items(), key=lambda item: item[1], reverse=True)
            for topic_id, weight in sortld:
                if weight > 1.0e-06:
                    sort[topic_id] = weight
            for j in sort.keys():
                # j is a topic id; m is a teacher id.
                for m in self.ldaexp.keys():
                    if j in self.ldaexp[m]:
                        # teacher-to-topic weight times topic-to-word weight,
                        # accumulated over all shared topics
                        if m in res:
                            res[m] += self.ldaexp[m][j] * sort[j]
                        else:
                            res[m] = self.ldaexp[m][j] * sort[j]
        return res

    def cal_one_word(self, word, teacher_id):
        """
        Score teachers for a single word with the smoothed language model.

        :param word: one query word
        :param teacher_id: optional collection used to filter candidates
        :return: {teacher_id: smoothed score, ..., 'col': collection
                 frequency}; empty dict when the word is not in this
                 subject's index
        """
        lm = self.lmindex.get(word)
        res = {}
        # lm can be None: this is one subject's inverted index and may
        # simply not contain the word.
        if lm is not None:
            if teacher_id is not None:
                # Keep only whitelisted teachers, but never drop the
                # collection-frequency entry needed for smoothing.
                lm = {k: lm[k] for k in lm if k in teacher_id or k == "col_fre"}
            for l in lm.keys():
                if l != 'col_fre':
                    # Jelinek-Mercer smoothing with the collection frequency.
                    res[l] = self.cal * lm[l] + (1 - self.cal) * lm['col_fre']
            res['col'] = lm['col_fre']
        return res

    def cal_rank(self, res, lda, cof):
        """
        Combine language-model, LDA and PageRank evidence into one score
        per teacher.

        :param res: {word: {teacher_id: value, ..., 'col': col_fre}, ...}
        :param lda: {word: {teacher_id: value, ...}, ...}
        :param cof: penalty factor for query words this subject lacks
        :return: {teacher_id: combined score, ...}
        """
        rank = {}
        # Collect every teacher id that appears in either model's results.
        exp_list = [r for wd in res.keys() for r in res[wd]]
        for wd in lda.keys():
            for r in lda[wd]:
                exp_list.append(r)
        exp_list = set(exp_list)
        print("len(exp_list): ", len(exp_list))
        if 'col' in exp_list:
            # 'col' is the collection-frequency marker, not a teacher id.
            exp_list.remove('col')
        for r in exp_list:
            rank[r] = cof
            for wd in res.keys():
                if len(res[wd]) != 0:
                    # Use the teacher's own score, falling back to the
                    # collection frequency when the teacher lacks the word.
                    if res[wd].get(r):
                        rank[r] *= res[wd][r]
                    else:
                        rank[r] *= res[wd]['col']
                    if wd in lda and lda[wd].get(r):
                        rank[r] *= lda[wd][r]
                    else:
                        rank[r] *= 10e-6
            for wd in lda:
                if wd not in res and r in lda[wd]:
                    rank[r] *= lda[wd][r]
                else:
                    # NOTE(review): the original author flagged this else
                    # branch as their own late addition ("这句是我自己加的");
                    # it also penalizes words already handled in the res
                    # loop above — confirm this double penalty is intended.
                    rank[r] *= 10e-6
            if self.pagerank.get(r):
                # Weight by PageRank and the teacher's composite score.
                rank[r] *= self.pagerank[r] * self.id_name[r]["composite_score"]
        return rank

    def do_query(self, words, teacher_id):
        """
        Rank this subject's teachers for the given query words.

        :param words: list of query words
        :param teacher_id: optional whitelist of teacher ids (or None)
        :return: list of (teacher_id, score) tuples, best first; [] when
                 no word matched this subject at all
        """
        # {word: {teacher_id: value, ..., 'col': col_fre}, ...}
        temp_res = {}
        # {word: {teacher_id: value, ...}, ...}
        temp_lda = {}
        for word in words:
            temp_res[word] = self.cal_one_word(word, teacher_id)
            temp_lda[word] = self.cal_lda_one_word(word, teacher_id)
        # Drop words that matched nothing in either model.
        for word in words:
            if word in temp_res and not temp_res[word]:
                temp_res.pop(word)
            if word in temp_lda and not temp_lda[word]:
                temp_lda.pop(word)
        if not temp_res and not temp_lda:
            return []
        # Penalty factor for query words unknown to this subject, and the
        # noise floor below which results are discarded.
        cof = math.pow(10e-6, len(words) - max(len(temp_res), len(temp_lda)))
        level = math.pow(10e-6, len(words) + 1)
        rank = self.cal_rank(temp_res, temp_lda, cof)
        sortrk = sorted(rank.items(), key=lambda item: item[1], reverse=True)
        result = [(r[0], r[1]) for r in sortrk if r[1] > level]
        return result

    def get_teacher_word(self):
        # Debug helper: dumps the word list of one hard-coded teacher id.
        print("teacher_word: ", self.teacher_word[94720])
class Query:
    """
    Multi-subject teacher search: tokenizes a query, resolves teacher
    filters, and delegates ranking to each subject's ``Subject`` index.
    """

    def __init__(self, subs):
        """
        Parameters
        ----------
        subs : list of dict
            Subject descriptors, e.g. [{"code": '01', "k": 46}, ...].
        """
        self.subs = subs
        # {teacher_id: {'NAME': ..., 'SCHOOL_ID': ..., 'INSTITUTION_ID': ...}}
        # All files are opened with context managers; the original
        # pickle.load(open(...)) / open(...).readlines() calls leaked handles.
        with open(root + '/teacherName', 'rb') as fh:
            self.id_name = pickle.load(fh)
        with open(root + '/institutionName', 'rb') as fh:
            self.institution_info = pickle.load(fh)
        with open(root + '/SchoolName', 'rb') as fh:
            self.school_info = pickle.load(fh)
        self.Subject = {sub['code']: Subject(sub, self.id_name)
                        for sub in self.subs}
        # Stop-word list: plain words plus the word field of "word:freq" lines.
        self.stop = []
        with open('fenci/stopwords.txt', encoding='utf-8') as fh:
            self.stop.extend(line.strip() for line in fh)
        with open('fenci/stop_word_4.txt', encoding='utf-8') as fh:
            self.stop.extend(line.strip().split(':')[0] for line in fh)
        # POS tags accepted as query terms (noun-like tags only).
        self.fill = ['vn', 'n', 'nr', 'nr1', 'nr2', 'nrj', 'nrf', 'ns',
                     'nsf', 'nt', 'nz', 'nl', 'ng']
        jieba.load_userdict('fenci/userdict.txt')
        # (The original constructor built a second, throwaway Subject just
        # to print one debug entry, re-loading five pickles; removed.)

    def prints(self, result):
        """
        Print per-subject teacher hits.

        :param result: {'subject_code': [(teacher_id, score), ...], ...}
        """
        for code in result:
            teachers = result[code]
            if len(teachers) == 0:
                continue
            # subject code and number of matching teachers
            print("学科:%s,有关教师个数:%d" % (code, len(teachers)))
            for t in teachers:
                # school id + teacher name + (id, score)
                print(str(self.id_name[t[0]]["SCHOOL_ID"])
                      + self.id_name[t[0]]["NAME"] + ":" + str(t))
            print()

    def prints_for_institution(self, result, school):
        """
        Aggregate teacher scores by institution and print the institutions
        of ``school`` best-first.

        :param result: {'subject_code': [(teacher_id, score), ...], ...}
        :param school: school name to restrict the printout to
        """
        institution_info = {}
        for code in result:
            for teacher_id, score in result[code]:
                institution_id = self.id_name[teacher_id]['INSTITUTION_ID']
                # Fold each teacher's score into their institution's total.
                if institution_id in institution_info:
                    institution_info[institution_id] += score
                else:
                    institution_info[institution_id] = score
        # Institutions sorted by aggregated score, descending.
        institution_rank = dict(sorted(institution_info.items(),
                                       key=lambda x: x[1], reverse=True))
        for institution_id in institution_rank:
            schoolName = self.institution_info[institution_id]['SCHOOL_NAME']
            # Only print institutions of the requested school.
            if schoolName == school:
                print(schoolName
                      + self.institution_info[institution_id]['NAME']
                      + str(institution_rank[institution_id]))

    def prints_for_school(self, result, city=None):
        """
        Aggregate teacher scores by school and print schools best-first.

        :param result: {'subject_code': [(teacher_id, score), ...], ...}
        :param city: when given, only schools in this city are printed
        """
        school_info = {}
        for code in result:
            for teacher_id, score in result[code]:
                school_id = self.id_name[teacher_id]['SCHOOL_ID']
                # Fold each teacher's score into their school's total.
                if school_id in school_info:
                    school_info[school_id] += score
                else:
                    school_info[school_id] = score
        # Schools sorted by aggregated score, descending.
        school_rank = sorted(school_info.items(), key=lambda x: x[1],
                             reverse=True)
        print(school_rank)
        for school_id, total in school_rank:
            if city is not None:
                # BUG FIX: the original indexed self.school_info with the
                # whole (id, score) tuple here, which can never be a key.
                if self.school_info[school_id]['CITY'] != city:
                    continue
            print(self.school_info[school_id]['NAME'] + str(total))

    def _filter_teachers(self, filer):
        """
        Resolve the teacher-id whitelist implied by ``filer``.

        Filters compose: school, institution and name restrictions are
        intersected. (BUG FIX: the original reset ``teacher_id`` to None
        whenever no institution filter was present, silently discarding an
        earlier school filter; it also read the lowercase keys 'school_id'
        and 'name' although this class uses 'SCHOOL_ID'/'NAME' everywhere
        else.)

        :param filer: dict with optional 'school', 'institution', 'name'
        :return: set of teacher ids, or None when no filter applies
        """
        teacher_id = None
        if "school" in filer and len(filer["school"]) > 0:
            teacher_id = {t for t in self.id_name
                          if self.id_name[t]['SCHOOL_ID'] in filer['school']}
        if "institution" in filer and len(filer['institution']) > 0:
            candidates = teacher_id if teacher_id is not None else self.id_name
            teacher_id = {t for t in candidates
                          if str(self.id_name[t]['INSTITUTION_ID'])
                          in filer['institution']}
        if "name" in filer and len(filer["name"]) > 0:
            candidates = teacher_id if teacher_id is not None else self.id_name
            teacher_id = {t for t in candidates
                          if self.id_name[t]['NAME'].find(filer["name"]) >= 0}
        return teacher_id

    def do_query(self, text, filer):
        """
        Tokenize ``text``, keep noun-like non-stop words, and rank teachers
        per subject.

        :param text: raw query string
        :param filer: dict of optional filters: 'school', 'institution',
                      'name', 'code'
        :return: {'subject_code': [(teacher_id, score), ...], ...}
        """
        # Full-mode segmentation first, then POS tagging of the result.
        text = " ".join(jieba.cut(text, cut_all=True))
        seg_list = pseg.cut(text)
        # Keep nouns that are not stop words.
        words = [word for word, flag in seg_list
                 if flag in self.fill and word not in self.stop]
        print(words)
        teacher_id = self._filter_teachers(filer)
        result = {}
        for sub in self.Subject:
            # Skip subjects excluded by an explicit subject-code filter.
            if "code" in filer and len(filer['code']) > 0 \
                    and sub not in filer['code']:
                continue
            result[sub] = self.Subject[sub].do_query(words, teacher_id)
        return result
def queryForProvince(filer):
    """Placeholder for a per-province query; not implemented yet."""
    pass
def queryForCity(filer,province='北京'):
    """Placeholder for a per-city query; not implemented yet.

    ``province`` is currently unused.
    """
    pass
def queryForSchool(filer,city = '北京市'):
    """Run a canned query and print per-school results.

    NOTE(review): relies on a module-level ``query`` (a Query instance)
    being defined before this is called — verify at the call site. The
    leading ``pass`` is a leftover no-op, and ``filer['city']`` is set
    although ``do_query`` does not appear to read a 'city' filter —
    presumably filtering was meant to happen in ``prints_for_school``;
    confirm.
    """
    pass
    # t = input("请输入要搜索的内容")
    # hard-coded query text; the interactive prompt above is disabled
    t = "电力电子与电力传动"
    start = time.time()
    filer['city'] = city
    r = query.do_query(t, filer)
    query.prints_for_school(r)
    # end = time.time()
    # # print (end - start)
def queryForInstitution(filer,school = '中国人民大学'):
    """Prompt for a query and print per-institution results for ``school``.

    NOTE(review): relies on a module-level ``query`` (a Query instance)
    being defined before this is called — verify at the call site. The
    leading ``pass`` is a leftover no-op.
    """
    pass
    t = input("请输入要搜索的内容")
    start = time.time()
    #filer['school'] = school
    r = query.do_query(t, filer)
    query.prints_for_institution(r,school)
    # end = time.time()
    # # print (end - star
def queryForTeacher(filer,institution_id = '1526'):
    """Prompt for a query, filter by ``institution_id`` and print teachers.

    NOTE(review): relies on a module-level ``query`` (a Query instance)
    being defined before this is called — verify at the call site.
    ``start``/``end`` are measured but never printed (the print is
    commented out).
    """
    t = input("请输入要搜索的内容")
    start = time.time()
    filer['institution']=institution_id
    r = query.do_query(t, filer)
    query.prints(r)
    end = time.time()
    # print (end - start)
if __name__ == '__main__':
subject = [
{"code": '0810', "k": 10},
{"code": '0811', "k": 10},
{"code": '0812', "k": 10},
{"code": '0805', "k": 10},
{"code": '0835', "k": 10},
{"code": '0827', "k": 14},
{"code": '0832', "k": 10},
{"code": '081101', "k": 12},
{"code": '0833', "k": 10},
{"code": '0835', "k": 10},
{"code": '0828', "k": 16},
{"code": '0831', "k": 12},
{"code": '081202', "k": 12},
| |
Size:').grid(row=5, column=0)
d_length_drop = tk.OptionMenu(right_frame, d_length_select, *d_length_list, command=update_deriv)
d_length_drop.grid(row=5, column=1)
# Autoscale Toggle
ascale_label = tk.Label(right_frame, text='Autoscale arrows:')
ascale_label.grid(row=7, column=0)
ascale_toggle = tk.Button(right_frame, image=toggle_image_off, bd=0, command=scale_toggle_response)
ascale_toggle.grid(row=7, column=1, pady=5)
ascale_toggle.bind('<Enter>', lambda x: hover_instruction_response(1, 1))
ascale_toggle.bind('<Leave>', lambda x: hover_instruction_response(1, 0))
# define entry boxes to allow user to input x_m and y_m
x_m_entry = tk.Entry(right_frame, width=12)
y_m_entry = tk.Entry(right_frame, width=12)
x_m_entry.grid(row=6, column=0)
y_m_entry.grid(row=6, column=1)
# and a button to submit these:
Set_target_btn = tk.Button(right_frame, text='Set Target', command=set_inset_target)
Set_target_btn.grid(row=6, column=2, padx=20)
Set_target_btn.bind('<Enter>', lambda x: hover_instruction_response(2, 1))
Set_target_btn.bind('<Leave>', lambda x: hover_instruction_response(2, 0))
analytic_select = tk.IntVar()
analytic_select.set(0)
tk.Label(right_frame, text= 'Toggle Analytic Label:').grid(row=9, column=0)
analytic_toggle = tk.Button(right_frame, image=toggle_image_off, bd=0, command=analytic_toggle_response)
analytic_toggle.grid(row=9, column=1)
analytic_toggle.bind('<Enter>', lambda x: hover_instruction_response(4, 1))
analytic_toggle.bind('<Leave>', lambda x: hover_instruction_response(4, 0))
'''
SINGULARITY NOTEBOOK
'''
# get a button to draw on singularities
singularity_button = tk.Button(singular_frame, text='search singularities', command=show_singularities)
singularity_button.grid(row=0, column=0)
# entry for N
tk.Label(singular_frame, text='<- sampling points').grid(row=0, column=2, columnspan=2)
fine_grid_N_entry = tk.Entry(singular_frame, width=5)
fine_grid_N_entry.grid(row=0, column=1)
fine_grid_N_entry.insert(0, 10)
# define an entry where the user can input a known singularity equation
# this will be taken and plotted as a red, dotted line
tk.Label(singular_frame, text='singularity equation:').grid(row=1, column=0, columnspan=2)
# define a dropdown to select y= or x=
singular_var = tk.StringVar()
singular_list = ['y=', 'x=', 'point']
singular_var.set(singular_list[0])
dpd_drop = tk.OptionMenu(singular_frame, singular_var, *singular_list, command=singular_drop_response)
dpd_drop.grid(row=2, column=0)
# equation entry box
known_singularity_entry = tk.Entry(singular_frame, width=15)
known_singularity_entry.grid(row=2, column=1)
known_singularity_entry.insert(0, '')
# define a submit button for that entry
submit_known_singularity_btn = tk.Button(singular_frame, text='show expression', command=known_singularity_response)
submit_known_singularity_btn.grid(row=3, column=0)
'''
set up all in BOTTOM FRAME
'''
# define entry boxes for the field equations in x and y
component_x_entry_label = tk.Label(bot_frame, text='dx component')
component_x_entry_label.grid(row=0, column=0)
x_comp_entry = tk.Entry(bot_frame, width=20, borderwidth=2)
x_comp_entry.grid(row=1, column=0)
x_comp_entry.insert(0, 'y*sin(x)')
component_y_entry_label = tk.Label(bot_frame, text='dy component')
component_y_entry_label.grid(row=0, column=1)
y_comp_entry = tk.Entry(bot_frame, width=20, borderwidth=2)
y_comp_entry.grid(row=1, column=1)
y_comp_entry.insert(0, '-x*cos(y)')
# define strings from initial components
# these are needed by the derivative function, therefore for the derivative
# to work on the initial field, need to initially define them
# THESE ARE HERE TO APPEAR AFTER THE ENTRIES ARE ACTUALLY DEFINED
# NOT VITAL, BUT EASIER
string_x = str(x_comp_entry.get())
string_y = str(y_comp_entry.get())
# set up a dropdown box for 1-forms and VFs
field_select = tk.StringVar()
field_select.set(field_name_list[0])
field_select_drop_label = tk.Label(bot_frame, text='Select Pre-Defined 1-Form:')
field_select_drop_label.grid(row=2, column=0, columnspan=2)
field_select_drop = ttk.Combobox(bot_frame, value=field_name_list, width=40)
field_select_drop.current(0)
field_select_drop.grid(row=3, column=0, columnspan=2)
field_select_drop.bind("<<ComboboxSelected>>", field_selection_response)
'''
what was in small frame initially, now also in the bottom notebook
'''
# define the PLOT button
PLOT_btn = tk.Button(small_frame, text='PLOT', padx=40, pady=20, command=PLOT_response)
PLOT_btn.grid(row=0, column=2, rowspan=2)
# define a button that will just plot the given cartesian field
# on a polar grid
polar_grid_plot_btn = tk.Button(small_frame, text='Polar plot', padx=20, command= lambda: Polar_grid_plot_response(tensor.get()))
polar_grid_plot_btn.grid(row=1, column=1)
# define a button in small frame that will open new window to adjust arrowheads
custom_btn = tk.Button(small_frame, text='Visuals Customise', padx=1, pady=1, command=custom_btn_reponse)
custom_btn.grid(row=0, column=0)
# define a button to customise the polar grids
polar_grid_custom_btn = tk.Button(small_frame, text='Polar customise', padx=7, pady=1, command=polar_grid_custom_reponse)
polar_grid_custom_btn.grid(row=1, column=0)
# define entry boxes for each (in order): L, pt_den, s_max and a ; and info txt
# Also input into them the initial values
tk.Label(small_frame, text='Size').grid(row=2, column=0)
L_entry = tk.Entry(small_frame, width=5, borderwidth=1)
L_entry.grid(row=3, column=0, padx=2)
L_entry.insert(0, L)
tk.Label(small_frame, text='grid').grid(row=2, column=1)
pt_den_entry = tk.Entry(small_frame, width=5, borderwidth=1)
pt_den_entry.grid(row=3, column=1, padx=2)
pt_den_entry.insert(0, pt_den)
tk.Label(small_frame, text='max sheets').grid(row=2, column=2)
s_max_entry = tk.Entry(small_frame, width=5, borderwidth=1)
s_max_entry.grid(row=3, column=2, padx=2)
s_max_entry.insert(0, s_max)
# logarithmic scaling button:
logartmic_scale_bool = tk.IntVar()
logartmic_scale_bool.set(0)
logartmic_scale_toggle = tk.Button(small_frame, image=toggle_image_off, bd=0, command=log_scale_toggle_response)
logartmic_scale_toggle.grid(row=0, column=1)
logartmic_scale_toggle.bind('<Enter>', lambda x: hover_instruction_response(0, 1))
logartmic_scale_toggle.bind('<Leave>', lambda x: hover_instruction_response(0, 0))
'''
set up all in LI tab
'''
# define a label that will display it
tk.Label(LI_frame, text='Shape Area:').grid(row=0, column=0)
shape_area_label = tk.Label(LI_frame, text=shape_area)
shape_area_label.grid(row=0, column=1)
tk.Label(LI_frame, text='LI Total:').grid(row=1, column=0, padx=10)
LI_total_label = tk.Label(LI_frame, text=LI_total)
LI_total_label.grid(row=1, column=1)
tk.Label(LI_frame, text='Ratio:').grid(row=1, column=2)
ratio1_label = tk.Label(LI_frame, text=ratio1)
ratio1_label.grid(row=1, column=3)
tk.Label(LI_frame, text='Flux:').grid(row=2, column=0)
flux_label = tk.Label(LI_frame, text=flux)
flux_label.grid(row=2, column=1)
tk.Label(LI_frame, text='Ratio:').grid(row=2, column=2)
ratio2_label = tk.Label(LI_frame, text=ratio2)
ratio2_label.grid(row=2, column=3)
# display a restart button that will clear the lines
# and restart the variables.
LI_restart_btn = tk.Button(LI_frame, text='LI Restart', padx=20, command=LI_restart)
LI_restart_btn.grid(row=3, column=0, columnspan=2)
# define a drop down to draw: connected lines, square or circle.
LI_shape_select = tk.StringVar()
LI_shape_list = ['Polygon', 'Circle']
LI_shape_select.set(LI_shape_list[0])
LI_shape_instruction = tk.Label(LI_frame, text='Shape:')
LI_shape_instruction.grid(row=4, column=0)
LI_shape_drop = tk.OptionMenu(LI_frame, LI_shape_select, *LI_shape_list, command=LI_shape_select_response)
LI_shape_drop.grid(row=4, column=1)
# input the radiobuttons for arrows, stacks or both again here
arrow_btn = tk.Radiobutton(LI_frame, text='arrow', variable=tensor, value=1, command=lambda: vect_type_response(tensor.get())).grid(row=7, column=1)
stack_btn = tk.Radiobutton(LI_frame, text='stack', variable=tensor, value=0, command=lambda: vect_type_response(tensor.get())).grid(row=7, column=2)
# Radiobutton for showing flux/circulation
showcol = tk.IntVar()
showcol.set(0)
tk.Label(LI_frame, text='Colour Curve:').grid(row=8, column=0)
shownone_btn = tk.Radiobutton(LI_frame, text='None', variable=showcol, value=0, command= lambda: showcol_response(showcol.get())).grid(row=8, column=1)
showcirc_btn = tk.Radiobutton(LI_frame, text='Show Circ.', variable=showcol, value=1, command= lambda: showcol_response(showcol.get())).grid(row=8, column=2)
showflux_btn = tk.Radiobutton(LI_frame, text='Show Flux', variable=showcol, value=2, command= lambda: showcol_response(showcol.get())).grid(row=8, column=3)
# add an entry so the user can choose what grid they want
# by choosing grid lines separation
grid_LI_poly_label = tk.Label(LI_frame, text='Grid Separation:').grid(row=9, column=0)
grid_sep_poly_entry = tk.Entry(LI_frame, width=10)
grid_sep_poly_entry.grid(row=9, column=1)
grid_sep_poly_entry.insert(0, '2')
# define a button to submit these changes
submit_poly_sep_grid = tk.Button(LI_frame, text='Submit Grid', command=poly_grid_submit)
submit_poly_sep_grid.grid(row=9, column=2)
# Autoscale Arrows Toggle
tk.Label(LI_frame, text='Autoscale arrows:').grid(row=10, column=0)
ascale_toggle_LI = tk.Button(LI_frame, image=toggle_image_off, bd=0, command=scale_toggle_response)
ascale_toggle_LI.grid(row=10, column=1, pady=5)
'''
DEFINE ALL WIDGETS IN CALCULUS TAB
'''
# define a window to supply the 2-form
tk.Label(calculus_frame, text='2-form on R2').grid(row=0, column=1)
form_2_entry = tk.Entry(calculus_frame, width=15, borderwidth=2)
form_2_entry.grid(row=0, column=0)
form_2_entry.insert(0, form_2_str)
# begin displaying it on a green colour to show that this is being displayed
# to begin with
form_2_entry.configure(bg='#C0F6BB')
# extra label for 0 form.
# for now not an entry box because it doesn't get used anywhere
# I make it red to remember to come back to it later and use it.
Label_zero_form = tk.Label(calculus_frame, text='Zero form:')
Label_zero_form.grid(row=4, column=0)
# set up an entry for zero forms:
form_0_entry = tk.Entry(calculus_frame, width=15, borderwidth=2)
form_0_entry.grid(row=4, column=1)
form_0_entry.insert(0, '')
# set up a button to plot the 0-form
form_0_btn = tk.Button(calculus_frame, text='0-form plot', padx=3, pady=5, command=form_0_response)
form_0_btn.grid(row=4, column=2)
# define a button to submit the supplied 2-form and plot it as blocks
form_2_btn = tk.Button(calculus_frame, text='2-form plot', padx=3, pady=5, command=form_2_response)
form_2_btn.grid(row=3, column=1)
# define a button that will just plot the 1-form
# this will not be needed when its it merged with main GUI
# as there will already be a plot button there
form_1_btn = tk.Button(calculus_frame, text='1-form plot', padx=3, pady=5, command=form_1_stacks_response)
form_1_btn.grid(row=3, column=0)
# add a button to plot the interior derivative as superposing stack fields
INT_btn = tk.Button(calculus_frame, text='Int Deriv', padx=0, pady=2, command=Int_deriv_response)
INT_btn.grid(row=5, column=0)
# define a button to plot the exterior derivative from given u and v
# Note, it will get the 2-form first, then return back down
# to a one form to avoid cancellations
# therefore, it will also just be one possible representation
# not the only possible one
EXT_int_btn = tk.Button(calculus_frame, text='Ext Deriv', padx=0, pady=2, command=Ext_deriv_response)
EXT_int_btn.grid(row=5, column=1)
# define a wedge product button that will let the user input TWO 1-forms
# in a new window to be wedged to give a 2-form
wedge_btn = tk.Button(calculus_frame, text='Wedge', padx=0, pady=2, command=wedge_2_response)
wedge_btn.grid(row=6, column=0)
# define a button that will find the Hodge dual
Hodge_btn = tk.Button(calculus_frame, text='Hodge', padx=5, pady=2, command=Hodge_full)
Hodge_btn.grid(row=7, column=0)
# define radiobuttons button to choose zooming with the mouse on 2-forms on R2
# and as opposed to tools
R2_tools_opt = tk.IntVar()
R2_tools_opt.set(0)
R2_tools_Tools_btn = tk.Radiobutton(calculus_frame, text='Tools', variable=R2_tools_opt, value=0, command=lambda: R2_tools_handler(R2_tools_opt.get()))
R2_tools_Zoom_btn = tk.Radiobutton(calculus_frame, text='Zoom', variable=R2_tools_opt, value=1, command=lambda: R2_tools_handler(R2_tools_opt.get()))
R2_tools_int_btn = tk.Radiobutton(calculus_frame, text='Area Int', variable=R2_tools_opt, value=2, command=lambda: R2_tools_handler(R2_tools_opt.get()))
R2_tools_Tools_btn.grid(row=8, column=0)
R2_tools_Zoom_btn.grid(row=8, column=1)
R2_tools_int_btn.grid(row=8, column=2)
# set up a zooming tool for that too
tk.Label(calculus_frame, text='Zoom').grid(row=9, column=0)
zoom_slider_R2 = tk.Scale(calculus_frame, from_=1, to=20, orient=tk.HORIZONTAL)
zoom_slider_R2.bind("<ButtonRelease-1>", update_2_form_zoom)
zoom_slider_R2.grid(row=9, column=1)
zoom_slider_R2.configure(state=tk.DISABLED)
# Drop down to select the R2 2 form zoom plot point density
zoomR2pd_select = tk.IntVar()
zoomR2pd_select.set(11)
zoomR2pd_list = [5, 6, 10, 11, 15, 16, 20, 21]
tk.Label(calculus_frame, text='Inset Plot Point Density:').grid(row=10, column=0)
zoomR2pd_drop = tk.OptionMenu(calculus_frame, zoomR2pd_select, *zoomR2pd_list, command=update_2_form_zoom)
zoomR2pd_drop.grid(row=10, column=1)
# Drop down to select inset axis size for R2 2 forms
zoomR2_length_select = tk.DoubleVar()
zoomR2_length_list = [0.1, 0.2, 0.3, 0.4, 0.5]
zoomR2_length_select.set(zoomR2_length_list[2])
tk.Label(calculus_frame, text='Inset Fractional Size:').grid(row=11, column=0)
zoomR2_length_drop = tk.OptionMenu(calculus_frame, zoomR2_length_select, *zoomR2_length_list, command=update_2_form_zoom)
zoomR2_length_drop.grid(row=11, column=1)
# define entry boxes to allow user to input x_m and y_m
x_m_entry_calc = tk.Entry(calculus_frame, width=12)
y_m_entry_calc = tk.Entry(calculus_frame, width=12)
x_m_entry_calc.grid(row=12, column=0)
y_m_entry_calc.grid(row=12, column=1)
# and a button to submit these:
Set_target_btn_calc = tk.Button(calculus_frame, text='Set Target', command=set_inset_target_calc)
Set_target_btn_calc.grid(row=12, column=2, padx=20)
# add a dropdown menu for 2-forms, for now at the end of this tab
select_form_2 = tk.StringVar()
select_form_2.set(list_form_2_names[0])
select_form_2_drop_label = tk.Label(calculus_frame, text='Select Pre-Defined 2-Form:')
select_form_2_drop_label.grid(row=1, column=0, columnspan=3)
select_form_2_drop = ttk.Combobox(calculus_frame, value=list_form_2_names, width=40)
select_form_2_drop.current(0)
select_form_2_drop.grid(row=2, column=0, columnspan=3)
select_form_2_drop.bind("<<ComboboxSelected>>", selection_form_2_response)
'''
DEFINE WIDGETS USED IN R3 CODE
'''
height_frame = tk.LabelFrame(r3_frame, text='viewing frame', padx=2, pady=2)
height_frame.grid(row=0, column=0)
# Label to show current axis value
axis_height_txt = tk.Label(height_frame, text=str(z[0]))
axis_height_txt.grid(row=1, column=0)
# on the left, make a 'move down' button
down_height = tk.Button(height_frame, text=' \/ ', command=lambda: label_update(-1))
down_height.grid(row=2, column=0)
# on the right, make a 'move up' button
up_height = tk.Button(height_frame, text=' /\ ', command=lambda: label_update(1))
up_height.grid(row=0, column=0)
# define a button to submit the currently chosen value:
Submit_h_btn = tk.Button(height_frame, text='SUBMIT', padx=2, pady=50, command=slide)
Submit_h_btn.grid(row=0, column=1, rowspan=3, padx=5)
# define radiobuttons to choose from which axis the user is looking:
view_tk = tk.StringVar()
view_tk.set('z')
view_z_btn = tk.Radiobutton(height_frame, text='z', variable=view_tk, value='z', command=lambda: view_response(view_tk.get())).grid(row=0, column=2)
view_z_btn = tk.Radiobutton(height_frame, text='y', variable=view_tk, value='y', command=lambda: view_response(view_tk.get())).grid(row=1, column=2)
view_z_btn = tk.Radiobutton(height_frame, text='x', variable=view_tk, value='x', command=lambda: view_response(view_tk.get())).grid(row=2, column=2)
# NOTE NOT GREAT I KNOW BUT TEMPORARY:
# define a new frame for the fields to be input
field_input_frame = tk.LabelFrame(r3_frame, text='Fields frame', padx=5, pady=5)
field_input_frame.grid(row=0, column=1)
# define a button that will let the user chose the splitting option
# for 2-forms plotted as stacks.
# define entry boxes | |
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
from collections import OrderedDict
from webkitpy.common.host_mock import MockHost
from webkitpy.common.system.filesystem_mock import MockFileSystem
from webkitpy.common.system.log_testing import LoggingTestCase, LogTesting
from webkitpy.layout_tests.builder_list import BuilderList
from webkitpy.layout_tests.port.factory import PortFactory
from webkitpy.layout_tests.port.test import LAYOUT_TEST_DIR
from webkitpy.layout_tests.update_test_expectations import main
from webkitpy.layout_tests.update_test_expectations import RemoveFlakesOMatic
from webkitpy.tool.commands.flaky_tests import FlakyTests
class FakeBotTestExpectations(object):
    """Fake of BotTestExpectations that serves canned, de-duplicated results.

    Args:
        results_by_path: Dict mapping test paths to lists of result strings,
            e.g. {'test/a.html': ['PASS', 'PASS', 'FAIL']}.
    """

    def __init__(self, results_by_path):
        self._results = {}
        # Make the results distinct like the real BotTestExpectations.
        # Use items() rather than the Python-2-only iteritems() so this
        # fake also works under Python 3; in Python 2, items() is equivalent
        # here (it just materializes a list).
        for path, results in results_by_path.items():
            self._results[path] = list(set(results))

    def all_results_by_path(self):
        """Returns the dict of distinct result lists keyed by test path."""
        return self._results
class FakeBotTestExpectationsFactory(object):
    """Factory producing FakeBotTestExpectations from canned per-builder data.

    The _all_results_by_builder attribute holds the distinct results seen in
    at least one run of each test. For example, if the bot results for
    mytest.html are:
        PASS PASS FAIL PASS TIMEOUT
    then _all_results_by_builder would be:
    {
        'WebKit Linux Trusty' : {
            'mytest.html': ['FAIL', 'PASS', 'TIMEOUT']
        }
    }
    """

    def __init__(self):
        # Populated directly by tests before use.
        self._all_results_by_builder = {}

    def expectations_for_builder(self, builder):
        """Returns fake expectations for |builder|, or None if unknown."""
        try:
            results = self._all_results_by_builder[builder]
        except KeyError:
            return None
        return FakeBotTestExpectations(results)
class FakePortFactory(PortFactory):
    """PortFactory that always produces the 'test' port.

    The produced port is patched with the given build types/systems and a
    fixed configuration-specifier macro dict so tests control exactly which
    configurations are considered.
    """

    def __init__(self, host, all_build_types=None, all_systems=None):
        super(FakePortFactory, self).__init__(host)
        # Tuples of build types / (system, arch) pairs the fake port reports.
        self._all_build_types = all_build_types or ()
        self._all_systems = all_systems or ()
        # Fixed macro expansion used in place of the real port's mapping.
        self._configuration_specifier_macros = {
            'mac': ['mac10.10'],
            'win': ['win7'],
            'linux': ['trusty']
        }

    def get(self, port_name=None, options=None, **kwargs):
        """Returns an object implementing the Port interface.

        This fake object will always return the 'test' port, regardless of
        the requested port_name/options.
        """
        port = super(FakePortFactory, self).get('test', None)
        port.all_build_types = self._all_build_types
        port.all_systems = self._all_systems
        port.configuration_specifier_macros_dict = self._configuration_specifier_macros
        return port
class MockWebBrowser(object):
    """Stand-in for the webbrowser module that records the last opened URL."""

    def __init__(self):
        # URL passed to the most recent open() call; None until then.
        self.opened_url = None

    def open(self, url):
        # Record the URL instead of launching a real browser.
        self.opened_url = url
class UpdateTestExpectationsTest(LoggingTestCase):
    def setUp(self):
        """Creates the mock host/port and the RemoveFlakesOMatic under test."""
        super(UpdateTestExpectationsTest, self).setUp()
        self._mock_web_browser = MockWebBrowser()
        self._host = MockHost()
        self._port = self._host.port_factory.get('test', None)
        self._expectation_factory = FakeBotTestExpectationsFactory()
        self._flake_remover = RemoveFlakesOMatic(self._host,
                                                 self._port,
                                                 self._expectation_factory,
                                                 self._mock_web_browser)
        # Mirror the macros FakePortFactory installs so the 'test' port
        # resolves configuration specifiers the same way in every test.
        self._port.configuration_specifier_macros_dict = {
            'mac': ['mac10.10'],
            'win': ['win7'],
            'linux': ['trusty']
        }
        filesystem = self._host.filesystem
        self._write_tests_into_filesystem(filesystem)
    def tearDown(self):
        """No extra teardown beyond the base class; kept for symmetry."""
        super(UpdateTestExpectationsTest, self).tearDown()
def _write_tests_into_filesystem(self, filesystem):
test_list = ['test/a.html',
'test/b.html',
'test/c.html',
'test/d.html',
'test/e.html',
'test/f.html',
'test/g.html']
for test in test_list:
path = filesystem.join(LAYOUT_TEST_DIR, test)
filesystem.write_binary_file(path, '')
def _assert_expectations_match(self, expectations, expected_string):
self.assertIsNotNone(expectations)
stringified_expectations = "\n".join(
x.to_string() for x in expectations)
expected_string = "\n".join(
x.strip() for x in expected_string.split("\n"))
self.assertEqual(stringified_expectations, expected_string)
def _parse_expectations(self, expectations):
"""Parses a TestExpectation file given as string.
This function takes a string representing the contents of the
TestExpectations file and parses it, producing the TestExpectations
object and sets it on the Port object where the script will read it
from.
Args:
expectations: A string containing the contents of the
TestExpectations file to use.
"""
expectations_dict = OrderedDict()
expectations_dict['expectations'] = expectations
self._port.expectations_dict = lambda: expectations_dict
    def _define_builders(self, builders_dict):
        """Defines the available builders for the test.

        Args:
            builders_dict: A dictionary containing builder names to their
                attributes, see BuilderList.__init__ for the format.
        """
        # Replace the host's builder list wholesale with the given config.
        self._host.builders = BuilderList(builders_dict)
    def test_dont_remove_non_flakes(self):
        """Tests that lines that aren't flaky are not touched.

        Lines are flaky if they contain a PASS as well as at least one other
        failing result.
        """
        test_expectations_before = """
            # Even though the results show all passing, none of the
            # expectations are flaky so we shouldn't remove any.
            Bug(test) test/a.html [ Pass ]
            Bug(test) test/b.html [ Timeout ]
            Bug(test) test/c.html [ Failure Timeout ]
            Bug(test) test/d.html [ Rebaseline ]
            Bug(test) test/e.html [ NeedsManualRebaseline ]
            Bug(test) test/f.html [ NeedsRebaseline ]"""
        self._define_builders({
            "WebKit Linux Trusty": {
                "port_name": "linux-trusty",
                "specifiers": ['Trusty', 'Release']
            },
        })
        # Restrict the considered configurations to the one defined builder.
        self._port.all_build_types = ('release',)
        self._port.all_systems = (('trusty', 'x86_64'),)
        self._parse_expectations(test_expectations_before)
        # Every test passed on the bots, but no line above is of the
        # removable flaky ([ Pass <failure> ]) form.
        self._expectation_factory._all_results_by_builder = {
            'WebKit Linux Trusty': {
                "test/a.html": ["PASS", "PASS"],
                "test/b.html": ["PASS", "PASS"],
                "test/c.html": ["PASS", "PASS"],
                "test/d.html": ["PASS", "PASS"],
                "test/e.html": ["PASS", "PASS"],
                "test/f.html": ["PASS", "PASS"],
            }
        }
        updated_expectations = (
            self._flake_remover.get_updated_test_expectations())
        self._assert_expectations_match(
            updated_expectations, test_expectations_before)
    def test_dont_remove_skip(self):
        """Tests that lines with Skip are untouched.

        If a line is marked as Skip, it will eventually contain no results,
        which is indistinguishable from "All Passing" so don't remove since we
        don't know what the results actually are.
        """
        test_expectations_before = """
            # Skip expectations should never be removed.
            Bug(test) test/a.html [ Skip ]
            Bug(test) test/b.html [ Skip ]
            Bug(test) test/c.html [ Skip ]"""
        self._define_builders({
            "WebKit Linux Trusty": {
                "port_name": "linux-trusty",
                "specifiers": ['Trusty', 'Release']
            },
        })
        self._port.all_build_types = ('release',)
        self._port.all_systems = (('trusty', 'x86_64'),)
        self._parse_expectations(test_expectations_before)
        # Skipped tests may show any results (or none at all, like c.html).
        self._expectation_factory._all_results_by_builder = {
            'WebKit Linux Trusty': {
                "test/a.html": ["PASS", "PASS"],
                "test/b.html": ["PASS", "IMAGE"],
            }
        }
        updated_expectations = (
            self._flake_remover.get_updated_test_expectations())
        self._assert_expectations_match(
            updated_expectations, test_expectations_before)
    def test_dont_remove_rebaselines(self):
        """Tests that lines with rebaseline expectations are untouched."""
        test_expectations_before = """
            # Even though the results show all passing, none of the
            # expectations are flaky so we shouldn't remove any.
            Bug(test) test/a.html [ Failure NeedsRebaseline Pass ]
            Bug(test) test/b.html [ Failure Pass Rebaseline ]
            Bug(test) test/c.html [ Failure NeedsManualRebaseline Pass ]"""
        self._define_builders({
            "WebKit Linux Trusty": {
                "port_name": "linux-trusty",
                "specifiers": ['Trusty', 'Release']
            },
        })
        self._port.all_build_types = ('release',)
        self._port.all_systems = (('trusty', 'x86_64'),)
        self._parse_expectations(test_expectations_before)
        # All-pass results would normally allow removal, but the rebaseline
        # markers must keep every line.
        self._expectation_factory._all_results_by_builder = {
            'WebKit Linux Trusty': {
                "test/a.html": ["PASS", "PASS"],
                "test/b.html": ["PASS", "PASS"],
                "test/c.html": ["PASS", "PASS"]
            }
        }
        updated_expectations = (
            self._flake_remover.get_updated_test_expectations())
        self._assert_expectations_match(
            updated_expectations, test_expectations_before)
    def test_all_failure_types(self):
        """Tests that all failure types are treated as failure."""
        test_expectations_before = (
            """Bug(test) test/a.html [ Failure Pass ]
            Bug(test) test/b.html [ Failure Pass ]
            Bug(test) test/c.html [ Failure Pass ]
            Bug(test) test/d.html [ Failure Pass ]
            # Remove these two since CRASH and TIMEOUT aren't considered
            # Failure.
            Bug(test) test/e.html [ Failure Pass ]
            Bug(test) test/f.html [ Failure Pass ]""")
        self._define_builders({
            "WebKit Linux Trusty": {
                "port_name": "linux-trusty",
                "specifiers": ['Trusty', 'Release']
            },
        })
        self._port.all_build_types = ('release',)
        self._port.all_systems = (('trusty', 'x86_64'),)
        self._parse_expectations(test_expectations_before)
        # IMAGE/TEXT/IMAGE+TEXT/AUDIO all count as Failure (a-d stay);
        # CRASH and TIMEOUT do not, so e and f become removable.
        self._expectation_factory._all_results_by_builder = {
            'WebKit Linux Trusty': {
                "test/a.html": ["PASS", "IMAGE"],
                "test/b.html": ["PASS", "TEXT"],
                "test/c.html": ["PASS", "IMAGE+TEXT"],
                "test/d.html": ["PASS", "AUDIO"],
                "test/e.html": ["PASS", "CRASH"],
                "test/f.html": ["PASS", "TIMEOUT"],
            }
        }
        updated_expectations = (
            self._flake_remover.get_updated_test_expectations())
        self._assert_expectations_match(updated_expectations, (
            """Bug(test) test/a.html [ Failure Pass ]
            Bug(test) test/b.html [ Failure Pass ]
            Bug(test) test/c.html [ Failure Pass ]
            Bug(test) test/d.html [ Failure Pass ]"""))
    def test_basic_one_builder(self):
        """Tests basic functionality with a single builder.

        Test that flaky expectations with results from a single bot showing the
        expected failure isn't occurring should be removed. Results with failures
        of the expected type shouldn't be removed but other kinds of failures
        allow removal.
        """
        test_expectations_before = (
            """# Remove this since it's passing all runs.
            Bug(test) test/a.html [ Failure Pass ]
            # Remove this since, although there's a failure, it's not a timeout.
            Bug(test) test/b.html [ Pass Timeout ]
            # Keep since we have both crashes and passes.
            Bug(test) test/c.html [ Crash Pass ]""")
        self._define_builders({
            "WebKit Linux Trusty": {
                "port_name": "linux-trusty",
                "specifiers": ['Trusty', 'Release']
            },
        })
        self._port.all_build_types = ('release',)
        self._port.all_systems = (('trusty', 'x86_64'),)
        self._parse_expectations(test_expectations_before)
        # b.html's IMAGE failure doesn't match its Timeout expectation, so
        # the line is still removable; c.html's CRASH matches Crash.
        self._expectation_factory._all_results_by_builder = {
            'WebKit Linux Trusty': {
                "test/a.html": ["PASS", "PASS", "PASS"],
                "test/b.html": ["PASS", "IMAGE", "PASS"],
                "test/c.html": ["PASS", "CRASH", "PASS"],
            }
        }
        updated_expectations = (
            self._flake_remover.get_updated_test_expectations())
        self._assert_expectations_match(updated_expectations, (
            """# Keep since we have both crashes and passes.
            Bug(test) test/c.html [ Crash Pass ]"""))
    def test_all_failure_case(self):
        """Tests that results with all failures are not treated as non-flaky."""
        test_expectations_before = (
            """# Keep since it's all failures.
            Bug(test) test/a.html [ Failure Pass ]""")
        self._define_builders({
            "WebKit Linux Trusty": {
                "port_name": "linux-trusty",
                "specifiers": ['Trusty', 'Release']
            },
        })
        self._port.all_build_types = ('release',)
        self._port.all_systems = (('trusty', 'x86_64'),)
        self._parse_expectations(test_expectations_before)
        # Every run failed: the expectation is wrong but not stale, so the
        # line must be kept.
        self._expectation_factory._all_results_by_builder = {
            'WebKit Linux Trusty': {
                "test/a.html": ["IMAGE", "IMAGE", "IMAGE"],
            }
        }
        updated_expectations = (
            self._flake_remover.get_updated_test_expectations())
        self._assert_expectations_match(updated_expectations, (
            """# Keep since it's all failures.
            Bug(test) test/a.html [ Failure Pass ]"""))
    def test_empty_test_expectations(self):
        """Running on an empty TestExpectations file outputs an empty file."""
        test_expectations_before = ""
        self._define_builders({
            "WebKit Linux Trusty": {
                "port_name": "linux-trusty",
                "specifiers": ['Trusty', 'Release']
            },
        })
        self._port.all_build_types = ('release',)
        self._port.all_systems = (('trusty', 'x86_64'),)
        self._parse_expectations(test_expectations_before)
        # Bot results exist, but with no expectation lines there is nothing
        # to remove or keep.
        self._expectation_factory._all_results_by_builder = {
            'WebKit Linux Trusty': {
                "test/a.html": ["PASS", "PASS", "PASS"],
            }
        }
        updated_expectations = (
            self._flake_remover.get_updated_test_expectations())
        self._assert_expectations_match(updated_expectations, "")
def test_basic_multiple_builders(self):
"""Tests basic functionality with multiple builders."""
test_expectations_before = (
"""# Remove since it's passing on both builders.
Bug(test) test/a.html [ Failure Pass ]
# Keep since it's failing on the Mac builder.
Bug(test) test/b.html [ Failure Pass ]
# Keep since it's failing on the Linux builder.
Bug(test) test/c.html [ Failure Pass ]""")
self._define_builders({
"WebKit Linux Trusty": {
"port_name": "linux-trusty",
"specifiers": ['Trusty', 'Release']
},
"WebKit Mac10.10": {
"port_name": "mac-mac10.10",
"specifiers": ['Mac10.10', 'Release']
},
})
self._port.all_build_types = ('release',)
self._port.all_systems = (('mac10.10', 'x86'),
('trusty', 'x86_64'))
self._parse_expectations(test_expectations_before)
self._expectation_factory._all_results_by_builder = {
'WebKit Linux Trusty': {
"test/a.html": ["PASS", "PASS", | |
from pyapprox.models.wrappers import ActiveSetVariableModel
from pyapprox.cvar_regression import smooth_max_function_first_derivative,\
smooth_max_function_second_derivative
import numpy as np
from scipy.optimize import minimize, Bounds
from functools import partial
from scipy.stats import gaussian_kde as KDE
from pyapprox.configure_plots import *
import scipy.stats as ss
from pyapprox.utilities import get_all_sample_combinations
from inspect import signature
def approx_jacobian(func, x, *args, epsilon=np.sqrt(np.finfo(float).eps)):
    """Approximate the Jacobian of ``func`` at ``x`` by forward differences.

    Parameters
    ----------
    func : callable
        Maps a 1D array of shape (nvars,) to a 1D array of shape (nqoi,);
        a (nqoi, 1) column return is also accepted.
    x : np.ndarray
        Point at which to differentiate; 1D or an (nvars, 1) column.
    *args : tuple
        Extra positional arguments forwarded to ``func``.
    epsilon : float, optional
        Forward-difference step size (default: sqrt of machine epsilon).

    Returns
    -------
    jac : np.ndarray (nqoi, nvars)
        Finite-difference approximation of the Jacobian.
    """
    # Bug fix: np.asfarray was removed in NumPy 2.0; asarray with an
    # explicit float dtype is the documented replacement.
    x0 = np.asarray(x, dtype=float)
    assert x0.ndim == 1 or x0.shape[1] == 1
    f0 = np.atleast_1d(func(*((x0,)+args)))
    if f0.ndim == 2:
        assert f0.shape[1] == 1
        f0 = f0[:, 0]
    # Rows are perturbation directions; transposed to (nqoi, nvars) below.
    jac = np.zeros([len(x0), len(f0)])
    dx = np.zeros(x0.shape)
    for i in range(len(x0)):
        dx[i] = epsilon
        f1 = func(*((x0+dx,)+args))
        if f1.ndim == 2:
            assert f1.shape[1] == 1
            f1 = f1[:, 0]
        jac[i] = (f1 - f0)/epsilon
        dx[i] = 0.0
    return jac.transpose()
def eval_function_at_multiple_design_and_random_samples(function, uq_samples, design_samples):
    """
    Evaluate ``function`` at every (design, uq) sample combination.

    For functions which only take 1d arrays for uq_samples and design_samples:
    loop over all combinations and evaluate the function at each combination.
    design_samples vary slowest and uq_samples vary fastest.

    For example, if the columns of design_samples are d1, d2 and the columns
    of uq_samples are z1, z2, z3, the evaluation order is
        function(z1, d1), function(z2, d1), function(z3, d1),
        function(z1, d2), function(z2, d2), function(z3, d2)

    Returns an array of the function values in that order.
    """
    vals = []
    # put design samples first so that samples iterates over uq_samples fastest
    samples = get_all_sample_combinations(design_samples, uq_samples)
    # The first design_samples.shape[0] rows hold the design variables of
    # each combination; the remaining rows hold the uq variables.
    for xx, zz in zip(
            samples[:design_samples.shape[0]].T,
            samples[design_samples.shape[0]:].T):
        # flip xx,zz because functions assumed to take uq_samples then
        # design_samples
        vals.append(function(zz, xx))
    return np.asarray(vals)
def eval_mc_based_jacobian_at_multiple_design_samples(grad, stat_func,
                                                      uq_samples, design_samples):
    """Evaluate a Monte-Carlo-based Jacobian statistic at each design sample.

    Equivalent to
        [np.mean([grad(z, x) for z in uq_samples.T], axis=0)
         for x in design_samples.T]
    but evaluates all combinations up front, which allows better use of
    concurrent evaluations in the future (e.g. an asynchronous call over all
    sample combinations).

    TODO: combine uq_samples and design samples into one matrix and assume
    functions always take a single matrix and not two matrices.
    """
    grad_vals = eval_function_at_multiple_design_and_random_samples(
        grad, uq_samples, design_samples)
    num_design = design_samples.shape[1]
    num_uq = uq_samples.shape[1]
    # Each design sample owns a contiguous batch of num_uq gradient values.
    rows = []
    for ii in range(num_design):
        batch = grad_vals[ii*num_uq:(ii+1)*num_uq]
        rows.append(stat_func(batch))
    return np.array(rows)
def check_inputs(uq_samples, design_samples):
    """Normalize 1D sample arrays to 2D column form and sanity-check shapes.

    Parameters
    ----------
    uq_samples : np.ndarray or None
        Random (uncertain) samples; a 1D array is promoted to a column.
    design_samples : np.ndarray
        Design samples; a 1D array is promoted to a column.

    Returns
    -------
    (uq_samples, design_samples), each 2D (uq_samples may stay None). When
    both have more than one column the counts must agree.
    """
    if design_samples.ndim == 1:
        design_samples = design_samples[:, np.newaxis]
    if uq_samples is not None and uq_samples.ndim == 1:
        # Bug fix: the original promoted design_samples here, silently
        # replacing the uq samples with the design samples.
        uq_samples = uq_samples[:, np.newaxis]
    if (uq_samples is not None and
            (design_samples.shape[1] > 1 and uq_samples.shape[1] > 1)):
        assert design_samples.shape[1] == uq_samples.shape[1]
    return uq_samples, design_samples
def deterministic_lower_bound_constraint(constraint_function, lower_bound,
                                         uq_samples, design_samples):
    """Non-negative when constraint_function(...) >= lower_bound.

    Evaluates the constraint at the single design sample; scipy.optimize
    treats 'ineq' constraints as satisfied when non-negative.
    """
    uq_samples, design_samples = check_inputs(uq_samples, design_samples)
    assert design_samples.shape[1] == 1
    val = lower_bound-constraint_function(uq_samples, design_samples)
    # scipy minimize enforces constraints are non-negative so use negative here
    # to enforce the lower bound (the original comment said "upper bound",
    # but -val >= 0 iff the constraint value >= lower_bound)
    return -val
def variance_lower_bound_constraint(constraint_function, lower_bound, uq_samples,
                                    design_samples):
    """Non-negative when the sample variance of the constraint values is at
    least ``lower_bound`` (scipy's convention for satisfied 'ineq')."""
    uq_samples, design_samples = check_inputs(uq_samples, design_samples)
    assert design_samples.shape[1] == 1
    # scipy minimize enforces constraints are non-negative
    vals = constraint_function(uq_samples, design_samples)
    # np.std(...)**2 is the (biased) sample variance.
    val = lower_bound-np.std(vals)**2
    # scipy minimize enforces constraints are non-negative so use negative here
    # to enforce the lower bound
    return -val
def mean_lower_bound_constraint(constraint_function, lower_bound, uq_samples,
                                design_samples):
    """Non-negative when the squared sample mean is at least ``lower_bound``.

    NOTE(review): the mean is squared (np.mean(vals)**2), unlike the
    variance constraint where **2 converts a std into a variance. This looks
    like a copy/paste artifact -- confirm whether the intent was
    ``lower_bound - np.mean(vals)``.
    """
    uq_samples, design_samples = check_inputs(uq_samples, design_samples)
    assert design_samples.shape[1] == 1
    # scipy minimize enforces constraints are non-negative
    vals = constraint_function(uq_samples, design_samples)
    val = lower_bound-np.mean(vals)**2
    # scipy minimize enforces constraints are non-negative so use negative here
    # to enforce the lower bound
    return -val
def mean_lower_bound_constraint_jacobian(constraint_function_jacobian, uq_samples,
                                         design_samples):
    """Jacobian counterpart of mean_lower_bound_constraint.

    NOTE(review): this returns np.mean(vals)**2 of the jacobian values; by
    the chain rule the squared mean looks inconsistent with the constraint
    it differentiates -- confirm against mean_lower_bound_constraint.
    """
    uq_samples, design_samples = check_inputs(uq_samples, design_samples)
    assert design_samples.shape[1] == 1
    # scipy minimize enforces constraints are non-negative
    vals = constraint_function_jacobian(uq_samples, design_samples)
    val = -np.mean(vals)**2
    # scipy minimize enforces constraints are non-negative so use negative here
    # to match the sign convention of the constraint value
    return -val
def quantile_lower_bound_constraint(constraint_function, quantile, lower_bound,
                                    uq_samples, design_samples):
    """Non-negative when the given quantile of the constraint values is at
    least ``lower_bound`` (scipy's convention for satisfied 'ineq')."""
    uq_samples, design_samples = check_inputs(uq_samples, design_samples)
    assert design_samples.shape[1] == 1
    vals = constraint_function(uq_samples, design_samples)
    estimate = ss.mstats.mquantiles(vals, prob=[quantile])
    # scipy minimize enforces constraints are non-negative so negate the
    # shortfall to enforce the lower bound
    return -(lower_bound - estimate)
# from pyapprox.cvar_regression import smooth_conditional_value_at_risk, \
# conditional_value_at_risk
# def cvar_lower_bound_constraint(constraint_function,quantile,lower_bound,eps,
# uq_samples,design_samples):
# uq_samples,design_samples = check_inputs(uq_samples,design_samples)
# assert design_samples.shape[1]==1
# vals = constraint_function(uq_samples,design_samples)
# # -vals because we want to minimize lower tail
# val = (lower_bound-smooth_conditional_value_at_risk(0,eps,quantile,-vals))
# #val = (lower_bound-conditional_value_at_risk(-vals,quantile))
# return val
class MultipleConstraints(object):
    """Bundles several scalar constraint callables behind one callable."""

    def __init__(self, constraints):
        # List of callables, each mapping a design sample to a scalar.
        self.constraints = constraints

    def __call__(self, design_sample, constraint_idx=None):
        """Evaluate the selected constraints at ``design_sample``.

        Parameters
        ----------
        design_sample : single design sample passed to each constraint.
        constraint_idx : sequence of int, optional
            Indices of the constraints to evaluate; all by default.

        Returns
        -------
        np.ndarray of the selected constraint values, in index order.
        """
        if constraint_idx is None:
            constraint_idx = np.arange(len(self.constraints))
        values = np.empty(len(constraint_idx))
        for pos, idx in enumerate(constraint_idx):
            values[pos] = self.constraints[idx](design_sample)
        return values
class MCStatisticConstraint(object):
    """Monte-Carlo statistic constraint for use with scipy.optimize.

    Draws fresh uq samples on every call and returns the scalar constraint
    value (non-negative when satisfied) described by ``info``.
    """

    def __init__(self, constraint_function, generate_samples, info):
        # constraint_function(uq_samples, design_samples) -> values.
        self.constraint_function = constraint_function
        # Callable returning a fresh set of uq samples.
        self.generate_samples = generate_samples
        # Dict with 'type' ('quantile', 'cvar' or 'var') plus the
        # type-specific keys read in __call__.
        self.info = info

    def __call__(self, design_samples):
        uq_samples = self.generate_samples()
        constraint_type = self.info['type']
        if constraint_type == 'quantile':
            quantile = self.info['quantile']
            lower_bound = self.info['lower_bound']
            return quantile_lower_bound_constraint(
                self.constraint_function, quantile, lower_bound,
                uq_samples, design_samples)
        elif constraint_type == 'cvar':
            quantile = self.info['quantile']
            lower_bound = self.info['lower_bound']
            eps = self.info['smoothing_eps']
            # Bug fix: use the stored constraint function; the original
            # referenced the undefined name constraint_functions[ii].
            return cvar_lower_bound_constraint(
                self.constraint_function, quantile, lower_bound, eps,
                uq_samples, design_samples)
        elif constraint_type == 'var':
            # Bug fix: pass the bound read from info (the original passed an
            # undefined name lower_bound) and use self.constraint_function.
            var_lower_bound = self.info['lower_bound']
            return variance_lower_bound_constraint(
                self.constraint_function, var_lower_bound, uq_samples,
                design_samples)
        else:
            # Bug fix: format the whole type string; constraint_type[ii]
            # indexed with an undefined name and would yield one character.
            raise Exception(
                'constraint type (%s) not implemented' % constraint_type)
class DeterministicConstraint(object):
    """Deterministic lower-bound constraint evaluated at a nominal uq sample."""

    def __init__(self, constraint_function, info):
        self.constraint_function = constraint_function
        # info must supply 'lower_bound' and 'uq_nominal_sample'.
        self.info = info

    def __call__(self, design_samples):
        """Returns the non-negative-when-satisfied constraint value."""
        return deterministic_lower_bound_constraint(
            self.constraint_function,
            self.info['lower_bound'],
            self.info['uq_nominal_sample'],
            design_samples)
def setup_inequality_constraints(constraint_functions, constraints_info,
                                 uq_samples):
    """Build scipy.optimize 'ineq' constraint dicts from constraint specs.

    Parameters
    ----------
    constraint_functions : list of callables, one per constraint.
    constraints_info : list of dicts (parallel to constraint_functions),
        each with a 'type' key ('quantile', 'cvar', 'var' or
        'deterministic') plus the type-specific bound keys read below.
    uq_samples : samples bound into each constraint via functools.partial.

    Returns
    -------
    list of {'type': 'ineq', 'fun': callable} dicts for scipy.optimize.

    Raises
    ------
    Exception if a spec has an unknown 'type'.
    """
    constraints = []
    for ii in range(len(constraint_functions)):
        info = constraints_info[ii]
        constraint_type = info['type']
        if constraint_type == 'quantile':
            ineq_cons_fun = partial(
                quantile_lower_bound_constraint, constraint_functions[ii],
                info['quantile'], info['quantile_lower_bound'], uq_samples)
        elif constraint_type == 'cvar':
            ineq_cons_fun = partial(
                cvar_lower_bound_constraint, constraint_functions[ii],
                info['quantile'], info['cvar_lower_bound'],
                info['smoothing_eps'], uq_samples)
        elif constraint_type == 'var':
            ineq_cons_fun = partial(
                variance_lower_bound_constraint, constraint_functions[ii],
                info['var_lower_bound'], uq_samples)
        elif constraint_type == 'deterministic':
            ineq_cons_fun = partial(
                deterministic_lower_bound_constraint, constraint_functions[ii],
                info['lower_bound'], uq_samples)
        else:
            # Bug fix: format the whole type string; the original used
            # constraint_type[ii], which reports a single character of the
            # type name (e.g. 'b' instead of 'bogus').
            raise Exception(
                'constraint type (%s) not implemented' % constraint_type)
        constraints.append({'type': 'ineq', 'fun': ineq_cons_fun})
    return constraints
def run_design(objective, init_design_sample,
               constraints, bounds, optim_options):
    """Run an SLSQP design optimization and record the iterate history.

    Parameters
    ----------
    objective : callable mapping a 1D design sample to a scalar.
    init_design_sample : np.ndarray (nvars, 1); column 0 is the start point.
    constraints : callable usable as fmin_slsqp's f_ieqcons.
    bounds : sequence of (low, high) pairs, one per design variable.
    optim_options : dict of optimizer options (currently unused; see the
        commented-out minimize() call below).

    Returns
    -------
    res : object with .x (optimum, 1D array) and .fun (objective value).
    opt_history : np.ndarray (nvars, niters) of the recorded iterates.
    """
    opt_history = [init_design_sample[:, 0]]

    def callback(xk):
        # Record each accepted iterate; diagnostics kept below for debugging.
        opt_history.append(xk)
        # print(objective(xk))
        #print([constraints[ii]['fun'](xk) for ii in [0,1]])
    # opt_method = 'SLSQP'
    # res = minimize(
    #    objective, init_design_sample[:,0], method=opt_method, jac=None,
    #    constraints=constraints,
    #    options=optim_options,bounds=bounds,callback=callback)
    from scipy.optimize import fmin_slsqp
    res = fmin_slsqp(objective, init_design_sample[:, 0], f_ieqcons=constraints,
                     bounds=bounds, callback=callback, full_output=True)  # , **optim_options)

    class result():
        # Minimal adapter exposing the .x/.fun interface of
        # scipy.optimize.minimize results.
        def __init__(self, x, fun):
            self.x = np.atleast_1d(x)
            self.fun = fun
    # With full_output=True, fmin_slsqp returns (x, fx, its, imode, smode).
    res = result(res[0], res[1])
    opt_history = (np.array(opt_history)).T
    return res, opt_history
def plot_optimization_history(obj_function, constraints, uq_samples, opt_history,
                              plot_limits):
    """Plot objective/constraint contours with optimizer iterates overlaid.

    opt_history is a (2, niters) array of 2D design iterates; each point is
    annotated with its iteration number on every subplot.
    """
    # fig,axs=plot_optimization_objective_and_constraints_2D(
    #    [constraints[ii]['fun'] for ii in range(len(constraints))],
    #    partial(obj_function,uq_samples[:,0]),plot_limits)
    fig, axs = plot_optimization_objective_and_constraints_2D(
        constraints, partial(obj_function, uq_samples[:, 0]), plot_limits)
    # objective can only be evaluated at one uq_sample thus use of
    # uq_samples[:,0]
    for ii in range(len(axs)):
        axs[ii].plot(opt_history[0, :], opt_history[1, :], 'ko')
        for jj, txt in enumerate(range(opt_history.shape[1])):
            axs[ii].annotate(
                '%d' % txt, (opt_history[0, jj], opt_history[1, jj]))
    return fig, axs
# def plot_optimization_objective_and_constraints_2D(
# constraint_functions,objective,plot_limits):
def plot_optimization_objective_and_constraints_2D(
        constraints, objective, plot_limits):
    """Contour-plot each constraint and the objective over a 2D design space.

    One subplot per constraint plus one for the objective; the zero level
    set of every constraint is drawn on every subplot (solid on its own
    panel, dashed elsewhere).

    NOTE(review): the subplot grid is hard-coded to 3 panels, which breaks
    for more than two constraints -- confirm intended usage.
    """
    from pyapprox.visualization import get_meshgrid_function_data
    num_pts_1d = 100
    num_contour_levels = 30
    fig, axs = plt.subplots(1, 3, figsize=(3*8, 6))
    # for ii in range(len(constraint_functions)+1):
    for ii in range(len(constraints.constraints)+1):
        # if ii==len(constraint_functions):
        if ii == len(constraints.constraints):
            function = objective
        else:
            # def function(design_samples):
            #    vals = np.empty((design_samples.shape[1]))
            #    for jj in range(design_samples.shape[1]):
            #        vals[jj]=constraint_functions[ii](design_samples[:,jj])
            #    return vals
            # Evaluate the ii-th constraint column-by-column.
            def function(design_samples):
                vals = np.empty((design_samples.shape[1]))
                for jj in range(design_samples.shape[1]):
                    vals[jj] = constraints(design_samples[:, jj], [ii])
                return vals
        X, Y, Z = get_meshgrid_function_data(
            function, plot_limits, num_pts_1d)
        norm = None
        cset = axs[ii].contourf(
            X, Y, Z, levels=np.linspace(Z.min(), Z.max(), num_contour_levels),
            cmap=mpl.cm.coolwarm,
            norm=norm)
        # for kk in range(len(constraint_functions)):
        for kk in range(len(constraints.constraints)):
            if ii == kk:
                ls = '-'
            else:
                ls = '--'
            axs[kk].contour(X, Y, Z, levels=[0], colors='k', linestyles=ls)
        plt.colorbar(cset, ax=axs[ii])
    return fig, axs
def plot_constraint_pdfs(constraint_functions, uq_samples, design_sample,
                         fig_pdf=None, axs_pdf=None, label=None, color=None):
    """KDE-plot the PDF of each constraint's values at a fixed design sample.

    Creates the (figure, axes) pair on first call and returns it, so
    repeated calls can overlay PDFs for different designs.
    """
    colors = ['b', 'gray']  # NOTE(review): unused; candidate for removal
    nconstraints = len(constraint_functions)
    if axs_pdf is None:
        fig_pdf, axs_pdf = plt.subplots(
            1, nconstraints, figsize=(nconstraints*8, 6))
    for ii in range(nconstraints):
        # evaluate constraint function at each of the uq samples
        constraint_function_vals = constraint_functions[ii](
            uq_samples, design_sample)
        constraint_kde = KDE(constraint_function_vals)
        yy = np.linspace(constraint_function_vals.min(),
                         constraint_function_vals.max(), 101)
        axs_pdf[ii].fill_between(yy, 0, constraint_kde(yy), alpha=0.5, label=label,
                                 color=color)
        # Mark the constraint boundary (zero).
        axs_pdf[ii].axvline(0, color='k')
        # axs_pdf[ii].axvline(constraints[ii]['fun'](design_sample),color='r')
    return fig_pdf, axs_pdf
def plot_constraint_cdfs(constraints, constraint_functions, uq_samples,
design_sample, quantile, fig_cdf, axs_cdf=None, label=None,
color=None):
nconstraints = len(constraint_functions)
if axs_cdf is None:
fig_cdf, axs_cdf = plt.subplots(
1, nconstraints, figsize=(nconstraints*8, 6))
for ii in range(nconstraints):
constraint_function_vals = constraint_functions[ii](
uq_samples, design_sample)
cvar = (conditional_value_at_risk(-constraint_function_vals, 0.9))
cvars = (smooth_conditional_value_at_risk(
0, 1e-3, 0.9, -constraint_function_vals))
print('cvar', cvar)
print('cvars', cvars)
#constraint_val = constraints[ii]['fun'](design_sample)
constraint_val = constraints(design_sample, [ii])
constraint_function_vals.sort()
cdf_vals = np.linspace(0, 1, constraint_function_vals.shape[0]+1)[1:]
axs_cdf[ii].plot(constraint_function_vals, cdf_vals, label=label,
color=color)
#I = np.where(constraint_function_vals<=constraint_val)[0]
I = np.where(constraint_function_vals <= 0)[0]
axs_cdf[ii].fill_between(
constraint_function_vals[I], 0, cdf_vals[I], alpha=0.5, color=color)
axs_cdf[ii].axvline(0, color='k')
J = np.where(constraint_function_vals <= 0)[0]
#print (J.shape[0]/float(constraint_function_vals.shape[0]),'p failure',constraint_val,J.shape[0])
# Compute the constraint value. This combines constraint_function_vals
# into a scalar value
# axs_cdf[ii].axvline(constraint_val,color='r')
| |
178, 364, 881, 102, 889, 873, 976,
191, 973, 742, 680, 718, 585, 924, 638, 936,
62, 644],
[565, 513, 575, 193, 561, 750, 953, 226, 691,
562, 655, 294, 877, 651, 343, 328, 599, 277,
883, 447],
[224, 782, 630, 902, 677, 276, 35, 489, 941,
122, 950, 593, 808, 738, 901, 228, 621, 730,
567, 484],
[252, 491, 679, 882, 157, 6, 674, 542, 384, 508,
93, 981, 502, 342, 732, 265, 135, 309, 814,
377],
[609, 16, 276, 999, 676, 620, 662, 276, 598, 79,
983, 105, 959, 328, 7, 486, 112, 484, 117,
970],
[592, 391, 807, 39, 654, 757, 676, 569, 589,
920, 935, 443, 821, 220, 406, 551, 649, 605,
753, 277],
[474, 183, 917, 831, 371, 55, 70, 631, 827, 1,
526, 648, 466, 575, 916, 776, 237, 18, 671,
244]]),
[907, 736, 956, 378, 282, 128, 890, 360, 476, 774, 662,
76, 440, 146, 260, 503, 594, 753, 601, 758, 616, 885,
693, 77, 591, 900, 172, 451, 902, 278, 232, 609, 644,
447, 484, 377, 970, 277, 244, 671, 18, 237, 776, 916,
575, 466, 648, 526, 1, 827, 631, 70, 55, 371, 831,
917, 183, 474, 592, 609, 252, 224, 565, 190, 393, 826,
842, 626, 824, 212, 861, 567, 490, 449, 252, 154, 508,
696, 345, 591, 993, 883, 517, 744, 441, 519, 59, 241,
932, 612, 853, 681, 580, 189, 645, 850, 851, 795, 419,
648, 9, 488, 931, 943, 400, 62, 883, 567, 814, 117,
753, 605, 649, 551, 406, 220, 821, 443, 935, 920, 589,
569, 676, 757, 654, 39, 807, 391, 16, 491, 782, 513,
982, 920, 354, 984, 774, 811, 651, 605, 200, 627, 571,
776, 960, 174, 414, 719, 865, 586, 514, 147, 885, 941,
624, 902, 920, 281, 788, 666, 368, 526, 37, 488, 789,
616, 23, 901, 64, 958, 936, 277, 730, 309, 484, 112,
486, 7, 328, 959, 105, 983, 79, 598, 276, 662, 620,
676, 999, 276, 679, 630, 575, 178, 761, 702, 751, 505,
117, 759, 491, 336, 711, 489, 461, 175, 497, 36, 529,
700, 833, 843, 360, 129, 148, 858, 887, 601, 586, 846,
745, 177, 421, 491, 464, 678, 45, 638, 599, 621, 135,
265, 732, 342, 502, 981, 93, 508, 384, 542, 674, 6,
157, 882, 902, 193, 364, 889, 996, 823, 848, 519, 532,
552, 555, 829, 183, 271, 1, 656, 384, 296, 344, 478,
251, 806, 930, 50, 500, 500, 834, 332, 455, 997, 120,
200, 924, 328, 228, 901, 738, 808, 593, 950, 122, 941,
489, 35, 276, 677, 561, 881, 953, 583, 776, 925, 107,
972, 577, 849, 867, 680, 585, 750, 865, 85, 520, 229,
438, 712, 716, 203, 644, 893, 340, 237, 902, 585, 343,
651, 877, 294, 655, 562, 691, 226, 953, 750, 102, 112,
195, 261, 320, 371, 701, 316, 923, 18, 304, 237, 105,
132, 420, 458, 619, 501, 639, 308, 328, 800, 718, 680,
742, 973, 191, 976, 873, 889, 493, 939, 411, 635, 638,
178, 862, 383, 683, 647, 361, 804, 625, 778, 555, 337,
37, 549, 212, 937, 517, 891, 381, 239, 85, 841, 931,
397, 381, 19, 154, 248, 167, 991, 830, 649, 490, 491,
700, 980, 123, 828])
    def test_snail_095(self):
        """Generated fixture: a 16x16 matrix and its expected snail ordering."""
        self.assertEqual(
            snail([[247, 36, 147, 670, 85, 302, 290, 318, 625, 571, 925, 293,
                    329, 386, 513, 32],
                   [886, 355, 260, 484, 589, 633, 64, 999, 160, 927, 937, 306,
                    722, 480, 171, 593],
                   [243, 262, 207, 601, 850, 221, 834, 478, 394, 6, 926, 500,
                    705, 771, 947, 559],
                   [894, 64, 204, 221, 196, 17, 465, 978, 251, 395, 208, 623,
                    457, 274, 198, 982],
                   [826, 24, 211, 166, 285, 800, 358, 180, 336, 708, 965, 855,
                    607, 283, 186, 114],
                   [177, 887, 42, 168, 420, 708, 632, 953, 929, 246, 355, 617,
                    576, 783, 892, 527],
                   [393, 714, 22, 905, 724, 749, 226, 128, 689, 924, 203, 353,
                    502, 583, 363, 249],
                   [633, 275, 241, 730, 109, 748, 482, 465, 672, 567, 739, 772,
                    677, 299, 492, 832],
                   [701, 706, 283, 866, 551, 893, 928, 136, 822, 892, 100, 11,
                    686, 759, 780, 799],
                   [818, 515, 137, 699, 122, 187, 587, 708, 819, 842, 689, 234,
                    229, 763, 484, 512],
                   [770, 663, 833, 676, 994, 54, 207, 133, 444, 707, 541, 23,
                    588, 214, 752, 980],
                   [121, 54, 432, 672, 767, 47, 945, 497, 433, 422, 913, 688,
                    703, 289, 933, 736],
                   [80, 683, 447, 359, 245, 935, 348, 196, 118, 637, 938, 270,
                    532, 97, 647, 329],
                   [385, 201, 425, 426, 579, 166, 983, 31, 646, 810, 156, 102,
                    151, 13, 212, 127],
                   [677, 439, 224, 931, 557, 572, 31, 122, 107, 812, 796, 934,
                    956, 74, 372, 311],
                   [807, 154, 33, 598, 333, 42, 7, 937, 312, 911, 186, 918,
                    962, 554, 746, 436]]),
            [247, 36, 147, 670, 85, 302, 290, 318, 625, 571, 925, 293, 329,
             386, 513, 32, 593, 559, 982, 114, 527, 249, 832, 799, 512, 980,
             736, 329, 127, 311, 436, 746, 554, 962, 918, 186, 911, 312, 937,
             7, 42, 333, 598, 33, 154, 807, 677, 385, 80, 121, 770, 818, 701,
             633, 393, 177, 826, 894, 243, 886, 355, 260, 484, 589, 633, 64,
             999, 160, 927, 937, 306, 722, 480, 171, 947, 198, 186, 892, 363,
             492, 780, 484, 752, 933, 647, 212, 372, 74, 956, 934, 796, 812,
             107, 122, 31, 572, 557, 931, 224, 439, 201, 683, 54, 663, 515,
             706, 275, 714, 887, 24, 64, 262, 207, 601, 850, 221, 834, 478,
             394, 6, 926, 500, 705, 771, 274, 283, 783, 583, 299, 759, 763,
             214, 289, 97, 13, 151, 102, 156, 810, 646, 31, 983, 166, 579,
             426, 425, 447, 432, 833, 137, 283, 241, 22, 42, 211, 204, 221,
             196, 17, 465, 978, 251, 395, 208, 623, 457, 607, 576, 502, 677,
             686, 229, 588, 703, 532, 270, 938, 637, 118, 196, 348, 935, 245,
             359, 672, 676, 699, 866, 730, 905, 168, 166, 285, 800, 358, 180,
             336, 708, 965, 855, 617, 353, 772, 11, 234, 23, 688, 913, 422,
             433, 497, 945, 47, 767, 994, 122, 551, 109, 724, 420, 708, 632,
             953, 929, 246, 355, 203, 739, 100, 689, 541, 707, 444, 133, 207,
             54, 187, 893, 748, 749, 226, 128, 689, 924, 567, 892, 842, 819,
             708, 587, 928, 482, 465, 672, 822, 136])
def test_snail_096(self):
self.assertEqual(snail([[433, 873, 34, 538, 182, 479, 447, 919, 491,
799, 321, 798, 96, 351, 199, 595, 384],
[688, 520, 440, 10, 768, 283, 286, 980, 786,
632, 724, 772, 776, 791, 526, 902, 143],
[221, 380, 963, 134, 81, 12, 212, 931, 854, 929,
258, 266, 191, 692, 975, 245, 686],
[371, 60, 849, 373, 934, 222, 750, 480, 817,
384, 623, 223, 965, 716, 502, 306, 419],
[137, 668, 412, 520, 759, 695, 35, 791, 512,
272, 880, 453, 79, 2, 813, 383, 715],
[350, 505, 927, 713, 478, 969, 462, 3, 343, 237,
219, 780, 231, 486, 539, 82, 129],
[405, 363, 901, 599, 117, 102, 317, 683, 880,
226, 757, 863, 175, 434, 903, 555, 152],
[918, 331, 443, 864, 933, 126, 463, 526, 570,
243, 866, 184, 895, 478, 413, 143, 900],
[976, 855, 41, 630, 829, 195, 443, 10, 447, 401,
592, 779, 213, 162, 359, 592, 496],
[892, 131, 875, 900, 416, 266, 524, 162, 561,
14, 148, 103, 869, 412, 229, 490, 961],
[589, 282, 373, 491, 878, 25, 541, 207, 642,
380, 971, 581, 721, 500, 135, 98, 425],
[523, 846, 203, 737, 445, 213, 138, 238, 295,
272, 338, 760, 539, 354, 195, 109, 271],
[948, 521, 513, 819, 497, 73, 487, 760, 899,
687, 330, 409, 476, 725, 3, 261, 101],
[690, 406, 882, | |
<filename>sandbox/grist/test_engine.py
import difflib
import functools
import json
import unittest
from collections import namedtuple
from pprint import pprint
import six
import actions
import column
import engine
import logger
import useractions
import testutil
import objtypes
# Module-level logger for these tests.
log = logger.Logger(__name__, logger.DEBUG)
# These are for use in verifying metadata using assertTables/assertViews methods. E.g.
# self.assertViews([View(1, sections=[Section(1, parentKey="record", tableRef=1, fields=[
# Field(1, colRef=11) ]) ]) ])
Table = namedtuple('Table', ('id tableId primaryViewId summarySourceTable columns'))
Column = namedtuple('Column', ('id colId type isFormula formula summarySourceCol'))
View = namedtuple('View', 'id sections')
Section = namedtuple('Section', 'id parentKey tableRef fields')
Field = namedtuple('Field', 'id colRef')
# Python 2/3 compatibility shims: alias the modern assertion names onto the
# legacy unittest spellings so tests can use assertRaisesRegex/assertRegex
# regardless of interpreter version.
unittest.TestCase.assertRaisesRegex = unittest.TestCase.assertRaisesRegexp
unittest.TestCase.assertRegex = unittest.TestCase.assertRegexpMatches
class EngineTestCase(unittest.TestCase):
"""
Provides functionality for verifying engine actions and data, which is general enough to be
useful for other tests. It is also used by TestEngine below.
"""
# Place to keep the original log handler (which we modify for the duration of the test).
# We can't use cls._orig_log_handler directly because then Python it's an actual class method.
_orig_log_handler = []
@classmethod
def setUpClass(cls):
cls._orig_log_handler.append(logger.set_handler(testutil.limit_log_stderr(logger.WARN)))
@classmethod
def tearDownClass(cls):
logger.set_handler(cls._orig_log_handler.pop())
def setUp(self):
"""
Initial setup for each test case.
"""
self.engine = engine.Engine()
self.engine.load_empty()
# Set up call tracing to count calls (formula evaluations) for each column for each table.
self.call_counts = {}
def trace_call(col_obj, _rec):
# Ignore formulas in metadata tables for simplicity. Such formulas are mostly private, and
# it would be annoying to fix tests every time we change them.
if not col_obj.table_id.startswith("_grist_"):
tmap = self.call_counts.setdefault(col_obj.table_id, {})
tmap[col_obj.col_id] = tmap.get(col_obj.col_id, 0) + 1
self.engine.formula_tracer = trace_call
# This is set when a test case is wrapped by `test_engine.test_undo`.
self._undo_state_tracker = None
@classmethod
def _getEngineDataLines(cls, engine_data, col_names=[]):
"""
Helper for assertEqualEngineData, which returns engine data represented as lines of text
suitable for diffing. If col_names is given, it determines the order of columns (columns not
found in this list are included in the end and sorted by name).
"""
sort_keys = {c: i for i, c in enumerate(col_names)}
ret = []
for table_id, table_data in sorted(engine_data.items()):
ret.append("TABLE %s\n" % table_id)
col_items = sorted(table_data.columns.items(),
key=lambda c: (sort_keys.get(c[0], float('inf')), c))
col_items.insert(0, ('id', table_data.row_ids))
table_rows = zip(*[[col_id] + values for (col_id, values) in col_items])
ret.extend(json.dumps(row) + "\n" for row in table_rows)
return ret
def assertEqualDocData(self, observed, expected, col_names=[]):
"""
Compare full engine data, as a mapping of table_ids to TableData objects, and reporting
differences with a customized diff (similar to the JSON representation in the test script).
"""
enc_observed = actions.encode_objects(observed)
enc_expected = actions.encode_objects(expected)
if enc_observed != enc_expected:
o_lines = self._getEngineDataLines(enc_observed, col_names)
e_lines = self._getEngineDataLines(enc_expected, col_names)
self.fail("Observed data not as expected:\n" +
"".join(difflib.unified_diff(e_lines, o_lines,
fromfile="expected", tofile="observed")))
def assertCorrectEngineData(self, expected_data):
"""
Verifies that the data engine contains the same data as the given expected data,
which should be a dictionary mapping table names to TableData objects.
"""
expected_output = actions.decode_objects(expected_data)
meta_tables = self.engine.fetch_table("_grist_Tables")
output = {t: self.engine.fetch_table(t) for t in meta_tables.columns["tableId"]}
output = testutil.replace_nans(output)
self.assertEqualDocData(output, expected_output)
def getFullEngineData(self):
return testutil.replace_nans({t: self.engine.fetch_table(t) for t in self.engine.tables})
def assertPartialData(self, table_name, col_names, row_data):
"""
Verifies that the data engine contains the right data for the given col_names (ignoring any
other columns).
"""
expected = testutil.table_data_from_rows(table_name, col_names, row_data)
observed = self.engine.fetch_table(table_name, private=True)
ignore = set(observed.columns) - set(expected.columns)
for col_id in ignore:
del observed.columns[col_id]
self.assertEqualDocData({table_name: observed}, {table_name: expected})
action_group_action_fields = ("stored", "undo", "calc", "direct")
@classmethod
def _formatActionGroup(cls, action_group, use_repr=False):
"""
Helper for assertEqualActionGroups below.
"""
lines = ["{"]
for (k, action_list) in sorted(action_group.items()):
if k in cls.action_group_action_fields:
for a in action_list:
rep = repr(a) if use_repr else json.dumps(a, sort_keys=True)
lines.append("%s: %s," % (k, rep))
else:
lines.append("%s: %s," % (k, json.dumps(action_list)))
lines.append("}")
return lines
def assertEqualActionGroups(self, observed, expected):
"""
Compare grouped doc actions, reporting differences with a customized diff
(a bit more readable than unittest's usual diff).
"""
# Do some clean up on the observed data.
observed = testutil.replace_nans(observed)
# Convert observed and expected actions into a comparable form.
for k in self.action_group_action_fields:
if k in observed:
observed[k] = [get_comparable_repr(v) for v in observed[k]]
if k in expected:
expected[k] = [get_comparable_repr(v) for v in expected[k]]
if observed != expected:
o_lines = self._formatActionGroup(observed)
e_lines = self._formatActionGroup(expected)
self.fail(("Observed out actions not as expected:\n") +
"\n".join(difflib.unified_diff(e_lines, o_lines, n=3, lineterm="",
fromfile="expected", tofile="observed")))
def assertOutActions(self, out_action_group, expected_group):
"""
Compares action group returned from engine.apply_user_actions() to expected actions as listed
in testscript. The array of retValues is only checked if present in expected_group.
"""
for k in self.action_group_action_fields:
# For comparing full actions, treat omitted groups (e.g. "calc") as expected to be empty.
expected_group.setdefault(k, [])
observed = {k: getattr(out_action_group, k) for k in self.action_group_action_fields }
if "retValue" in expected_group:
observed["retValue"] = out_action_group.retValues
self.assertEqualActionGroups(observed, expected_group)
def assertPartialOutActions(self, out_action_group, expected_group):
"""
Compares a single action group as returned from engine.apply_user_actions() to expected
actions, checking only those fields that are included in the expected_group dict.
"""
observed = {k: getattr(out_action_group, k) for k in expected_group}
self.assertEqualActionGroups(observed, expected_group)
def dump_data(self):
"""
Prints a dump of all engine data, for help in writing / debugging tests.
"""
output = {t: self.engine.fetch_table(t) for t in self.engine.schema}
output = testutil.replace_nans(output)
output = actions.encode_objects(output)
print(''.join(self._getEngineDataLines(output)))
def dump_actions(self, out_actions):
"""
Prints out_actions in human-readable format, for help in writing / debugging tets.
"""
pprint({
k: [get_comparable_repr(action) for action in getattr(out_actions, k)]
for k in self.action_group_action_fields
})
  def assertTableData(self, table_name, data=[], cols="all", rows="all", sort=None):
    """
    Verify some or all of the data in the table named `table_name`.
    - data: an array of rows, with first row containing column names starting with "id", and
      other rows also all starting with row_id.
    - cols: may be "all" (default) to match all columns, or "subset" to match only those listed.
    - rows: may be "all" (default) to match all rows, or "subset" to match only those listed,
      or a function called with a Record to return whether to include it.
    - sort: optionally a key function called with a Record, for sorting observed rows.
    """
    # Note: the mutable default for `data` is only ever read, never mutated,
    # so it is safe; calling with no data argument fails the assert below.
    assert data[0][0] == 'id', "assertRecords requires 'id' as the first column"
    col_names = data[0]
    row_data = data[1:]
    expected = testutil.table_data_from_rows(table_name, col_names, row_data)
    table = self.engine.tables[table_name]
    # Real (non-virtual) columns, excluding the implicit "id" column.
    columns = [c for c in table.all_columns.values()
               if c.col_id != "id" and not column.is_virtual_column(c.col_id)]
    if cols == "all":
      pass
    elif cols == "subset":
      columns = [c for c in columns if c.col_id in col_names]
    else:
      raise ValueError("assertRecords: invalid value for cols: %s" % (cols,))
    if rows == "all":
      row_ids = list(table.row_ids)
    elif rows == "subset":
      # Row ids are the first element of each expected data row.
      row_ids = [row[0] for row in row_data]
    elif callable(rows):
      row_ids = [r.id for r in table.user_table.all if rows(r)]
    else:
      raise ValueError("assertRecords: invalid value for rows: %s" % (rows,))
    if sort:
      row_ids.sort(key=lambda r: sort(table.get_record(r)))
    # raw_get returns the stored value without formula evaluation side-effects.
    # (The `c.col_id != "id"` filter is redundant -- "id" was excluded above.)
    observed_col_data = {c.col_id: [c.raw_get(r) for r in row_ids] for c in columns if c.col_id != "id"}
    observed = actions.TableData(table_name, row_ids, observed_col_data)
    self.assertEqualDocData({table_name: observed}, {table_name: expected},
                            col_names=col_names)
def assertTables(self, list_of_tables):
"""
Verifies that the given Table test-records correspond to the metadata for tables/columns.
"""
self.assertPartialData('_grist_Tables',
["id", "tableId", "primaryViewId", "summarySourceTable"],
sorted((tbl.id, tbl.tableId, tbl.primaryViewId, tbl.summarySourceTable)
for tbl in list_of_tables))
self.assertPartialData('_grist_Tables_column',
["id", "parentId", "colId", "type",
"isFormula", "formula", "summarySourceCol"],
sorted((col.id, tbl.id, col.colId, col.type,
col.isFormula, col.formula, col.summarySourceCol)
for tbl in list_of_tables
for col in tbl.columns))
def assertFormulaError(self, exc, type_, message, tracebackRegexp=None):
self.assertIsInstance(exc, objtypes.RaisedException)
self.assertIsInstance(exc.error, type_)
self.assertEqual(str(exc.error), message)
if tracebackRegexp:
self.assertRegex(exc.details, tracebackRegexp)
def assertViews(self, list_of_views):
"""
Verifies that the given View test-records correspond to the metadata for views/sections/fields.
"""
self.assertPartialData('_grist_Views', ["id"],
[[view.id] for view in list_of_views])
self.assertPartialData('_grist_Views_section', ["id", "parentId", "parentKey", "tableRef"],
sorted((sec.id, view.id, sec.parentKey, sec.tableRef)
for view in list_of_views
for sec in view.sections))
self.assertTableData('_grist_Views_section_field', sort=(lambda r: r.parentPos),
cols="subset",
data=[["id", "parentId", "colRef"]] + sorted(
((field.id, sec.id, field.colRef)
for view in list_of_views
for sec in view.sections
for field in sec.fields), key=lambda t: t[1])
)
def load_sample(self, sample):
"""
Load the data engine with given sample data. The sample is a dict with keys "SCHEMA" and
"DATA", each a dictionary mapping table names to actions.TableData objects. "SCHEMA" contains
"_grist_Tables" and "_grist_Tables_column" tables.
"""
schema = sample["SCHEMA"]
self.engine.load_meta_tables(schema['_grist_Tables'], schema['_grist_Tables_column'])
for data in six.itervalues(sample["DATA"]):
self.engine.load_table(data)
# We used to call load_done() at the end; in practice, Grist's ActiveDoc does not call
# load_done, but | |
# Third-party dependencies (plus stdlib `time`, grouped here by the author).
try:
    import numpy as np
    import networkx as nx
    from hmm_profile import reader
    from hmm_profile.models import HMM
    import time
except ImportError:
    # NOTE(review): execution continues after this message, so a missing
    # package will resurface later as a NameError; consider re-raising or
    # exiting here instead.
    print('[Error] Seems you do not have the required python packages. Please check it.')
# python modules
from math import log
from typing import List, Dict, Tuple, Any
from collections.abc import Mapping
from typing import NewType
# NanoVir modules
from codon_table import standard_codon
from correct import DAG, Idx, NodeId, Base
ProteinCode = NewType('ProteinCode', str)
class PHMM:
    """The class encapsulating an HMM_profile HMM object.

    The overall structure of this class is adapted from
    https://github.com/janmax/Profile-HMM.

    Attributes:
        _phmm: A HMM_profile HMM object.
        _alphabet: A list of alphabets used in the HMM. In this module, protein codes.
        _alphabet_to_index: A mapping from each protein code to its index in _alphabet.
        _len: An integer count of the residues in the HMM.
        _transmissions: A numpy array of transition probabilities, one row per
            transition kind in the order m->m, m->i, m->d, i->m, i->i, d->m, d->d
            (see transfer_transmissions), one column per residue.
        _emissions_from_M: A numpy array of match-state emission probabilities;
            rows are alphabet characters, columns are residues 0..len where
            column 0 holds the start-step probabilities.
        _emissions_from_I: A numpy array of insertion-state emission
            probabilities, laid out like _emissions_from_M.
    """

    def __init__(self, phmm_ : HMM):
        """Inits PHMM with a given HMM_profile HMM object."""
        # Getting the length
        self._phmm : HMM = phmm_
        self._alphabet : List[ProteinCode] = [ProteinCode(x) for x in self._phmm.metadata.alphabet]
        self._alphabet_to_index : Dict[ProteinCode, Idx] = {value: Idx(index) for index, value in enumerate(self._alphabet)}
        self._len : int = self._phmm.metadata.length
        # Transfer the probabilities out of the HMM object into numpy arrays
        # once, so the Viterbi inner loops only perform array indexing.
        self._transmissions : np.ndarray = self.transfer_transmissions()
        self._emissions_from_M : np.ndarray
        self._emissions_from_I : np.ndarray
        self._emissions_from_M, self._emissions_from_I = self.transfer_emissons()

    def __len__(self) -> int:
        """Return the number of residues in the HMM."""
        return self._len

    def modified_viterbi(self, dag_ : DAG) -> List[NodeId]:
        """Return the path corrected with the modified Viterbi algorithm.

        Generates data objects holding the predecessors, ancestors and base of
        each node, with every node id converted into its topological-order
        index, so that the inner routine can operate on plain integers.
        """
        # predecessors[i]: indices of the immediate predecessors of node i.
        predecessors : List[List[Idx]] = [[]] * len(dag_)
        for i in range(len(dag_)):
            predecessors[i] = [ dag_.node_id_to_index(predecessor_id) for predecessor_id in dag_.predecessors(dag_.index_to_node_id(i)) ]
        # ancestors[i]: one [parent, grandparent, great-grandparent] index
        # chain per distinct 3-step path leading into node i.
        # NOTE(review): `[[]] * n` aliases a single shared empty list, but each
        # element is rebound (never mutated) before any append, so it is safe
        # as written -- just fragile to edit.
        ancestors : List[List[List[Idx]]] = [[]]* len(dag_)
        for i in range(len(dag_)):
            for ancestor_list in dag_.ancestors(dag_.index_to_node_id(i)):
                index_converted_ancestor_list : List[Idx] = [ dag_.node_id_to_index(ancestor_id) for ancestor_id in ancestor_list ]
                if not ancestors[i]:
                    ancestors[i] = [ index_converted_ancestor_list ]
                else:
                    ancestors[i].append(index_converted_ancestor_list)
        # bases[i]: the base character stored on node i.
        bases : List[Base]= [''] * len(dag_)
        for i in range(len(dag_)):
            bases[i] = Base(dag_.base(dag_.index_to_node_id(i)))
        print("Viterbi algorithm starts", time.strftime("%H:%M:%S", time.localtime()))
        tr, max_tr_idx = self._modified_viterbi(
            predecessors,
            ancestors,
            bases,
            self._emissions_from_M,
            self._emissions_from_I,
            self._transmissions,
            standard_codon,
            self._alphabet_to_index,
            len(dag_),
            len(self)
        )
        print("Viterbi algorithm done", time.strftime("%H:%M:%S", time.localtime()))
        print("Viterbi traceback starts", time.strftime("%H:%M:%S", time.localtime()))
        corrected_path : List[NodeId] = [ dag_.index_to_node_id(x) for x in self.traceback(tr, max_tr_idx) ]
        print("Viterbi traceback done", time.strftime("%H:%M:%S", time.localtime()))
        return corrected_path

    #@staticmethod
    #@jit(nopython=True)
    def _modified_viterbi(self, predecessors_ : List[List[Idx]], ancestors_ : List[List[List[Idx]]], bases_ : List[Base], e_M_ : np.ndarray, e_I_ : np.ndarray, a_ : np.ndarray, codon_dict_ : Mapping, alphabet_to_index_ : Dict[ProteinCode, Idx], N_ : int, L_ : int):
        """Inner dynamic-programming loop of the modified Viterbi algorithm.

        Args:
            predecessors_: Immediate predecessor indices per node.
            ancestors_: [parent, grandparent, great-grandparent] chains per node.
            bases_: Base character per node.
            e_M_: Match emissions (rows: alphabet; column 0: start step).
            e_I_: Insertion emissions, laid out like e_M_.
            a_: Transitions; rows ordered m->m, m->i, m->d, i->m, i->i, d->m, d->d.
            codon_dict_: Maps a 3-base codon string to an amino acid ('*' = stop).
            alphabet_to_index_: Maps an amino acid to its row in e_M_/e_I_.
            N_: Number of DAG nodes. L_: Number of HMM residues.

        Returns:
            (tr, max_tr_idx): the five traceback tables, and the
            (state, node, residue) triple where traceback starts.

        TO DO: optimize the performance with numba compile strategy. Suspect tr list accounting for error.
        """
        # Score tables in log space, one cell per (node, residue), initialized
        # to -inf ("impossible"). M/I/D are the profile-HMM match, insert and
        # delete states; N/C are the flanking pre-match / post-match states,
        # which depend on the node only.
        V_M : np.ndarray = np.array(np.ones((N_, L_)) * -np.inf)
        V_I : np.ndarray = np.array(np.ones((N_, L_)) * -np.inf)
        V_D : np.ndarray = np.array(np.ones((N_, L_)) * -np.inf)
        V_N : np.ndarray = np.ones(N_) * -np.inf
        V_C : np.ndarray = np.ones(N_) * -np.inf
        # Traceback records: alignment_type is 0 - M, 1 - I, 2 - D, 3 - N,
        # 4 - C; dag_node / hmm_residue locate the previous cell; parent and
        # grandparent (M and I only) are the two intermediate codon nodes.
        V_M_tr : np.ndarray = np.array(np.zeros((N_, L_)), dtype=[('alignment_type', 'i'), ('dag_node', 'i'), ('hmm_residue', 'i'), ('parent', 'i'), ('grandparent', 'i')])
        V_I_tr : np.ndarray = np.array(np.zeros((N_, L_)), dtype=[('alignment_type', 'i'), ('dag_node', 'i'), ('hmm_residue', 'i'), ('parent', 'i'), ('grandparent', 'i')])
        V_D_tr : np.ndarray = np.array(np.zeros((N_, L_)), dtype=[('alignment_type', 'i'), ('dag_node', 'i'), ('hmm_residue', 'i')])
        V_N_tr : np.ndarray = np.zeros(N_, dtype=[('alignment_type', 'i'), ('dag_node', 'i'), ('hmm_residue', 'i')])
        V_C_tr : np.ndarray = np.zeros(N_, dtype=[('alignment_type', 'i'), ('dag_node', 'i'), ('hmm_residue', 'i')])
        tr : List[np.ndarray] = [V_M_tr, V_I_tr, V_D_tr, V_N_tr, V_C_tr]
        # The source node starts in the N (pre-match) state with log-prob 0.
        V_N[0] = 0
        for i in range(1, N_): # Node index in topological order
            for p in predecessors_[i]: # x_i^(1)
                assert p < i
                n_to_n = V_N[p] # N->N
                if n_to_n > V_N[i]:
                    V_N[i] = n_to_n
                    tr[3][i] = (3,p,0)
                # Entering C from the best-scoring match cell of the predecessor.
                max_idx = np.argmax(V_M[p, :])
                m_to_c = V_M[p, max_idx] # M->C
                if m_to_c > V_C[i]:
                    V_C[i] = m_to_c
                    tr[4][i] = (0,p,max_idx)
                c_to_c = V_C[p] # C->C
                if c_to_c > V_C[i]:
                    V_C[i] = c_to_c
                    tr[4][i] = (4,p,0)
            # The sink node and the '^'/'$' sentinel bases never end a codon.
            if i == N_-1 or bases_[i] == '^' or bases_[i] == '$':
                continue
            if ancestors_[i]:
                for ancestor_list in ancestors_[i]:
                    gg = ancestor_list[2] # x_i^(3) grandgrandparent
                    g = ancestor_list[1] # x_i^(2) grandparent
                    p = ancestor_list[0] # x_i^(1) parent
                    assert gg < i
                    assert g < i
                    assert p < i
                    # The three bases ending at node i spell one amino acid.
                    codon = bases_[g] + bases_[p] + bases_[i]
                    T = codon_dict_[codon]
                    # skip if stop codon
                    if T == '*':
                        continue
                    x = alphabet_to_index_[T]
                    for j in range(L_): # HMM residue index
                        # Emission terms are log-odds against the column-0
                        # (start-step) probabilities; e_*_[x][j+1] is residue j.
                        if j != 0: # skip first residue
                            m_to_m = log(e_M_[x][j+1]) - log(e_M_[x][0]) + V_M[gg, j-1] + log(a_[0][j]) # M->M
                            if m_to_m > V_M[i, j]:
                                V_M[i, j] = m_to_m
                                tr[0][i,j] = (0, gg, j-1, p, g)
                            i_to_m = log(e_M_[x][j+1]) - log(e_M_[x][0]) + V_I[gg, j-1] + log(a_[3][j]) # I->M
                            if i_to_m > V_M[i, j]:
                                V_M[i, j] = i_to_m
                                tr[0][i,j] = (1, gg, j-1, p, g)
                            d_to_m = log(e_M_[x][j+1]) - log(e_M_[x][0]) + V_D[gg, j-1] + log(a_[5][j]) # D->M
                            if d_to_m > V_M[i, j]:
                                V_M[i, j] = d_to_m
                                tr[0][i,j] = (2, gg, j-1, p, g)
                            n_to_m = log(e_M_[x][j+1]) - log(e_M_[x][0]) + V_N[gg] # N->M
                            if n_to_m > V_M[i, j]:
                                V_M[i, j] = n_to_m
                                tr[0][i,j] = (3, gg, 0, p, g)
                        m_to_i = log(e_I_[x][j+1]) - log(e_I_[x][0]) + V_M[gg, j] + log(a_[1][j+1]) # M->I
                        if m_to_i > V_I[i, j]:
                            V_I[i, j] = m_to_i
                            tr[1][i,j] = (0, gg, j, p, g)
                        i_to_i = log(e_I_[x][j+1]) - log(e_I_[x][0]) + V_I[gg, j] + log(a_[4][j+1]) # I->I
                        if i_to_i > V_I[i, j]:
                            V_I[i, j] = i_to_i
                            tr[1][i,j] = (1, gg, j, p, g)
                        # Deletions consume no bases: they stay on node i and
                        # advance only the residue index.
                        if j != 0 and j != L_-1: # skip first and last residues
                            m_to_d = V_M[i, j-1] + log(a_[2][j+1]) # M->D
                            if m_to_d > V_D[i, j]:
                                V_D[i, j] = m_to_d
                                tr[2][i,j] = (0, i, j-1)
                            d_to_d = V_D[i, j-1] + log(a_[6][j+1]) # D->D
                            if d_to_d > V_D[i, j]:
                                V_D[i, j] = d_to_d
                                tr[2][i,j] = (2, i, j-1)
        # Traceback always starts from the C state of the sink node.
        max_tr_idx = (4,N_-1,0)
        return tr, max_tr_idx

    def traceback(self, tr_, tr_start_idx_):
        """Trace back the traceback matrix so that we could identify the path with best score.

        Returns the node indices of the best path in forward order.
        """
        traceback_index_list = []
        t, i, j = tr_start_idx_
        while i != 0:
            traceback_index_list.append(i)
            if t == 0 or t == 1:
                # M and I entries consumed a whole codon: also emit the two
                # intermediate codon nodes (parent and grandparent).
                traceback_index_list.append(tr_[t][i,j]['parent'])
                traceback_index_list.append(tr_[t][i,j]['grandparent'])
                t, i, j = (tr_[t][i,j]['alignment_type'], tr_[t][i,j]['dag_node'], tr_[t][i,j]['hmm_residue'])
            elif t == 2:
                t, i, j = (tr_[t][i,j]['alignment_type'], tr_[t][i,j]['dag_node'], tr_[t][i,j]['hmm_residue'])
            else: # t == 3 or t == 4: the N/C tables are indexed by node only
                t, i, j = (tr_[t][i]['alignment_type'], tr_[t][i]['dag_node'], tr_[t][i]['hmm_residue'])
        # begin node
        traceback_index_list.append(i)
        traceback_index_list.reverse()
        return traceback_index_list

    def transfer_emissons(self) -> Tuple[np.ndarray, np.ndarray]:
        """Transfer the emission probabilities into numpy arrays from the HMM_profile HMM object.

        Returns:
            A (match, insertion) pair of 2D arrays with one row per alphabet
            character and len(self)+1 columns; column 0 holds the start-step
            probabilities and column j (j >= 1) holds those of residue j.
        """
        emissions_from_M : Dict[ProteinCode, np.ndarray] = {ProteinCode(char): np.zeros(len(self)+1) for char in self._alphabet}
        emissions_from_I : Dict[ProteinCode, np.ndarray] = {ProteinCode(char): np.zeros(len(self)+1) for char in self._alphabet}
        # Column 0: start-step probabilities.
        for i, alphabet in enumerate(self._alphabet):
            emissions_from_M[alphabet][0] = self._phmm.start_step.p_emission_char[i]
            emissions_from_I[alphabet][0] = self._phmm.start_step.p_insertion_char[i]
        # Columns 1..len: per-residue probabilities.
        for i, alphabet in enumerate(self._alphabet):
            for j in range(1, len(self)+1):
                emissions_from_M[alphabet][j] = self._phmm.steps[j-1].p_emission_char[i]
                emissions_from_I[alphabet][j] = self._phmm.steps[j-1].p_insertion_char[i]
        # return 2D arrays for performance
        return \
            np.vstack([emissions_from_M[c] for c in self._alphabet]), \
            np.vstack([emissions_from_I[c] for c in self._alphabet])

    def transfer_transmissions(self) -> np.ndarray:
        """Transfer the transmission probabilities into a numpy array from the HMM_profile HMM object.

        Returns:
            A 2D array with one row per transition kind, in the order of
            transmission_list below, and len(self)+1 columns (column 0 unused).
        """
        # these are all the transmissions we want to observe
        transmission_list = [
            'm->m', 'm->i', 'm->d', 'i->m', 'i->i', 'd->m', 'd->d'
        ]
        transmissions : Dict[str, np.ndarray]= {t: np.zeros(len(self)+1) for t in transmission_list}
        for i in range(1, len(self)+1):
            transmissions['m->m'][i] = self._phmm.steps[i-1].p_emission_to_emission
            transmissions['m->i'][i] = self._phmm.steps[i-1].p_emission_to_insertion
            transmissions['m->d'][i] = self._phmm.steps[i-1].p_emission_to_deletion
            transmissions['i->m'][i] = self._phmm.steps[i-1].p_insertion_to_emission
            transmissions['i->i'][i] = self._phmm.steps[i-1].p_insertion_to_insertion
            transmissions['d->m'][i] = self._phmm.steps[i-1].p_deletion_to_emission
            transmissions['d->d'][i] = self._phmm.steps[i-1].p_deletion_to_deletion
        # return everything as a 2D array for performance
        return np.vstack([transmissions[t] for t in transmission_list])
#################################### just for testing
def locate_grand_parents(DAG_):
ordering = nx.algorithms.dag.topological_sort(DAG_)
for node_id in ordering:
for parent in DAG_.predecessors(node_id):
| |
20 to the y axis
# adding the text on the given location
hlp.AddText("Efficiency O(nlogn):", (520, 200))
# adding the text on the given location
hlp.AddText(str(efficiency), (540, 230), hlp.white)
# updates the screen every turn
pygame.display.flip()
# will not run more than 30 frames per second
clock.tick(30)
intro.Introduction2() # calls back the introduction function
def Efficiency():
    '''
    Interactive screen comparing the theoretical efficiency of the three
    line-segment intersection algorithms: brute force (BF), Bentley-Ottmann
    (BO) and Shamos-Hoey (SH).

    Reads a segment count n and an intersection count k from two input
    buttons, evaluates each algorithm's operation-count formula, and draws
    the three results as a bar chart scaled to fit a 350px-high axis.
    Returns to the introduction screen when the user presses "Back".
    '''
    pygame.display.set_caption("Efficiency Comparison")
    display = pygame.display.set_mode(
        (1280, 550), pygame.FULLSCREEN | pygame.DOUBLEBUF)
    n = 0        # number of line segments entered by the user
    k = 0        # number of intersections entered by the user
    posX1 = 180  # x position of the "Insert Line" button
    posX2 = 400  # x position of the "Insert Intersect" button
    posY = 20    # y position of both input buttons
    bPos = 450   # y coordinate of the chart baseline
    # Bar heights in pixels (possibly rescaled to fit the chart) ...
    bo = bf = sh = 0
    # ... and the unscaled operation counts shown as labels above each bar.
    bog = bfg = shg = 0
    while True:
        display.fill((0, 0, 0))
        # Handle quit, ESC, and left-clicks on the two input buttons.
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
                sys.exit()  # sys.exit instead of interactive-only exit()
            if event.type == pygame.KEYUP:
                if event.key == pygame.K_ESCAPE:
                    sys.exit()
            if event.type == pygame.MOUSEBUTTONDOWN:
                if event.button == 1:  # left mouse button
                    pos = pygame.mouse.get_pos()
                    # Click landed on the "Insert Line" button.
                    if posX1 < pos[0] < posX1 + 130 and posY < pos[1] < posY + 60:
                        lineTxt = hlp.InsertNumber("Line Number:")
                        if lineTxt != "":
                            try:
                                n = int(lineTxt)
                            except ValueError:
                                # Non-numeric input falls back to zero. (This
                                # was a bare `except`, which would also have
                                # swallowed KeyboardInterrupt/SystemExit.)
                                n = 0
                    # Click landed on the "Insert Intersect" button.
                    # NOTE(review): this hit-test uses a width of 170 while the
                    # button below is drawn 160 wide -- confirm which is meant.
                    elif posX2 < pos[0] < posX2 + 170 and posY < pos[1] < posY + 60:
                        intersectTxt = hlp.InsertNumber("Intersect Number :")
                        if intersectTxt != "":
                            try:
                                k = int(intersectTxt)
                            except ValueError:
                                k = 0
        if n > 0:
            # Established operation-count formulas for every algorithm.
            bo = int((n + k) * math.log10(n))  # Bentley-Ottmann: (n+k)*log(n)
            bog = bo
            bf = int(n * n)                    # brute force: n^2
            bfg = bf
            sh = int(n * math.log10(n))        # Shamos-Hoey: n*log(n)
            shg = sh
            # Rescale so the tallest bar fits the 350px-high chart area.
            if bo > 350 or bf > 350 or sh > 350:
                m = max(bo, bf, sh)
                bo = int((bo / m) * 350)
                bf = int((bf / m) * 350)
                sh = int((sh / m) * 350)
            # A zero-height bar would be invisible; draw at least one pixel.
            if bo == 0:
                bo = 1
            if bf == 0:
                bf = 1
            if sh == 0:
                sh = 1
        # Input buttons, current values, and bar labels.
        hlp.Button("Insert Line", posX1, posY, 130, 30, None)
        hlp.Button("Insert Intersect", posX2, posY, 160, 30, None)
        hlp.AddText("Line: " + str(n), (600, 20), hlp.white)
        hlp.AddText("Intersect: " + str(k), (600, 50), hlp.white)
        hlp.AddText("BF", (180, 460), hlp.white)
        hlp.AddText("BO", (330, 460), hlp.white)
        hlp.AddText("SH", (480, 460), hlp.white)
        # Chart axes: vertical value axis and horizontal baseline.
        pygame.draw.line(display, line_colour, (100, 100), (100, 500), 2)
        pygame.draw.line(display, line_colour, (50, 450), (650, 450), 2)
        # One labelled bar per algorithm, growing upward from the baseline.
        if bf > 0:
            hlp.AddText(str(bfg), (165, bPos - bf - 30), hlp.white)
            pygame.draw.rect(display, hlp.button_colour, (165, bPos - bf, 50, bf))
        if bo > 0:
            hlp.AddText(str(bog), (315, bPos - bo - 30), hlp.white)
            pygame.draw.rect(display, hlp.button_colour, (315, bPos - bo, 50, bo))
        if sh > 0:
            hlp.AddText(str(shg), (465, bPos - sh - 30), hlp.white)
            pygame.draw.rect(display, hlp.button_colour,
                             (465, bPos - sh, 50, sh))
        # Navigation buttons.
        hlp.Button("Exit", 350, 500, 100,
                   30, sys.exit)
        back = hlp.ButtonWithReturn("Back", 650, 500, 100, 30, 1)
        if back > 0:
            break
        nxt = hlp.ButtonWithReturn("Next", 500, 500, 100, 30, 1)
        if nxt > 0:
            hlp.Description(dsb.effic_desc)
        pygame.display.flip()  # update the screen every frame
        clock.tick(60)  # cap the loop at 60 frames per second
    intro.Introduction2()  # return to the introduction screen
def Efficiency2():
'''
this function compares the efficiency of the algorithms
'''
pygame.display.set_caption("Efficiency Comparison")
display = pygame.display.set_mode(
(1280, 550), pygame.FULLSCREEN | pygame.DOUBLEBUF)
n = range(10, 1001) # number segment
bet = False
posX1 = 180 # position to appear
posX2 = 400 # position to appear
posY = 20 # position to appear
bPos = 450 # position to appear
sheffc = [i * math.log10(i) for i in n] # it is a list comprehension method for sh algoritm efficiency.
bfeffc = [i**2 for i in n] # it is a list comprehension method for bf algoritm efficiency.
boeffc = [((i + (((i**2) - i) / 2)) * math.log10(i)) for i in n] # it is a list comprehension method for bo algoritm efficiency.
topalg = sheffc + bfeffc + boeffc # here compiles all efficency into one list
mx = max(topalg) # getting the max value from the list
mn = min(topalg) # getting the min value from the list
transsheffc = [TransValue(i, mx, mn) for i in sheffc] #here it starts a list comprehension to normalize the values for across three efficiencies
transshefc2 = random.sample(transsheffc, 550) #then getting 550 values to represent equally across the pixels
transshefc2.sort() # sorting in descending order
shno = 0 #starting an index for iteration
shpoints = [] #placeholder value
for i in transshefc2[:200]: #here it uses indexing and iteration for creating display pixel points for sh algoritm. First one is the x value, other one is y value.
shpoints.append((100 + shno, 450 - int(i))) #here it uses indexing and iteration for creating display pixel points for sh algoritm. First one is the x value, other one is y value.
shno += 1 #here it uses indexing and iteration for creating display pixel points for sh algoritm. First one is the x value, other one is y value.
for i in transshefc2[200:349]: #here it uses indexing and iteration for creating display pixel | |
'return_failure_list': [ return_failure_list, 'return-failure-list', [ bool, 'None' ], False ],
'query': [ query, 'query', [ SnapmirrorInfo, 'None' ], False ],
}, {
'num-succeeded': [ int, False ],
'num-failed': [ int, False ],
'success-list': [ SnapmirrorBreakIterInfo, True ],
'failure-list': [ SnapmirrorBreakIterInfo, True ],
} )
def snapmirror_modify(self, source_vserver=None, source_volume=None, schedule=None, vserver=None, source_cluster=None, tries=None, destination_vserver=None, destination_location=None, policy=None, destination_volume=None, source_location=None, max_transfer_rate=None, destination_cluster=None):
        """
        Change one or more parameters of an existing SnapMirror
        relationship, identified by its destination endpoint (which must
        always be specified).

        Changes only take effect at the next manual or scheduled update;
        transfers already running are not affected. For load-sharing
        mirrors a change applies to every relationship of the set. On
        Data ONTAP 8.1 Cluster-Mode the API must be issued on the
        destination cluster; on 8.2 it must be issued on the destination
        Vserver (Vserver context) or destination cluster (cluster
        context). Not supported on Infinite Volume constituents.

        :param source_vserver: Source Vserver name; use with the source volume
            name (and source cluster name on 8.1, or 8.2+ with a 'v1' control
            plane).
        :param source_volume: Source volume name; use with the source Vserver
            name (and source cluster name on 8.1, or 8.2+ with a 'v1' control
            plane).
        :param schedule: Name of the cron schedule used to update the
            relationship.
        :param vserver: Managing Vserver; currently a reserved option.
        :param source_cluster: Source cluster name (8.1 Cluster-Mode, or 8.2+
            with a 'v1' control plane); requires source Vserver and volume
            names.
        :param tries: Maximum attempts per manual/scheduled transfer (default
            8; 0 disables updates). Only relevant on 8.1 Cluster-Mode — on 8.2
            this is a SnapMirror policy attribute and the value is ignored.
        :param destination_vserver: Destination Vserver name; use with the
            destination volume name (and destination cluster name on 8.1, or
            8.2+ with a 'v1' control plane).
        :param destination_location: Destination endpoint, as
            <system>:/vol/<volume>[/<qtree>] (7-Mode),
            [<cluster>:]//<vserver>/<volume> (8.1-style control plane) or
            [<vserver>:]<volume> (8.2+, requires Vserver peering). Alternative
            to the separate Vserver/volume/cluster parameters; format may
            change in the future.
        :param policy: Name of the SnapMirror policy that applies to this
            relationship.
        :param destination_volume: Destination volume name; use with the
            destination Vserver name (and destination cluster name on 8.1, or
            8.2+ with a 'v1' control plane).
        :param source_location: Source endpoint; same formats as
            destination_location. Alternative to the separate
            Vserver/volume/cluster parameters.
        :param max_transfer_rate: Upper transfer bound in kilobytes per
            second; 0 (default) means unlimited. Does not affect load-sharing
            transfers or Pre-8.2-capability transfers confined to one cluster.
        :param destination_cluster: Destination cluster name (8.1
            Cluster-Mode, or 8.2+ with a 'v1' control plane); requires
            destination Vserver and volume names.
        """
        # ZAPI argument map: python-name -> [value, zapi-name, [type, subtype], is-list]
        zapi_args = {
            'source_vserver': [ source_vserver, 'source-vserver', [ basestring, 'None' ], False ],
            'source_volume': [ source_volume, 'source-volume', [ basestring, 'None' ], False ],
            'schedule': [ schedule, 'schedule', [ basestring, 'None' ], False ],
            'vserver': [ vserver, 'vserver', [ basestring, 'vserver-name' ], False ],
            'source_cluster': [ source_cluster, 'source-cluster', [ basestring, 'None' ], False ],
            'tries': [ tries, 'tries', [ basestring, 'None' ], False ],
            'destination_vserver': [ destination_vserver, 'destination-vserver', [ basestring, 'None' ], False ],
            'destination_location': [ destination_location, 'destination-location', [ basestring, 'None' ], False ],
            'policy': [ policy, 'policy', [ basestring, 'sm-policy' ], False ],
            'destination_volume': [ destination_volume, 'destination-volume', [ basestring, 'None' ], False ],
            'source_location': [ source_location, 'source-location', [ basestring, 'None' ], False ],
            'max_transfer_rate': [ max_transfer_rate, 'max-transfer-rate', [ int, 'None' ], False ],
            'destination_cluster': [ destination_cluster, 'destination-cluster', [ basestring, 'None' ], False ],
        }
        # No typed output elements for this API
        return self.request( "snapmirror-modify", zapi_args, {} )
def snapmirror_abort_iter(self, query, check_only=None, max_failure_count=None, clear_checkpoint=None, | |
of expected '{}'".format(tok, etok))
def _raise_exception(self, msg):
""" Raise a Parsing exception
Args:
msg: Exception message
"""
raise FznParserException(self.tokenizer.build_error_string(msg))
class FznParser(object):
""" Reader of FZN file format """
__slots__ = ('model', # Read model
'compiled', # Model compiled indicator
'reader', # FZN reader
'cpo_exprs', # Dictionary of CPO expressions. Key=name, value=CPO expr
'reduce', # Reduce model indicator
'interval_gen', # Name generator for interval var expressions
'cumul_gen', # Name generator for cumul atom expressions
'parameters', # List of parameters
'variables', # List of variables
'constraints', # List of model constraints
'objective', # Model objective
'cur_constraint', # Currently compiled constraint descriptor
'def_var_exprs', # List of expressions waiting for defvars to be defined
'cpo_variables', # Set of names of variables that are translated as real CPO variables
)
def __init__(self, mdl=None):
""" Create a new FZN format parser
Args:
mdl: Model to fill, None (default) to create a new one.
"""
super(FznParser, self).__init__()
self.model = mdl if mdl is not None else CpoModel()
self.compiled = False
self.reader = FznReader()
self.interval_gen = IdAllocator("IntervalVar_")
self.cumul_gen = IdAllocator("VarCumulAtom_")
# Do not store location information (would store parser instead of real lines)
self.model.source_loc = False
# Set model reduction indicator
self.reduce = config.context.parser.fzn_reduce
def get_model(self):
""" Get the model that have been parsed
Return:
CpoModel result of the parsing
"""
if not self.compiled:
self.compiled = True
self._compile_to_model()
return self.model
def parse(self, cfile):
""" Parse a FZN file
Args:
cfile: FZN file to read
Raises:
FznParserException: Parsing exception
"""
if self.model.source_file is None:
self.model.source_file = cfile
self.reader.parse(cfile)
def parse_string(self, str):
""" Parse a string
Result of the parsing is added to the current result model.
Args:
str: String to parse
"""
self.reader.parse_string(str)
def get_output_variables(self):
""" Get the list of model output variables
Returns:
List of output variables, in declaration order.
"""
return [v for v in self.variables if v.is_output()]
def _write_model(self, out=None):
""" Print read model (short version)
Args:
out (optional): Output stream. Default is stdout
"""
if out is None:
out = sys.stdout
out.write(self.get_model().get_cpo_string(short_output=True))
out.write("\n")
def _get_cpo_expr_map(self):
""" For testing, get the map of CPO expressions
"""
self.get_model()
return self.cpo_exprs
def _compile_to_model(self):
""" Compile FZN model into CPO model
"""
# Initialize processing
self.cpo_exprs = {}
self.parameters = self.reader.parameters
self.variables = self.reader.variables
self.constraints = self.reader.constraints
self.objective = self.reader.objective
self.def_var_exprs = {}
self.cpo_variables = set()
# Reduce model if required
if self.reduce:
self._reduce_model()
# print("=== Variables:")
# for v in self.variables:
# print(" : {}".format(v))
# print("=== Constraints:")
# for c in self.constraints:
# print(" : {}".format(c))
# sys.stdout.flush()
# Compile parameters
for x in self.parameters:
self._compile_parameter(x)
# Compile variables
for x in self.variables:
self._compile_variable(x)
# Compile constraints
if self.reduce:
for x in self.constraints:
if isinstance(x, FznVariable):
self._compile_variable(x)
else:
self._compile_constraint(x)
else:
for x in self.constraints:
self._compile_constraint(x)
# Compile objective
self._compile_objective(self.objective)
def _compile_parameter(self, fp):
""" Compile a FZN parameter into CPO model
Args:
fp: Flatzinc parameter, object of class FznParameter
"""
if fp.type in ('int', 'bool'):
expr = CpoValue(fp.value, Type_IntArray if fp.size else Type_Int)
elif fp.type == 'float':
expr = CpoValue(fp.value, Type_FloatArray if fp.size else Type_Float)
else:
expr = build_cpo_expr(fp.value)
# Add to map
expr.set_name(fp.name)
self.cpo_exprs[fp.name] = expr
    def _compile_variable(self, fv):
        """ Compile a FZN variable into CPO model

        Arrays become CPO arrays of integer variables/expressions; scalars
        become integer variables, or constants when reduction is enabled and
        the value is already known. The resulting expression is named after
        the variable and registered in self.cpo_exprs.

        Args:
            fv: Flatzinc variable
        """
        # Check if variable is an array (size set) or a scalar
        val = fv.value
        if fv.size:
            # Build array of variables
            if val:
                # Check if there is a reference to a not yet defined variable;
                # if so, record this array as waiting for that variable and
                # postpone its compilation (reduction mode only)
                if self.reduce:
                    for v in val:
                        if v in self.def_var_exprs:
                            self.def_var_exprs[v].append(fv)
                            return
                arr = [self._get_cpo_expr(e) for e in val]
                # Use the more precise IntVarArray type only when every element is a variable
                expr = CpoValue(arr, Type_IntVarArray if all(x.type == Type_IntVar for x in arr) else Type_IntExprArray)
            else:
                # No initial value: build one fresh variable per array slot (1-based element names)
                arr = [integer_var(name=fv.name + '[' + str(i + 1) + ']', domain=fv.domain) for i in range(fv.size)]
                expr = CpoValue(arr, Type_IntVarArray)
        else:
            # Build single variable
            if self.reduce and val:
                # Value already known: compile as a constant instead of a variable
                if is_int(val):
                    expr = CpoValue(val, Type_Int)
                elif is_bool(val):
                    expr = CpoValue(val, Type_Bool)
                else:
                    expr = self._get_cpo_expr(val)
            else:
                # Check if value is another variable
                if isinstance(val, FznVariable):
                    # Retrieve existing variable
                    expr = self.cpo_exprs.get(val.name)
                    assert isinstance(expr, CpoIntVar), "Variable '{}' not found".format(val.name)
                else:
                    # Create new variable; domain comes from the value if any, else from the declaration
                    dom = _build_domain(val) if val else fv.domain
                    expr = integer_var(domain=dom)
        expr.set_name(fv.name)
        self.cpo_exprs[fv.name] = expr
def _compile_constraint(self, fc):
""" Compile a FZN constraint into CPO model
Args:
fv: Flatzinc constraint
"""
# Search in local methods
cmeth = getattr(self, "_compile_pred_" + fc.predicate, None)
if not cmeth:
raise FznParserException("Predicate '{}' is not supported.".format(fc.predicate))
# Call compile method
cmeth(fc)
def _compile_objective(self, fo):
""" Compile a FZN objective into CPO model
Args:
fo: Flatzinc objective
"""
#print("Compile objective {}".format(fo))
if fo is None:
return
if fo.operation != 'satisfy':
expr = self._get_cpo_expr(fo.expr)
oxpr = modeler.maximize(expr) if fo.operation == 'maximize' else modeler.minimize(expr)
self._add_to_model(oxpr)
    def _reduce_model(self):
        """ Reduce model size by factorizing expressions when possible

        Reduction steps:
         * compute, for each variable and constraint, the set of FZN
           variables it references,
         * map each 'defined' variable (defvar) to the constraint that
           defines it, so it can be inlined instead of declared,
         * move variables and constraints later in the compilation order
           so that each element appears after its dependencies.
        Output variables are always kept as real declared CPO variables.
        """
        # Access main model elements
        variables = self.variables
        constraints = self.constraints
        # Build reduction data related to variables
        for fv in variables:
            # Build list of variables that are referenced by this one
            fv.ref_vars = tuple(v for v in fv.value if isinstance(v, FznVariable)) if fv.size else ()
            # Output variables must remain declared CPO variables
            if fv.is_output():
                self.cpo_variables.add(fv.name)
        # In constraints, replace reference to arrays by arrays themselves
        for fc in constraints:
            fc.args = tuple(a.value if isinstance(a, FznVariable) and a.size else a for a in fc.args)
        # Initialize set of variables defined in constraints
        def_var_map = {} # Key is variable, value is constraint where variable is defined
        for fc in constraints:
            # print("Scan constraint {}".format(fc))
            defvar = fc.defvar
            if defvar is not None:
                def_var_map[defvar] = fc
            # Build list of all variables referenced by this constraint
            res = set()
            nbrefdvar = 0
            for v in fc._ref_vars_iterator():
                if v is defvar:
                    nbrefdvar += 1
                else:
                    res.add(v)
            fc.ref_vars = tuple(res)
            # Special case for cumulative. All variables are supposed defined by the constraint.
            if fc.predicate == 'cumulative':
                for v in fc.ref_vars:
                    def_var_map[v] = fc
            # print(" result list of ref variables: {}".format(fc.ref_vars))
            # If defined variable is referenced twice in the constraint, remove it as defined (keep as declared variable)
            if nbrefdvar > 1:
                fc.defvar = None
                self.cpo_variables.add(defvar.name)
        # Determine connected variable subsets
        #self._determine_connex_variables(def_var_map)
        # Scan variables to move them after definition of their dependencies when needed
        variables = [] # New list of variables
        for fv in self.variables:
            #print("Scan variable {}, Refvars: {}".format(fv, [v.name for v in fv.ref_vars]))
            if any(v in def_var_map for v in fv.ref_vars):
                if self._insert_in_constraints(fv, 0, def_var_map):
                    # Remove from list of variables (moved in constraints)
                    pass
                else:
                    # Keep it as a model variable
                    self.cpo_variables.add(fv.name)
                    variables.append(fv)
            else:
                variables.append(fv)
        self.variables = variables
        # Reorder constraints
        #print("\nScan constraints. Defined vars: {}".format([v.name for v in defined_vars]))
        constraints = self.constraints
        nbct = len(constraints)
        movedcstr = set() # Constraints already moved
        cx = 0
        while cx < nbct:
            fc = constraints[cx]
            #print("Scan constraint {}. Refvars: {}".format(fc, [v.name for v in fc.ref_vars]))
            #print("Defined vars: {}".format([v.name for v in defined_vars]))
            # Process case of variable that has been inserted in constraints
            if isinstance(fc, FznVariable):
                def_var_map.pop(fc, None)
            else:
                # Search if constraint can be moved
                if (fc not in movedcstr) and (fc.predicate != "cumulative") \
                        and any(v in def_var_map for v in fc.ref_vars) \
                        and self._insert_in_constraints(fc, cx + 1, def_var_map):
                    # Move constraint after all is defined
                    del constraints[cx]
                    movedcstr.add(fc)
                    cx -= 1
                else:
                    # Constraint stays where it is
                    if fc.defvar:
                        def_var_map.pop(fc.defvar, None)
                    cx += 1
        # print("Reduction ended.")
        # print("  Variables:")
        # for v in self.variables:
        #     print("    {}".format(v))
        # print("  Constraints:")
        # for c in self.constraints:
        #     print("    {}".format(c))
def _insert_in_constraints(self, fc, cx, varsctsr):
""" Insert a constraint or a variable in constraints after all its members are defined
Args:
fc: FZN constraint or variable to insert
cx: Start insertion index
varsctsr: Map of constraints where each variable is defined
Return:
True if insertion was successful, False otherwise
"""
# Build set of constraints | |
IIi11IIiIii1 = float((float(speed) - float(iII11I1Ii1))) * float((float(o0o0oOo0oO) / float(oOo0OoOOo0)))
elif speed <= 20:
I1iIII1 = 29
iIii = 58
oOo0OoOOo0 = 10
iII11I1Ii1 = 10
o0o0oOo0oO = (iIii - I1iIII1)
IIi11IIiIii1 = I1iIII1 + float((float(speed) - float(iII11I1Ii1))) * float(
(float(o0o0oOo0oO) / float(oOo0OoOOo0)))
elif speed <= 30:
I1iIII1 = 59
iIii = 90
oOo0OoOOo0 = 10
iII11I1Ii1 = 20
o0o0oOo0oO = (iIii - I1iIII1)
IIi11IIiIii1 = I1iIII1 + float((float(speed) - float(iII11I1Ii1))) * float(
(float(o0o0oOo0oO) / float(oOo0OoOOo0)))
elif speed <= 50:
I1iIII1 = 91
iIii = Ooo0oo
oOo0OoOOo0 = 20
iII11I1Ii1 = 30
o0o0oOo0oO = (iIii - I1iIII1)
IIi11IIiIii1 = I1iIII1 + float((float(speed) - float(iII11I1Ii1))) * float(
(float(o0o0oOo0oO) / float(oOo0OoOOo0)))
elif speed > 50:
IIi11IIiIii1 = Ooo0oo
IIi1IIIIi = "%.0f" % float(IIi11IIiIii1)
if speed > 5:
IIi1IIIIi = '-' + str(IIi1IIIIi)
if 70 - 70: ooO0oo0oO0 / II111iiii - iIii1I11I1II1 - o0oOoO00o
ooo000o0ooO0 = 66
I1I = 260
oOoo000 = (self.screenx / 3) - (ooo000o0ooO0 / 2) + 28
OooOo00o = (self.screeny / 3) + (I1I / 2) - 88
self.imgGauge_arrow.setAnimations([
('conditional', 'effect=rotate start=%d end=%d center=%d,%d condition=Control.IsVisible(%d) time=%d' % (
int(last_speed), int(IIi1IIIIi), oOoo000, OooOo00o, self.imgGauge.getId(), time))
])
return IIi1IIIIi
if 11 - 11: iIii1I11I1II1.OoooooooOO.II111iiii / i1IIi - i111I
    def displayAdvertisement(self):
        """ Fetch and print the advertisement line.

        Reads a single line from the remote advertisement URL (module
        global ii1ii11); on any failure the configured default text is
        used instead. NOTE(review): the layout values computed below are
        never used in this method, and the 'if N - N:' lines are
        obfuscator no-ops (the condition is always 0, i.e. false).
        """
        try:
            # First line of the remote resource, stripped of surrounding whitespace
            Oo000ooOOO = urlopen(ii1ii11).readline().strip(' \t\n\r')
        except:
            # Network/read failure: fall back to the default advertisement text
            Oo000ooOOO = self.default_advertisement
        print Oo000ooOOO
        if 84 - 84: O0.i111I - II111iiii.OOO0O / II111iiii
        # Layout constants (screen 1280x720, box 210x500) -- computed but unused here
        iii1 = 1280
        I1i = 720
        oOo00O000Oo0 = 210
        I1iI1I1I1i11i = 500
        iiI11 = iii1 - oOo00O000Oo0 - 30
        OOoO000 = I1i - I1iI1I1I1i11i - 20
        if 86 - 86: Oo0Ooo / oooO0oo0oOOOO + O0 * o0oOoO00o
        if 19 - 19: II111iiii * I11iii11IIi + II1Ii1iI1i
        if 65 - 65: ooO0oo0oO0.o0ooo.OoO0O00.o0oOoO00o - ooO0oo0oO0
if 19 - 19: i11iIiiIii + o0oOoO00o % OOO0O
def onAction(self, action):
if action == i1 or action == oOOoo00O0O:
self.saveClose()
if 14 - 14: OoO0O00.II111iiii.i111I / II1Ii1iI1i % I1ii11iIi11i - OOO0O
def onControl(self, control):
if control == self.button_run:
if 67 - 67: i111I - ooO0oo0oO0.i1IIi
self.speedtest(share=True, simple=True)
if 35 - 35: o0oOoO00o + OOO0O - oooO0oo0oOOOO.o0oOoO00o.I11iii11IIi
self.imgCentertext.setImage(' ')
self.imgProgress.setEnabled(False)
self.please_wait_textbox.setVisible(False)
self.dlul_prog_textbox.setLabel('')
self.dl_textbox.setLabel('')
self.ul_textbox.setLabel('')
self.ping_textbox.setLabel('')
self.imgResults.setEnabled(False)
self.imgGauge_arrow.setVisible(False)
self.imgGauge.setEnabled(False)
self.showResult()
self.button_close.setVisible(True)
self.setFocus(self.button_close)
if control == self.button_close:
self.close()
if 87 - 87: OoOoOO00
def saveClose(self):
if 25 - 25: i1IIi.OoO0O00 - OoOoOO00 / OoO0O00 % OoO0O00 * iIii1I11I1II1
if 50 - 50: OoO0O00.i11iIiiIii - oooO0oo0oOOOO.oooO0oo0oOOOO
self.close()
if 31 - 31: ooO0oo0oO0 / Oo0Ooo * i1IIi.OoOoOO00
def update_textbox(self, text):
self.textbox.setText("\n".join(text))
if 57 - 57: ooO0oo0oO0 + iIii1I11I1II1 % i1IIi % I1IiiI
def error(self, message):
if 83 - 83: o0oOOo0O0Ooo / i11iIiiIii % iIii1I11I1II1.i111I % oooO0oo0oOOOO.OoooooooOO
self.imgProgress.setImage(' ')
self.button_close.setVisible(True)
self.setFocus(self.button_close)
if 94 - 94: II1Ii1iI1i + iIii1I11I1II1 % OoO0O00
    def downloadSpeed(self, files, quiet=False):
        """ Measure download speed by fetching the given URLs concurrently.

        A producer thread (I1I1I) starts one downloader thread per URL
        (class o0o -- presumably an HTTP download thread, confirm against
        its definition) and queues it; a consumer thread (O0O) joins each
        downloader, accumulates its results and refreshes the on-screen
        gauge and label with the running Mbps figure. The 'if N - N:'
        lines are obfuscator no-ops (always false).

        Args:
            files: List of download URLs to fetch
            quiet: When True, suppress the progress dots on stdout
        Returns:
            Total accumulated result divided by elapsed seconds
            (presumably bytes per second -- confirm against o0o.result)
        """
        if 93 - 93: II1Ii1iI1i - ooO0oo0oO0 + iIii1I11I1II1 * o0oOOo0O0Ooo + o0ooo.o0oOoO00o
        if 49 - 49: OoooooooOO * i111I - Oo0Ooo.oooO0oo0oOOOO
        # Start timestamp shared with the worker threads (rebound again below)
        i1I1iI1iIi111i = timeit.default_timer()
        if 89 - 89: OOO0O + II1Ii1iI1i * OOO0O / OOO0O
        def I1I1I(q, files):
            # Producer: start one downloader thread per file and queue it
            for file in files:
                OoOO000 = o0o(file, i1I1iI1iIi111i)
                OoOO000.start()
                q.put(OoOO000, True)
                if 46 - 46: OoO0O00
                # Progress dot unless quiet or the shutdown event (Oo) is set
                if not quiet and not Oo.isSet():
                    sys.stdout.write('.')
                    sys.stdout.flush()
        if 71 - 71: i111I / i111I * oooO0oo0oOOOO * oooO0oo0oOOOO / II111iiii
        # Per-thread results accumulated by the consumer
        ii1Ii11I = []
        if 35 - 35: ooO0oo0oO0 * o0oOOo0O0Ooo * I1IiiI % Oo0Ooo.OoOoOO00
        def O0O(q, total_files):
            # Consumer: join each queued downloader and update the gauge/label
            O00o00O = 0
            while len(ii1Ii11I) < total_files:
                OoOO000 = q.get(True)
                while OoOO000.isAlive():
                    OoOO000.join(timeout=0.1)
                ii1Ii11I.append(sum(OoOO000.result))
                # Running speed in Mbps: bytes/sec -> megabits/sec (*8 / 1e6)
                ii1iii11i1 = ((sum(ii1Ii11I) / (timeit.default_timer() - i1I1iI1iIi111i)) / 1000 / 1000) * 8
                O00o00O = self.configGauge(ii1iii11i1, O00o00O)
                self.dlul_prog_textbox.setLabel('%.02f Mbps ' % ii1iii11i1)
                del OoOO000
        if 4 - 4: I11iii11IIi.I11iii11IIi % I1ii11iIi11i % II1Ii1iI1i / II1Ii1iI1i
        # Bounded queue throttles the producer to at most 6 in-flight downloads
        OOooooO0Oo = Queue(6)
        OO = threading.Thread(target=I1I1I, args=(OOooooO0Oo, files))
        iIiIIi1 = threading.Thread(target=O0O, args=(OOooooO0Oo, len(files)))
        # Reset the start timestamp just before the threads begin
        i1I1iI1iIi111i = timeit.default_timer()
        OO.start()
        iIiIIi1.start()
        while OO.isAlive():
            OO.join(timeout=0.1)
        while iIiIIi1.isAlive():
            iIiIIi1.join(timeout=0.1)
        return (sum(ii1Ii11I) / (timeit.default_timer() - i1I1iI1iIi111i))
if 29 - 29: Oo0Ooo * OOO0O * I1ii11iIi11i / i11iIiiIii
    def uploadSpeed(self, url, sizes, quiet=False):
        """ Measure upload speed by posting payloads of the given sizes.

        Mirror image of downloadSpeed: a producer thread (I1I1I) starts
        one uploader thread per payload size (class I11iiI1i1 --
        presumably an HTTP upload thread, confirm against its definition)
        and queues it; a consumer thread (O0O) joins each uploader,
        accumulates its results and refreshes the on-screen gauge and
        label with the running Mbps figure. The 'if N - N:' lines are
        obfuscator no-ops (always false).

        Args:
            url: Upload endpoint URL
            sizes: List of payload sizes to send
            quiet: When True, suppress the progress dots on stdout
        Returns:
            Total accumulated result divided by elapsed seconds
            (presumably bytes per second -- confirm against I11iiI1i1.result)
        """
        if 26 - 26: I11iii11IIi % o0ooo % oooO0oo0oOOOO % II1Ii1iI1i
        if 55 - 55: OOO0O % OoooooooOO / OoooooooOO % OoooooooOO
        # Start timestamp shared with the worker threads (rebound again below)
        i1I1iI1iIi111i = timeit.default_timer()
        if 52 - 52: I1ii11iIi11i + I1ii11iIi11i.II111iiii
        def I1I1I(q, sizes):
            # Producer: start one uploader thread per payload size and queue it
            for iI in sizes:
                OoOO000 = I11iiI1i1(url, i1I1iI1iIi111i, iI)
                OoOO000.start()
                q.put(OoOO000, True)
                # Progress dot unless quiet or the shutdown event (Oo) is set
                if not quiet and not Oo.isSet():
                    sys.stdout.write('.')
                    sys.stdout.flush()
        if 34 - 34: OoooooooOO.O0 / oooO0oo0oOOOO * OoOoOO00 - I1ii11iIi11i
        # Per-thread results accumulated by the consumer
        ii1Ii11I = []
        if 36 - 36: i1IIi / O0 / OoO0O00 - O0 - i1IIi
        def O0O(q, total_sizes):
            # Consumer: join each queued uploader and update the gauge/label
            O00o00O = 0
            while len(ii1Ii11I) < total_sizes:
                OoOO000 = q.get(True)
                while OoOO000.isAlive():
                    OoOO000.join(timeout=0.1)
                ii1Ii11I.append(OoOO000.result)
                if 22 - 22: i1IIi + II1Ii1iI1i
                # Running speed in Mbps: bytes/sec -> megabits/sec (*8 / 1e6)
                ii1iii11i1 = ((sum(ii1Ii11I) / (timeit.default_timer() - i1I1iI1iIi111i)) / 1000 / 1000) * 8
                O00o00O = self.configGauge(ii1iii11i1, O00o00O)
                self.dlul_prog_textbox.setLabel('%.02f Mbps ' % ii1iii11i1)
                del OoOO000
        if 54 - 54: OOO0O % ooO0oo0oO0.o0ooo + oooO0oo0oOOOO - ooO0oo0oO0 * I1IiiI
        # Bounded queue throttles the producer to at most 6 in-flight uploads
        OOooooO0Oo = Queue(6)
        OO = threading.Thread(target=I1I1I, args=(OOooooO0Oo, sizes))
        iIiIIi1 = threading.Thread(target=O0O, args=(OOooooO0Oo, len(sizes)))
        # Reset the start timestamp just before the threads begin
        i1I1iI1iIi111i = timeit.default_timer()
        OO.start()
        iIiIIi1.start()
        while OO.isAlive():
            OO.join(timeout=0.1)
        while iIiIIi1.isAlive():
            iIiIIi1.join(timeout=0.1)
        return (sum(ii1Ii11I) / (timeit.default_timer() - i1I1iI1iIi111i))
if 92 - 92: o0oOOo0O0Ooo + o0ooo / Oo0Ooo % OoO0O00 % I11iii11IIi.OoooooooOO
def speedtest(self, list=False, mini=None, server=None, share=False, simple=False, src=None, timeout=10,
units=('bit', 8), version=False):
self.imgPing.setVisible(True)
self.imgPing_glow.setVisible(True)
oo0 = []
if 52 - 52: OOO0O / i11iIiiIii - ooO0oo0oO0.I11iii11IIi % iIii1I11I1II1 + o0oOOo0O0Ooo
if 71 - 71: oooO0oo0oOOOO % i111I * OoOoOO00.O0 / II1Ii1iI1i.I1ii11iIi11i
if 58 - 58: Oo0Ooo / oooO0oo0oOOOO
global Oo, oOoOo00oOo
Oo = threading.Event()
if 44 - 44: ooO0oo0oO0
oOOo0OOOo00O = (
'Command line interface for testing internet bandwidth using '
'speedtest.net.\n'
'------------------------------------------------------------'
'--------------\n'
'https://github.com/sivel/speedtest-cli')
if 54 - 54: II1Ii1iI1i - i111I - o0ooo.iIii1I11I1II1
socket.setdefaulttimeout(timeout)
if 79 - 79: II1Ii1iI1i.OoO0O00
if 40 - 40: o0oOOo0O0Ooo + Oo0Ooo.o0oOOo0O0Ooo % OOO0O
if src:
oOoOo00oOo = src
socket.socket = II1III
if 15 - 15: II1Ii1iI1i * Oo0Ooo % I1ii11iIi11i * iIii1I11I1II1 - i11iIiiIii
oo0.append('Retrieving speedtest.net configuration...')
self.update_textbox(oo0)
if not simple:
Ii11iI1i('Retrieving speedtest.net configuration...')
try:
OooooOOoo0 = O0Oo0oOOoooOOOOo()
except URLError:
Ii11iI1i('Cannot retrieve speedtest configuration')
return False
if 60 - 60: I1IiiI * o0ooo % OoO0O00 + oooO0oo0oOOOO
oo0.append('Retrieving speedtest.net server list...')
self.update_textbox(oo0)
self.imgCentertext.setImage(self.image_centertext_testingping)
if not simple:
Ii11iI1i('Retrieving speedtest.net server list...')
if list or server:
i111iIi1i1II1 = i1iI(OooooOOoo0['client'], True)
if list:
I1i11i = []
for server in i111iIi1i1II1:
IiIi = ('%(id)4s) %(sponsor)s (%(name)s, %(country)s) '
'[%(d)0.2f km]' % server)
I1i11i.append(IiIi)
if 52 - 52: i1IIi
if 84 - 84: II1Ii1iI1i / I11iii11IIi
if 86 - 86: OoOoOO00 * II111iiii - O0.OoOoOO00 % iIii1I11I1II1 / ooO0oo0oO0
try:
unicode()
Ii11iI1i('\n'.join(I1i11i).encode('utf-8', 'ignore'))
except NameError:
Ii11iI1i('\n'.join(I1i11i))
except IOError:
pass
sys.exit(0)
else:
i111iIi1i1II1 = i1iI(OooooOOoo0['client'])
if 11 - 11: I1IiiI * oooO0oo0oOOOO + I1ii11iIi11i / I1ii11iIi11i
oo0.append('Testing from %(isp)s (%(ip)s)...' % OooooOOoo0['client'])
self.update_textbox(oo0)
if 37 - 37: i11iIiiIii + i1IIi
if not simple:
Ii11iI1i('Testing from %(isp)s (%(ip)s)...' % OooooOOoo0['client'])
if 23 - 23: o0oOoO00o + i111I.OoOoOO00 * I1IiiI + I1ii11iIi11i
if server:
try:
IIIIiIiIi1 = II1iIi11(filter(lambda i1II1I1Iii1: i1II1I1Iii1['id'] == server,
i111iIi1i1II1))
except IndexError:
Ii11iI1i('Invalid server ID')
return False
elif mini:
iiI11Iii, O0o0O0 = os.path.splitext(mini)
if O0o0O0:
oooO = os.path.dirname(mini)
else:
oooO = mini
o0OO0o0o00o = urlparse(oooO)
try:
O0OOo00oo0oOo = O0ooo0O0oo0(mini)
OoOo0o = urlopen(O0OOo00oo0oOo)
except:
Ii11iI1i('Invalid Speedtest Mini URL')
return False
else:
o0o0 = OoOo0o.read()
OoOo0o.close()
Ii1II1I11i1 = re.findall('upload_extension: "([^"]+)"', o0o0.decode())
if not Ii1II1I11i1:
for O0o0O0 in ['php', 'asp', 'aspx', 'jsp']:
try:
O0OOo00oo0oOo = O0ooo0O0oo0('%s/speedtest/upload.%s' %
(mini, O0o0O0))
OoOo0o = urlopen(O0OOo00oo0oOo)
except:
pass
else:
O0OO0O = OoOo0o.read().strip()
if (OoOo0o.code == 200 and
len(O0OO0O.splitlines()) == 1 and
re.match('size=[0-9]', O0OO0O)):
Ii1II1I11i1 = [O0o0O0]
break
if not o0OO0o0o00o or not Ii1II1I11i1:
Ii11iI1i('Please provide the full URL of your Speedtest Mini server')
return False
i111iIi1i1II1 = [{
'sponsor': 'Speedtest Mini',
'name': o0OO0o0o00o[1],
'd': 0,
'url': '%s/speedtest/upload.%s' % (oooO.rstrip('/'), Ii1II1I11i1[0]),
'latency': 0,
'id': 0
}]
try:
IIIIiIiIi1 = II1iIi11(i111iIi1i1II1)
except:
IIIIiIiIi1 = i111iIi1i1II1[0]
else:
if not simple:
oo0.append('Selecting best server based on latency...')
self.update_textbox(oo0)
Ii11iI1i('Selecting best server based on latency...')
IIIIiIiIi1 = II1iIi11(i111iIi1i1II1)
if 18 - 18: I11iii11IIi * o0oOOo0O0Ooo.I11iii11IIi / O0
if 8 - 8: o0oOOo0O0Ooo
if 4 - 4: I1ii11iIi11i + I1ii11iIi11i * OOO0O - OoOoOO00
if not simple:
if | |
0.5
print ("final max_scores = ", max_scores)
if (max_scores == 0.0).any():
print ("WARNING, removing 0.0 max_scores!")
max_score_min = (max_scores[max_scores > 0.0]).min()
# TODO:Find reasonable way to fix this, is this causing the distorted reconstructions???
max_scores += max_score_min * 0.001
# max_scores += (max_scores[max_scores>0.0])
return max_scores
def rank_expanded_signals_max(x, x_exp, y, y_exp, max_comp=10, k=1, operation="average", max_num_samples_for_ev=None,
                              max_test_samples_for_ev=None, offsetting_mode="max_comp features", verbose=False):
    """ This Second ranking method more robust and closer to max I(x; y_i + Y)-I(x;Y) for all Y.
    Ordering and scoring of signals respects principle of best incremental feature selection
    Computes a scores vector that measures the importance of each expanded component at reconstructing a signal
    x, x_exp are training data, y and y_exp are test data
    At most max_comp are evaluated exhaustively, the rest is set equal to the remaining

    Bug fix: the test-sample guard originally read 'is notNone', which is a
    comparison against an undefined name and raised NameError on every call;
    it now reads 'is not None'.
    """
    dim_out = x_exp.shape[1]
    all_indices = numpy.arange(dim_out)
    indices_all_x = numpy.arange(x.shape[0])
    indices_all_y = numpy.arange(y.shape[0])
    max_scores = numpy.zeros(dim_out)
    available_mask = numpy.zeros(dim_out) >= 0  # boolean mask that indicates which elements are not yet scored
    taken = []  # list with the same elements.
    # Compute maximum explainable variance (taking all components)
    total_variance = data_variance(y)
    last_explained_var = 0.0
    last_score = 0.0
    for iteration in range(min(max_comp, dim_out)):
        # find individual contribution to expl var, from not taken
        indices_available = all_indices[available_mask]  # mapping from index_short to index_long
        temp_explained_vars = numpy.zeros(
            dim_out - iteration)  # explained variances for each available index
        # On each iteration, the subset of samples used for testing and samples for reconstruction are kept fixed
        if max_num_samples_for_ev is not None and max_num_samples_for_ev < x.shape[0]:
            indices_all_x_selection = indices_all_x + 0
            numpy.random.shuffle(indices_all_x_selection)
            indices_all_x_selection = indices_all_x_selection[0:max_num_samples_for_ev]
            x_sel = x[indices_all_x_selection]
            x_exp_sel = x_exp[indices_all_x_selection]
        else:
            x_sel = x
            x_exp_sel = x_exp
        # BUG FIX: original read 'is notNone' (NameError).
        # NOTE(review): the bound compares against x.shape[0] although it
        # subsamples y; it looks like it should be y.shape[0] -- kept as-is
        # for consistency with the sibling rank_expanded_signals().
        if max_test_samples_for_ev is not None and max_test_samples_for_ev < x.shape[0]:
            indices_all_y_selection = indices_all_y + 0
            numpy.random.shuffle(indices_all_y_selection)
            indices_all_y_selection = indices_all_y_selection[0:max_test_samples_for_ev]
            y_sel = y[indices_all_y_selection]
            y_exp_sel = y_exp[indices_all_y_selection]
        else:
            y_sel = y
            y_exp_sel = y_exp
        if verbose:
            print("indices available=", indices_available)
        for index_short, index_long in enumerate(indices_available):
            taken_tmp = list(taken)  # Copy the taken list
            taken_tmp.append(index_long)  # Add index_long to it
            x_exp_tmp_sel = x_exp_sel[:, taken_tmp]  # Select the variables
            y_exp_tmp_sel = y_exp_sel[:, taken_tmp]
            if operation == "linear_rec":
                y_app_sel = approximate_linearly(x_sel, x_exp_tmp_sel, y_exp_tmp_sel)
            else:
                y_app_sel = approximate_kNN_op(x_sel, x_exp_tmp_sel, y_exp_tmp_sel, k=k, ignore_closest_match=True,
                                               operation=operation)  # invert from taken variables
            temp_explained_vars[index_short] = compute_explained_var(y_sel, y_app_sel)  # compute explained var
            if verbose:
                print("taken_tmp=", taken_tmp, "temp_explained_vars[%d (long = %d) ]=%f" % (
                    index_short, index_long, temp_explained_vars[index_short]))
        # Update scores: best incremental gain seen so far for each candidate
        max_scores[indices_available] = numpy.maximum(max_scores[indices_available],
                                                      temp_explained_vars - last_explained_var)
        # select maximum
        max_explained_var_index_short = temp_explained_vars.argmax()
        max_explained_var_index_long = indices_available[max_explained_var_index_short]
        if verbose:
            print("Selecting index short:", max_explained_var_index_short,
                  " and index_ long:", max_explained_var_index_long)
        # mark as taken and update temporal variables
        taken.append(max_explained_var_index_long)
        available_mask[max_explained_var_index_long] = False
        last_explained_var = temp_explained_vars[max_explained_var_index_short]
    print("brute max_scores = ", max_scores)
    print("brute taken = ", taken)
    # Find ordering of variables not yet taken
    if max_comp < dim_out:
        max_explained_var_indices_short = \
            temp_explained_vars.argsort()[::-1][1:]
        # In decreasing order; the first element was already added to taken
        for max_explained_var_index_short in max_explained_var_indices_short:
            taken.append(indices_available[max_explained_var_index_short])
    print("final taken = ", taken)
    # Make scoring decreasing in ordering stored in taken
    last_explained_var = max(last_explained_var, 0.01)  # For numerical reasons
    last_max_score = -numpy.inf
    sum_max_scores = 0.0
    for i, long_index in enumerate(taken):
        current_max_score = max_scores[long_index]
        sum_max_scores += current_max_score
        if current_max_score > last_max_score and i > 0:
            # redistribute the excess over the prefix so the sequence stays
            # non-increasing while preserving the prefix sum
            max_scores[long_index] = last_max_score
            tmp_sum_max_scores = max_scores[taken[0:i + 1]].sum()
            max_scores[taken[0:i + 1]] += (sum_max_scores - tmp_sum_max_scores) / (i + 1)
        last_max_score = max_scores[long_index]
    print("preeliminar max_scores = ", max_scores)
    # Compute explained variance with all features
    indices_all_x_selection = random_subindices(x.shape[0], max_num_samples_for_ev)
    x_sel = x[indices_all_x_selection]
    x_exp_sel = x_exp[indices_all_x_selection]
    indices_all_y_selection = random_subindices(y.shape[0], max_test_samples_for_ev)
    y_sel = y[indices_all_y_selection]
    y_exp_sel = y_exp[indices_all_y_selection]
    if operation == "linear_rec":
        y_app_sel = approximate_linearly(x_sel, x_exp_sel, y_exp_sel)
    else:
        y_app_sel = approximate_kNN_op(x_sel, x_exp_sel, y_exp_sel, k=k, ignore_closest_match=True,
                                       operation=operation)  # invert from taken variables
    explained_var_all_feats = compute_explained_var(y_sel, y_app_sel)
    print("last_explained_var =", last_explained_var)
    print("explained_var_all_feats=", explained_var_all_feats, "total input variance:", total_variance)
    # NOTE: last_explained_var is not the data variance. It is the variance up to max_comp components
    # 3 options: all scores, max_comp scores, output_dim scores (usually all scores)
    if offsetting_mode == "max_comp features":
        max_scores *= (last_explained_var / max_scores.sum())
    elif offsetting_mode == "all features":
        print("explained_var_all_feats=", explained_var_all_feats, "total input variance:", total_variance)
        max_scores *= (explained_var_all_feats / max_scores.sum())
    elif offsetting_mode == "all features smart":
        max_scores *= (last_explained_var / max_scores.sum())
        print("scaled max_scores=", max_scores)
        max_scores += (explained_var_all_feats - last_explained_var) / max_scores.shape[0]
        print("offsetted max_scores=", max_scores)
    elif offsetting_mode == "democratic":
        max_scores = numpy.ones_like(max_scores) * explained_var_all_feats / max_scores.shape[0]
        print("democractic max_scores=", max_scores)
    elif offsetting_mode == "linear":
        max_scores = numpy.arange(dim_out, 0, -1) * explained_var_all_feats / (dim_out * (dim_out + 1) / 2)
        print("linear max_scores=", max_scores)
    elif offsetting_mode == "sensitivity_based":
        sens = sensivity_of_linearly_approximation(x_sel, x_exp_sel)
        max_scores = sens * explained_var_all_feats / sens.sum()
        print("sensitivity_based max_scores=", max_scores)
    else:
        ex = "offsetting_mode unknown", offsetting_mode
        raise Exception(ex)
    print("final max_scores = ", max_scores)
    if (max_scores == 0.0).any():
        print("WARNING, removing 0.0 max_scores!")
        max_score_min = (max_scores[max_scores > 0.0]).min()
        # TODO:Find reasonable way to fix this, is this causing the distorted reconstructions???
        max_scores += max_score_min * 0.001
    return max_scores
# TODO: Improve: if max_comp < output_dim choose remaining features from the last evaluation of explained variances.
def rank_expanded_signals(x, x_exp, y, y_exp, max_comp=10, k=1, linear=False, max_num_samples_for_ev=None,
max_test_samples_for_ev=None, verbose=False):
""" Computes a scores vector that measures the importance of each expanded component at reconstructing a signal
x, x_exp are training data, y and y_exp are test data
At most max_comp are evaluated exhaustively, the rest is set equal to the remaining
"""
dim_out = x_exp.shape[1]
all_indices = numpy.arange(dim_out)
indices_all_x = numpy.arange(x.shape[0])
indices_all_y = numpy.arange(y.shape[0])
scores = numpy.zeros(dim_out)
available_mask = numpy.zeros(dim_out) >= 0 # boolean mask that indicates which elements are not yet scored
taken = [] # list with the same elements.
# Compute maximum explainable variance (taking all components)
total_variance = data_variance(y)
last_explained_var = 0.0
last_score = 0.0
for iteration in range(min(max_comp, dim_out)):
# find individual contribution to expl var, from not taken
indices_available = all_indices[available_mask] # mapping from index_short to index_long
temp_explained_vars = numpy.zeros(
dim_out - iteration) # s_like(indices_available, dtype=") #explained variances for each available index
# On each iteration, the subset of samples used for testing and samples for reconstruction are kept fixed
if max_num_samples_for_ev is not None and max_num_samples_for_ev < x.shape[0]:
indices_all_x_selection = indices_all_x + 0
numpy.random.shuffle(indices_all_x_selection)
indices_all_x_selection = indices_all_x_selection[0:max_num_samples_for_ev]
x_sel = x[indices_all_x_selection]
x_exp_sel = x_exp[indices_all_x_selection]
else:
x_sel = x
x_exp_sel = x_exp
if max_test_samples_for_ev is not None and max_test_samples_for_ev < x.shape[0]:
indices_all_y_selection = indices_all_y + 0
numpy.random.shuffle(indices_all_y_selection)
indices_all_y_selection = indices_all_y_selection[0:max_test_samples_for_ev]
y_sel = y[indices_all_y_selection]
y_exp_sel = y_exp[indices_all_y_selection]
else:
y_sel = y
y_exp_sel = y_exp
if verbose:
print("indices available=", indices_available)
for index_short, index_long in enumerate(indices_available):
taken_tmp = list(taken) # Copy the taken list
taken_tmp.append(index_long) # Add index_long to it
x_exp_tmp_sel = x_exp_sel[:, taken_tmp] # Select the variables
y_exp_tmp_sel = y_exp_sel[:, taken_tmp]
y_app_sel = approximate_kNN(x_sel, x_exp_tmp_sel, y_exp_tmp_sel, k=k, ignore_closest_match=True,
label_avg=True) # invert from taken variables
# print "QQQ=", compute_explained_var(y_sel, y_app_sel)
temp_explained_vars[index_short] = compute_explained_var(y_sel, y_app_sel) # compute explained var
if verbose:
print("taken_tmp=", taken_tmp, "temp_explained_vars[%d (long = %d) ]=%f" % (
index_short, index_long, temp_explained_vars[index_short]))
# select maximum
# print "temp_explained_vars=", temp_explained_vars
max_explained_var_index_short = temp_explained_vars.argmax()
# print "max_explained_var_index_short=", max_explained_var_index_short
# print "indices_available=",indices_available
max_explained_var_index_long = indices_available[max_explained_var_index_short]
if verbose:
print("Selecting index short:", max_explained_var_index_short)
print(" and index_ long:", max_explained_var_index_long)
# update total explained var & scores
# Add logic to robustly handle strange contributions: 3, 2, 1, 4 => 5, 2.5, 1.25, 1.25 ?
# TODO:FIX NORMALIZATION WHEN FIRST SCORES ARE ZERO OR NEGATIVE!
# TODO:NORMALIZATION SHOULD BE OPTIONAL, SINCE IT WEAKENS THE INTERPRETATION OF THE SCORES
explained_var = max(temp_explained_vars[max_explained_var_index_short], 0.0)
new_score = explained_var - last_explained_var
if verbose:
print("new_score raw = ", new_score)
new_score = max(new_score, 0.0)
if new_score > last_score and iteration > 0:
new_score = last_score # Here some options are available to favour components taken first
scores[max_explained_var_index_long] = new_score
if verbose:
| |
<gh_stars>1-10
import torch
import numpy as np
import math
def radectolm(ra, dec, ra0, dec0):
    """Return direction cosines (l, m, n) of a source at spherical
    coordinates (ra, dec) with respect to the phase centre (ra0, dec0);
    all angles in radians."""
    if dec0 < 0.0 and dec >= 0.0:
        dec0 += 2.0 * math.pi
    dra = ra - ra0
    cos_dec = math.cos(dec)
    l = math.sin(dra) * cos_dec
    m = math.cos(dec0) * math.sin(dec) - math.cos(dra) * cos_dec * math.sin(dec0)
    n = math.sqrt(1. - l * l - m * m) - 1.
    return (l, m, n)
def lmtoradec(l, m, ra0, dec0):
    """Convert direction cosines (l, m) relative to phase centre (ra0, dec0)
    back to spherical coordinates.

    Returns (ra, dec) in radians. For l == 0 a tiny value (1e-10) is used in
    the atan2 to avoid a degenerate argument.
    """
    sind0 = math.sin(dec0)
    cosd0 = math.cos(dec0)
    dl = l
    dm = m
    d0 = pow(dm, 2) * pow(sind0, 2) + pow(dl, 2) - 2 * dm * cosd0 * sind0
    sind = math.sqrt(abs(pow(sind0, 2) - d0))
    cosd = math.sqrt(abs(pow(cosd0, 2) + d0))
    # carry the hemisphere of the phase centre onto sin(dec)
    if sind0 > 0:
        sind = abs(sind)
    else:
        sind = -abs(sind)
    dec = math.atan2(sind, cosd)
    if l != 0:
        ra = math.atan2(-dl, cosd0 - dm * sind0) + ra0
    else:
        # BUG FIX: original called bare atan2() (NameError); qualify with math.
        ra = math.atan2(1e-10, cosd0 - dm * sind0) + ra0
    return ra, dec
def radToRA(rad):
    """Convert an angle in radians to right ascension (hours, minutes, seconds).

    Negative inputs are wrapped by adding 2*pi; hours are reduced mod 24 and
    minutes mod 60. Rad = (hr + min/60 + sec/3600) * pi/12.
    """
    if rad < 0:
        rad = rad + 2 * math.pi
    total_hours = rad * 12.0 / math.pi
    hr = math.floor(total_hours)
    rem = (total_hours - hr) * 60
    mins = math.floor(rem)
    sec = (rem - mins) * 60
    return hr % 24, mins % 60, sec
def radToDec(rad):
    """Convert an angle in radians to declination (degrees, arcmin, arcsec).

    The sign of the input is carried on the degrees term; degrees are
    reduced mod 180 and minutes mod 60. Rad = (deg + min/60 + sec/3600) * pi/180.
    """
    mult = 1
    if rad < 0:
        mult = -1
        rad = abs(rad)
    total_deg = rad * 180.0 / math.pi
    deg = math.floor(total_deg)
    rem = (total_deg - deg) * 60
    mins = math.floor(rem)
    sec = (rem - mins) * 60
    return mult * (deg % 180), mins % 60, sec
# read solutions file, return solutions tensor and frequency
# return freq,J
def readsolutions(filename):
    """Read a calibration-solutions text file.

    Returns (freq, J): freq in Hz and J a (K, 2*Ns*Nto, 2) complex64 tensor
    of 2x2 Jones solutions per direction (K), station (Ns) and time
    interval (Nto).
    """
    fh=open(filename,'r')
    # skip first 2 lines
    next(fh)
    next(fh)
    # header row: freq/MHz BW/MHz time/min N K Ktrue
    curline=next(fh)
    cl=curline.split()
    freq=float(cl[0])*1e6  # MHz -> Hz
    Ns=int(cl[3]) # stations
    K=int(cl[5]) # true directions
    fullset=fh.readlines()
    fh.close()
    Nt=len(fullset)
    # each timeslot contributes 8 real values per station (one 2x2 complex Jones)
    Nto=Nt//(8*Ns)
    a=np.zeros((Nt,K),dtype=np.float32)
    ci=0
    for cl in fullset:
        cl1=cl.split()
        # first column is a row counter; columns 1..K hold the values
        for cj in range(len(cl1)-1):
            a[ci,cj]=float(cl1[cj+1])
        ci +=1
    J=np.zeros((K,2*Ns*Nto,2),dtype=np.csingle)
    # de-interleave the 8 reals per station into the four complex Jones
    # entries, striding over time intervals (2*Ns rows per interval)
    for m in range(K):
        for n in range(Ns):
            J[m,2*n:2*Ns*Nto:2*Ns,0]=a[8*n:Nto*8*Ns:Ns*8,m]+1j*a[8*n+1:Nto*8*Ns:Ns*8,m]
            J[m,2*n:2*Ns*Nto:2*Ns,1]=a[8*n+2:Nto*8*Ns:Ns*8,m]+1j*a[8*n+3:Nto*8*Ns:Ns*8,m]
            J[m,2*n+1:2*Ns*Nto:2*Ns,0]=a[8*n+4:Nto*8*Ns:Ns*8,m]+1j*a[8*n+5:Nto*8*Ns:Ns*8,m]
            J[m,2*n+1:2*Ns*Nto:2*Ns,1]=a[8*n+6:Nto*8*Ns:Ns*8,m]+1j*a[8*n+7:Nto*8*Ns:Ns*8,m]
    return (freq,J)
# read solutions file for spatial model, return solutions Zspat tensor
# return N(stations), F(freq poly) theta,phi (polar coordinate of calibration dirs Kx1) and Z
def read_spatial_solutions(filename):
    """Read a spatial-model solutions file.

    Returns (Ns, F, thetak, phik, Z): Ns stations, F frequency-polynomial
    terms, polar coordinates (thetak, phik) of the K calibration directions,
    and Z a (Nto, 2*F*Ns, 2*G) complex64 tensor of spatial coefficients
    (G spherical-harmonic terms).
    """
    fh=open(filename,'r')
    # skip first 3 lines
    next(fh)
    next(fh)
    next(fh)
    # reference_freq/MHz Freqpol(F) Spatialpol(G) N K Ktrue
    # F, G are number of polynomials in frequency and space
    curline=next(fh)
    cl=curline.split()
    freq=float(cl[0])*1e6  # parsed but not returned
    F=int(cl[1]) # polynomials in frequency
    G=int(cl[2]) # polynomials in space (spherical harmonics)
    Ns=int(cl[3]) # stations
    K=int(cl[5]) # true directions
    # spherical harmonic order (parsed but not returned)
    n0=int(math.sqrt(G))
    # next two lines, Kx1 values for source coordinates
    curline=next(fh)
    cl=curline.split()
    thetak=[float(x) for x in cl]
    curline=next(fh)
    cl=curline.split()
    phik=[float(x) for x in cl]
    assert(len(phik)==len(thetak) and len(phik)==K)
    # the remaining lines will have 1+G columns, first col from 0..8FN-1
    fullset=fh.readlines()
    fh.close()
    Nt=len(fullset)
    Nto=Nt//(8*F*Ns)  # number of time intervals
    a=np.zeros((Nt,G),dtype=np.float32)
    ci=0
    for cl in fullset:
        cl1=cl.split()
        # drop the leading index column
        for cj in range(len(cl1)-1):
            a[ci,cj]=float(cl1[cj+1])
        ci +=1
    Z=np.zeros((Nto,2*F*Ns,2*G),dtype=np.csingle)
    for ci in range(Nto):
        # split each col of a[] to 4FN x 2,
        # each 4FN yields 2FN complex, which is one col of Z
        for cj in range(G):
            b=a[(8*F*Ns)*ci:(8*F*Ns)*(ci+1),cj]
            c=b[0:8*F*Ns:2]+1j*b[1:8*F*Ns:2]  # interleaved re/im -> complex
            Z[ci,:,2*cj]=c[0:2*F*Ns]
            Z[ci,:,2*cj+1]=c[2*F*Ns:4*F*Ns]
    return Ns,F,thetak,phik,Z
# return K,C
def skytocoherencies(skymodel,clusterfile,uvwfile,N,freq,ra0,dec0):
    """Predict per-cluster model coherencies for given uvw coordinates.

    Returns (K, C): K clusters and C a (K, T, 4) complex64 tensor with
    columns XX, XY(=0), YX(=0), YY, where T is the number of uvw rows.
    """
    # use skymodel,clusterfile and predict coherencies for uvwfile coordinates
    # C: K way tensor, each slice Tx4, T: total samples, 4: XX,XY(=0),YX(=0),YY
    # N : stations, ra0,dec0: phase center (rad), freq: frequency
    # light speed
    c=2.99792458e8
    # uvw file
    fh=open(uvwfile,'r')
    fullset=fh.readlines()
    fh.close()
    # total samples=baselines x timeslots
    T=len(fullset)
    uu=np.zeros(T,dtype=np.float32)
    vv=np.zeros(T,dtype=np.float32)
    ww=np.zeros(T,dtype=np.float32)
    ci=0
    for cl in fullset:
        cl1=cl.split()
        uu[ci]=float(cl1[0])
        vv[ci]=float(cl1[1])
        ww[ci]=float(cl1[2])
        ci +=1
    # scale uvw by pi*freq/c (phase-term units)
    uu *=math.pi/c*freq
    vv *=math.pi/c*freq
    ww *=math.pi/c*freq
    del fullset
    fh=open(skymodel,'r')
    fullset=fh.readlines()
    fh.close()
    # S: source name -> remaining fields of its skymodel line
    S={}
    for cl in fullset:
        if (not cl.startswith('#')) and len(cl)>1:
            cl1=cl.split()
            S[cl1[0]]=cl1[1:]
    fh=open(clusterfile,'r')
    fullset=fh.readlines()
    fh.close()
    # determine number of clusters (non-comment, non-blank lines)
    ci=0
    for cl in fullset:
        if (not cl.startswith('#')) and len(cl)>1:
            ci +=1
    K=ci
    # coherencies: K clusters, T rows, 4=XX,XY,YX,YY
    C=np.zeros((K,T,4),dtype=np.csingle)
    # output sky/cluster info for input to DQN
    # format of each line: cluster_id l m sI sP
    #fh=open('./skylmn.txt','w+')
    ck=0 # cluster id
    for cl in fullset:
        if (not cl.startswith('#')) and len(cl)>1:
            cl1=cl.split()
            # columns 2.. of a cluster line are the source names in it
            for sname in cl1[2:]:
                # 3:ra 3:dec sI 0 0 0 sP 0 0 0 0 0 0 freq0
                sinfo=S[sname]
                # RA given as h m s, Dec as d m s; convert both to radians
                mra=(float(sinfo[0])+float(sinfo[1])/60.+float(sinfo[2])/3600.)*360./24.*math.pi/180.0
                mdec=(float(sinfo[3])+float(sinfo[4])/60.+float(sinfo[5])/3600.)*math.pi/180.0
                (myll,mymm,mynn)=radectolm(mra,mdec,ra0,dec0)
                mysI=float(sinfo[6])
                f0=float(sinfo[17])
                fratio=math.log(freq/f0)
                # spectral-index polynomial in log(freq/f0), cubic
                sIo=math.exp(math.log(mysI)+float(sinfo[10])*fratio+float(sinfo[11])*math.pow(fratio,2)+float(sinfo[12])*math.pow(fratio,3))
                # add this source's phase-rotated flux to the cluster's XX
                uvw=(uu*myll+vv*mymm+ww*mynn)
                XX=(np.cos(uvw)+1j*np.sin(uvw))*sIo
                C[ck,:,0]=C[ck,:,0]+XX
                #fh.write(str(ck)+' '+str(myll)+' '+str(mymm)+' '+str(mysI)+' '+str(sinfo[10])+'\n')
            ck+=1
    #fh.close()
    # unpolarized model: copy XX to YY, leave XY/YX zero
    for ck in range(K):
        C[ck,:,3]=C[ck,:,0]
    return K,C
# return rho Kx1 vector
def read_rho(rhofile, K):
    """Read K regularization parameters rho from a text file.

    Lines starting with '#' or of length <= 1 are skipped; each remaining
    line has the format 'id hybrid rho' and the third column is kept.
    Returns a float32 vector of length K.
    """
    rho = np.zeros(K, dtype=np.float32)
    row = 0
    with open(rhofile, 'r') as fh:
        for line in fh:
            if line.startswith('#') or len(line) <= 1:
                continue
            fields = line.split()
            rho[row] = float(fields[2])
            row += 1
    return rho
# return skymodel reading M components, M>2
def read_skycluster(skyclusterfile, M):
    """Read M sky/cluster model rows into an (M, 5) float32 array.

    Each non-comment, non-blank line has the format
    'cluster_id l m sI sP'; only the first five columns are stored.
    """
    skl = np.zeros((M, 5), dtype=np.float32)
    row = 0
    with open(skyclusterfile, 'r') as fh:
        for line in fh:
            if line.startswith('#') or len(line) <= 1:
                continue
            fields = line.split()
            for col in range(5):
                skl[row, col] = float(fields[col])
            row += 1
    return skl
# return XX,XY,YX,YY :each Tx1 complex vectors
def readuvw(uvwfile):
    """Read visibilities from a space-delimited text file with columns
    u v w XX_re XX_im XY_re XY_im YX_re YX_im YY_re YY_im
    and return the four correlations XX, XY, YX, YY as complex vectors."""
    cols = np.loadtxt(uvwfile, delimiter=' ')
    # real parts sit in columns 3,5,7,9; imaginary parts in 4,6,8,10
    vis = cols[:, 3::2] + 1j * cols[:, 4::2]
    return vis[:, 0], vis[:, 1], vis[:, 2], vis[:, 3]
# write XX,XY,YX,YY to text file
def writeuvw(uvwfile, XX, XY, YX, YY):
    """Write the four correlations to a text file, one row per sample:
    XX_re XX_im XY_re XY_im YX_re YX_im YY_re YY_im (space separated)."""
    T = XX.shape[0]
    with open(uvwfile, 'w+') as out:
        for ci in range(T):
            fields = []
            for v in (XX[ci], XY[ci], YX[ci], YY[ci]):
                fields.append(str(v.real))
                fields.append(str(v.imag))
            out.write(' '.join(fields) + '\n')
def Bpoly(x, N):
    """Evaluate the N+1 Bernstein basis polynomials of degree N at each x.

    x: array of values in [0,1].
    Returns a (len(x), N+1) float32 array whose row i holds
    [C(N,r) * x_i^r * (1-x_i)^(N-r) for r = 0..N], with C(N,r) = N!/((N-r)! r!).
    """
    M = len(x)
    # factorials 0!..N!, accumulated in float32 as in the original formulation
    fact = np.ones(N + 1, dtype=np.float32)
    for r in range(1, N + 1):
        fact[r] = fact[r - 1] * r
    # px[r] = x^r and p1x[r] = (1-x)^r, built by repeated multiplication
    px = np.ones((N + 1, M), dtype=np.float32)
    p1x = np.ones((N + 1, M), dtype=np.float32)
    for r in range(1, N + 1):
        px[r, :] = px[r - 1, :] * x
        p1x[r, :] = p1x[r - 1, :] * (1. - x)
    y = np.zeros((N + 1, M), dtype=np.float32)
    for r in range(N + 1):
        # C(N,r) x^r (1-x)^(N-r)
        y[r] = fact[N] / (fact[N - r] * fact[r]) * px[r] * p1x[N - r]
    return y.transpose()
# return F: 2Nx2N, and P: 2N Ne x 2 N
def consensus_poly(Ne,N,freqs,f0,fidx,polytype=0,rho=0.0,alpha=0.0):
    """Build consensus polynomial matrices for frequency smoothing.

    Returns (F, P): F is 2N x 2N and P is 2N*Ne x 2N, both evaluated at the
    working frequency freqs[fidx].
    """
    # Ne: polynomial order (number of terms)
    # N: stations
    # freqs: Nfx1 freq vector
    # f0: reference freq
    # fidx: 0,1,... frequency index to create F (working frequency)
    # polytype:0 ordinary, 1 Bernstein
    # rho: scaling of the consensus term in F
    # alpha: regularization parameter in federated averaging/spatial constraints
    Nf=len(freqs)
    Bfull=np.zeros((Nf,Ne),dtype=np.float32)
    if (polytype==0):
        # ordinary polynomial basis in (f-f0)/f0
        Bfull[:,0]=1.
        ff=(freqs-f0)/f0
        for cj in range(1,Ne):
            Bfull[:,cj]=np.power(ff,cj)
    else:
        # Bernstein basis over freqs normalized to [0,1]
        ff=(freqs-freqs.min())/(freqs.max()-freqs.min())
        Bfull=Bpoly(ff,Ne-1)
    # accumulate B^T B over frequencies
    Bi=np.zeros((Ne,Ne),dtype=np.float32)
    for cf in range(Nf):
        Bi=Bi+np.outer(Bfull[cf],Bfull[cf])
    # federated averaging/spatial constraint comes in as alpha x I
    Bi=np.linalg.pinv(Bi+alpha*np.eye(Ne))
    # select correct freq. component
    Bf=np.kron(Bfull[fidx],np.eye(2*N))
    P=np.matmul(np.kron(Bi,np.eye(2*N)),Bf.transpose())
    F=np.eye(2*N)-rho*np.matmul(Bf,P)
    return F,P
# return H=K x 4Nx4N tensor
def Hessianres(R,C,J,N):
    """Accumulate the Hessian of the residual w.r.t. the solutions.

    Returns H: K x 4N x 4N complex tensor, averaged over the B*T
    baseline-timeslot samples.
    """
    # B: baselines=N(N-1)/2
    # T: timeslots for this interval
    # R: 2*B*Tx2 - residual for this interval
    # C: KxB*Tx4 - coherencies for this interval
    # J: Kx2Nx2 - valid solution for this interval
    # instead of using input V, use residual R to calculate the Hessian
    # Hess is 4Nx4N matrix, build by accumulating 4x4 kron products into a NxN block matrix
    # and averaging over T time slots
    # notation:
    # Y \kron A_p^T ( Z ) A_q means p-th row, q-th col block is replaced by Y \kron Z (4x4) matrix
    # res_pq= V_pq - J_p C_pq J_q^H
    # then, p,q baseline contribution
    # -C^\star kron A_p^T res A_q - C^T kron A_q^T res^H A_p
    # + (C J_q^H J_q C^H)^T kron A_p^T A_p + (C^H J_p^H J_p C)^T kron A_q^T A_q
    B=N*(N-1)//2
    T=R.shape[0]//(2*B)
    K=C.shape[0]
    H=np.zeros((K,4*N,4*N),dtype=np.csingle)
    for k in range(K):
        # ck: running baseline-timeslot index into R and C
        ck=0
        for cn in range(T):
            for p in range(N-1):
                for q in range(p+1,N):
                    Res=R[2*ck:2*(ck+1),:]
                    # coherency of this baseline as a 2x2 matrix (column-major)
                    Ci=C[k,ck,:].reshape((2,2),order='F')
                    # off-diagonal (p,q) blocks: -C^* kron res, and its adjoint
                    Imp=np.kron(-np.conj(Ci),Res)
                    H[k,4*p:4*(p+1),4*q:4*(q+1)] +=Imp
                    H[k,4*q:4*(q+1),4*p:4*(p+1)] +=np.conj(Imp.transpose())
                    # diagonal (p,p) block: (C J_q^H J_q C^H)^T kron I
                    Res1=np.matmul(Ci,np.conj(J[k,2*q:2*(q+1),:].transpose()))
                    Res=np.matmul(Res1,np.conj(Res1.transpose()))
                    H[k,4*p:4*(p+1),4*p:4*(p+1)] +=np.kron(Res.transpose(),np.eye(2))
                    # diagonal (q,q) block: (C^H J_p^H J_p C)^T kron I
                    Res1=np.matmul(J[k,2*p:2*(p+1),:],Ci)
                    Res=np.matmul(np.conj(Res1.transpose()),Res1)
                    H[k,4*q:4*(q+1),4*q:4*(q+1)] +=np.kron(Res.transpose(),np.eye(2))
                    ck+=1
    del Res,Res1,Imp,Ci
    return H/(B*T)
# return dJ=K x 4N x 4B tensor
def Dsolutions(C,J,N,Dgrad,r):
    """Derivative of the solutions w.r.t. the r-th real data component.

    Solves Dgrad[k] * dJ[k] = AdV for every direction k, where AdV stacks
    the per-baseline right-hand sides. Returns dJ: K x 4N x B complex
    tensor (B baselines; values accumulated over timeslots).
    """
    # B: baselines=N(N-1)/2
    # T: timeslots for this interval
    # BT: BxT
    # C: KxB*Tx4 - coherencies for this interval
    # J: Kx2Nx2 - valid solution for this interval
    # evaluate vec(\partial J/ \partial x_pp,qq) for all possible pp,qq (baselines)
    # Dgrad is K x 4Nx4N tensor, build by accumulating 4x4 kron products into a NxN block matrix
    # r: 0,1,2...7 : determine which element of 2x2 matrix is 1
    # return dJ : K x 4N x N(N-1)/2 matrix (note: for each baseline, the values are averaged over timeslots)
    B=N*(N-1)//2
    T=C.shape[1]//B
    K=C.shape[0]
    dJ=np.zeros((K,4*N,B),dtype=np.csingle)
    # setup 4x1 vector, one goes to depending on r
    rr=np.zeros(8,dtype=np.float32)
    rr[r]=1.
    # 8 reals -> 4 complex entries; exactly one of them is 1 or 1j
    dVpq=rr[0:8:2]+1j*rr[1:8:2]
    for k in range(K):
        # ck will fill each column
        ck=0
        # setup 4N x B matrix (fixme: use a sparse matrix)
        AdV=np.zeros((4*N,B),dtype=np.csingle)
        for cn in range(T):
            for p in range(N-1):
                for q in range(p+1,N):
                    # fill up column ck of AdV
                    # left hand side (J_q C^H)^T , right hand side I
                    # kron product will fill only rows 4*(p-1)+1:4*p
                    Ci=C[k,ck,:].reshape((2,2),order='F')
                    lhs=np.matmul(J[k,2*q:2*(q+1),:],np.conj(Ci.transpose()))
                    fillvex=np.matmul(np.kron(lhs.transpose(),np.eye(2)),dVpq)
                    # ck%B folds the timeslots of the same baseline into one column
                    AdV[4*p:4*(p+1),ck%B] +=fillvex
                    ck +=1
        dJ[k]=np.linalg.solve(Dgrad[k],AdV)
    return dJ
# return dJ= 8 x K x 4N x 4B tensor (for all possible r values)
def Dsolutions_r(C,J,N,Dgrad):
# B: baselines=N(N-1)/2
# T: timeslots for this interval
# BT: BxT
# C: KxB*Tx4 - coherencies for this interval
# J: Kx2Nx2 - valid solution for this interval
# evaluate vec(\partial J/ \partial x_pp,qq) for all possible pp,qq (baselines)
# Dgrad is K | |
<reponame>macrocosme/frbpoppy<gh_stars>10-100
"""Class to generate a cosmic population of FRBs."""
import numpy as np
from frbpoppy.misc import pprint
from frbpoppy.number_density import NumberDensity
from frbpoppy.population import Population
import frbpoppy.direction_dists as did
import frbpoppy.dm_dists as dmd
import frbpoppy.time_dists as td
import frbpoppy.w_dists as wd
import frbpoppy.si_dists as sid
import frbpoppy.lum_dists as ld
import frbpoppy.galacticops as go
import frbpoppy.precalc as pc
class CosmicPopulation(Population):
"""Generate a cosmic FRB population."""
    def __init__(self,
                 n_srcs=1e4,
                 n_days=1,
                 name='cosmic',
                 repeaters=False,
                 generate=False):
        """Generate a population of FRBs.

        Args:
            n_srcs (int): Number of FRB sources to generate.
            n_days (float): Number of days over which FRBs are to be generated.
            name (str): Population name.
            repeaters (bool): Whether to generate a repeater population.
            generate (bool): Whether to create a population.

        Returns:
            Population: Population of FRBs
        """
        # Set up general population arguments
        Population.__init__(self)
        # Give population a name (falls back to 'cosmic' for empty name)
        self.name = 'cosmic'
        if name:
            self.name = name
        # Set population arguments
        self.n_srcs = int(n_srcs)
        self.n_days = n_days
        self.repeaters = repeaters
        self.shape = (self.n_srcs,)
        # If wanting repeaters, a time model is needed as well
        if self.repeaters:
            self.set_time()
        # Set up default models; each set_* only stores a draw function,
        # nothing is sampled until generate() runs
        self.set_emission_range()
        self.set_dist()
        self.set_direction()
        self.set_lum()
        self.set_si()
        self.set_w()
        self.set_dm_mw()
        self.set_dm_igm()
        self.set_dm_host()
        self.set_dm()
        # Whether to start generating a Cosmic Population
        if generate:
            self.generate()
    def gen_index(self):
        """Generate indices for over each FRB source."""
        # Simple sequential identifier 0..n_srcs-1 per source
        self.frbs.index = np.arange(self.n_srcs)
def set_emission_range(self, low=100e6, high=10e9):
"""Set the emission range [Hz].
The frequency range between which FRB sources should emit the given
bolometric luminosity.
Args:
f_min (float): Lowest source frequency [Hz].
f_max (float): Highest source frequency [Hz].
"""
self.f_min = low
self.f_max = high
    def gen_precalc(self):
        """Check whether pre-calculations have been run."""
        # Instantiating the distance table for this population's cosmological
        # parameters triggers its computation if not done before
        pc.DistanceTable(H_0=self.H_0, W_m=self.W_m, W_v=self.W_v)
    def set_dist(self, model='vol_co', **kwargs):
        """Set the number density model for calculating distances.

        Args:
            model (str): Number density model to use. Choice from
                ('vol_co', 'sfr', 'smd'). A callable may also be given;
                it is stored and later invoked with **kwargs.
            z_max (float): Maximum redshift.
            H_0 (float): Hubble constant.
            W_m (float): Density parameter Ω_m.
            W_v (float): Cosmological constant Ω_Λ.
            alpha (float): Desired log N log S slope for a perfect,
                non-cosmological population.
        """
        # Option to use your own model
        if not isinstance(model, str):
            self.dist_func = lambda: model(**kwargs)
            return
        # I sometimes use 'constant' instead of 'vol_co'
        if model == 'constant':
            model = 'vol_co'
        # Check whether recognised number density model
        if model not in ['vol_co', 'sfr', 'smd']:
            raise ValueError('set_dist input not recognised')
        # Set number density model
        # Don't fear the lambda, merely delays executing the function
        self.dist_func = lambda: NumberDensity(model=model, **kwargs)
    def gen_dist(self):
        """Generate source distances."""
        n_model = self.dist_func()
        # expose the model's maximum comoving volume on the population
        self.vol_co_max = n_model.vol_co_max
        # draw a redshift and comoving distance per source
        self.frbs.z, self.frbs.dist_co = n_model.draw(self.n_srcs)
def gen_gal_coords(self):
"""Generate galactic coordinates."""
frbs = self.frbs
# Get the proper distance
dist_pr = frbs.dist_co/(1+frbs.z)
# Convert into galactic coordinates
frbs.gx, frbs.gy, frbs.gz = go.lb_to_xyz(frbs.gl, frbs.gb, dist_pr)
    def set_direction(self, model='uniform', **kwargs):
        """Set the model for generating the directions of the frb sources.

        Args:
            model (str): Choice from ('uniform'). A callable may also be
                given; it is stored and later invoked with **kwargs.
            if model == 'uniform':
                min_ra (float): Minimum right ascenion [frac deg].
                max_ra (float): Maximum right ascenion [frac deg].
                min_dec (float): Minimum declination [frac deg].
                max_dec (float): Maximum declination [frac deg].
        """
        # Use your own function
        if not isinstance(model, str):
            self.direction_func = lambda: model(**kwargs)
            return
        # Or use a uniform distribution
        if model == 'uniform':
            self.direction_func = lambda: did.uniform(n_srcs=self.n_srcs,
                                                      **kwargs)
        else:
            raise ValueError('set_direction input not recognised')
    def gen_direction(self):
        """Generate the direction of frbs."""
        frbs = self.frbs
        # Calculate right ascenion and declination
        frbs.ra, frbs.dec = self.direction_func()
        # Convert to galactic lat/long coordinates (frac=True: fractional degrees)
        frbs.gl, frbs.gb = go.radec_to_lb(frbs.ra, frbs.dec, frac=True)
    def set_dm_mw(self, model='ne2001', **kwargs):
        """Set the model for the Milky Way dispersion measure.

        Args:
            model (str): Option of 'ne2001'. A callable may also be given;
                it is stored and later invoked with **kwargs.
        """
        if not isinstance(model, str):
            self.dm_mw_func = lambda: model(**kwargs)
            return
        # Distribution from which to draw dm_mw: table lookup by galactic
        # lat/long (so gl/gb must exist by the time the lambda runs)
        if model == 'ne2001':
            self.dm_mw_func = lambda: pc.NE2001Table().lookup(self.frbs.gl,
                                                              self.frbs.gb)
        else:
            raise ValueError('set_dm_mw input not recognised')
    def gen_dm_mw(self):
        """Generate Milky Way dispersion measure."""
        # Delegates to whichever model set_dm_mw() stored
        self.frbs.dm_mw = self.dm_mw_func()
    def set_dm_igm(self, model='ioka', **kwargs):
        """Set intergalactic dispersion measure model.

        Args:
            model (str): Option of 'ioka'. A callable may also be given;
                it is stored and later invoked with **kwargs.
            if model == 'ioka':
                slope (float): Slope of the DM-z relationship.
                std (float): Spread around the DM-z relationship.
                spread_dist (str): 'normal' or 'lognormal'.
        """
        # Possibility to use your own function
        if not isinstance(model, str):
            self.dm_igm_func = lambda: model(**kwargs)
            return
        # Distribution from which to draw intergalactic dm (depends on the
        # redshifts, so gen_dist must have run when the lambda executes)
        if model == 'ioka':
            self.dm_igm_func = lambda: dmd.ioka(z=self.frbs.z, **kwargs)
        else:
            raise ValueError('set_dm_igm input not recognised')
    def gen_dm_igm(self):
        """Generate intergalactic dispersion measure."""
        # Delegates to whichever model set_dm_igm() stored
        self.frbs.dm_igm = self.dm_igm_func()
    def set_dm_host(self, model='gauss', **kwargs):
        """Set host galaxy dispersion measure.

        Args:
            model (str): Options from ('gauss', 'lognormal', 'constant').
                A callable may also be given; it is stored and later
                invoked with **kwargs.
            if model in ('gauss', 'lognormal'):
                mean (float): Mean DM [pc/cm^3].
                std (float): Standard deviation DM [pc/cm^3].
            if model == 'constant':
                value (float): Value to adopt [pc/cm^3].
        """
        if not isinstance(model, str):
            self.dm_host_func = lambda: model(**kwargs)
            return
        # Distribution from which to draw host dispersion measure.
        # startswith() also accepts e.g. 'gaussian'
        if model.startswith('gauss'):
            self.dm_host_func = lambda: dmd.gauss(z=self.frbs.z,
                                                  n_srcs=self.n_srcs,
                                                  **kwargs)
        elif model == 'lognormal':
            self.dm_host_func = lambda: dmd.lognormal(z=self.frbs.z,
                                                      n_srcs=self.n_srcs,
                                                      **kwargs)
        elif model == 'constant':
            self.dm_host_func = lambda: dmd.constant(n_srcs=self.n_srcs,
                                                     **kwargs)
        else:
            raise ValueError('set_dm_host input not recognised')
    def gen_dm_host(self):
        """Generate host dispersion measure."""
        # Delegates to whichever model set_dm_host() stored
        self.frbs.dm_host = self.dm_host_func()
    def set_dm(self, mw=True, igm=True, host=True):
        """Set total dispersion measure.

        Args:
            mw (bool): Whether to include a Milky Way component
            igm (bool): Whether to include an IGM component
            host (bool): Whether to include a host galaxy component
        """
        # Which components to include
        self.dm_components = []
        if mw:
            self.dm_components.append(self.gen_dm_mw)
        if igm:
            self.dm_components.append(self.gen_dm_igm)
        if host:
            self.dm_components.append(self.gen_dm_host)
        # Save those components to execute at a later stage
        def run_dm():
            [c() for c in self.dm_components]
            # NOTE(review): the sum always uses all three attributes even when
            # a component was excluded above and its gen_* never ran -- verify
            # that frbs initialises dm_mw/dm_igm/dm_host to a summable default
            # in that case.
            return self.frbs.dm_mw + self.frbs.dm_igm + self.frbs.dm_host
        self.dm_func = run_dm
    def gen_dm(self):
        """Generate total dispersion measure."""
        # Runs the selected components and sums them (see set_dm)
        self.frbs.dm = self.dm_func()
    def set_w(self, model='uniform', per_source='same', **kwargs):
        """Set intrinsic pulse widths model [ms].

        Args:
            model (str): Any distribution function available in
                frbpoppy.w_dists, e.g. ('uniform', 'lognormal', 'constant').
            per_source (str): Model for a single source burst
                distribution. Options from 'same' or 'different'
            If model == 'constant':
                value (float): Pulse width [ms].
            If model == 'uniform':
                low (float): Minimum pulse width [ms].
                high (float): Maximum pulse width [ms].
            If model == 'lognormal':
                mean (float): Mean pulse width [ms].
                std (float): Standard deviation pulse width [ms].
        """
        # Each burst from the same source: same or different widths?
        if per_source == 'same':
            self.w_shape = lambda: self.n_srcs
        elif per_source == 'different':
            self.w_shape = lambda: self.shape
        # Distribution from which to draw pulse widths
        # Find available distributions to draw from
        funcs = [d for d in dir(wd) if hasattr(getattr(wd, d), '__call__')]
        # calc_w_arr is a helper in w_dists, not a distribution
        funcs.remove('calc_w_arr')
        # Set function
        if model in funcs:
            func = getattr(wd, model)
            # If you're getting fancy with combined distributions
            # See examples/adapting_population_parameters.py
            self._transpose_w = False
            for kw_value in kwargs.values():
                if isinstance(kw_value, (list, np.ndarray)):
                    # array-valued kwargs: draw transposed, undo in gen_w()
                    self.w_shape = lambda: self.shape[::-1]
                    self._transpose_w = True
            self.w_func = lambda x: func(shape=x, z=self.frbs.z, **kwargs)
        else:
            raise ValueError('set_w input model not recognised')
def gen_w(self):
    """Draw intrinsic and observed pulse widths [ms]."""
    w_int, w_arr = self.w_func(self.w_shape())
    # Combined-distribution inputs are drawn transposed; undo that here.
    if self._transpose_w:
        w_int, w_arr = w_int.T, w_arr.T
    self.frbs.w_int = w_int
    self.frbs.w_arr = w_arr
def set_si(self, model='gauss', per_source='same', **kwargs):
    """Set spectral index model.

    Args:
        model (str): Options from ('gauss')
        per_source (str): Model for a single source burst
            distribution. Options from ('same', 'different')

        If model == 'constant':
            value (float): Default spectral index.
        If model == 'gauss':
            mean (float): Mean spectral index
            std (float): Standard deviation spectral index

    Raises:
        ValueError: If model or per_source is not recognised.
    """
    # Each burst from the same source: same or different si?
    if per_source == 'same':
        self.si_shape = lambda: self.n_srcs
    elif per_source == 'different':
        self.si_shape = lambda: self.shape
    else:
        # Previously an unknown option silently left si_shape unset,
        # surfacing later as a confusing AttributeError in gen_si.
        raise ValueError('set_si input per_source not recognised')
    # Find available distributions to draw from.
    funcs = [d for d in dir(sid) if callable(getattr(sid, d))]
    if model not in funcs:
        raise ValueError('set_si input not recognised')
    func = getattr(sid, model)
    # If you're getting fancy with combined distributions (a sequence
    # given for any keyword), draw with a transposed shape and flip
    # back in gen_si.
    self._transpose_si = False
    for kw_value in kwargs.values():
        if isinstance(kw_value, (list, np.ndarray)):
            self.si_shape = lambda: self.shape[::-1]
            self._transpose_si = True
    # Distribution from which to draw spectral indices.
    self.si_func = lambda x: func(shape=x, **kwargs)
def gen_si(self):
    """Draw spectral indices from the configured model."""
    si = self.si_func(self.si_shape())
    # Combined-distribution inputs are drawn transposed; undo that here.
    if self._transpose_si:
        si = si.T
    self.frbs.si = si
def set_lum(self, model='powerlaw', per_source='same', **kwargs):
"""Set luminosity function [ergs/s].
Args:
model (str): Options from ('powerlaw')
per_source (str): Model for a single source burst
distribution. Options from ('same', 'different')
If | |
Bond(None, None, order=order1)
bond2 = Bond(None, None, order=order2)
if order1 == order2:
self.assertTrue(bond1.isSpecificCaseOf(bond2))
else:
self.assertFalse(bond1.isSpecificCaseOf(bond2))
def testCopy(self):
    """
    Test the Bond.copy() method.
    """
    # A copy must carry over the bond order.
    duplicate = self.bond.copy()
    self.assertEqual(self.bond.order, duplicate.order)
def testPickle(self):
    """
    Test that a Bond object can be successfully pickled and
    unpickled with no loss of information.
    """
    import cPickle
    # Round-trip through the pickle protocol and compare the order.
    restored = cPickle.loads(cPickle.dumps(self.bond))
    self.assertEqual(self.bond.order, restored.order)
def testUpdateLonePairs(self):
    """
    Test that updateLonePairs works as expected.

    Builds a set of molecules covering different electronic
    configurations (mostly nitrogen atom types, plus singlet carbene
    and a carbonyl) and checks the lone pair count assigned to
    selected atoms.
    """
    # Charged N#N chain with a doubly negative terminal nitrogen.
    mol_N1sc_N5t = Molecule().fromAdjacencyList("""
1 N u0 p0 c+1 {2,T} {4,S}
2 N u0 p0 c+1 {1,T} {3,S}
3 N u0 p3 c-2 {2,S}
4 H u0 p0 c0 {1,S}""")
    # Singlet NH with two lone pairs.
    mol_N1s = Molecule().fromAdjacencyList("""
1 N u0 p2 c0 {2,S}
2 H u0 p0 c0 {1,S}""")
    # Triplet NH biradical.
    mol_N3s = Molecule().fromAdjacencyList("""
multiplicity 3
1 N u2 p1 c0 {2,S}
2 H u0 p0 c0 {1,S}""")
    # Pyridine: aromatic-type nitrogen in a six-membered ring.
    mol_N3b = Molecule().fromAdjacencyList("""
1 N u0 p1 c0 {2,D} {6,S}
2 C u0 p0 c0 {1,D} {3,S} {7,S}
3 C u0 p0 c0 {2,S} {4,D} {8,S}
4 C u0 p0 c0 {3,D} {5,S} {9,S}
5 C u0 p0 c0 {4,S} {6,D} {10,S}
6 C u0 p0 c0 {1,S} {5,D} {11,S}
7 H u0 p0 c0 {2,S}
8 H u0 p0 c0 {3,S}
9 H u0 p0 c0 {4,S}
10 H u0 p0 c0 {5,S}
11 H u0 p0 c0 {6,S}""")
    # Charge-separated H2N-O radical.
    mol_N5s = Molecule().fromAdjacencyList("""
multiplicity 2
1 N u1 p0 c+1 {2,S} {3,S} {4,S}
2 H u0 p0 c0 {1,S}
3 H u0 p0 c0 {1,S}
4 O u0 p3 c-1 {1,S}""")
    # Nitric acid style N with one double-bonded O, one OH, one O(-).
    mol_N5d = Molecule().fromAdjacencyList("""
1 N u0 p0 c+1 {2,D} {3,S} {4,S}
2 O u0 p2 c0 {1,D}
3 O u0 p2 c0 {1,S} {5,S}
4 O u0 p3 c-1 {1,S}
5 H u0 p0 c0 {3,S}""")
    # N2O-like cumulated double bonds.
    mol_N5dd = Molecule().fromAdjacencyList("""
1 N u0 p2 c-1 {2,D}
2 N u0 p0 c+1 {1,D} {3,D}
3 O u0 p2 c0 {2,D}""")
    # Singlet methylene carbene.
    mol_CH2_S = Molecule().fromAdjacencyList("""
1 C u0 p1 c0 {2,S} {3,S}
2 H u0 p0 c0 {1,S}
3 H u0 p0 c0 {1,S}""")
    # Formaldehyde.
    mol_carbonyl = Molecule().fromAdjacencyList("""
1 O u0 p2 c0 {2,D}
2 C u0 p0 c0 {1,D} {3,S} {4,S}
3 H u0 p0 c0 {2,S}
4 H u0 p0 c0 {2,S}""")
    # Recompute the lone pairs on every molecule.
    mol_N1sc_N5t.updateLonePairs()
    mol_N1s.updateLonePairs()
    mol_N3s.updateLonePairs()
    mol_N3b.updateLonePairs()
    mol_N5s.updateLonePairs()
    mol_N5d.updateLonePairs()
    mol_N5dd.updateLonePairs()
    mol_CH2_S.updateLonePairs()
    mol_carbonyl.updateLonePairs()
    # Each atom should end up with the lone pair count from its
    # adjacency list above.
    self.assertEqual(mol_N1sc_N5t.atoms[0].lonePairs, 0)
    self.assertEqual(mol_N1sc_N5t.atoms[2].lonePairs, 3)
    self.assertEqual(mol_N1s.atoms[0].lonePairs, 2)
    self.assertEqual(mol_N3s.atoms[0].lonePairs, 1)
    self.assertEqual(mol_N3b.atoms[0].lonePairs, 1)
    self.assertEqual(mol_N5s.atoms[0].lonePairs, 0)
    self.assertEqual(mol_N5s.atoms[3].lonePairs, 3)
    self.assertEqual(mol_N5d.atoms[0].lonePairs, 0)
    self.assertEqual(mol_N5d.atoms[1].lonePairs, 2)
    self.assertEqual(mol_N5d.atoms[2].lonePairs, 2)
    self.assertEqual(mol_N5d.atoms[3].lonePairs, 3)
    self.assertEqual(mol_N5dd.atoms[0].lonePairs, 2)
    self.assertEqual(mol_N5dd.atoms[1].lonePairs, 0)
    self.assertEqual(mol_N5dd.atoms[2].lonePairs, 2)
    self.assertEqual(mol_CH2_S.atoms[0].lonePairs, 1)
    self.assertEqual(mol_carbonyl.atoms[0].lonePairs, 2)
    self.assertEqual(mol_carbonyl.atoms[1].lonePairs, 0)
################################################################################
class TestMolecule(unittest.TestCase):
"""
Contains unit tests of the Molecule class.
"""
def setUp(self):
    """Create the two test molecules shared by the tests below."""
    # Nitromethyl radical (H2C.-NO2) with every atom given explicitly.
    self.adjlist_1 = """
1 *1 C u1 p0 c0 {2,S} {3,S} {4,S}
2 H u0 p0 c0 {1,S}
3 H u0 p0 c0 {1,S}
4 *2 N u0 p0 c+1 {1,S} {5,S} {6,D}
5 O u0 p3 c-1 {4,S}
6 O u0 p2 c0 {4,D}
"""
    self.molecule = [Molecule().fromAdjacencyList(self.adjlist_1)]
    # The same radical with hydrogens (and some charges) omitted; read
    # with saturateH=True so the missing hydrogens are added.
    self.adjlist_2 = """
1 *1 C u1 p0 {2,S}
2 *2 N u0 p0 c+1 {1,S} {3,S} {4,D}
3 O u0 p3 c-1 {2,S}
4 O u0 p2 {2,D}
"""
    self.molecule.append(Molecule().fromAdjacencyList(self.adjlist_2,saturateH=True))
def testClearLabeledAtoms(self):
    """
    Test the Molecule.clearLabeledAtoms() method.
    """
    molecule = self.molecule[0]
    molecule.clearLabeledAtoms()
    # Every atom label should have been reset to the empty string.
    for atom in molecule.atoms:
        self.assertEqual(atom.label, '')
def testContainsLabeledAtom(self):
    """
    Test the Molecule.containsLabeledAtom() method.
    """
    molecule = self.molecule[0]
    # Labels that are present on atoms must be found...
    for atom in molecule.atoms:
        if atom.label != '':
            self.assertTrue(molecule.containsLabeledAtom(atom.label))
    # ...while labels that were never assigned must not be.
    for label in ['*3', '*4', '*5', '*6']:
        self.assertFalse(molecule.containsLabeledAtom(label))
def testGetLabeledAtom(self):
    """
    Test the Molecule.getLabeledAtom() method.
    """
    molecule = self.molecule[0]
    # Each labeled atom should be retrievable by its own label.
    for atom in molecule.atoms:
        if atom.label != '':
            self.assertEqual(atom, molecule.getLabeledAtom(atom.label))
    # An unknown label must raise ValueError. assertRaises replaces the
    # older try/self.fail/except idiom.
    self.assertRaises(ValueError, molecule.getLabeledAtom, '*3')
def testGetLabeledAtoms(self):
    """
    Test the Molecule.getLabeledAtoms() method.
    """
    # Every labeled atom (and only those) must appear in the mapping.
    labeled = self.molecule[0].getLabeledAtoms()
    for atom in self.molecule[0].atoms:
        if atom.label != '':
            self.assertTrue(atom.label in labeled)
            self.assertTrue(atom in labeled.values())
        else:
            self.assertFalse(atom.label in labeled)
            self.assertFalse(atom in labeled.values())
    # When several atoms share one label, the mapping should collect
    # them all under that label (butane with duplicated '*' and '*1').
    multipleLabelMolecule = Molecule().fromAdjacencyList("""
1 * C u0 p0 c0 {2,S} {3,S} {5,S} {6,S}
2 * C u0 p0 c0 {1,S} {4,S} {7,S} {8,S}
3 * C u0 p0 c0 {1,S} {9,S} {10,S} {11,S}
4 * C u0 p0 c0 {2,S} {12,S} {13,S} {14,S}
5 H u0 p0 c0 {1,S}
6 H u0 p0 c0 {1,S}
7 *1 H u0 p0 c0 {2,S}
8 *1 H u0 p0 c0 {2,S}
9 H u0 p0 c0 {3,S}
10 *1 H u0 p0 c0 {3,S}
11 H u0 p0 c0 {3,S}
12 H u0 p0 c0 {4,S}
13 H u0 p0 c0 {4,S}
14 H u0 p0 c0 {4,S}
""")
    labeled = multipleLabelMolecule.getLabeledAtoms()
    self.assertTrue('*' in labeled)
    self.assertTrue('*1' in labeled)
    # Four carbons labeled '*', three hydrogens labeled '*1'.
    self.assertEqual(len(labeled['*']),4)
    self.assertEqual(len(labeled['*1']),3)
def testGetFormula(self):
    """
    Test the Molecule.getFormula() method.
    """
    # Both adjacency lists from setUp describe the same CH2NO2 radical.
    self.assertEqual(self.molecule[0].getFormula(), 'CH2NO2')
    self.assertEqual(self.molecule[1].getFormula(), 'CH2NO2')
def testRadicalCount(self):
    """
    Test the Molecule.getRadicalCount() method.
    """
    # The count must equal the sum of radical electrons over all atoms.
    for molecule in self.molecule:
        expected = sum([atom.radicalElectrons for atom in molecule.atoms])
        self.assertEqual(molecule.getRadicalCount(), expected)
def testGetMolecularWeight(self):
    """
    Test the Molecule.getMolecularWeight() method.
    """
    # Both isomers are CH2NO2, ~60.03 g/mol; the factor of 1000 suggests
    # the weight is stored in kg/mol.
    for molecule in self.molecule:
        self.assertAlmostEqual(molecule.getMolecularWeight() * 1000, 60.03, 2)
def _checkNitromethylRadical(self, molecule, indices):
    """
    Assert that `molecule` is the nitromethyl radical described by the
    adjacency lists in setUp, with the C, N, O(-) and O atoms found at
    the given `indices` into molecule.atoms.
    """
    self.assertEqual(molecule.multiplicity, 2)
    atom1 = molecule.atoms[indices[0]]
    atom2 = molecule.atoms[indices[1]]
    atom3 = molecule.atoms[indices[2]]
    atom4 = molecule.atoms[indices[3]]
    # Connectivity: N bonded to C and both oxygens; C not bonded to O.
    self.assertTrue(molecule.hasBond(atom2, atom1))
    self.assertTrue(molecule.hasBond(atom2, atom3))
    self.assertTrue(molecule.hasBond(atom2, atom4))
    self.assertFalse(molecule.hasBond(atom1, atom3))
    self.assertFalse(molecule.hasBond(atom1, atom4))
    bond21 = atom2.bonds[atom1]
    bond23 = atom2.bonds[atom3]
    bond24 = atom2.bonds[atom4]
    # Carbon radical site, labeled '*1'.
    self.assertEqual(atom1.label, '*1')
    self.assertEqual(atom1.element.symbol, 'C')
    self.assertEqual(atom1.radicalElectrons, 1)
    self.assertEqual(atom1.charge, 0)
    # Positively charged nitrogen, labeled '*2'.
    self.assertEqual(atom2.label, '*2')
    self.assertEqual(atom2.element.symbol, 'N')
    self.assertEqual(atom2.radicalElectrons, 0)
    self.assertEqual(atom2.charge, 1)
    # Negatively charged, singly bonded oxygen.
    self.assertEqual(atom3.label, '')
    self.assertEqual(atom3.element.symbol, 'O')
    self.assertEqual(atom3.radicalElectrons, 0)
    self.assertEqual(atom3.charge, -1)
    # Neutral, doubly bonded oxygen.
    self.assertEqual(atom4.label, '')
    self.assertEqual(atom4.element.symbol, 'O')
    self.assertEqual(atom4.radicalElectrons, 0)
    self.assertEqual(atom4.charge, 0)
    self.assertTrue(bond21.isSingle())
    self.assertTrue(bond23.isSingle())
    self.assertTrue(bond24.isDouble())

def testFromAdjacencyList(self):
    """
    Test the Molecule.fromAdjacencyList() method.

    Both test molecules describe the same radical; only the atom
    ordering differs, so the duplicated assertions are factored into
    _checkNitromethylRadical.
    """
    # molecule 1: hydrogens listed explicitly, heavy atoms at 0, 3, 4, 5.
    self._checkNitromethylRadical(self.molecule[0], (0, 3, 4, 5))
    # molecule 2: read with saturateH=True, heavy atoms come first.
    self._checkNitromethylRadical(self.molecule[1], (0, 1, 2, 3))
def testToAdjacencyList(self):
    """
    Test the Molecule.toAdjacencyList() method.
    """
    # Round-trip: writing an adjacency list and reading it back must
    # yield an isomorphic molecule.
    adjlist_1 = self.molecule[0].toAdjacencyList(removeH=False)
    newMolecule = Molecule().fromAdjacencyList(adjlist_1)
    self.assertTrue(self.molecule[0].isIsomorphic(newMolecule))
    # Exact string equality is deliberately not required:
    #self.assertEqual(adjlist_1.strip(), self.adjlist_1.strip())
# def testFromOldAdjacencyList(self):
# """
# Test we can read things with implicit hydrogens.
# """
# adjList = """
# 1 O 0
# """ # should be Water
# molecule = Molecule().fromAdjacencyList(adjList, saturateH=True) # only works with saturateH=True
# self.assertEqual(molecule.getFormula(),'H2O')
def testIsomorphism(self):
    """
    Check the graph isomorphism functions.
    """
    # The same radical written with two different SMILES orderings must
    # be recognised as isomorphic in both directions.
    mol1 = Molecule().fromSMILES('C=CC=C[CH]C')
    mol2 = Molecule().fromSMILES('C[CH]C=CC=C')
    self.assertTrue(mol1.isIsomorphic(mol2))
    self.assertTrue(mol2.isIsomorphic(mol1))
def testSubgraphIsomorphism(self):
    """
    Check the graph isomorphism functions.
    """
    molecule = Molecule().fromSMILES('C=CC=C[CH]C')
    group = Group().fromAdjacencyList("""
1 Cd u0 p0 c0 {2,D}
2 Cd u0 p0 c0 {1,D}
""")
    self.assertTrue(molecule.isSubgraphIsomorphic(group))
    # Two C=C units, each matchable in two directions -> four mappings.
    mappings = molecule.findSubgraphIsomorphisms(group)
    self.assertEqual(len(mappings), 4,
                     "len(mappings) = %d, should be = 4" % (len(mappings)))
    # Renamed the loop variable from `map`, which shadowed the builtin.
    for mapping in mappings:
        self.assertEqual(len(mapping), min(len(molecule.atoms), len(group.atoms)))
        for key, value in mapping.iteritems():
            self.assertTrue(key in molecule.atoms)
            self.assertTrue(value in group.atoms)
def testSubgraphIsomorphismAgain(self):
    """
    Check subgraph isomorphism with an initial atom mapping: matching a
    terminal CH2= group pattern against 1,3-hexadiene, anchored on the
    labeled carbon.
    """
    molecule = Molecule()
    molecule.fromAdjacencyList("""
1 * C u0 p0 c0 {2,D} {7,S} {8,S}
2 C u0 p0 c0 {1,D} {3,S} {9,S}
3 C u0 p0 c0 {2,S} {4,D} {10,S}
4 C u0 p0 c0 {3,D} {5,S} {11,S}
5 C u0 p0 c0 {4,S} {6,S} {12,S} {13,S}
6 C u0 p0 c0 {5,S} {14,S} {15,S} {16,S}
7 H u0 p0 c0 {1,S}
8 H u0 p0 c0 {1,S}
9 H u0 p0 c0 {2,S}
10 H u0 p0 c0 {3,S}
11 H u0 p0 c0 {4,S}
12 H u0 p0 c0 {5,S}
13 H u0 p0 c0 {6,S}
14 H u0 p0 c0 {6,S}
15 H u0 p0 c0 {6,S}
16 H u0 p0 c0 {6,S}
""")
    group = Group()
    group.fromAdjacencyList("""
1 * C u0 p0 c0 {2,D} {3,S} {4,S}
2 C u0 p0 c0 {1,D}
3 H u0 p0 c0 {1,S}
4 H u0 p0 c0 {1,S}
""")
    # Both structures carry exactly one '*' label; pair them up as the
    # initial mapping.  (.values()[0] relies on Python 2 dict lists.)
    labeled1 = molecule.getLabeledAtoms().values()[0]
    labeled2 = group.getLabeledAtoms().values()[0]
    initialMap = {labeled1: labeled2}
    self.assertTrue(molecule.isSubgraphIsomorphic(group, initialMap))
    initialMap = {labeled1: labeled2}
    # Two mappings: the two hydrogens on the anchored carbon can swap.
    mapping = molecule.findSubgraphIsomorphisms(group, initialMap)
    self.assertTrue(len(mapping) == 2, "len(mapping) = %d, should be = 2" % (len(mapping)))
    # NOTE(review): `map` shadows the builtin here; consider renaming.
    for map in mapping:
        self.assertTrue(len(map) == min(len(molecule.atoms), len(group.atoms)))
        for key, value in map.iteritems():
            self.assertTrue(key in molecule.atoms)
            self.assertTrue(value in group.atoms)
def testSubgraphIsomorphismManyLabels(self):
molecule = Molecule() # specific case (species)
molecule.fromAdjacencyList("""
1 *1 C u1 p0 c0 {2,S} {3,S} | |
# mmdet/models/anchor_heads/ctx_head.py (from repo mrsempress/mmdetection)
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import normal_init
from mmdet.core import multi_apply, multiclass_nms
from mmdet.core.bbox import bbox_overlaps
from mmdet.core.utils.common import gather_feat, tranpose_and_gather_feat
from mmdet.models.losses import ct_focal_loss, weighted_l1, smooth_l1_loss, giou_loss
from mmdet.models.utils import (
build_norm_layer, gaussian_radius, draw_umich_gaussian, ConvModule, simple_nms,
build_conv_layer, ShortcutConv2d, draw_truncate_gaussian)
from mmdet.ops import ModulatedDeformConvPack, ModulatedDeformConv
from .anchor_head import AnchorHead
from ..registry import HEADS
@HEADS.register_module
class CTXHead(AnchorHead):
    """CenterNet-style detection head run on several feature levels.

    For each level the head predicts a per-class center heatmap
    (``num_classes - 1`` channels), a width/height map (2 channels) and
    a center-offset map (2 channels).  Heatmap peaks are decoded into
    boxes per level and merged across levels, optionally with a
    pairwise cross-level suppression (``fast_nms``) and/or standard
    ``multiclass_nms`` (``trand_nms``).

    Args:
        planes (tuple[int]): Input feature channels per level.
        hm_head_channels (tuple): Hidden conv channels of the heatmap
            branch for each level.
        wh_head_channels (tuple): Hidden conv channels of the
            width/height branch for each level.
        reg_head_channels (tuple): Hidden conv channels of the
            center-offset branch for each level.
        num_classes (int): Number of classes including background.
        use_dla (bool): Stored flag; not referenced inside this head.
        conv_cfg (dict | None): Convolution config passed to ConvModule.
        hm_init_value (float): Initial bias of the final heatmap conv
            (keeps initial heatmap scores low, focal-loss style).
        length_range (tuple): Per-level (min, max) box side lengths used
            by the target generator to assign ground truths to levels.
        down_ratio (tuple[int]): Output stride of each level.
        fast_nms (bool): Enable the pairwise cross-level suppression.
        trand_nms (bool): Run multiclass_nms on the merged detections.
        hm_weight (tuple[float]): Per-level heatmap loss weights.
        wh_weight (tuple[float]): Per-level width/height loss weights.
        off_weight (tuple[float]): Per-level offset loss weights.
    """

    def __init__(self,
                 planes=(128, 64),
                 hm_head_channels=((128, 128), (64, 64)),
                 wh_head_channels=((32, 32), (32, 32)),
                 reg_head_channels=((32, 32), (32, 32)),
                 num_classes=81,
                 use_dla=False,
                 conv_cfg=None,
                 hm_init_value=-2.19,
                 length_range=((64, 512), (1, 64)),
                 down_ratio=(8, 4),
                 fast_nms=True,
                 trand_nms=False,
                 hm_weight=(1., 1.),
                 wh_weight=(0.1, 0.1),
                 off_weight=(1., 1.)):
        # Deliberately skip AnchorHead.__init__ (this head is
        # anchor-free); only plain nn.Module initialisation is wanted.
        super(AnchorHead, self).__init__()
        self.planes = planes
        self.num_classes = num_classes
        self.num_fg = num_classes - 1  # foreground classes only
        self.use_dla = use_dla
        self.conv_cfg = conv_cfg
        self.hm_init_value = hm_init_value
        self.down_ratio = down_ratio
        self.fast_nms = fast_nms
        self.trand_nms = trand_nms
        self.hm_weight = hm_weight
        self.wh_weight = wh_weight
        self.off_weight = off_weight
        # Build the three prediction branches for every level at once.
        hm, wh, reg = multi_apply(
            self._init_branch_layers,
            self.planes,
            hm_head_channels,
            wh_head_channels,
            reg_head_channels
        )
        self.hm = nn.ModuleList(hm)
        self.wh = nn.ModuleList(wh)
        self.reg = nn.ModuleList(reg)
        self._target_generator = CTTargetGenerator(self.down_ratio, length_range, self.num_fg)
        self._loss = CTLoss(self.hm_weight, self.wh_weight, self.off_weight)

    def _init_branch_layers(self, planes, hm_chan, wh_chan, reg_chan):
        """Build the heatmap / wh / offset conv stacks for one level.

        Each branch is a series of 3x3 ConvModules followed by a 1x1
        conv producing num_fg (heatmap) or 2 (wh / offset) channels.

        Returns:
            tuple(nn.Sequential): (hm_layers, wh_layers, reg_layers).
        """
        hm_layers, wh_layers, reg_layers = [], [], []
        inp = planes
        for outp in hm_chan:
            hm_layers.append(
                ConvModule(
                    inp,
                    outp,
                    3,
                    padding=1,
                    conv_cfg=self.conv_cfg))
            inp = outp
        hm_layers.append(nn.Conv2d(inp, self.num_fg, 1))
        inp = planes
        for outp in wh_chan:
            wh_layers.append(
                ConvModule(
                    inp,
                    outp,
                    3,
                    padding=1,
                    conv_cfg=self.conv_cfg))
            inp = outp
        wh_layers.append(nn.Conv2d(inp, 2, 1))
        inp = planes
        for outp in reg_chan:
            reg_layers.append(
                ConvModule(
                    inp,
                    outp,
                    3,
                    padding=1,
                    conv_cfg=self.conv_cfg))
            inp = outp
        reg_layers.append(nn.Conv2d(inp, 2, 1))
        hm_layers = nn.Sequential(*hm_layers)
        wh_layers = nn.Sequential(*wh_layers)
        reg_layers = nn.Sequential(*reg_layers)
        return hm_layers, wh_layers, reg_layers

    def init_weights(self):
        """Initialise the branches; the heatmap bias gets the prior."""
        for hm in self.hm:
            for m in hm.modules():
                if isinstance(m, nn.Conv2d):
                    normal_init(m, std=0.01)
            # Bias the last heatmap conv so initial scores are small.
            hm[-1].bias.data.fill_(self.hm_init_value)
        for wh in self.wh:
            for m in wh.modules():
                if isinstance(m, nn.Conv2d):
                    normal_init(m, std=0.001)
        for reg in self.reg:
            for m in reg.modules():
                if isinstance(m, nn.Conv2d):
                    normal_init(m, std=0.001)

    def forward_single(self, x, hm, wh, reg):
        """Run one level's three branches on its feature map.

        Args:
            x: input feature map of this level.
            hm, wh, reg: this level's branch modules.

        Returns:
            hm: tensor, (batch, num_fg, h, w).
            wh: tensor, (batch, 2, h, w).
            reg: tensor, (batch, 2, h, w).
        """
        f_hm = hm(x)
        f_wh = wh(x)
        f_reg = reg(x)
        return f_hm, f_wh, f_reg

    def forward(self, feats):
        """Apply the head to every feature level.

        Args:
            feats: list(tensor) from the neck.

        Returns:
            hms, whs, regs: per-level lists of predictions.
        """
        # feats is reversed so that the i-th branch (stride
        # down_ratio[i]) receives the matching map.  NOTE(review):
        # presumably the neck emits finest-first — confirm its order.
        hms, whs, regs = multi_apply(
            self.forward_single,
            feats[::-1],
            self.hm,
            self.wh,
            self.reg
        )
        return hms, whs, regs

    def get_bboxes_single(self,
                          pred_heatmap,
                          pred_wh,
                          pred_reg_offset,
                          down_ratio,
                          topk=100):
        """Decode one level's raw predictions into boxes.

        Returns:
            clses: (batch, topk, 1) class indices (float).
            scores: (batch, topk, 1) heatmap scores.
            bboxes: (batch, topk, 4) boxes in input-image pixels.
        """
        batch, cat, height, width = pred_heatmap.size()
        pred_heatmap = pred_heatmap.detach()
        wh = pred_wh.detach()
        reg = pred_reg_offset.detach()
        # Max-pool based NMS keeps only local maxima of the heatmap.
        heat = simple_nms(pred_heatmap.sigmoid_())  # used maxpool to filter the max score
        # (batch, topk)
        scores, inds, clses, ys, xs = self._topk(heat, topk=topk)
        if reg is not None:
            # Refine the integer peak location with the sub-pixel offset.
            reg = tranpose_and_gather_feat(reg, inds)  # (batch, topk, 2)
            reg = reg.view(batch, topk, 2)
            xs = xs.view(batch, topk, 1) + reg[:, :, 0:1]
            ys = ys.view(batch, topk, 1) + reg[:, :, 1:2]
        else:
            # Without an offset branch, take the cell center.
            xs = xs.view(batch, topk, 1) + 0.5
            ys = ys.view(batch, topk, 1) + 0.5
        wh = tranpose_and_gather_feat(wh, inds)  # (batch, topk, 2)
        wh = wh.view(batch, topk, 2)
        clses = clses.view(batch, topk, 1).float()
        scores = scores.view(batch, topk, 1)
        # Center +/- half the predicted size, scaled back to input pixels.
        bboxes = torch.cat([xs - wh[..., 0:1] / 2,
                            ys - wh[..., 1:2] / 2,
                            xs + wh[..., 0:1] / 2,
                            ys + wh[..., 1:2] / 2], dim=2) * down_ratio
        return clses, scores, bboxes

    def get_bboxes(self,
                   pred_heatmap,
                   pred_wh,
                   pred_reg_offset,
                   img_metas,
                   cfg,
                   rescale=False):
        """Decode all levels and merge detections per image.

        Returns:
            list[tuple]: per image, (bboxes_with_scores, labels).
        """
        clses, scores, bboxes = multi_apply(
            self.get_bboxes_single,
            pred_heatmap,
            pred_wh,
            pred_reg_offset,
            self.down_ratio,
            topk=getattr(cfg, 'max_per_img', 100)
        )
        result_list = []
        score_thr = getattr(cfg, 'score_thr', 0.01)
        for batch_i in range(bboxes[0].shape[0]):
            # Per-level detections for this image, thresholded on score.
            scores_per_img = [score[batch_i] for score in scores]
            scores_keep = [(score_per > score_thr).squeeze(-1) for score_per in scores_per_img]
            scores_per_img = [score_per[score_keep] for (score_per, score_keep)
                              in zip(scores_per_img, scores_keep)]
            bboxes_per_img = [bbox[batch_i][score_keep] for (bbox, score_keep)
                              in zip(bboxes, scores_keep)]
            labels_per_img = [cls[batch_i][score_keep].squeeze(-1).long() for (cls, score_keep)
                              in zip(clses, scores_keep)]
            img_shape = img_metas[batch_i]['pad_shape']
            if self.fast_nms and len(scores_per_img) > 1:
                # Clip every level's boxes to the padded image before
                # computing cross-level IoUs.
                for i in range(len(bboxes_per_img)):
                    bboxes_per_img[i][:, 0::2] = bboxes_per_img[i][:, 0::2].clamp(
                        min=0, max=img_shape[1] - 1)
                    bboxes_per_img[i][:, 1::2] = bboxes_per_img[i][:, 1::2].clamp(
                        min=0, max=img_shape[0] - 1)
                # Suppress duplicates between each adjacent level pair.
                b1_keeps, b2_keeps = [], []
                for idx in range(len(scores_per_img) - 1):
                    bboxes_b1_per_img, bboxes_b2_per_img = bboxes_per_img[idx], \
                                                           bboxes_per_img[idx + 1]
                    labels_b1_per_img, labels_b2_per_img = labels_per_img[idx], \
                                                           labels_per_img[idx + 1]
                    scores_b1_per_img, scores_b2_per_img = scores_per_img[idx], \
                                                           scores_per_img[idx + 1]
                    # Pairwise mask of detections sharing the same class.
                    duplicate_cls_b1 = bboxes_b1_per_img.new_ones(
                        (bboxes_b1_per_img.shape[0], bboxes_b2_per_img.shape[0]),
                        dtype=torch.long) * labels_b1_per_img.unsqueeze(-1)
                    duplicate_cls_b2 = bboxes_b1_per_img.new_ones(
                        (bboxes_b1_per_img.shape[0], bboxes_b2_per_img.shape[0]),
                        dtype=torch.long) * labels_b2_per_img.unsqueeze(0)
                    duplicate_cls = (duplicate_cls_b1 == duplicate_cls_b2)
                    b1_keep = bboxes_b1_per_img.new_ones((bboxes_b1_per_img.shape[0],),
                                                         dtype=torch.bool)
                    b2_keep = bboxes_b2_per_img.new_ones((bboxes_b2_per_img.shape[0],),
                                                         dtype=torch.bool)
                    if duplicate_cls.any():
                        ious_large = bbox_overlaps(bboxes_b1_per_img,
                                                   bboxes_b2_per_img) > 0.6
                        duplicate = ious_large & duplicate_cls
                        # For every b1 box: best overlapping b2 score.
                        scores_b2_max, b2_max_loc = (scores_b2_per_img.view(
                            1, -1) * duplicate.float()).max(1)
                        # Keep a b1 box only if it beats its best rival.
                        b1_keep = (scores_b1_per_img.view(-1) >= scores_b2_max)
                        b2_max_loc_keep = b2_max_loc[~b1_keep]
                        b2_max_loc_keep_hot = bboxes_b1_per_img.new_zeros((duplicate.shape[1],),
                                                                          dtype=torch.bool)
                        # BUGFIX: Tensor.scatter() is out-of-place and its
                        # result was discarded, leaving this one-hot mask
                        # all zeros; scatter_() marks the b2 boxes that won
                        # against a suppressed b1 box so they are retained.
                        b2_max_loc_keep_hot.scatter_(0, b2_max_loc_keep, 1)
                        # Drop b2 boxes dominated by a kept b1 box, but
                        # re-add the winners recorded above.
                        b2_keep = ~(b1_keep.view(-1, 1) * duplicate).any(0)
                        b2_keep = b2_keep | b2_max_loc_keep_hot
                    b1_keeps.append(b1_keep)
                    b2_keeps.append(b2_keep)
                # A middle level must survive both comparisons around it.
                level_keep = [b1_keeps[0]]
                for idx in range(len(self.down_ratio) - 2):
                    level_keep.append(b1_keeps[idx + 1] & b2_keeps[idx])
                level_keep.append(b2_keeps[-1])
                scores_per_img = torch.cat([score_per_img[keep] for (score_per_img, keep)
                                            in zip(scores_per_img, level_keep)], dim=0)
                bboxes_per_img = torch.cat([bbox_per_img[keep] for (bbox_per_img, keep)
                                            in zip(bboxes_per_img, level_keep)], dim=0)
                labels_per_img = torch.cat([label_per_img[keep] for (label_per_img, keep)
                                            in zip(labels_per_img, level_keep)], dim=0)
            else:
                # No cross-level suppression: simply concatenate and clip.
                if len(scores_per_img) == 1:
                    scores_per_img = scores_per_img[0]
                    bboxes_per_img = bboxes_per_img[0]
                    labels_per_img = labels_per_img[0]
                else:
                    scores_per_img = torch.cat(scores_per_img, dim=0)
                    bboxes_per_img = torch.cat(bboxes_per_img, dim=0)
                    labels_per_img = torch.cat(labels_per_img, dim=0)
                bboxes_per_img[:, 0::2] = bboxes_per_img[:, 0::2].clamp(
                    min=0, max=img_shape[1] - 1)
                bboxes_per_img[:, 1::2] = bboxes_per_img[:, 1::2].clamp(
                    min=0, max=img_shape[0] - 1)
            if rescale:
                # Map boxes back to the original (pre-resize) image.
                scale_factor = img_metas[batch_i]['scale_factor']
                bboxes_per_img /= bboxes_per_img.new_tensor(scale_factor)
            if self.trand_nms:
                # Expand scores to one-hot per-class form (background at
                # column 0) as expected by multiclass_nms.
                label_hard = bboxes_per_img.new_zeros(bboxes_per_img.shape[0], self.num_classes)
                label_hard.scatter_(1, labels_per_img.view(-1, 1) + 1, 1)
                scores_per_img = scores_per_img * label_hard.float()
                bboxes_per_img, labels_per_img = multiclass_nms(
                    bboxes_per_img, scores_per_img, score_thr=score_thr,
                    nms_cfg=dict(type='nms', iou_thr=0.6), max_num=100)
            else:
                bboxes_per_img = torch.cat([bboxes_per_img, scores_per_img], dim=1)
                labels_per_img = labels_per_img.float()
            result_list.append((bboxes_per_img, labels_per_img))
        return result_list

    def loss(self,
             pred_heatmap,
             pred_wh,
             pred_reg_offset,
             gt_bboxes,
             gt_labels,
             img_metas,
             cfg,
             gt_bboxes_ignore=None):
        """Compute per-level heatmap / wh / offset losses.

        Returns:
            dict: losses keyed by branch and output stride.
        """
        all_targets = self._target_generator(gt_bboxes, gt_labels, img_metas)
        hm_loss, wh_loss, off_loss = self._loss(
            pred_heatmap, pred_wh, pred_reg_offset, *all_targets)
        loss_dict = dict()
        for idx, down_ratio in enumerate(self.down_ratio):
            loss_dict['losses/ctx_loss_hm_s{}'.format(down_ratio)] = hm_loss[idx]
            loss_dict['losses/ctx_loss_wh_s{}'.format(down_ratio)] = wh_loss[idx]
            loss_dict['losses/ctx_loss_reg_s{}'.format(down_ratio)] = off_loss[idx]
        return loss_dict

    def _topk(self, scores, topk):
        """Select the topk heatmap peaks across all classes.

        Returns:
            (score, inds, clses, ys, xs), each of shape (batch, topk).
        """
        batch, cat, height, width = scores.size()
        # Per-class top-k over flattened spatial dims: (batch, cat, topk).
        topk_scores, topk_inds = torch.topk(scores.view(batch, cat, -1), topk)
        topk_inds = topk_inds % (height * width)
        topk_ys = (topk_inds / width).int().float()
        topk_xs = (topk_inds % width).int().float()
        # Global top-k over the cat*topk candidates: (batch, topk).
        topk_score, topk_ind = torch.topk(topk_scores.view(batch, -1), topk)
        topk_clses = (topk_ind / topk).int()
        topk_inds = gather_feat(topk_inds.view(batch, -1, 1), topk_ind).view(batch, topk)
        topk_ys = gather_feat(topk_ys.view(batch, -1, 1), topk_ind).view(batch, topk)
        topk_xs = gather_feat(topk_xs.view(batch, -1, 1), topk_ind).view(batch, topk)
        return topk_score, topk_inds, topk_clses, topk_ys, topk_xs
class CTTargetGenerator(object):
def __init__(self,
             down_ratio,
             length_range,
             num_fg=80,
             max_objs=128):
    """Target generator for CTXHead.

    Args:
        down_ratio (tuple[int]): Output stride of each feature level.
        length_range (tuple): Per-level (min, max) box side lengths;
            boxes are assigned to a level by area (length squared).
        num_fg (int): Number of foreground classes.
        max_objs (int): Maximum number of objects encoded per image.
    """
    self.num_fg = num_fg
    self.down_ratio = down_ratio
    self.length_range = length_range
    self.max_objs = max_objs
def target_single_image(self, gt_boxes, gt_labels, pad_shape, boxes_area=None):
    """Build per-level training targets for one image.

    Args:
        gt_boxes: tensor, tensor <=> img, (num_gt, 4).
        gt_labels: tensor, tensor <=> img, (num_gt,).
        pad_shape: tuple.  NOTE(review): unpacked below as exactly two
            values (h, w); a 3-tuple (h, w, c) would break the
            per-level comprehension — confirm what callers pass.
        boxes_area: unused; areas are recomputed from gt_boxes.

    Returns:
        Per-level lists of:
        heatmap: tensor, tensor <=> img, (80, h, w).
        wh: tensor, tensor <=> img, (max_obj, 2).
        reg_mask: tensor, tensor <=> img, (max_obj,).
        ind: tensor, tensor <=> img, (max_obj,).
        reg: tensor, tensor <=> img, (max_obj, 2).
    """
    # bbox_areas is defined elsewhere on this class (not shown here).
    boxes_areas = self.bbox_areas(gt_boxes)
    heatmap, wh, reg_mask, ind, reg = [], [], [], [], []
    output_hs, output_ws, gt_level_idx = [], [], []
    for i, (down_ratio) in enumerate(self.down_ratio):
        # Allocate empty targets at this level's output resolution.
        output_h, output_w = [shape // down_ratio for shape in pad_shape]
        heatmap.append(gt_boxes.new_zeros((self.num_fg, output_h, output_w)))
        wh.append(gt_boxes.new_zeros((self.max_objs, 2)))
        reg_mask.append(gt_boxes.new_zeros((self.max_objs,), dtype=torch.uint8))
        ind.append(gt_boxes.new_zeros((self.max_objs,), dtype=torch.long))
        reg.append(gt_boxes.new_zeros((self.max_objs, 2)))
        output_hs.append(output_h)
        output_ws.append(output_w)
        # Assign each gt to the level whose side-length range covers its
        # area; ranges may overlap, so a gt can land on several levels.
        gt_level_idx.append(
            (boxes_areas >= self.length_range[i][0] ** 2) &
            (boxes_areas <= self.length_range[i][1] ** 2))
    # Fill the allocated targets level by level.
    heatmap, wh, reg_mask, ind, reg = multi_apply(
        self.target_single_single,
        heatmap,
        wh,
        reg_mask,
        ind,
        reg,
        gt_level_idx,
        output_hs,
        output_ws,
        self.down_ratio,
        gt_boxes=gt_boxes,
        gt_labels=gt_labels)
    return heatmap, wh, reg_mask, ind, reg
def target_single_single(self,
heatmap,
wh,
reg_mask,
ind,
reg,
gt_level_idx,
output_h,
output_w,
down_ratio,
gt_boxes,
gt_labels):
gt_boxes = gt_boxes[gt_level_idx]
gt_labels = gt_labels[gt_level_idx]
gt_boxes /= down_ratio
gt_boxes[:, [0, 2]] = torch.clamp(gt_boxes[:, [0, 2]], 0, output_w - 1)
gt_boxes[:, [1, 3]] = torch.clamp(gt_boxes[:, [1, 3]], 0, output_h - 1)
hs, ws = (gt_boxes[:, 3] - gt_boxes[:, 1], gt_boxes[:, 2] - gt_boxes[:, 0])
for k in range(gt_boxes.shape[0]):
cls_id = gt_labels[k] - 1
h, w = hs[k], ws[k]
if h > 0 and w > 0:
center = gt_boxes.new_tensor([(gt_boxes[k, 0] + gt_boxes[k, 2]) / 2,
(gt_boxes[k, 1] | |
A dictionary with lists of functions that should be called after
#: each request. The key of the dictionary is the name of the module
#: this function is active for, `None` for all requests. This can for
#: example be used to open database connections or getting hold of the
#: currently logged in user. To register a function here, use the
#: :meth:`before_request` decorator.
self.after_request_funcs = {}
#: A dictionary with list of functions that are called without argument
#: to populate the template context. They key of the dictionary is the
#: name of the module this function is active for, `None` for all
#: requests. Each returns a dictionary that the template context is
#: updated with. To register a function here, use the
#: :meth:`context_processor` decorator.
self.template_context_processors = {
None: [_default_template_ctx_processor]
}
#: all the loaded modules in a dictionary by name.
#:
#: .. versionadded:: 0.5
self.modules = {}
#: The :class:`~werkzeug.routing.Map` for this instance. You can use
#: this to change the routing converters after the class was created
#: but before any routes are connected. Example::
#:
#: from werkzeug import BaseConverter
#:
#: class ListConverter(BaseConverter):
#: def to_python(self, value):
#: return value.split(',')
#: def to_url(self, values):
#: return ','.join(BaseConverter.to_url(value)
#: for value in values)
#:
#: app = Flask(__name__)
#: app.url_map.converters['list'] = ListConverter
self.url_map = Map()
# register the static folder for the application. Do that even
# if the folder does not exist. First of all it might be created
# while the server is running (usually happens during development)
# but also because google appengine stores static files somewhere
# else when mapped with the .yml file.
self.add_url_rule(self.static_path + '/<path:filename>',
endpoint='static',
view_func=self.send_static_file)
#: The Jinja2 environment. It is created from the
#: :attr:`jinja_options`.
self.jinja_env = self.create_jinja_environment()
self.init_jinja_globals()
@property
def logger(self):
    """A :class:`logging.Logger` object for this application.  The
    default configuration is to log to stderr if the application is
    in debug mode.  This logger can be used to (surprise) log messages.
    Here some examples::

        app.logger.debug('A value for debugging')
        app.logger.warning('A warning occurred (%d apples)', 42)
        app.logger.error('An error occurred')

    .. versionadded:: 0.3
    """
    # Fast path: reuse the cached logger while its name still matches
    # (logger_name can change, which invalidates the cache).
    if self._logger and self._logger.name == self.logger_name:
        return self._logger
    # Double-checked locking: re-test under the lock so concurrent
    # callers do not each create a logger.
    with _logger_lock:
        if self._logger and self._logger.name == self.logger_name:
            return self._logger
        # Imported here rather than at module level — presumably to
        # avoid a circular import; confirm before moving it.
        from flask.logging import create_logger
        self._logger = rv = create_logger(self)
        return rv
def create_jinja_environment(self):
    """Build the Jinja2 environment from :attr:`jinja_options`,
    wiring in the application's template loader and defaulting
    `autoescape` to :meth:`select_jinja_autoescape`.

    .. versionadded:: 0.5
    """
    options = dict(self.jinja_options)
    options.setdefault('autoescape', self.select_jinja_autoescape)
    loader = _DispatchingJinjaLoader(self)
    return Environment(loader=loader, **options)
def init_jinja_globals(self):
    """Inject the default globals (`url_for`, `get_flashed_messages`)
    and the `tojson` filter into the freshly created Jinja environment.
    Called directly after the environment was created.

    .. versionadded:: 0.5
    """
    env = self.jinja_env
    env.globals['url_for'] = url_for
    env.globals['get_flashed_messages'] = get_flashed_messages
    env.filters['tojson'] = _tojson_filter
def select_jinja_autoescape(self, filename):
    """Decide whether autoescaping should be active for a template.

    Autoescaping is enabled for HTML/XML-like extensions only; a
    template without a name never autoescapes.

    .. versionadded:: 0.5
    """
    markup_extensions = ('.html', '.htm', '.xml', '.xhtml')
    return filename is not None and filename.endswith(markup_extensions)
def update_template_context(self, context):
    """Update the template context with some commonly used variables.
    This injects request, session, config and g into the template
    context as well as everything template context processors want
    to inject.  Note that the as of Flask 0.6, the original values
    in the context will not be overriden if a context processor
    decides to return a value with the same key.

    :param context: the context as a dictionary that is updated in place
                    to add extra variables.
    """
    # Application-wide processors (key None) always run; processors
    # registered for the current request's module are chained on.
    funcs = self.template_context_processors[None]
    mod = _request_ctx_stack.top.request.module
    if mod is not None and mod in self.template_context_processors:
        funcs = chain(funcs, self.template_context_processors[mod])
    # Snapshot the caller-supplied values before processors run.
    orig_ctx = context.copy()
    for func in funcs:
        context.update(func())
    # make sure the original values win.  This makes it possible to
    # easier add new variables in context processors without breaking
    # existing views.
    context.update(orig_ctx)
def run(self, host='127.0.0.1', port=5000, **options):
    """Start the builtin Werkzeug development server.

    When :attr:`debug` is set the server reloads on code changes and
    shows the interactive debugger on unhandled exceptions.  Pass
    ``use_evalex=False`` to keep the debugger's traceback screen but
    disable code execution, or ``debug=True`` together with
    ``use_reloader=False`` to get the debugger without reloading.

    .. admonition:: Keep in Mind

       Outside of debug mode Flask suppresses server errors with a
       generic error page, so ``use_debugger=True`` alone will have
       nothing to catch.

    :param host: the hostname to listen on; use ``'0.0.0.0'`` to make
                 the server available externally as well.
    :param port: the port of the webserver.
    :param options: extra keyword arguments forwarded to
                    :func:`werkzeug.run_simple`.
    """
    from werkzeug import run_simple
    # `debug` is intercepted here; everything else goes to werkzeug.
    _missing = object()
    debug = options.pop('debug', _missing)
    if debug is not _missing:
        self.debug = debug
    # Reloader and debugger follow the application's debug flag unless
    # explicitly overridden by the caller.
    options.setdefault('use_reloader', self.debug)
    options.setdefault('use_debugger', self.debug)
    return run_simple(host, port, self, **options)
def test_client(self):
"""Creates a test client for this application. For information
about unit testing head over to :ref:`testing`.
The test client can be used in a `with` block to defer the closing down
of the context until the end of the `with` block. This is useful if
you want to access the context locals for testing::
with app.test_client() as c:
rv = c.get('/?vodka=42')
assert request.args['vodka'] == '42'
.. versionchanged:: 0.4
added support for `with` block usage for the client.
"""
from flask.testing import FlaskClient
return FlaskClient(self, self.response_class, use_cookies=True)
def open_session(self, request):
"""Creates or opens a new session. Default implementation stores all
session data in a signed cookie. This requires that the
:attr:`secret_key` is set.
:param request: an instance of :attr:`request_class`.
"""
key = self.secret_key
if key is not None:
return Session.load_cookie(request, self.session_cookie_name,
secret_key=key)
def save_session(self, session, response):
"""Saves the session if it needs updates. For the default
implementation, check :meth:`open_session`.
:param session: the session to be saved (a
:class:`~werkzeug.contrib.securecookie.SecureCookie`
object)
:param response: an instance of :attr:`response_class`
"""
expires = domain = None
if session.permanent:
expires = datetime.utcnow() + self.permanent_session_lifetime
if self.config['SERVER_NAME'] is not None:
domain = '.' + self.config['SERVER_NAME']
session.save_cookie(response, self.session_cookie_name,
expires=expires, httponly=True, domain=domain)
def register_module(self, module, **options):
"""Registers a module with this application. The keyword argument
of this function are the same as the ones for the constructor of the
:class:`Module` class and will override the values of the module if
provided.
"""
options.setdefault('url_prefix', module.url_prefix)
options.setdefault('subdomain', module.subdomain)
state = _ModuleSetupState(self, **options)
for func in module._register_events:
func(state)
def add_url_rule(self, rule, endpoint=None, view_func=None, **options):
"""Connects a URL rule. Works exactly like the :meth:`route`
decorator. If a view_func is provided it will be registered with the
endpoint.
Basically this example::
@app.route('/')
def index():
pass
Is equivalent to the following::
def index():
pass
app.add_url_rule('/', 'index', index)
If the view_func is not provided you will need to connect the endpoint
to a view function like so::
app.view_functions['index'] = index
.. versionchanged:: 0.2
`view_func` parameter added.
.. versionchanged:: 0.6
`OPTIONS` is added automatically as method.
:param rule: the URL rule as string
:param endpoint: the endpoint for the registered URL rule. Flask
itself assumes the name of the view function as
endpoint
:param view_func: the function to call when serving a request to the
provided endpoint
:param options: the options to be forwarded to the underlying
:class:`~werkzeug.routing.Rule` object. A change
to Werkzeug is handling of method options. methods
is a list of methods this rule should be limited
to (`GET`, `POST` etc.). By default a rule
just listens for `GET` (and implicitly `HEAD`).
Starting with Flask 0.6, `OPTIONS` is implicitly
added and handled by the standard request handling.
"""
if endpoint is None:
endpoint = _endpoint_from_view_func(view_func)
options['endpoint'] = endpoint
methods = options.pop('methods', ('GET',))
provide_automatic_options = False
if 'OPTIONS' not in methods:
methods = tuple(methods) + ('OPTIONS',)
provide_automatic_options = True
rule = Rule(rule, methods=methods, **options)
rule.provide_automatic_options = provide_automatic_options
self.url_map.add(rule)
if view_func is not | |
import pytest
import datetime
import shutil
import os
from textwrap import dedent
from os.path import join
from ...api import Gradebook, MissingEntry
from .. import run_nbgrader
from .base import BaseTestApp
class TestNbGraderDb(BaseTestApp):
def test_help(self):
"""Does the help display without error?"""
run_nbgrader(["db", "--help-all"])
run_nbgrader(["db", "student", "--help-all"])
run_nbgrader(["db", "student", "list", "--help-all"])
run_nbgrader(["db", "student", "remove", "--help-all"])
run_nbgrader(["db", "student", "add", "--help-all"])
run_nbgrader(["db", "student", "import", "--help-all"])
run_nbgrader(["db", "assignment", "--help-all"])
run_nbgrader(["db", "assignment", "list", "--help-all"])
run_nbgrader(["db", "assignment", "remove", "--help-all"])
run_nbgrader(["db", "assignment", "add", "--help-all"])
run_nbgrader(["db", "assignment", "import", "--help-all"])
def test_no_args(self):
"""Is there an error if no arguments are given?"""
run_nbgrader(["db"], retcode=0)
run_nbgrader(["db", "student"], retcode=0)
run_nbgrader(["db", "student", "remove"], retcode=1)
run_nbgrader(["db", "student", "add"], retcode=1)
run_nbgrader(["db", "student", "import"], retcode=1)
run_nbgrader(["db", "assignment"], retcode=0)
run_nbgrader(["db", "assignment", "remove"], retcode=1)
run_nbgrader(["db", "assignment", "add"], retcode=1)
run_nbgrader(["db", "assignment", "import"], retcode=1)
def test_student_add(self, db):
run_nbgrader(["db", "student", "add", "foo", "--db", db])
with Gradebook(db) as gb:
student = gb.find_student("foo")
assert student.last_name is None
assert student.first_name is None
assert student.email is None
run_nbgrader(["db", "student", "add", "foo", "--last-name=FooBar", "--db", db])
with Gradebook(db) as gb:
student = gb.find_student("foo")
assert student.last_name == "FooBar"
assert student.first_name is None
assert student.email is None
run_nbgrader(["db", "student", "add", "foo", "--first-name=FooBar", "--db", db])
with Gradebook(db) as gb:
student = gb.find_student("foo")
assert student.last_name is None
assert student.first_name == "FooBar"
assert student.email is None
run_nbgrader(["db", "student", "add", "foo", "--email=<EMAIL>", "--db", db])
with Gradebook(db) as gb:
student = gb.find_student("foo")
assert student.last_name is None
assert student.first_name is None
assert student.email == "<EMAIL>"
def test_student_remove(self, db):
run_nbgrader(["db", "student", "add", "foo", "--db", db])
with Gradebook(db) as gb:
student = gb.find_student("foo")
assert student.last_name is None
assert student.first_name is None
assert student.email is None
run_nbgrader(["db", "student", "remove", "foo", "--db", db])
with Gradebook(db) as gb:
with pytest.raises(MissingEntry):
gb.find_student("foo")
# running it again should give an error
run_nbgrader(["db", "student", "remove", "foo", "--db", db], retcode=1)
    def test_student_remove_with_submissions(self, db, course_dir):
        """Removing a student who has autograded submissions requires --force."""
        # set up a student, an assignment, and an autograded submission
        run_nbgrader(["db", "student", "add", "foo", "--db", db])
        run_nbgrader(["db", "assignment", "add", "ps1", "--db", db])
        self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb"))
        run_nbgrader(["generate_assignment", "ps1", "--db", db])
        self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "submitted", "foo", "ps1", "p1.ipynb"))
        run_nbgrader(["autograde", "ps1", "--db", db])
        with Gradebook(db) as gb:
            gb.find_student("foo")
        # it should fail if we don't run with --force
        run_nbgrader(["db", "student", "remove", "foo", "--db", db], retcode=1)
        # make sure we can still find the student
        with Gradebook(db) as gb:
            gb.find_student("foo")
        # now force it to complete
        run_nbgrader(["db", "student", "remove", "foo", "--force", "--db", db])
        # student should be gone
        with Gradebook(db) as gb:
            with pytest.raises(MissingEntry):
                gb.find_student("foo")
    def test_student_remove_with_submissions_f(self, db, course_dir):
        """Same as test_student_remove_with_submissions, using the short -f flag."""
        run_nbgrader(["db", "student", "add", "foo", "--db", db])
        run_nbgrader(["db", "assignment", "add", "ps1", "--db", db])
        self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb"))
        run_nbgrader(["generate_assignment", "ps1", "--db", db])
        self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "submitted", "foo", "ps1", "p1.ipynb"))
        run_nbgrader(["autograde", "ps1", "--db", db])
        with Gradebook(db) as gb:
            gb.find_student("foo")
        # it should fail if we don't run with --force
        run_nbgrader(["db", "student", "remove", "foo", "--db", db], retcode=1)
        # make sure we can still find the student
        with Gradebook(db) as gb:
            gb.find_student("foo")
        # now force it to complete (short form of --force)
        run_nbgrader(["db", "student", "remove", "foo", "-f", "--db", db])
        # student should be gone
        with Gradebook(db) as gb:
            with pytest.raises(MissingEntry):
                gb.find_student("foo")
    def test_student_list(self, db):
        """`db student list` prints a count followed by one line per student."""
        run_nbgrader(["db", "student", "add", "foo", "--first-name=abc", "--last-name=xyz", "--email=<EMAIL>", "--db", db])
        run_nbgrader(["db", "student", "add", "bar", "--db", db])
        out = run_nbgrader(["db", "student", "list", "--db", db], stdout=True)
        # per-student format (from the expected text): "<id> (<last>, <first>) -- <email>, ..."
        assert out == dedent(
            """
            There are 2 students in the database:
            bar (None, None) -- None, None
            foo (xyz, abc) -- <EMAIL>, None
            """
        ).strip() + "\n"
    def test_student_import(self, db, temp_cwd):
        """`db student import` loads students from CSV; an `id` column is
        required, but extra or missing columns are tolerated."""
        with open("students.csv", "w") as fh:
            fh.write(dedent(
                """
                id,first_name,last_name,email
                foo,abc,xyz,<EMAIL>
                bar,,,
                """
            ).strip())
        run_nbgrader(["db", "student", "import", "students.csv", "--db", db])
        with Gradebook(db) as gb:
            student = gb.find_student("foo")
            assert student.last_name == "xyz"
            assert student.first_name == "abc"
            assert student.email == "<EMAIL>"
            # empty CSV fields come through as None
            student = gb.find_student("bar")
            assert student.last_name is None
            assert student.first_name is None
            assert student.email is None
        # check that it fails when no id column is given
        with open("students.csv", "w") as fh:
            fh.write(dedent(
                """
                first_name,last_name,email
                abc,xyz,<EMAIL>
                ,,
                """
            ).strip())
        run_nbgrader(["db", "student", "import", "students.csv", "--db", db], retcode=1)
        # check that it works ok with extra and missing columns
        with open("students.csv", "w") as fh:
            fh.write(dedent(
                """
                id,first_name,last_name,foo
                foo,abc,xyzzzz,blah
                bar,,,
                """
            ).strip())
        run_nbgrader(["db", "student", "import", "students.csv", "--db", db])
        with Gradebook(db) as gb:
            student = gb.find_student("foo")
            assert student.last_name == "xyzzzz"
            # fields absent from the CSV keep their previously imported values
            assert student.first_name == "abc"
            assert student.email == "<EMAIL>"
            student = gb.find_student("bar")
            assert student.last_name is None
            assert student.first_name is None
            assert student.email is None
    def test_student_import_csv_spaces(self, db, temp_cwd):
        """Importing tolerates whitespace around CSV header names (note
        the leading space in " email" below)."""
        with open("students.csv", "w") as fh:
            fh.write(dedent(
                """
                id,first_name,last_name, email
                foo,abc,xyz,<EMAIL>
                bar,,,
                """
            ).strip())
        run_nbgrader(["db", "student", "import", "students.csv", "--db", db])
        with Gradebook(db) as gb:
            student = gb.find_student("foo")
            assert student.last_name == "xyz"
            assert student.first_name == "abc"
            assert student.email == "<EMAIL>"
            student = gb.find_student("bar")
            assert student.last_name is None
            assert student.first_name is None
            assert student.email is None
def test_assignment_add(self, db):
run_nbgrader(["db", "assignment", "add", "foo", "--db", db])
with Gradebook(db) as gb:
assignment = gb.find_assignment("foo")
assert assignment.duedate is None
run_nbgrader(["db", "assignment", "add", "foo", '--duedate="Sun Jan 8 2017 4:31:22 PM"', "--db", db])
with Gradebook(db) as gb:
assignment = gb.find_assignment("foo")
assert assignment.duedate == datetime.datetime(2017, 1, 8, 16, 31, 22)
def test_assignment_remove(self, db):
run_nbgrader(["db", "assignment", "add", "foo", "--db", db])
with Gradebook(db) as gb:
assignment = gb.find_assignment("foo")
assert assignment.duedate is None
run_nbgrader(["db", "assignment", "remove", "foo", "--db", db])
with Gradebook(db) as gb:
with pytest.raises(MissingEntry):
gb.find_assignment("foo")
# running it again should give an error
run_nbgrader(["db", "assignment", "remove", "foo", "--db", db], retcode=1)
    def test_assignment_remove_with_submissions(self, db, course_dir):
        """Removing an assignment with autograded submissions requires --force."""
        # set up a student, an assignment, and an autograded submission
        run_nbgrader(["db", "student", "add", "foo", "--db", db])
        run_nbgrader(["db", "assignment", "add", "ps1", "--db", db])
        self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb"))
        run_nbgrader(["generate_assignment", "ps1", "--db", db])
        self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "submitted", "foo", "ps1", "p1.ipynb"))
        run_nbgrader(["autograde", "ps1", "--db", db])
        with Gradebook(db) as gb:
            gb.find_assignment("ps1")
        # it should fail if we don't run with --force
        run_nbgrader(["db", "assignment", "remove", "ps1", "--db", db], retcode=1)
        # make sure we can still find the assignment
        with Gradebook(db) as gb:
            gb.find_assignment("ps1")
        # now force it to complete
        run_nbgrader(["db", "assignment", "remove", "ps1", "--force", "--db", db])
        # assignment should be gone
        with Gradebook(db) as gb:
            with pytest.raises(MissingEntry):
                gb.find_assignment("ps1")
    def test_assignment_remove_with_submissions_f(self, db, course_dir):
        """Same as test_assignment_remove_with_submissions, using the short -f flag."""
        run_nbgrader(["db", "student", "add", "foo", "--db", db])
        run_nbgrader(["db", "assignment", "add", "ps1", "--db", db])
        self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb"))
        run_nbgrader(["generate_assignment", "ps1", "--db", db])
        self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "submitted", "foo", "ps1", "p1.ipynb"))
        run_nbgrader(["autograde", "ps1", "--db", db])
        with Gradebook(db) as gb:
            gb.find_assignment("ps1")
        # it should fail if we don't run with --force
        run_nbgrader(["db", "assignment", "remove", "ps1", "--db", db], retcode=1)
        # make sure we can still find the assignment
        with Gradebook(db) as gb:
            gb.find_assignment("ps1")
        # now force it to complete (short form of --force)
        run_nbgrader(["db", "assignment", "remove", "ps1", "-f", "--db", db])
        # assignment should be gone
        with Gradebook(db) as gb:
            with pytest.raises(MissingEntry):
                gb.find_assignment("ps1")
    def test_assignment_list(self, db):
        """`db assignment list` prints a count followed by one line per
        assignment with its (parsed) due date."""
        run_nbgrader(["db", "assignment", "add", "foo", '--duedate="Sun Jan 8 2017 4:31:22 PM"', "--db", db])
        run_nbgrader(["db", "assignment", "add", "bar", "--db", db])
        out = run_nbgrader(["db", "assignment", "list", "--db", db], stdout=True)
        assert out == dedent(
            """
            There are 2 assignments in the database:
            bar (due: None)
            foo (due: 2017-01-08 16:31:22)
            """
        ).strip() + "\n"
    def test_assignment_import(self, db, temp_cwd):
        """`db assignment import` loads assignments (name, duedate) from CSV."""
        with open("assignments.csv", "w") as fh:
            fh.write(dedent(
                """
                name,duedate
                foo,Sun Jan 8 2017 4:31:22 PM
                bar,
                """
            ).strip())
        run_nbgrader(["db", "assignment", "import", "assignments.csv", "--db", db])
        with Gradebook(db) as gb:
            assignment = gb.find_assignment("foo")
            # the human-readable duedate string is parsed into a datetime
            assert assignment.duedate == datetime.datetime(2017, 1, 8, 16, 31, 22)
            # an empty duedate field imports as None
            assignment = gb.find_assignment("bar")
            assert assignment.duedate is None
    def test_assignment_import_csv_spaces(self, db, temp_cwd):
        """Importing tolerates whitespace around CSV headers; a `name`
        column is required, and missing columns are tolerated."""
        with open("assignments.csv", "w") as fh:
            fh.write(dedent(
                """
                name, duedate
                foo,Sun Jan 8 2017 4:31:22 PM
                bar,
                """
            ).strip())
        run_nbgrader(["db", "assignment", "import", "assignments.csv", "--db", db])
        with Gradebook(db) as gb:
            assignment = gb.find_assignment("foo")
            assert assignment.duedate == datetime.datetime(2017, 1, 8, 16, 31, 22)
            assignment = gb.find_assignment("bar")
            assert assignment.duedate is None
        # check that it fails when no name (id) column is given
        with open("assignments.csv", "w") as fh:
            fh.write(dedent(
                """
                duedate
                Sun Jan 8 2017 4:31:22 PM
                ,
                """
            ).strip())
        run_nbgrader(["db", "assignment", "import", "assignments.csv", "--db", db], retcode=1)
        # check that it works ok with extra and missing columns
        with open("assignments.csv", "w") as fh:
            fh.write(dedent(
                """
                name
                foo
                bar
                """
            ).strip())
        run_nbgrader(["db", "assignment", "import", "assignments.csv", "--db", db])
        with Gradebook(db) as gb:
            assignment = gb.find_assignment("foo")
            # importing without a duedate column leaves the existing duedate untouched
            assert assignment.duedate == datetime.datetime(2017, 1, 8, 16, 31, 22)
            assignment = gb.find_assignment("bar")
            assert assignment.duedate is None
    def test_upgrade_nodb(self, temp_cwd):
        """`db upgrade` should succeed even when no database exists yet."""
        # test upgrading without a database
        run_nbgrader(["db", "upgrade"])
def test_upgrade_current_db(self, course_dir):
# add assignment | |
region index
found_lines = sorted(temp_fl, key=itemgetter(3)) # used to be 2, 3 works better
if len(found_lines) == 0:
print "No lines with label '%s' found in current regions" % tiedz_lbl
return
#######################################################
# extract data and plot it
if not saveFile:
p.close('all')
pdpi = rcParams['figure.dpi']
fig, ax = p.subplots(len(found_lines)+addplot, 1, sharex=True, figsize=(float(settings['vplot_width'])/pdpi, float(settings['vplot_height'])/pdpi)) #default: 8*80, 13*80
fig.canvas.set_window_title(crsplus + 'Velocity stack')
    class TwoClick:
        # Two-right-click N/b estimator for the velocity-stack figure: the
        # first right click records (velocity, normalised flux) of the line
        # core, the second sets the velocity width; an estimated fit line is
        # then printed in VPFIT-style format.  Relies on the enclosing
        # scope's `found_lines`, `Nb_estimate` and `C` (speed of light).
        def __init__(self, figure, settings):
            """Connect the click handler to *figure*'s button-press events."""
            self.second_click = False  # toggles between first and second click
            self.figure = figure
            self.cid_onclick = self.figure.canvas.mpl_connect('button_press_event', self.onclick)
            self.settings = settings
            # (x1, y1) = first click, (x2, y2) = second click
            self.clicks = {'x1': None, 'y1': None, 'x2': None, 'y2': None}
        def onclick(self, event):
            """Handle a button press; only right clicks (button 3) do anything."""
            try:
                if event.button == 3: # right click
                    # map the clicked subplot back to its entry in found_lines;
                    # panel 0 is the CRS panel when crs_display == 1
                    fl_indx = event.inaxes.get_geometry()[2]-2 # based on subplot id
                    if self.settings['crs_display'] != 1:
                        fl_indx += 1
                    # ignore clicks outside the 0..1 normalised-flux band
                    if event.ydata <= 0.0 or event.ydata >= 1.0:
                        return
                    if not self.second_click:
                        self.clicks['x1'], self.clicks['y1'] = event.xdata, event.ydata
                    else:
                        self.clicks['x2'], self.clicks['y2'] = event.xdata, event.ydata
                    if self.second_click:
                        # do the calc
                        norm_flux = self.clicks['y1']
                        delta_v = abs(self.clicks['x1']-self.clicks['x2'])
                        if not delta_v <= 0.0:
                            f = float(found_lines[fl_indx][2]['f'])
                            lam_0 = float(found_lines[fl_indx][2]['wv'])*1.0e-10 # in metres
                            # observed wavelength of the first click, converted
                            # from velocity offset back to wavelength
                            wv_obs0 = float(found_lines[fl_indx][2]['wv'])*(float(found_lines[fl_indx][1])+1.0)
                            wv_obs = self.clicks['x1']/C*1000.0*wv_obs0+wv_obs0
                            z = wv_obs/float(found_lines[fl_indx][2]['wv'])-1.0
                            N, b = Nb_estimate(norm_flux, delta_v, f, lam_0)
                            print " %s %s %s %s 0.000SE 0.00 0.00E+00 0 ! " % (found_lines[fl_indx][0].ljust(8), ("%.5f" % N).rjust(8), ("%.7f" % z).rjust(11), ("%.4f" % b).rjust(9))
                        else:
                            # zero velocity width: discard and restart the pair
                            self.second_click = False
                            return
                    self.second_click = not self.second_click
                    #
            except TypeError:
                # clicks outside any axes have event.xdata/ydata == None
                pass
    class KeyAction:
        # Keyboard handler for the velocity-stack figure: ',' sets the left
        # x-axis bound and '.' the right bound of the bottom panel to the
        # cursor position (the panels share their x axis).
        def __init__(self, figure):
            """Connect the key handler to *figure*'s key-release events."""
            self.figure = figure
            self.cid_onrelease = self.figure.canvas.mpl_connect('key_release_event', self.onrelease)
        def onrelease(self, event):
            """Adjust the shared x-limits from the cursor position."""
            if event.key == ',':
                print "Setting left bound to %.2f" % event.xdata
                self.figure.get_axes()[-1].set_xlim(left=event.xdata)
                self.figure.canvas.draw()
            if event.key == '.':
                print "Setting right bound to %.2f" % event.xdata
                self.figure.get_axes()[-1].set_xlim(right=event.xdata)
                self.figure.canvas.draw()
tc = TwoClick(fig, settings)
ka = KeyAction(fig)
vall = []
for i, fl in enumerate(found_lines):
rft = rft_all[fl[3]]
wl_raw, dat_raw = rft2Data(rft, F_WL, F_DATA, False)
wlbin, datbin = rft2Data(rft, F_WL, F_DATA, settings['plot_type'] <= 2)
wldat, fitdat = rft2Data(rft, F_WL, F_FIT, settings['plot_type'] == 2)
daterr = fExtract(rft, F_ERR)
# wldat = fExtract(rft, F_WL)
fitdat_raw = fExtract(rft, F_FIT)
twl = tExtract(rft, T_WL)
tcom = tExtract(rft, T_COM)
tsp = tExtract(rft, T_SPEC)
wv_obs = float(fl[2]['wv'])*(fl[1]+1.0)
vel_raw = [(w-wv_obs)/wv_obs*C/1000.0 for w in wl_raw]
vbin = [(w-wv_obs)/wv_obs*C/1000.0 for w in wlbin] # in km/s
vdat = [(w-wv_obs)/wv_obs*C/1000.0 for w in wldat] # in km/s
tv = [(w-wv_obs)/wv_obs*C/1000.0 for w in twl] # in km/s
vdelta = abs(vel_raw[0]-vel_raw[1])
vdata = {
'vel_raw': vel_raw,
'dat_raw': dat_raw,
'fitdat_raw': fitdat_raw,
'vbin': vbin,
'vdat': vdat,
'tv': tv,
'datbin': datbin,
'daterr': daterr,
'fitdat': fitdat,
'twl': twl,
'tcom': tcom,
'tsp': tsp,
'vdelta': vdelta
# 'ylabel': "%s_%i" % (fl[0], int(float(fl[2]['wv']))),
}
vall.append(vdata)
velocityPlot(ax[i+addplot], vdata, pc, fl, colour_config, tick_config, settings) ###### velocityPlot
ax[i+addplot].set_ylabel("%s %i" % (fl[0], int(float(fl[2]['wv']))), stretch='extra-condensed')
minvel = min([min(vi['vel_raw']) for vi in vall])
maxvel = max([max(vi['vel_raw']) for vi in vall])
fig.subplots_adjust(hspace = 0.00, top = 0.98, bottom = 0.05, right = 0.97, left = 0.10)
if cursor_on:
multi = MultiCursor(fig.canvas, ax, color='r', lw=1)
for a in ax[:1]:
hideXLabels(a)
ax[-1].set_xlabel('Velocity (km/s) [z = %.7f]' % found_lines[0][1])
packet = {
'z': [found_lines[0][1]]
}
dd_writer(packet, 'vel.z')
if vlow != None and vhigh != None:
ax[-1].set_xlim(vlow, vhigh)
else:
ax[-1].set_xlim(minvel, maxvel)
if settings['crs_display'] == 1:
# calculate the CRS, use the bluemost pixel as the reference value for the grid
# zerovel_indxs = [findClosest(0.0, vi['vel_raw']) for vi in vall]
# zerovel = [vall[i]['vel_raw'][zi] for i, zi in enumerate(zerovel_indxs)]
# print zerovel
vdel = sum([vi['vdelta'] for vi in vall])/len(vall)
numbins = int((maxvel-minvel)/vdel)
crs_vel = [minvel+i*vdel for i in range(numbins)]
# crs_mask = []
crs_res = []
avgNoise = lambda l: sum(l)/sqrt(len(l))
# avgRes = lambda l: sum(l)/len(l)
for cv in crs_vel:
vel_indxs = [findClosest(cv, vi['vel_raw']) for vi in vall] # find closest bin crs_vel <-> vel_raw stacks
res_vals = []
for j, vel_i in enumerate(vel_indxs):
if abs(cv - vall[j]['vel_raw'][vel_i]) < vdel and vall[j]['daterr'][vel_i] > 0: # if within a bin of cv and a valid pixel
residual = calcResidual(vall[j]['dat_raw'][vel_i], vall[j]['fitdat_raw'][vel_i], vall[j]['daterr'][vel_i])
res_vals.append(residual)
if len(res_vals) > 0:
# crs_mask.append(1)
crs_res.append(avgNoise(res_vals))
else:
# crs_mask.append(0)
crs_res.append(NaN)
ax[0].axhline(1.0, c=colour_config['res_zero_one'])
ax[0].axhline(-1.0, c=colour_config['res_zero_one'])
# ax[0].plot(wldat_old, res_old, c=colour_config['res_zero_one'], linestyle='--')
ax[0].plot(crs_vel, crs_res, c=colour_config['residual'])
packet = {
'vel': crs_vel,
'res': crs_res
}
dd_writer(packet, 'vel.crs')
if saveFile:
print "Saving velocity stack plot to %s" % (saveFile)
p.savefig(saveFile)
p.close()
else:
p.ioff()
print termWarn("[Close display window to continue]")
print "Select left and right bounds using <,> and <.> keys"
p.show()
p.ion()
else:
print "No lines found with label '%s'" % tiedz_lbl
return
def findClosest(targetVal, valList):
    """Return the index of the element of valList nearest to targetVal.

    Ties are broken in favour of the earliest index; an empty valList
    raises ValueError.
    """
    return min(range(len(valList)), key=lambda idx: abs(valList[idx] - targetVal))
def dd_writer(packet, fpart):
    # Data-dump helper: write *packet* (a dict of column-name -> sequence)
    # as an ASCII table named "<prefix>.<fpart>.dat" when dumping is enabled
    # via the global `fcs` flags.  In test mode the table and the filename
    # banner go to stdout instead of a file.
    if fcs.dd:
        dd_fname = "%s.%s.dat" % (fcs.dd_prefix, fpart)
        if fcs.dd_test:
            print "@FILENAME: " + dd_fname
            asciitable.write(packet, sys.stdout)
        else:
            print "DD: " + dd_fname
            asciitable.write(packet, dd_fname)
def velocityPlot(ax, data, pc, fline, colour_config, tick_config, settings):
    """Draw one panel of the velocity stack on axes *ax*.

    Plots the binned data (as line, error bars or a filled contour
    depending on settings['plot_type']), the fit, optional component
    ticks and an optional per-panel residual trace, and mirrors each
    plotted dataset to the data-dump files via dd_writer().

    :param ax: matplotlib axes for this panel
    :param data: dict built by the caller with velocity/flux arrays
                 ('vbin', 'datbin', 'daterr', 'vdat', 'fitdat',
                 'dat_raw', 'fitdat_raw', 'twl', 'tcom', 'tsp', ...)
    :param pc: parsed component list (passed through to the tick drawers)
    :param fline: found-line record; fline[0] is the label and
                  fline[2]['wv'] the rest wavelength
    :param colour_config, tick_config, settings: display configuration
    """
    ax.axhline(1.0, c=colour_config['zero_one'], linestyle = ':')
    ax.axhline(0.0, c=colour_config['zero_one'], linestyle = ':')
    # data-dump file-name fragment, e.g. "CrII_2056.2569"
    dd_fpart = "%s_%.4f" % (''.join(ff for ff in fline[0] if ff.isalpha()), float(fline[2]['wv']))
    if settings['plot_type'] == 4:
        ax.errorbar(data['vbin'], data['datbin'], yerr=data['daterr'], color=colour_config['data'], fmt=None)
        packet = {
            'vbin': data['vbin'],
            'datbin': data['datbin'],
            'daterr': data['daterr']
        }
        dd_writer(packet, dd_fpart+'.vel.data.errbar')
    elif settings['plot_type'] == 5:
        # error contour: fill between data +/- error
        filla = [d-e for d, e in zip(data['datbin'], data['daterr'])]
        fillb = [d+e for d, e in zip(data['datbin'], data['daterr'])]
        ax.fill_between(data['vbin'], filla, y2 = fillb, lw=0.0, color=colour_config['data_contour'])
        packet = {
            'vbin': data['vbin'],
            'filla': filla,
            'fillb': fillb
        }
        dd_writer(packet, dd_fpart+'.vel.data.contour')
    else:
        ax.plot(data['vbin'], data['datbin'], color=colour_config['data'])
        packet = {
            'vbin': data['vbin'],
            'datbin': data['datbin']
        }
        dd_writer(packet, dd_fpart+'.vel.data')
    ax.plot(data['vdat'], data['fitdat'], color=colour_config['fit_new'])
    packet = {
        'vdat': data['vdat'],
        'fitdat': data['fitdat']
    }
    dd_writer(packet, dd_fpart+'.vel.fit')
    # set view bounds
    dax = ax.axis() # xmin, xmax, ymin, ymax
    if settings['flux_bottom'] == 1:
        # anchor the bottom near zero flux
        if (dax[2] >= 0.0):
            ymin = 0.0 - 0.1
        else:
            ymin = dax[2]
        ymin -= 0.05
        ax.yaxis.set_major_locator(FixedLocator([0.0, 1.0]))
    elif settings['flux_bottom'] == 2:
        # anchor just below the data/fit minimum
        ymin = round(min(data['datbin']+data['fitdat'])-0.06, 1)
        ymin -= 0.05
        ax.yaxis.set_major_locator(FixedLocator([ymin, 1.0]))
    elif settings['flux_bottom'] == 3:
        # tight bounds on the data/fit range (ymax recomputed below anyway)
        tmpmin = min(data['datbin']+data['fitdat'])
        tmpmax = max(data['datbin']+data['fitdat'])
        ymin = tmpmin
        ymax = tmpmax
        ax.yaxis.set_major_locator(FixedLocator([ymin, ymax]))
    if settings['vel_res'] == 2:
        # leave head-room above the spectrum for the residual trace
        YMAX_OFFSET = 0.55
        RESIDUAL_OFFSET = 0.3
        RESIDUAL_SCALE = 0.05
    else:
        YMAX_OFFSET = 0.2
        TICK_OFFSET = 0.05
        TICK_SCALE = 0.05
    # NOTE(review): when vel_res == 2 TICK_SCALE is not set in this branch,
    # so the y0y1 computation below relies on a global TICK_SCALE -- confirm.
    ymax = 1.00 + YMAX_OFFSET*(1.0-ymin)
    ax.axis([dax[0], dax[1], ymin, ymax])
    if settings['vel_res'] == 2:
        resy = 1.00 + RESIDUAL_OFFSET*(1.0-ymin)
    ##
    # tick positioning
    y0y1 = [1.00 + TICK_SCALE*(ymax-ymin), 1.00 - TICK_SCALE*(ymax-ymin)]
    ytxt = None # don't plot text labels on velocity stack plot
    if settings['tick_type'] == 1:
        drawTicks(data['twl'], data['tcom'], data['tsp'], pc, ax, tick_config, settings, y0y1, ytxt, fline = fline, fpart = dd_fpart)
    elif settings['tick_type'] == 2:
        drawGroupedTicks(data['twl'], data['tcom'], data['tsp'], pc, ax, tick_config, settings, y0y1, ytxt, fline = fline, fpart = dd_fpart)
    elif settings['tick_type'] == 3:
        drawGroupedTicks(data['twl'], data['tcom'], data['tsp'], pc, ax, tick_config, settings, y0y1, ytxt, fline = fline, weighted = True, fpart = dd_fpart)
    if settings['vel_res'] == 2:
        # per-panel residual trace, projected into a band above the spectrum
        ax.axhline(resy+RESIDUAL_SCALE*(ymax-ymin), c=colour_config['res_zero_one'])
        ax.axhline(resy-RESIDUAL_SCALE*(ymax-ymin), c=colour_config['res_zero_one'])
        residual = [calcResidual(data['dat_raw'][vel_i], data['fitdat_raw'][vel_i], data['daterr'][vel_i]) for vel_i in range(len(data['daterr']))]
        proj_res = [r*RESIDUAL_SCALE*(ymax-ymin)+resy for r in residual] # plot coordinates projected residual
        ax.plot(data['vdat'], proj_res, c=colour_config['residual'])
        packet = {
            'vdat': data['vdat'],
            'residual': residual, #included for reference
            'proj_res': proj_res
        }
        # fixed: fpart was missing the '.' separator ('vel.res'), which is
        # inconsistent with every other dd_writer call in this function
        dd_writer(packet, dd_fpart+'.vel.res')
    ax.yaxis.set_minor_locator(FixedLocator([0.25, 0.5, 0.75]))
    if settings['flux_bottom'] == 3:
        ax.yaxis.set_major_formatter(FormatStrFormatter("%.1e"))
    ax.yaxis.set_minor_formatter(NullFormatter())
def setPlotBounds(axes, settings, fitdat_old, fitdat_new, datbin, wlbin):
    """Set x/y view limits and y-tick locations on *axes*.

    The y range depends on settings['flux_bottom']:
      1 -- anchor the bottom near zero flux, major ticks at 0 and 1
      2 -- anchor just below the data/fit minimum, ticks at ymin and 1
      3 -- tight bounds on the data/fit range, scientific tick labels
    The x range always spans the binned wavelength array *wlbin*.
    """
    current = axes.axis()  # (xmin, xmax, ymin, ymax)
    ymax = current[3]
    mode = settings['flux_bottom']
    if mode == 1:
        ymin = (0.0 - 0.1) if current[2] >= 0.0 else current[2]
        ymin -= 0.05
        axes.yaxis.set_major_locator(FixedLocator([0.0, 1.0]))
    elif mode == 2:
        ymin = round(min(fitdat_old + fitdat_new + datbin) - 0.06, 1) - 0.05
        axes.yaxis.set_major_locator(FixedLocator([ymin, 1.0]))
    elif mode == 3:
        combined = fitdat_old + fitdat_new + datbin
        ymin, ymax = min(combined), max(combined)
        axes.yaxis.set_major_locator(FixedLocator([ymin, ymax]))
    if mode == 3:
        # tight bounds can sit far from unity, so use scientific notation
        axes.yaxis.set_major_formatter(FormatStrFormatter("%.2e"))
    axes.axis([min(wlbin), max(wlbin), ymin, ymax])
def dumpFits(fname, rftList, comps):
    """Dump each fit region in *rftList* to a pair of tab-separated files.

    For region i (1-based in the filenames) this writes:
      <fname>_<i>.fit.dat  -- per-pixel wavelength, fit, error and data
      <fname>_<i>.comp.dat -- tick wavelengths with the parameters of the
                              component each tick belongs to

    :param fname: output file-name prefix
    :param rftList: list of region/fit/tick records (see fExtract/tExtract)
    :param comps: raw component list, resolved via parseComps() so tick
                  component numbers map to (species, N, z, b, labels)
    """
    pc = parseComps(comps)
    for i, rft in enumerate(rftList):
        wldat = fExtract(rft, F_WL)
        fitdat = fExtract(rft, F_FIT)
        errdat = fExtract(rft, F_ERR)
        datdat = fExtract(rft, F_DATA)
        rlc = rft[RFT_R][R_L]
        assert(len(wldat) == len(fitdat) == len(errdat) == len(datdat))
        # fixed: use context managers so files are closed even if a write fails
        with open("%s_%i.fit.dat" % (fname, i+1), 'w') as f:
            f.write("# " + rlc.strip() + "\n")
            # NOTE(review): the header order below does not match the actual
            # column order written (wl, fit, error, data) -- kept as-is in
            # case downstream readers depend on the current column order.
            f.write("# wl\tdata\tfit\terror\n")
            for j in range(len(wldat)):
                f.write("%s\t%s\t%s\t%s\n" % (wldat[j], fitdat[j], errdat[j], datdat[j]))
        twl = tExtract(rft, T_WL)
        tcom = tExtract(rft, T_COM)
        tsp = tExtract(rft, T_SPEC)
        assert(len(twl) == len(tcom) == len(tsp))
        with open("%s_%i.comp.dat" % (fname, i+1), 'w') as f:
            f.write("# wl\tline no\tspecies\tN\tNlbl\tz\tzlbl\tb\tblbl\n")
            for j in range(len(twl)):
                ti = tcom[j]-1  # tick component numbers are 1-based
                f.write("%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n" % (twl[j], tcom[j], pc[ti][0], pc[ti][1], pc[ti][2], pc[ti][4], pc[ti][5], pc[ti][7], pc[ti][8]))
def plotData(rft_old, rft_new, comp_old, comp_new, axes, settings, colour_config, live):
wlbin, datbin = rft2Data(rft_new, F_WL, F_DATA, settings['plot_type'] <= 2)
wldat_old, fitdat_old = rft2Data(rft_old, F_WL, F_FIT, settings['plot_type'] == 2)
wldat_new, fitdat_new = rft2Data(rft_new, F_WL, F_FIT, settings['plot_type'] == 2)
daterr = fExtract(rft_new, F_ERR)
# wldat_old = fExtract(rft_old, F_WLL)
# wldat_new = fExtract(rft_new, F_WL)
# fitdat_old = fExtract(rft_old, F_FIT)
# fitdat_new = fExtract(rft_new, F_FIT)
axes.axhline(1.0, c=colour_config['zero_one'], linestyle = ':')
axes.axhline(0.0, c=colour_config['zero_one'], linestyle = ':')
if settings['plot_type'] == 4:
axes.errorbar(wlbin, datbin, yerr=daterr, color=colour_config['data'], fmt=None)
packet = {
'wlbin': wlbin,
'datbin': datbin,
'daterr': daterr
}
dd_writer(packet, 'data.errbar')
elif settings['plot_type'] == 5:
filla = [d-e for d, e in zip(datbin, daterr)]
fillb = [d+e for d, e in zip(datbin, daterr)]
axes.fill_between(wlbin, filla, y2 = fillb, lw=0.0, color=colour_config['data_contour'])
packet = {
'wlbin': wlbin,
'filla': filla,
'fillb': fillb
}
dd_writer(packet, 'data.contour')
else:
axes.plot(wlbin, datbin, color=colour_config['data'])
packet = {
'wlbin': wlbin,
'datbin': datbin
}
dd_writer(packet, 'data')
new_color = colour_config['fit_new']
if live: new_color = colour_config['fit_live']
df = settings['decompose_fit']
if df > 1:
if df == 2:
comp = comp_new
rft = rft_new
if df == 3:
comp = comp_old
rft = rft_old
pc = parseComps(comp)
rlc = rft[RFT_R][R_L]
rwl = rft[RFT_R][R_WL]
rwh = rft[RFT_R][R_WH]
twl = tExtract(rft, T_WL)
tcom = tExtract(rft, T_COM)
tcom_unique | |
= simu.result[i].reshape(-1)
datai *= soft_iron_param
result = cs.solve_1mag(
datai.reshape(-1), pSensori.reshape(-1), param)
param = result.copy()
results += {'X0': param[4], 'Y0': param[5], 'Z0': param[6]}
return results.cal_loss()
    def compare_hardiron(self, loop):
        """Run the hard-iron comparison *loop* times in a process pool and
        save a plot of mean error vs distance under result/compare_hardiron.

        Each iteration launches compare_hardiron_thread with choice 0
        (clean readings) and choice 1 (with a hard-iron disturbance); the
        per-choice losses are averaged over the runs before plotting.
        """
        results = []
        pool = Pool()
        for i in range(loop):
            # self.compare_method_thread(1)
            results.append(
                pool.apply_async(self.compare_hardiron_thread, args=(0, )))
            results.append(
                pool.apply_async(self.compare_hardiron_thread, args=(1, )))
            # results.append(
            #     pool.apply_async(self.compare_method_thread, args=(2, )))
            # results.append(
            #     pool.apply_async(self.compare_method_thread, args=(3, )))
        pool.close()
        pool.join()
        # print('debug')
        # accumulate the per-run losses, keyed by the choice ("scale") value;
        # the distance axis is taken from the first run of each choice
        loss_dict = {}
        dist_dict = {}
        for result in results:
            [scale, dist, loss] = result.get()
            if not str(scale) in loss_dict.keys():
                loss_dict[str(scale)] = loss
                dist_dict[str(scale)] = dist
            else:
                loss_dict[str(scale)] += loss
        msg = ['origin', 'Add hardiron', ]
        for key in dist_dict.keys():
            # divide by loop to turn the accumulated loss into a mean
            plt.plot(dist_dict[key],
                     loss_dict[key] / loop,
                     label=msg[int(key)])
        plt.legend()
        plt.ylabel('Error(cm)')
        plt.xlabel('Distance(cm)')
        name = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        root = 'result/compare_hardiron'
        if not os.path.exists(root):
            os.makedirs(root)
        plt.savefig(os.path.join(root, '{}.jpg'.format(name)), dpi=600)
def compare_hardiron_thread(self, choice):
    """Worker for compare_hardiron: track one magnet along the simulated
    route and return the per-distance localization loss.

    choice == 0: solve on the clean simulated readings.
    choice == 1: add a random constant per-channel offset to every frame
        before solving (an additive offset models a hard-iron bias).

    Returns whatever Result_Handler.cal_loss() produces
    ([scale, dist, loss] as consumed by compare_hardiron).

    Fixes vs. original: the initialization was duplicated in both
    branches; the bias was misleadingly named "soft_iron_param"; the
    in-place "datai += bias" mutated simu.result through a view; and an
    unknown `choice` silently fell through to a NameError.
    """
    pSensori = 5 * self.pSensor
    simu = self.estimate_B(pSensori, noise_type=0)
    # Solver state: [gx, gy, gz, log(m), x, y, z, theta, phy], seeded at
    # the first point of the route.
    init_param = np.array([0, 0, 0, np.log(
        self.M), self.route[0, 0], self.route[0, 1], self.route[0, 2], 0, 0])
    param = init_param.copy()
    results = Result_Handler(simu, choice)
    hard_iron_bias = None
    if choice == 1:
        # Constant additive per-channel offset, one entry per flattened
        # sensor axis (simu.result.size // frames values).
        hard_iron_bias = 5.0 * np.random.randn(
            simu.result.size // simu.result.shape[0]) + 1
    elif choice != 0:
        raise ValueError('unknown choice: {}'.format(choice))
    for i in range(simu.result.shape[0]):
        datai = simu.result[i].reshape(-1)
        if hard_iron_bias is not None:
            # Out-of-place add so the simulated data is left untouched.
            datai = datai + hard_iron_bias
        result = cs.solve_1mag(
            datai.reshape(-1), pSensori.reshape(-1), param)
        param = result.copy()
        results += {'X0': param[4], 'Y0': param[5], 'Z0': param[6]}
    return results.cal_loss()
def estimate_B_singular_noise(self, pSensor):
    """Simulate sensor readings along self.route with Gaussian noise
    injected on the x channel only ("singular" noise), then quantize the
    readings to the sensor resolution.

    @param pSensor: (N, 3) array of sensor positions.
    @return: Simu_Data(route, SNR, readings); readings reshaped to
        (frames, N*3) and SNR to (frames, N), one value per sensor.
    """
    result = []
    exp = expression()
    for i in range(self.route.shape[0]):
        routei = self.route[i]
        tmp = []
        for j in range(pSensor.shape[0]):
            # Analytic field model: background field (gx, gy, gz) plus a
            # dipole of moment m at routei with orientation (theta, phy).
            param = [
                self.params['gx'], self.params['gy'], self.params['gz'],
                pSensor[j][0], pSensor[j][1], pSensor[j][2], routei[0],
                routei[1], routei[2], self.params['m'],
                self.params['theta'], self.params['phy']
            ]
            tmp.append(exp.VecB(*param).squeeze())
        tmp = np.concatenate(tmp, axis=0).reshape(-1)
        result.append(tmp)
    result = np.concatenate(result, axis=0).reshape(-1, 3)
    # Noise only on x; y and z channels stay noise-free (hence "singular").
    Noise_x = np.sqrt(1.5) * np.random.randn(result.shape[0])
    Noise_y = 0 * np.random.randn(result.shape[0])
    Noise_z = 0 * np.random.randn(result.shape[0])
    Noise = np.stack([Noise_x, Noise_y, Noise_z]).T
    result += Noise
    # add sensor resolution
    # Quantize in units of 0.01, to steps of 15 counts.
    result = np.floor(result * 100.0)
    result = result - np.mod(result, 15)
    result = 1e-2 * result
    # compute SNR
    # NOTE(review): "signal" is the quantized noisy reading minus the
    # injected noise, and "noise" is background field + noise; also np.log
    # is the natural log, so these are not standard dB (10*log10) values
    # -- confirm the intended SNR definition before relying on the scale.
    G = 1e6 * np.array(
        [self.params['gx'], self.params['gy'], self.params['gz']])
    signal_power = np.sum(np.power(result - Noise, 2), 1)
    noise_power = np.sum(np.power(G + Noise, 2), 1)
    SNR = 10 * np.log(signal_power / noise_power)
    result = result.reshape(-1, pSensor.size)
    SNR = SNR.reshape(-1, pSensor.shape[0])
    # print('Debug')
    return Simu_Data(self.route, SNR, result)
def simulate_2mag_3type_thread(pSensor, params, typ, i):
    """Compute the stacked two-magnet field readings for one sample.

    Uses the shared expression attached to simulate_2mag_3type.exp.
    Returns [flattened readings, typ]; `i` is only used for progress
    reporting.
    """
    readings = []
    for j in range(pSensor.shape[0]):
        args = [
            params['gx'], params['gy'], params['gz'],
            pSensor[j][0], pSensor[j][1], pSensor[j][2], params['X0'],
            params['Y0'], params['Z0'], params['m'],
            params['theta0'], params['phy0'], params['X1'],
            params['Y1'], params['Z1'], params['m'],
            params['theta1'], params['phy1'],
        ]
        readings.append(simulate_2mag_3type.exp.VecB(*args).squeeze())
    flat = np.concatenate(readings, axis=0).reshape(-1)
    print(i, ' finished ')
    return [flat, typ]
def simulate_2mag_3type_delta_thread(pSensor, params, typ, i):
    """Simulate per-sensor fields for a two-magnet scene plus the field
    *change* after randomly jittering both magnet positions.

    Args:
        pSensor: (N, 3) array of sensor positions.
        params: dict of scene parameters (background field, magnet
            positions, moments and orientations).
        typ: class label forwarded untouched to the caller.
        i: sample index, only used for progress reporting.

    Returns:
        [flat, typ] where flat stacks, per sensor, the baseline field
        followed by the delta caused by the jitter.

    Fixes vs. original: the baseline VecB(*param) was evaluated twice per
    sensor, and a leftover debug print of the per-sensor array shape ran
    on every iteration.
    """
    tmp = []
    for j in range(pSensor.shape[0]):
        param = [
            params['gx'], params['gy'], params['gz'],
            pSensor[j][0], pSensor[j][1], pSensor[j][2], params['X0'],
            params['Y0'], params['Z0'], params['m'],
            params['theta0'], params['phy0'], params['X1'],
            params['Y1'], params['Z1'], params['m'],
            params['theta1'], params['phy1'],
        ]
        # Random displacement (up to 1 cm) for each magnet, direction drawn
        # in spherical coordinates.
        r = 1 * 1e-2 * np.random.rand()
        theta = np.random.rand() * np.pi
        phy = np.random.rand() * 2 * np.pi
        dx0 = r * np.sin(theta) * np.cos(phy)
        dy0 = r * np.sin(theta) * np.sin(phy)
        dz0 = r * np.cos(theta)
        r = 1 * 1e-2 * np.random.rand()
        theta = np.random.rand() * np.pi
        phy = np.random.rand() * 2 * np.pi
        dx1 = r * np.sin(theta) * np.cos(phy)
        dy1 = r * np.sin(theta) * np.sin(phy)
        dz1 = r * np.cos(theta)
        param2 = [
            params['gx'], params['gy'], params['gz'],
            pSensor[j][0], pSensor[j][1], pSensor[j][2], params['X0'] + dx0,
            params['Y0'] + dy0, params['Z0'] + dz0, params['m'],
            params['theta0'], params['phy0'], params['X1'] + dx1,
            params['Y1'] + dy1, params['Z1'] + dz1, params['m'],
            params['theta1'], params['phy1'],
        ]
        # Evaluate each configuration once; stack baseline and delta.
        base = simulate_2mag_3type.exp.VecB(*param).squeeze()
        moved = simulate_2mag_3type.exp.VecB(*param2).squeeze()
        tmp.append(np.concatenate([base, moved - base], axis=0))
    tmp = np.concatenate(tmp, axis=0)
    tmp = tmp.reshape(-1)
    print(i, ' finished ')
    return [tmp, typ]
def simulate_2mag_3type(pSensor, size=1000, cls=3, edge=20):
size = int(size)
results = []
types = []
simulate_2mag_3type.exp = expression(2)
pool = Pool()
pool_results = []
i = 0
# for i in range(size * cls):
while(i < size * cls):
# G's Spherical Coordinates
t1 = np.pi * np.random.rand()
t2 = 2 * np.pi * np.random.rand()
# P1's Spherical Coordinates
tt1 = np.pi * np.random.rand()
pp1 = 2 * np.pi * np.random.rand()
# P2's Spherical Coordinates
tt2 = np.pi * np.random.rand()
pp2 = 2 * np.pi * np.random.rand()
typ = i % cls
G = 38.6600
# G = 0.0
if cls == 3:
if typ == 0:
r1 = np.random.rand() * 20 + edge
r2 = np.random.rand() * 20 + edge
params = {
'm': np.log(2.7),
'gx': G * np.sin(t1) * np.cos(t2) * 1e-6,
'gy': G * np.sin(t1) * np.sin(t2) * 1e-6,
'gz': G * np.cos(t1) * 1e-6,
'X0': 1e-2 * r1 * np.sin(tt1) * np.cos(pp1),
'Y0': 1e-2 * r1 * np.sin(tt1) * np.sin(pp1),
'Z0': 1e-2 * r1 * np.cos(tt1),
'theta0': np.pi * np.random.rand(),
'phy0': 2 * np.pi * np.random.rand(),
'X1': 1e-2 * r2 * np.sin(tt2) * np.cos(pp2),
'Y1': 1e-2 * r2 * np.sin(tt2) * np.sin(pp2),
'Z1': 1e-2 * r2 * np.cos(tt2),
'theta1': np.pi * np.random.rand(),
'phy1': 2 * np.pi * np.random.rand(),
}
elif typ == 1:
r1 = np.random.rand() * 20 + edge
r2 = np.random.rand() * (edge - 5) + 5
params = {
'm': np.log(2.7),
'gx': G * np.sin(t1) * np.cos(t2) * 1e-6,
'gy': G * np.sin(t1) * np.sin(t2) * 1e-6,
'gz': G * np.cos(t1) * 1e-6,
'X0': 1e-2 * r1 * np.sin(tt1) * np.cos(pp1),
'Y0': 1e-2 * r1 * np.sin(tt1) * np.sin(pp1),
'Z0': 1e-2 * r1 * np.cos(tt1),
'theta0': np.pi * np.random.rand(),
'phy0': 2 * np.pi * np.random.rand(),
'X1': 1e-2 * r2 * np.sin(tt2) * np.cos(pp2),
'Y1': 1e-2 * r2 * np.sin(tt2) * np.sin(pp2),
'Z1': 1e-2 * r2 * np.cos(tt2),
'theta1': np.pi * np.random.rand(),
'phy1': 2 * np.pi * np.random.rand(),
}
elif typ == 2:
r1 = np.random.rand() * (edge - 5) + 5
r2 = np.random.rand() * (edge - 5) + 5
params = {
'm': np.log(2.7),
'gx': G * np.sin(t1) * np.cos(t2) * 1e-6,
'gy': G * np.sin(t1) * np.sin(t2) * 1e-6,
'gz': G * np.cos(t1) * 1e-6,
'X0': 1e-2 * r1 * np.sin(tt1) * np.cos(pp1),
'Y0': 1e-2 * r1 * np.sin(tt1) * np.sin(pp1),
'Z0': 1e-2 * r1 * np.cos(tt1),
'theta0': np.pi * np.random.rand(),
'phy0': 2 * np.pi * np.random.rand(),
'X1': 1e-2 * r2 * np.sin(tt2) * np.cos(pp2),
'Y1': 1e-2 * r2 * np.sin(tt2) * np.sin(pp2),
'Z1': 1e-2 * r2 * np.cos(tt2),
'theta1': np.pi * np.random.rand(),
'phy1': 2 * np.pi * np.random.rand(),
}
elif cls == 2:
if typ == 0:
r1 = np.random.rand() * 20 + 30
r2 = np.random.rand() * 20 + 10
params = {
'm': np.log(2.7),
'gx': G * np.sin(t1) * np.cos(t2) * 1e-6,
'gy': G * np.sin(t1) * np.sin(t2) * 1e-6,
'gz': G * np.cos(t1) * 1e-6,
'X0': 1e-2 * r1 * np.sin(tt1) * np.cos(pp1),
'Y0': 1e-2 * r1 * np.sin(tt1) * np.sin(pp1),
'Z0': 1e-2 * r1 * np.cos(tt1),
'theta0': np.pi * np.random.rand(),
'phy0': 2 * np.pi * np.random.rand(),
'X1': 1e-2 * r2 * np.sin(tt2) * np.cos(pp2),
'Y1': 1e-2 * r2 * np.sin(tt2) * np.sin(pp2),
'Z1': 1e-2 * r2 * np.cos(tt2),
'theta1': np.pi * np.random.rand(),
'phy1': 2 * np.pi * np.random.rand(),
}
elif typ == 1:
r1 = np.random.rand() * 20 + 10
r2 = np.random.rand() * 20 + 10
params = {
'm': np.log(2.7),
'gx': G * np.sin(t1) * np.cos(t2) * 1e-6,
'gy': G * np.sin(t1) * np.sin(t2) * 1e-6,
'gz': G * np.cos(t1) * 1e-6,
'X0': 1e-2 * r1 * np.sin(tt1) * np.cos(pp1),
'Y0': 1e-2 * r1 * np.sin(tt1) * np.sin(pp1),
'Z0': 1e-2 * r1 * np.cos(tt1),
'theta0': np.pi * np.random.rand(),
'phy0': 2 * np.pi * np.random.rand(),
'X1': 1e-2 * r2 * np.sin(tt2) * np.cos(pp2),
'Y1': 1e-2 * r2 * np.sin(tt2) * np.sin(pp2),
'Z1': 1e-2 * r2 * np.cos(tt2),
'theta1': np.pi * np.random.rand(),
'phy1': 2 * np.pi * np.random.rand(),
}
# check G and R
# GG | |
construct the instance.
"""
cls.to_runner_api_parameter = (
lambda self, unused_context: (urn, None, self._get_component_coders()))
# pylint: disable=unused-variable
@Coder.register_urn(urn, None)
def from_runner_api_parameter(unused_payload, components, unused_context):
if components:
return cls(*components)
else:
return cls()
@Coder.register_urn(
python_urns.PICKLED_CODER, google.protobuf.wrappers_pb2.BytesValue)
def _pickle_from_runner_api_parameter(payload, components, context):
return deserialize_coder(payload.value)
class StrUtf8Coder(Coder):
  """Coder that maps unicode strings to and from their UTF-8 bytes."""

  def encode(self, value):
    encoded = value.encode('utf-8')
    return encoded

  def decode(self, value):
    decoded = value.decode('utf-8')
    return decoded

  def is_deterministic(self):
    # UTF-8 encoding of a given string always yields the same bytes.
    return True

  def to_type_hint(self):
    return unicode


Coder.register_structured_urn(
    common_urns.coders.STRING_UTF8.urn, StrUtf8Coder)
class ToStringCoder(Coder):
  """A default string coder used if no sink coder is specified."""

  if sys.version_info.major == 2:

    def encode(self, value):
      # pylint: disable=unicode-builtin
      # Python 2: unicode is UTF-8 encoded; anything else goes via str().
      if isinstance(value, unicode):  # noqa: F821
        return value.encode('utf-8')
      return str(value)

  else:

    def encode(self, value):
      # Python 3: bytes pass through; everything else is stringified and
      # UTF-8 encoded.
      if isinstance(value, bytes):
        return value
      return str(value).encode('utf-8')

  def decode(self, _):
    raise NotImplementedError('ToStringCoder cannot be used for decoding.')

  def is_deterministic(self):
    return True
class FastCoder(Coder):
  """Coder subclass used when a (faster) CoderImpl is supplied directly.

  The Coder class defines _create_impl in terms of encode() and decode();
  this class inverts that by defining encode() and decode() in terms of
  _create_impl().
  """

  def encode(self, value):
    """Encodes the given object into a byte string."""
    impl = self.get_impl()
    return impl.encode(value)

  def decode(self, encoded):
    """Decodes the given byte string into the corresponding object."""
    impl = self.get_impl()
    return impl.decode(encoded)

  def estimate_size(self, value):
    # Defer size estimation to the underlying CoderImpl.
    return self.get_impl().estimate_size(value)

  def _create_impl(self):
    # Subclasses must supply the CoderImpl powering this coder.
    raise NotImplementedError
class BytesCoder(FastCoder):
  """Byte string coder."""

  def _create_impl(self):
    return coder_impl.BytesCoderImpl()

  def is_deterministic(self):
    # type: () -> bool
    # Byte strings encode as themselves.
    return True

  def to_type_hint(self):
    return bytes

  def as_cloud_object(self, coders_context=None):
    return {'@type': 'kind:bytes'}

  def __eq__(self, other):
    # Stateless: all instances are interchangeable.
    return type(other) == type(self)

  def __hash__(self):
    return hash(type(self))


Coder.register_structured_urn(common_urns.coders.BYTES.urn, BytesCoder)
class BooleanCoder(FastCoder):
  """Coder for boolean values."""

  def _create_impl(self):
    return coder_impl.BooleanCoderImpl()

  def is_deterministic(self):
    # Booleans have a fixed encoding.
    return True

  def to_type_hint(self):
    return bool

  def __eq__(self, other):
    # Stateless: all instances are interchangeable.
    return type(other) == type(self)

  def __hash__(self):
    return hash(type(self))


Coder.register_structured_urn(common_urns.coders.BOOL.urn, BooleanCoder)
class VarIntCoder(FastCoder):
  """Variable-length integer coder."""

  def _create_impl(self):
    return coder_impl.VarIntCoderImpl()

  def is_deterministic(self):
    # type: () -> bool
    # A given integer always has the same varint encoding.
    return True

  def to_type_hint(self):
    return int

  def as_cloud_object(self, coders_context=None):
    return {'@type': 'kind:varint'}

  def __eq__(self, other):
    # Stateless: all instances are interchangeable.
    return type(other) == type(self)

  def __hash__(self):
    return hash(type(self))


Coder.register_structured_urn(common_urns.coders.VARINT.urn, VarIntCoder)
class FloatCoder(FastCoder):
  """A coder used for floating-point values."""

  def _create_impl(self):
    return coder_impl.FloatCoderImpl()

  def is_deterministic(self):
    # type: () -> bool
    # IEEE-754 doubles have a fixed byte representation.
    return True

  def to_type_hint(self):
    return float

  def __eq__(self, other):
    # Stateless: all instances are interchangeable.
    return type(other) == type(self)

  def __hash__(self):
    return hash(type(self))


Coder.register_structured_urn(common_urns.coders.DOUBLE.urn, FloatCoder)
class TimestampCoder(FastCoder):
  """A coder used for timeutil.Timestamp values."""

  def _create_impl(self):
    return coder_impl.TimestampCoderImpl()

  def is_deterministic(self):
    # () -> bool
    # A given timestamp always encodes to the same bytes.
    return True

  def __eq__(self, other):
    # Stateless: all instances are interchangeable.
    return type(other) == type(self)

  def __hash__(self):
    return hash(type(self))
class _TimerCoder(FastCoder):
  """A coder used for timer values.

  For internal use."""

  def __init__(self, payload_coder):
    # type: (Coder) -> None
    self._payload_coder = payload_coder

  def _get_component_coders(self):
    # type: () -> List[Coder]
    return [self._payload_coder]

  def _create_impl(self):
    payload_impl = self._payload_coder.get_impl()
    return coder_impl.TimerCoderImpl(payload_impl)

  def is_deterministic(self):
    # () -> bool
    # Deterministic exactly when the wrapped payload coder is.
    return self._payload_coder.is_deterministic()

  def __eq__(self, other):
    if type(other) != type(self):
      return False
    return self._payload_coder == other._payload_coder

  def __hash__(self):
    return hash(type(self)) + hash(self._payload_coder)


Coder.register_structured_urn(
    common_urns.coders.TIMER.urn, _TimerCoder)
class SingletonCoder(FastCoder):
  """A coder that always encodes exactly one value."""

  def __init__(self, value):
    self._value = value

  def _create_impl(self):
    return coder_impl.SingletonCoderImpl(self._value)

  def is_deterministic(self):
    # () -> bool
    # There is only one possible encoded value.
    return True

  def __eq__(self, other):
    if type(other) != type(self):
      return False
    return self._value == other._value

  def __hash__(self):
    return hash(self._value)
def maybe_dill_dumps(o):
  """Pickle using cPickle or the Dill pickler as a fallback."""
  # cPickle cannot handle some custom objects (e.g. ones containing
  # lambdas); dill can, at the cost of speed.
  try:
    result = pickle.dumps(o, pickle.HIGHEST_PROTOCOL)
  except Exception:  # pylint: disable=broad-except
    result = dill.dumps(o)
  return result
def maybe_dill_loads(o):
  """Unpickle using cPickle or the Dill pickler as a fallback."""
  try:
    result = pickle.loads(o)
  except Exception:  # pylint: disable=broad-except
    result = dill.loads(o)
  return result
class _PickleCoderBase(FastCoder):
  """Base class for pickling coders."""

  def is_deterministic(self):
    # () -> bool
    # Pickled output depends on runtime details (e.g. the ordering of
    # pickled entries in maps may vary across executions), so these coders
    # are not in general suitable as key coders in GroupByKey operations.
    return False

  def as_cloud_object(self, coders_context=None, is_pair_like=True):
    value = super(_PickleCoderBase, self).as_cloud_object(coders_context)
    # The service checks for the "is_pair_like" key where it expects a KV
    # coder; since we cannot infer a more granular value coder here, we
    # advertise pair-like behaviour with this coder as both components.
    if is_pair_like:
      value['is_pair_like'] = True
      value['component_encodings'] = [
          self.as_cloud_object(coders_context, is_pair_like=False)
          for _ in range(2)
      ]
    return value

  # .key_coder()/.value_coder() are permitted on pickle coders because the
  # return values of lambdas in ParDo operations cannot always be inferred,
  # and the result may be used in a GroupByKey.
  def is_kv_coder(self):
    # () -> bool
    return True

  def key_coder(self):
    return self

  def value_coder(self):
    return self

  def __eq__(self, other):
    # Stateless: all instances of the same class are interchangeable.
    return type(other) == type(self)

  def __hash__(self):
    return hash(type(self))
class PickleCoder(_PickleCoderBase):
  """Coder using Python's pickle functionality."""

  def _create_impl(self):
    # Bind lookups locally so the encode callback avoids repeated
    # attribute access.
    dump = pickle.dumps
    protocol = pickle.HIGHEST_PROTOCOL
    return coder_impl.CallbackCoderImpl(
        lambda x: dump(x, protocol), pickle.loads)

  def as_deterministic_coder(self, step_label, error_message=None):
    return DeterministicFastPrimitivesCoder(self, step_label)

  def to_type_hint(self):
    return Any
class DillCoder(_PickleCoderBase):
  """Coder using dill's pickle functionality."""

  def _create_impl(self):
    # dill handles objects plain pickle cannot (e.g. lambdas).
    return coder_impl.CallbackCoderImpl(maybe_dill_dumps, maybe_dill_loads)
class DeterministicFastPrimitivesCoder(FastCoder):
  """Throws runtime errors when encoding non-deterministic values."""

  def __init__(self, coder, step_label):
    self._underlying_coder = coder
    self._step_label = step_label

  def _create_impl(self):
    underlying_impl = self._underlying_coder.get_impl()
    return coder_impl.DeterministicFastPrimitivesCoderImpl(
        underlying_impl, self._step_label)

  def is_deterministic(self):
    # () -> bool
    # Determinism is enforced at encode time, so this is always True.
    return True

  def is_kv_coder(self):
    # () -> bool
    return True

  def key_coder(self):
    return self

  def value_coder(self):
    return self

  def to_type_hint(self):
    return Any
class FastPrimitivesCoder(FastCoder):
  """Encodes simple primitives (e.g. str, int) efficiently.

  For unknown types, falls back to another coder (e.g. PickleCoder).
  """
  def __init__(self, fallback_coder=PickleCoder()):
    # type: (Coder) -> None
    # NOTE(review): the default fallback coder is created once at class
    # definition time and shared by all instances; PickleCoder holds no
    # mutable state in this file, so that appears safe here.
    self._fallback_coder = fallback_coder

  def _create_impl(self):
    return coder_impl.FastPrimitivesCoderImpl(
        self._fallback_coder.get_impl())

  def is_deterministic(self):
    # () -> bool
    # Overall determinism hinges on the fallback coder used for unknown
    # types.
    return self._fallback_coder.is_deterministic()

  def as_deterministic_coder(self, step_label, error_message=None):
    if self.is_deterministic():
      return self
    else:
      return DeterministicFastPrimitivesCoder(self, step_label)

  def to_type_hint(self):
    return Any

  def as_cloud_object(self, coders_context=None, is_pair_like=True):
    # NOTE(review): this calls super(FastCoder, self), skipping FastCoder
    # in the MRO, whereas _PickleCoderBase uses super(_PickleCoderBase,
    # self). FastCoder does not override as_cloud_object in the visible
    # code, so the two resolve the same today -- confirm before "fixing".
    value = super(FastCoder, self).as_cloud_object(coders_context)
    # We currently use this coder in places where we cannot infer the coder to
    # use for the value type in a more granular way. In places where the
    # service expects a pair, it checks for the "is_pair_like" key, in which
    # case we would fail without the hack below.
    if is_pair_like:
      value['is_pair_like'] = True
      value['component_encodings'] = [
          self.as_cloud_object(coders_context, is_pair_like=False),
          self.as_cloud_object(coders_context, is_pair_like=False)
      ]
    return value

  # We allow .key_coder() and .value_coder() to be called on FastPrimitivesCoder
  # since we can't always infer the return values of lambdas in ParDo
  # operations, the result of which may be used in a GroupBykey.
  def is_kv_coder(self):
    # () -> bool
    return True

  def key_coder(self):
    return self

  def value_coder(self):
    return self

  def __eq__(self, other):
    return type(self) == type(other)

  def __hash__(self):
    return hash(type(self))
class Base64PickleCoder(Coder):
  """Coder of objects by Python pickle, then base64 encoding."""
  # TODO(robertwb): Do base64 encoding where it's needed (e.g. in json) rather
  # than via a special Coder.

  def encode(self, value):
    pickled = pickle.dumps(value, pickle.HIGHEST_PROTOCOL)
    return base64.b64encode(pickled)

  def decode(self, encoded):
    return pickle.loads(base64.b64decode(encoded))

  def is_deterministic(self):
    # () -> bool
    # Not deterministic; see the corresponding comments for PickleCoder
    # above.
    return False

  # .key_coder()/.value_coder() are allowed here because lambda return
  # values in ParDo operations cannot always be inferred, and the result
  # may feed a GroupByKey.
  #
  # TODO(ccy): this is currently only used for KV values from Create transforms.
  # Investigate a way to unify this with PickleCoder.
  def is_kv_coder(self):
    return True

  def key_coder(self):
    return self

  def value_coder(self):
    return self
class ProtoCoder(FastCoder):
  """A Coder for Google Protocol Buffers.

  It supports both Protocol Buffers syntax versions 2 and 3. However,
  the runtime version of the python protobuf library must exactly match the
  version of the protoc compiler that was used to generate the protobuf
  messages.

  ProtoCoder is registered in the global CoderRegistry as the default coder for
  any protobuf Message object.
  """
  def __init__(self, proto_message_type):
    # type: (google.protobuf.message.Message) -> None
    self.proto_message_type = proto_message_type

  def _create_impl(self):
    return coder_impl.ProtoCoderImpl(self.proto_message_type)

  def is_deterministic(self):
    # () -> bool
    # TODO(vikasrk): A proto message can be deterministic if it does not contain
    # a Map.
    return False

  def as_deterministic_coder(self, step_label, error_message=None):
    # Deterministic encoding is delegated to a dedicated coder type.
    return DeterministicProtoCoder(self.proto_message_type)

  def __eq__(self, other):
    return (type(self) == type(other)
            and self.proto_message_type == other.proto_message_type)

  def __hash__(self):
    return hash(self.proto_message_type)
@staticmethod
def | |
= v['prefactor']*np.exp(v['m']*np.square(np.cos(np.radians(x-v['x_center'])))) + v['baseline']
return m
def func2minimize(params, x, data):
v = params.valuesdict()
m = model(v, x)
return m - data
params = lmfit.Parameters()
params.add('prefactor', value=np.max(line.y)*0.1, min=0)
params.add('x_center', value=0, min=np.min(line.x), max=np.max(line.x))
params.add('m', value=2.0, min=0)
params.add('baseline', value=0, min=0, max=np.max(line.y), vary=False)
lm_result = lmfit.minimize(func2minimize, params, args=(line.x, line.y))
if run_args['verbosity']>=5:
print('Fit results (lmfit):')
lmfit.report_fit(lm_result.params)
fit_x = line.x
fit_y = model(lm_result.params.valuesdict(), fit_x)
fit_line = DataLine(x=fit_x, y=fit_y, plot_args={'linestyle':'-', 'color':'r', 'marker':None, 'linewidth':4.0})
fit_x = np.linspace(np.min(line.x), np.max(line.x), num=1000)
fit_y = model(lm_result.params.valuesdict(), fit_x)
fit_line_extended = DataLine(x=fit_x, y=fit_y, plot_args={'linestyle':'-', 'color':'r', 'marker':None, 'linewidth':4.0})
return lm_result, fit_line, fit_line_extended
class linecut_qz_fit(Protocols.linecut_qz):

    def __init__(self, name='linecut_qz_fit', **kwargs):
        """Protocol that extracts a qz linecut and fits a peak to it."""
        self.name = self.__class__.__name__ if name is None else name
        self.default_ext = '.png'
        defaults = {
            'show_region': False,
            'plot_range': [None, None, 0, None],
            'show_guides': True,
            'markersize': 0,
            'linewidth': 1.5,
        }
        # Caller-supplied kwargs override the defaults.
        defaults.update(kwargs)
        self.run_args = defaults
@tools.run_default
def run(self, data, output_dir, **run_args):
    """Take a qz linecut of `data`, fit a Gaussian peak (with linear
    background) to it, save annotated plots and data files, and return a
    dict of fit results (peak position, width, derived d-spacing, grain
    size, and refraction-corrected qT/qR when a critical angle is given).
    """
    # Usage example:
    # linecut_qz_fit(show_region=False, show=False, qr=0.009, dq=0.0025, q_mode='qr', fit_range=fit_range, q0_expected=q0_expected, plot_range=[0, 0.08, 0, None]) ,
    results = {}
    line = data.linecut_qz(**run_args)
    if 'show_region' in run_args and run_args['show_region']:
        data.plot(show=True)
    #line.smooth(2.0, bins=10)
    outfile = self.get_outfile(data.name, output_dir)
    line.plot(save=outfile, **run_args)
    outfile = self.get_outfile(data.name, output_dir, ext='.dat')
    line.save_data(outfile)
    # Recover the incident angle from the filename (e.g. "_th0.150_")
    # when the caller did not supply one.
    if 'incident_angle' not in run_args:
        run_args['incident_angle'] = 0
        import re
        filename_re = re.compile('^.+_th(-?\d+\.\d+)_.+$')
        m = filename_re.match(data.name)
        if m:
            run_args['incident_angle'] = float(m.groups()[0])
    #if 'critical_angle_film' not in run_args:
    #run_args['critical_angle_film'] = 0
    #if 'critical_angle_substrate' not in run_args:
    #run_args['critical_angle_substrate'] = 0
    # Fit data
    lm_result, fit_line, fit_line_extended = self._fit_peaks(line, **run_args)
    # Save fit results
    fit_name = 'fit_peaks'
    prefactor_total = 0
    for param_name, param in lm_result.params.items():
        results['{}_{}'.format(fit_name, param_name)] = { 'value': param.value, 'error': param.stderr, }
        if 'prefactor' in param_name:
            prefactor_total += np.abs(param.value)
    results['{}_prefactor_total'.format(fit_name)] = prefactor_total
    results['{}_chi_squared'.format(fit_name)] = lm_result.chisqr/lm_result.nfree
    # Calculate some additional things
    # d-spacing in nm from the fitted peak center (q in inverse Angstrom).
    q0 = results['{}_x_center1'.format(fit_name)]['value']
    d = 0.1*2.*np.pi/q0
    results['{}_d0'.format(fit_name)] = d
    # Grain size (nm) estimated from the Gaussian peak width.
    xi = 0.1*(2.*np.pi/np.sqrt(2.*np.pi))/results['{}_sigma1'.format(fit_name)]['value']
    results['{}_grain_size'.format(fit_name)] = xi
    if 'critical_angle_film' in run_args:
        # Account for refraction distortion
        theta_incident_rad = np.radians(run_args['incident_angle'])
        theta_c_f_rad = np.radians(run_args['critical_angle_film'])
        #theta_c_s_rad = np.radians(run_args['critical_angle_substrate'])
        alpha_i_rad = np.arccos( np.cos(theta_incident_rad)/np.cos(theta_c_f_rad) )
        def angle_to_q(two_theta_s_rad):
            # Convert a scattering angle (radians) to q using the beam's k.
            k = data.calibration.get_k()
            qz = 2*k*np.sin(two_theta_s_rad/2.0)
            return qz
        def q_to_angle(q):
            # Inverse of angle_to_q.
            k = data.calibration.get_k()
            two_theta_s_rad = 2.0*np.arcsin(q/(2.0*k))
            return two_theta_s_rad
        # Scattering from incident (refracted) beam
        two_theta_s_rad = q_to_angle(q0)
        theta_f_rad = two_theta_s_rad - theta_incident_rad
        alpha_f_rad = np.arccos( np.cos(theta_f_rad)/np.cos(theta_c_f_rad) )
        two_alpha_s_rad = alpha_i_rad + alpha_f_rad
        qT = angle_to_q(two_alpha_s_rad)
        results['{}_qT'.format(fit_name)] = qT
        results['{}_dT'.format(fit_name)] = 0.1*2.*np.pi/qT
        # Scattering from reflected beam
        two_alpha_s_rad = abs( alpha_f_rad-alpha_i_rad )
        qR = angle_to_q(two_alpha_s_rad)
        results['{}_qR'.format(fit_name)] = qR
        results['{}_dR'.format(fit_name)] = 0.1*2.*np.pi/qR
    # Plot and save data
    class DataLines_current(DataLines):
        # NOTE(review): methods here close over run()'s locals
        # (angle_to_q) and read results keys (fit_peaks_qT/qR) that only
        # exist when 'critical_angle_film' was supplied -- plotting
        # without it would raise; confirm callers always pass it.
        def _plot_extra(self, **plot_args):
            # Annotate the plot with fit values and scattering guides.
            xi, xf, yi, yf = self.ax.axis()
            y_fit_max = np.max(self.lines[1].y)
            yf = y_fit_max*2.0
            v_spacing = (yf-yi)*0.06
            q0 = self.results['fit_peaks_x_center1']['value']
            color = 'purple'
            yp = yf
            s = '$q_0 = \, {:.4f} \, \mathrm{{\AA}}^{{-1}}$'.format(q0)
            self.ax.text(xf, yp, s, size=15, color=color, verticalalignment='top', horizontalalignment='right')
            yp -= v_spacing
            s = r'$d_0 \approx \, {:.1f} \, \mathrm{{nm}}$'.format(self.results['fit_peaks_d0'])
            self.ax.text(xf, yp, s, size=15, color=color, verticalalignment='top', horizontalalignment='right')
            yp -= v_spacing
            s = '$\sigma = \, {:.4f} \, \mathrm{{\AA}}^{{-1}}$'.format(self.results['fit_peaks_sigma1']['value'])
            self.ax.text(xf, yp, s, size=15, color=color, verticalalignment='top', horizontalalignment='right')
            yp -= v_spacing
            s = r'$\xi \approx \, {:.1f} \, \mathrm{{nm}}$'.format(self.results['fit_peaks_grain_size'])
            self.ax.text(xf, yp, s, size=15, color=color, verticalalignment='top', horizontalalignment='right')
            self.ax.axvline(q0, color=color, linewidth=0.5)
            self.ax.text(q0, yf, '$q_0$', size=20, color=color, horizontalalignment='center', verticalalignment='bottom')
            s = '$q_T = \, {:.4f} \, \mathrm{{\AA}}^{{-1}}$ \n $d_T = \, {:.1f} \, \mathrm{{nm}}$'.format(self.results['fit_peaks_qT'], self.results['fit_peaks_dT'])
            self.ax.text(q0, y_fit_max, s, size=15, color='b', horizontalalignment='left', verticalalignment='bottom')
            self.ax.plot( [q0-self.results['fit_peaks_qT'], q0], [y_fit_max, y_fit_max], '-', color='b' )
            s = '$q_R = \, {:.4f} \, \mathrm{{\AA}}^{{-1}}$ \n $d_R = \, {:.1f} \, \mathrm{{nm}}$'.format(self.results['fit_peaks_qR'], self.results['fit_peaks_dR'])
            self.ax.text(q0, 0, s, size=15, color='r', horizontalalignment='left', verticalalignment='bottom')
            self.ax.plot( [q0-self.results['fit_peaks_qR'], q0], [yi, yi], '-', color='r' )
            if self.run_args['show_guides']:
                # Show various guides of scattering features
                theta_incident_rad = np.radians(self.run_args['incident_angle'])
                # Direct
                qz = 0
                self.ax.axvline( qz, color='0.25' )
                self.ax.text(qz, yf, '$\mathrm{D}$', size=20, color='0.25', horizontalalignment='center', verticalalignment='bottom')
                # Horizon
                qz = angle_to_q(theta_incident_rad)
                l = self.ax.axvline( qz, color='0.5' )
                l.set_dashes([10,6])
                self.ax.text(qz, yf, '$\mathrm{H}$', size=20, color='0.5', horizontalalignment='center', verticalalignment='bottom')
                # Specular beam
                qz = angle_to_q(2*theta_incident_rad)
                self.ax.axvline( qz, color='r' )
                self.ax.text(qz, yf, '$\mathrm{R}$', size=20, color='r', horizontalalignment='center', verticalalignment='bottom')
                if 'critical_angle_film' in self.run_args:
                    theta_c_f_rad = np.radians(self.run_args['critical_angle_film'])
                    # Transmitted (direct beam refracted by film)
                    if theta_incident_rad<=theta_c_f_rad:
                        qz = angle_to_q(theta_incident_rad) # Horizon
                    else:
                        alpha_i_rad = np.arccos( np.cos(theta_incident_rad)/np.cos(theta_c_f_rad) )
                        two_theta_s_rad = theta_incident_rad - alpha_i_rad
                        qz = angle_to_q(two_theta_s_rad)
                    l = self.ax.axvline( qz, color='b' )
                    l.set_dashes([4,4])
                    # Yoneda
                    qz = angle_to_q(theta_incident_rad+theta_c_f_rad)
                    self.ax.axvline( qz, color='gold' )
                    self.ax.text(qz, yf, '$\mathrm{Y}_f$', size=20, color='gold', horizontalalignment='center', verticalalignment='bottom')
                if 'critical_angle_substrate' in self.run_args:
                    theta_c_s_rad = np.radians(self.run_args['critical_angle_substrate'])
                    # Transmitted (direct beam refracted by substrate)
                    if theta_incident_rad<=theta_c_s_rad:
                        qz = angle_to_q(theta_incident_rad) # Horizon
                    else:
                        alpha_i_rad = np.arccos( np.cos(theta_incident_rad)/np.cos(theta_c_s_rad) )
                        two_theta_s_rad = theta_incident_rad - alpha_i_rad
                        qz = angle_to_q(two_theta_s_rad)
                    self.ax.axvline( qz, color='b' )
                    self.ax.text(qz, yf, '$\mathrm{T}$', size=20, color='b', horizontalalignment='center', verticalalignment='bottom')
                    # Yoneda
                    qz = angle_to_q(theta_incident_rad+theta_c_s_rad)
                    self.ax.axvline( qz, color='gold' )
                    self.ax.text(qz, yf, '$\mathrm{Y}_s$', size=20, color='gold', horizontalalignment='center', verticalalignment='bottom')
            self.ax.axis([xi, xf, yi, yf])
    lines = DataLines_current([line, fit_line, fit_line_extended])
    lines.copy_labels(line)
    lines.results = results
    lines.run_args = run_args
    outfile = self.get_outfile(data.name+'-fit', output_dir, ext='.png')
    try:
        #lines.plot(save=outfile, error_band=False, ecolor='0.75', capsize=2, elinewidth=1, **run_args)
        lines.plot(save=outfile, **run_args)
    except ValueError:
        pass
    outfile = self.get_outfile(data.name, output_dir, ext='.dat')
    line.save_data(outfile)
    #print(results)
    return results
def _fit_peaks(self, line, num_curves=1, **run_args):
    """Fit `num_curves` Gaussian peaks on a linear background to `line`.

    Returns (lm_result, fit_line, fit_line_extended): the lmfit result,
    the model evaluated on the fitted x-range, and the model evaluated on
    an extended x-range for plotting.
    """
    # Usage: lm_result, fit_line, fit_line_extended = self.fit_peaks(line, **run_args)
    line_full = line
    if 'fit_range' in run_args:
        line = line.sub_range(run_args['fit_range'][0], run_args['fit_range'][1])
    import lmfit
    def model(v, x):
        '''Gaussians with linear background.'''
        m = v['m']*x + v['b']
        for i in range(num_curves):
            m += v['prefactor{:d}'.format(i+1)]*np.exp( -np.square(x-v['x_center{:d}'.format(i+1)])/(2*(v['sigma{:d}'.format(i+1)]**2)) )
        return m
    def func2minimize(params, x, data):
        # Residuals for least-squares minimization.
        v = params.valuesdict()
        m = model(v, x)
        return m - data
    params = lmfit.Parameters()
    # Initial linear background from the endpoints of the (sub-ranged) data.
    m = (line.y[-1]-line.y[0])/(line.x[-1]-line.x[0])
    b = line.y[0] - m*line.x[0]
    #params.add('m', value=0)
    #params.add('b', value=np.min(line.y), min=0, max=np.max(line.y))
    #params.add('m', value=m, min=abs(m)*-10, max=abs(m)*+10)
    params.add('m', value=m, min=abs(m)*-5, max=0) # Slope must be negative
    params.add('b', value=b, min=0)
    xspan = np.max(line.x) - np.min(line.x)
    # Seed the peak at the data maximum, or at the expected q0 if given.
    xpeak, ypeak = line.target_y(np.max(line.y))
    if 'q0_expected' in run_args and run_args['q0_expected'] is not None:
        xpeak, ypeak = line.target_x(run_args['q0_expected'])
    prefactor = ypeak - (m*xpeak+b)
    for i in range(num_curves):
        #xpos = np.min(line.x) + xspan*(1.*i/num_curves)
        #xpos, ypos = line.target_x(xpeak*(i+1))
        xpos, ypos = xpeak, ypeak
        params.add('prefactor{:d}'.format(i+1), value=prefactor, min=0, max=np.max(line.y)*4)
        params.add('x_center{:d}'.format(i+1), value=xpos, min=np.min(line.x), max=np.max(line.x))
        #params.add('x_center{:d}'.format(i+1), value=-0.009, min=np.min(line.x), max=np.max(line.x))
        params.add('sigma{:d}'.format(i+1), value=0.003, min=0, max=xspan*0.5)
    lm_result = lmfit.minimize(func2minimize, params, args=(line.x, line.y))
    # https://lmfit.github.io/lmfit-py/fitting.html
    #lm_result = lmfit.minimize(func2minimize, params, args=(line.x, line.y), method='nelder')
    if run_args['verbosity']>=5:
        print('Fit results (lmfit):')
        lmfit.report_fit(lm_result.params)
    fit_x = line.x
    fit_y = model(lm_result.params.valuesdict(), fit_x)
    fit_line = DataLine(x=fit_x, y=fit_y, plot_args={'linestyle':'-', 'color':'purple', 'marker':None, 'linewidth':4.0})
    # Evaluate the model on a wider range for the extended (guide) curve.
    span = abs( np.max(line.x) - np.min(line.x) )
    fit_x = np.linspace(np.min(line.x)-0.5*span, np.max(line.x)+0.5*span, num=500)
    fit_y = model(lm_result.params.valuesdict(), fit_x)
    fit_line_extended = DataLine(x=fit_x, y=fit_y, plot_args={'linestyle':'-', 'color':'purple', 'alpha':0.5, 'marker':None, 'linewidth':2.0})
    return lm_result, fit_line, fit_line_extended
#End class linecut_qz_fit(linecut_qz)
########################################
# Experimental parameters
########################################
#cms.SAXS.setCalibration([464.0, 552.0], 5.038, [35.00, 35.00]) # 2017-06-18, 13.5 keV
# Data collected at SAXSx = 10, SAXSy = 19
# i.e.
# Directory containing the standard SciAnalysis detector mask images.
mask_dir = SciAnalysis_PATH + '/SciAnalysis/XSAnalysis/masks/'
# Toggle between the SAXS (True) and WAXS (False) detector configuration.
if True:
    # SAXS detector on CMS
    calibration = Calibration(wavelength_A=0.9184) # 13.5 keV
    calibration.set_pixel_size(pixel_size_um=172.0)
    calibration.set_image_size(1475, height=1679) # Pilatus2M
    #calibration.set_beam_position(752, 1080) # SAXSx = -65, SAXSy = -73
    calibration.set_beam_position(764, 1279) # SAXSx = -60, SAXSy = -42
    # Sample-to-detector distance in meters.
    calibration.set_distance(3.015)
    #mask = Mask(mask_dir+'Pilatus300k_main_gaps-mask.png')
    #mask.load('./Pilatus300k_current-mask.png')
    mask = Mask(mask_dir+'/Dectris/Pilatus2M_gaps-mask.png')
    mask.load('./Pilatus2M_CMS_3m-mask.png')
else:
    # WAXS detector on CMS
    from SciAnalysis.XSAnalysis.DataRQconv import *
    calibration = CalibrationRQconv(wavelength_A=0.9184) # 13.5 keV
    calibration.set_image_size(1042) # psccd Photonic Sciences CCD
    calibration.set_pixel_size(pixel_size_um=101.7)
    calibration.set_distance(0.232) # Bigger number moves theory rings outwards (larger spacing)
    calibration.set_beam_position(22.0, 1042-22.0)
    calibration.set_angles(det_orient=45, det_tilt=-21, det_phi=0, incident_angle=0., sample_normal=0.)
    print('ratio Dw = {:.3f}'.format(calibration.get_ratioDw()))
    mask = Mask(mask_dir+'psccd_generic-mask.png')
#match_re = re.compile('^.+_th0.110_\d+\.\ds_T(\d\d\d\.\d\d\d)C_5\.00s_\d+_saxs.jpg$')
# Files to analyze
########################################
#root_dir = '/GPFS/xf11bm/Pilatus300/'
#root_dir = '/GPFS/xf11bm/Pilatus300/2016-3Pilatus2M_current-mask.png/CFN_aligned-BCP/'
#source_dir = os.path.join(root_dir, '')
source_dir = '../'
#output_dir = os.path.join(source_dir, 'analysis/')
output_dir = './'
#pattern = 'AgBH*10.00s*'
pattern = '*'
#pattern = '*_th0.150_*'
pattern = '*_th0.150_*'
#pattern = '20170830_1x_60GdF3_PMMA_II_th0.150_5101.3s_x-0.000_15.00s_423535_saxs'
pattern, fit_range = '*_th0.150_*', [0.009, 0.033]
pattern, fit_range = '*_th0.150_*422099_saxs', [0.010, 0.020]
# qz_fit
pattern, fit_range = '20170823_48hBCP_I_th0.200_510.9s_x-1.500_15.00s_421982_saxs', [0.045, 0.070]
pattern, fit_range, q0_expected = '20170823_48hBCP_I_*', [0.045, 0.070], None
pattern, fit_range, q0_expected = '20170823_48hBCP_I_th0.100_*', [0.05, 0.080], 0.634
pattern, | |
# This file is part of beets.
# Copyright 2015, <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
import os
import shutil
import math
from PIL import Image
from beets.plugins import BeetsPlugin
import beetsplug
from beets.ui import Subcommand
from beets import config
from beets import ui
from beets import art
from beets import util
from beets.util import normpath, bytestring_path
from beetsplug.fetchart import FetchArtPlugin
class ArtToolsPlugin(BeetsPlugin):
def __init__(self):
super(ArtToolsPlugin, self).__init__()
self.config.add({
'aspect_ratio_thresh': 0.8,
'size_thresh': 200,
'max_file_size': 1024 * 1024,
'additional_names': [],
'collage_tilesize': 200,
'collect_extract': True,
'collect_fetch_sources': beetsplug.fetchart.SOURCES_ALL,
'chooseart_weightings': {
'aspect_ratio': 1,
'pixels': 0.8,
'bytes': 0.2
},
'host': u'127.0.0.1',
'port': 8338
})
    def commands(self):
        """Create and return the Subcommand objects exposed by this plugin.

        Each subcommand delegates to the correspondingly named method of
        this class via its ``func`` attribute; command-line options are
        registered on the individual subcommand parsers.
        """
        # listboundart: print the bound art file of each matched album.
        list_bound_art_command = Subcommand('listboundart',
                                            help='lists all cover arts of '
                                                 'selected albums')
        list_bound_art_command.func = self.list_bound_art
        # listbadboundart: print bound art violating the quality thresholds.
        list_bad_bound_art_command = Subcommand('listbadboundart',
                                                help='lists all cover arts of '
                                                     'selected albums which '
                                                     'are bad')
        list_bad_bound_art_command.func = self.list_bad_bound_art
        # copyboundart: gather bound art files into a single directory.
        copy_bound_art_command = Subcommand('copyboundart',
                                            help='copys all cover arts of the '
                                                 'selected albums into a '
                                                 'single directory')
        copy_bound_art_command.func = self.copy_bound_art
        copy_bound_art_command.parser.add_option('-d', '--dir', dest='dir',
                                                 help='The directory to copy '
                                                      'the images to')
        copy_bound_art_command.parser.add_option('-p', '--pretend',
                                                 dest='pretend',
                                                 action='store_true',
                                                 default=False,
                                                 help='do not copy anything, '
                                                      'only print files this '
                                                      'command would copy')
        # chooseart: automatically pick and bind the best art file.
        choose_art_command = Subcommand('chooseart',
                                        help='chooses the best album art file')
        choose_art_command.func = self.choose_art
        choose_art_command.parser.add_option('-p', '--pretend',
                                             dest='pretend',
                                             action='store_true',
                                             default=False,
                                             help='do not change anything, '
                                                  'only print files this '
                                                  'command would choose')
        # deleteunusedart: remove art files not bound as the album art.
        delete_unused_art_command = Subcommand('deleteunusedart',
                                               help='deletes all image files '
                                                    'matching the art '
                                                    'filenames which are not '
                                                    'used')
        delete_unused_art_command.func = self.delete_unused_arts
        delete_unused_art_command.parser.add_option('-p', '--pretend',
                                                    dest='pretend',
                                                    action='store_true',
                                                    default=False,
                                                    help='do not delete, only '
                                                         'print files to '
                                                         'delete')
        # listart: print every art-named image file in the album directories.
        list_art_command = Subcommand('listart',
                                      help='lists all album art files '
                                           'matching the configured names')
        list_art_command.func = self.list_art
        list_art_command.parser.add_option('-v', '--verbose', dest='verbose',
                                           action='store_true', default=False,
                                           help='verbose output')
        # artcollage: render all bound covers into one tiled image.
        art_collage_command = Subcommand('artcollage',
                                         help='creates an image with all '
                                              'cover arts of the selected '
                                              'albums')
        art_collage_command.func = self.art_collage
        art_collage_command.parser.add_option('-o', '--out', dest='outFile',
                                              help='The name of the image to '
                                                   'create')
        art_collage_command.parser.add_option('-s', '--size', dest='tilesize',
                                              help='The size of each cover '
                                                   'art')
        # collectart: extract embedded art / fetch art for matched albums.
        collect_art_command = Subcommand('collectart',
                                         help='collects all configured cover'
                                              'arts')
        collect_art_command.func = self.collect_art
        collect_art_command.parser.add_option('-f', '--force', dest='force',
                                              action='store_true',
                                              default=False,
                                              help='Force extraction or '
                                                   'download even if the file '
                                                   'already exists')
        collect_art_command.parser.add_option('-v', '--verbose',
                                              dest='verbose',
                                              action='store_true',
                                              default=False,
                                              help='Output verbose logging '
                                                   'information')
        # webchoose: interactive art selection via a local web server.
        web_choose_command = Subcommand('webchoose',
                                        help='starts a webserver to choose art'
                                             ' files manually')
        web_choose_command.parser.add_option('-d', '--debug',
                                             action='store_true',
                                             default=False, help='debug mode')
        web_choose_command.func = self.web_choose
        return [list_bound_art_command, list_bad_bound_art_command,
                copy_bound_art_command, choose_art_command,
                list_art_command, art_collage_command,
                delete_unused_art_command, collect_art_command,
                web_choose_command]
def list_bound_art(self, lib, opts, args):
"""List all art files bound to albums selected by the query"""
albums = lib.albums(ui.decargs(args))
for image in self._get_bound_art_files(albums):
self._log.info(util.displayable_path(image))
def list_bad_bound_art(self, lib, opts, args):
"""List all art files bound to albums selected by the query which
do not match the rules for a good album art."""
aspect_ratio_thresh = self.config['aspect_ratio_thresh'].get()
size_thresh = self.config['size_thresh'].get()
max_file_size = self.config['max_file_size'].get()
self._log.info(
u"Art is bad if its aspect ratio is < {0} or either width or "
u"height is < {1}", aspect_ratio_thresh, size_thresh)
albums = lib.albums(ui.decargs(args))
for image in self._get_bound_art_files(albums):
try:
width, height, size, aspect_ratio, file_size = self.get_image_info(image)
if aspect_ratio < aspect_ratio_thresh or size < size_thresh or \
file_size > max_file_size:
self._log.info(u'{0} ({1} x {2}) AR:{3:.4} {4}',
util.displayable_path(image), width,
height, aspect_ratio,
self.format_file_size(file_size))
except Exception:
pass
def copy_bound_art(self, lib, opts, args):
if not opts.dir:
self._log.info(u"Usage: beet copyart -d <destination directory> "
u"[<query>]")
return
dest_dir = os.path.normpath(opts.dir)
if not os.path.exists(dest_dir) or not os.path.isdir(dest_dir):
self._log.info(
u"'{0}' does not exist or is not a directory. "
u"Stopping.", util.displayable_path(dest_dir))
return
query = ui.decargs(args)
self._log.info(u"Copying all album art to {0}",
util.displayable_path(dest_dir), )
albums = lib.albums(query)
for album in albums:
if album.artpath:
new_filename = album.evaluate_template(u"$albumartist - "
u"$album",
for_path=True)
if album.albumtype == u'compilation':
new_filename = u"" + album.album
# Add the file extension
new_filename += os.path.splitext(album.artpath)[1]
old_path = util.syspath(album.artpath)
new_path = util.syspath(os.path.join(dest_dir, new_filename))
self._log.info(u"Copy '{0}' to '{1}'",
util.displayable_path(old_path),
util.displayable_path(new_path))
if not opts.pretend:
shutil.copy(old_path, new_path)
def choose_art(self, lib, opts, args):
art_filename = bytestring_path(config["art_filename"].get())
albums = lib.albums(ui.decargs(args))
for album in albums:
chosen_image = self.get_chosen_art(album)
if not opts.pretend and chosen_image:
chosen_image = bytestring_path(chosen_image)
new_image = os.path.join(album.item_dir(), art_filename +
os.path.splitext(chosen_image)[1])
if chosen_image != new_image:
shutil.copy(chosen_image, new_image)
album.set_art(new_image)
album.store()
def get_chosen_art(self, album):
aspect_ratio_thresh = self.config['aspect_ratio_thresh'].get()
size_thresh = self.config['size_thresh'].get()
max_file_size = self.config['max_file_size'].get()
album_path = album.item_dir()
if album_path:
images = self.get_art_files(album_path)
if images and len(images) > 0:
attributed_images = []
for image in images:
try:
width, height, size, aspect_ratio, file_size = self. \
get_image_info(util.syspath(image))
except IOError:
continue
attributed_images.append({'file': image,
'bytes': file_size,
'width': width,
'height': height,
'size': size,
'pixels': width * height,
'ar': aspect_ratio})
# Check if there are still images (maybe we can't get the info
# about all the images)
if len(attributed_images) == 0:
return None
filtered_images = \
filter(lambda i: i['ar'] >= aspect_ratio_thresh
and i['size'] >= size_thresh
and i['bytes'] < max_file_size,
attributed_images)
if len(filtered_images) == 0:
self._log.debug(
u"no image matched rules for album '{0}'", album.album)
filtered_images = attributed_images
# Find the best image:
# - Sort the images for aspect ratio, size in pixels and size
# in bytes
# - Store the ordinals for each sort
# - Summarize all ordinals per image (using weightings)
# - Choose the one with the lowest sum
self.add_points(filtered_images, 'ar', 0.0001)
self.add_points(filtered_images, 'pixels')
self.add_points(filtered_images, 'bytes')
weightings = self.config['chooseart_weightings'].get()
for filtered_image in filtered_images:
filtered_image['points'] = \
filtered_image['ar_points'] * weightings['aspect_ratio'] + \
filtered_image['pixels_points'] * weightings['pixels'] + \
filtered_image['bytes_points'] * weightings['bytes']
filtered_images = sorted(filtered_images,
key=lambda i: i['points'],
reverse=True)
chosen_image = filtered_images[0]['file']
self._log.info(u"chosen {0}",
util.displayable_path(chosen_image))
return chosen_image
else:
self._log.debug(
u"no image found for album {0}", album.album)
@staticmethod
def add_points(images, field, threshold=1.0):
sorted_images = sorted(images, key=lambda image: image[field])
ordinal = -1
last_value = -1
for i in range(len(sorted_images)):
if abs(last_value - sorted_images[i][field]) > threshold:
last_value = sorted_images[i][field]
ordinal += 1
sorted_images[i][field + '_points'] = ordinal
def delete_unused_art_of_album(self, album, pretend=False):
art_filename = config["art_filename"].get()
album_path = album.item_dir()
if album_path:
for image in self.get_art_files(album_path):
if os.path.splitext(os.path.basename(image))[0] != \
art_filename:
self._log.info(u"removing {0}",
util.displayable_path(image))
if not pretend:
os.remove(util.syspath(image))
def delete_unused_arts(self, lib, opts, args):
albums = lib.albums(ui.decargs(args))
for album in albums:
self.delete_unused_art_of_album(album, opts.pretend)
def list_art(self, lib, opts, args):
"""Prints all found images matching the configured names."""
albums = lib.albums(ui.decargs(args))
for album in albums:
albumpath = album.item_dir()
if albumpath:
images = self.get_art_files(albumpath)
for image in images:
info = u""
if opts.verbose:
width, height, size, aspect_ratio, file_size = \
self.get_image_info(util.syspath(image))
info = u" ({0} x {1}) AR:{2:.4} {3}".format(width, height,
aspect_ratio,
self.format_file_size(file_size))
self._log.info(util.displayable_path(image) + info)
def art_collage(self, lib, opts, args):
albums = lib.albums(ui.decargs(args))
images = self._get_bound_art_files(albums)
if not opts.outFile:
self._log.info(u"Usage: artcollage -o <output file> [-s <size>] "
u"[query]")
return
out_file = os.path.abspath(opts.outFile)
tile_size = opts.tilesize or self.config['collage_tilesize'].get()
try:
tile_size = int(tile_size)
except ValueError:
self._log.error(u"Unable to convert {0} into a number used for "
u"tile size. Aboard.".format(tile_size))
return
if len(images) < 1:
self._log.warn(u"No cover images selected. Aboard.")
return
if os.path.isdir(out_file):
out_file = os.path.join(out_file, "covers.jpg")
if not os.path.exists(os.path.split(out_file)[0]):
self._log.error(u"Destination does not exist.")
return
cols = int(math.ceil(math.sqrt(len(images))))
rows = int(math.ceil(len(images) / float(cols)))
result = Image.new("RGB",
(cols * tile_size, rows * tile_size),
"black")
for row in xrange(0, rows):
for col in xrange(0, cols):
if row * cols + col >= len(images):
break
image = Image.open(util.syspath(images[row * cols + col]))
if not image:
continue
image = image.resize((tile_size, tile_size))
result.paste(image, (col * tile_size, row * tile_size))
result.save(out_file)
def collect_art_for_albums(self, albums, force, verbose):
if self.config['collect_extract'].get():
self._log.info(u"Extracting cover arts for matched albums...")
success = 0
skipped = 0
for album in albums:
artpath = normpath(os.path.join(album.path, 'extracted'))
if self._art_file_exists(artpath) and not force:
skipped += 1
if verbose:
self._log.info(u" Skipping extraction for '{0}': "
u"file already exists.", album)
continue
if art.extract_first(self._log, artpath, album.items()):
success += 1
if verbose:
self._log.info(u" Extracted art for '{0}'.",
album)
elif verbose:
self._log.info(u" Could not extract art for '{0}'.",
album)
self._log.info(u" | |
,
("240910", "<NAME>", "RN", ) ,
("240920", "PASSAGEM", "RN", ) ,
("240930", "PATU", "RN", ) ,
("240933", "<NAME>", "RN", ) ,
("240940", "<NAME>", "RN", ) ,
("240950", "<NAME>", "RN", ) ,
("240960", "<NAME>", "RN", ) ,
("240970", "<NAME>", "RN", ) ,
("240980", "<NAME>", "RN", ) ,
("240990", "PENDÊNCIAS", "RN", ) ,
("241000", "PILÕES", "RN", ) ,
("241010", "<NAME>", "RN", ) ,
("241020", "PORTALEGRE", "RN", ) ,
("241025", "<NAME>", "RN", ) ,
("241030", "<NAME>", "RN", ) ,
("241040", "PUREZA", "RN", ) ,
("241050", "<NAME>", "RN", ) ,
("241060", "<NAME>", "RN", ) ,
("241070", "<NAME>", "RN", ) ,
("241080", "<NAME>", "RN", ) ,
("241090", "RIACHUELO", "RN", ) ,
("241100", "<NAME>", "RN", ) ,
("241105", "TIBAU", "RN", ) ,
("241110", "<NAME>", "RN", ) ,
("241120", "<NAME>", "RN", ) ,
("241140", "<NAME>", "RN", ) ,
("241142", "<NAME>", "RN", ) ,
("241150", "<NAME>", "RN", ) ,
("241160", "<NAME>", "RN", ) ,
("241170", "<NAME>", "RN", ) ,
("241180", "<NAME>", "RN", ) ,
("241190", "<NAME>", "RN", ) ,
("241200", "<NAME>", "RN", ) ,
("241210", "<NAME>", "RN", ) ,
("241220", "<NAME>", "RN", ) ,
("241230", "<NAME>", "RN", ) ,
("241240", "<NAME>", "RN", ) ,
("241250", "<NAME>", "RN", ) ,
("241255", "<NAME>", "RN", ) ,
("241260", "<NAME>", "RN", ) ,
("241270", "<NAME>", "RN", ) ,
("241280", "<NAME>", "RN", ) ,
("241290", "<NAME>", "RN", ) ,
("241300", "<NAME>", "RN", ) ,
("241310", "<NAME>", "RN", ) ,
("241320", "<NAME>", "RN", ) ,
("241330", "<NAME>", "RN", ) ,
("241335", "<NAME>", "RN", ) ,
("241340", "<NAME>", "RN", ) ,
("241350", "SERRINHA", "RN", ) ,
("241355", "<NAME>", "RN", ) ,
("241360", "<NAME>", "RN", ) ,
("241370", "<NAME>", "RN", ) ,
("241380", "<NAME>", "RN", ) ,
("241390", "TAIPU", "RN", ) ,
("241400", "TANGARÁ", "RN", ) ,
("241410", "<NAME>", "RN", ) ,
("241415", "<NAME>", "RN", ) ,
("241420", "<NAME>", "RN", ) ,
("241430", "<NAME>", "RN", ) ,
("241440", "TOUROS", "RN", ) ,
("241445", "<NAME>", "RN", ) ,
("241450", "UMARIZAL", "RN", ) ,
("241460", "UPANEMA", "RN", ) ,
("241470", "VÁRZEA", "RN", ) ,
("241475", "VENHA-VER", "RN", ) ,
("241480", "<NAME>", "RN", ) ,
("241490", "VIÇOSA", "RN", ) ,
("241500", "<NAME>", "RN", ) ,
("250010", "<NAME>", "PB", ) ,
("250020", "AGUIAR", "PB", ) ,
("250030", "<NAME>", "PB", ) ,
("250040", "<NAME>", "PB", ) ,
("250050", "ALAGOINHA", "PB", ) ,
("250053", "ALCANTIL", "PB", ) ,
("250057", "<NAME>", "PB", ) ,
("250060", "ALHANDRA", "PB", ) ,
("250070", "<NAME>", "PB", ) ,
("250073", "AMPARO", "PB", ) ,
("250077", "APARECIDA", "PB", ) ,
("250080", "ARAÇAGI", "PB", ) ,
("250090", "ARARA", "PB", ) ,
("250100", "ARARUNA", "PB", ) ,
("250110", "AREIA", "PB", ) ,
("250115", "<NAME>", "PB", ) ,
("250120", "AREIAL", "PB", ) ,
("250130", "AROEIRAS", "PB", ) ,
("250135", "ASSUNÇÃO", "PB", ) ,
("250140", "<NAME>", "PB", ) ,
("250150", "BANANEIRAS", "PB", ) ,
("250153", "BARAÚNA", "PB", ) ,
("250157", "<NAME>", "PB", ) ,
("250160", "<NAME>", "PB", ) ,
("250170", "<NAME>", "PB", ) ,
("250180", "BAYEUX", "PB", ) ,
("250190", "BELÉM", "PB", ) ,
("250200", "<NAME>", "PB", ) ,
("250205", "<NAME>", "PB", ) ,
("250210", "<NAME>", "PB", ) ,
("250215", "<NAME>", "PB", ) ,
("250220", "<NAME>", "PB", ) ,
("250230", "<NAME>", "PB", ) ,
("250240", "BONITO DE SANTA FÉ", "PB", ) ,
("250250", "BOQUEIRÃO", "PB", ) ,
("250260", "IGARACY", "PB", ) ,
("250270", "BORBOREMA", "PB", ) ,
("250280", "<NAME>", "PB", ) ,
("250290", "<NAME>", "PB", ) ,
("250300", "CAAPORÃ", "PB", ) ,
("250310", "CABACEIRAS", "PB", ) ,
("250320", "CABEDELO", "PB", ) ,
("250330", "<NAME>", "PB", ) ,
("250340", "<NAME>", "PB", ) ,
("250350", "<NAME>", "PB", ) ,
("250355", "CACIMBAS", "PB", ) ,
("250360", "CAIÇARA", "PB", ) ,
("250370", "CAJAZEIRAS", "PB", ) ,
("250375", "CAJAZEIRINHAS", "PB", ) ,
("250380", "<NAME>", "PB", ) ,
("250390", "CAMALAÚ", "PB", ) ,
("250400", "<NAME>", "PB", ) ,
("250403", "CAPIM", "PB", ) ,
("250407", "CARAÚBAS", "PB", ) ,
("250410", "CARRAPATEIRA", "PB", ) ,
("250415", "CASSERENGUE", "PB", ) ,
("250420", "CATINGUEIRA", "PB", ) ,
("250430", "<NAME>CHA", "PB", ) ,
("250435", "CATURITÉ", "PB", ) ,
("250440", "CONCEIÇÃO", "PB", ) ,
("250450", "CONDADO", "PB", ) ,
("250460", "CONDE", "PB", ) ,
("250470", "CONGO", "PB", ) ,
("250480", "COREMAS", "PB", ) ,
("250485", "COXIXOLA", "PB", ) ,
("250490", "CRUZ DO ESPÍRITO SANTO", "PB", ) ,
("250500", "CUBATI", "PB", ) ,
("250510", "CUITÉ", "PB", ) ,
("250520", "CUITEGI", "PB", ) ,
("250523", "CUITÉ DE MAMANGUAPE", "PB", ) ,
("250527", "CURRAL DE CIMA", "PB", ) ,
("250530", "<NAME>", "PB", ) ,
("250535", "DAMIÃO", "PB", ) ,
("250540", "DESTERRO", "PB", ) ,
("250550", "<NAME>", "PB", ) ,
("250560", "DIAMANTE", "PB", ) ,
("250570", "<NAME>", "PB", ) ,
("250580", "DUAS ESTRADAS", "PB", ) ,
("250590", "EMAS", "PB", ) ,
("250600", "ESPERANÇA", "PB", ) ,
("250610", "FAGUNDES", "PB", ) ,
("250620", "<NAME>", "PB", ) ,
("250625", "<NAME>", "PB", ) ,
("250630", "GUARABIRA", "PB", ) ,
("250640", "GURINHÉM", "PB", ) ,
("250650", "GURJÃO", "PB", ) ,
("250660", "IBIARA", "PB", ) ,
("250670", "IMACULADA", "PB", ) ,
("250680", "INGÁ", "PB", ) ,
("250690", "ITABAIANA", "PB", ) ,
("250700", "ITAPORANGA", "PB", ) ,
("250710", "ITAPOROROCA", "PB", ) ,
("250720", "ITATUBA", "PB", ) ,
("250730", "JACARAÚ", "PB", ) ,
("250740", "JERICÓ", "PB", ) ,
("250750", "<NAME>", "PB", ) ,
("250760", "<NAME>", "PB", ) ,
("250770", "JUAZEIRINHO", "PB", ) ,
("250780", "<NAME>", "PB", ) ,
("250790", "JURIPIRANGA", "PB", ) ,
("250800", "JURU", "PB", ) ,
("250810", "LAGOA", "PB", ) ,
("250820", "<NAME>", "PB", ) ,
("250830", "LAGOA SECA", "PB", ) ,
("250840", "LASTRO", "PB", ) ,
("250850", "LIVRAMENTO", "PB", ) ,
("250855", "LOGRADOURO", "PB", ) ,
("250860", "LUCENA", "PB", ) ,
("250870", "<NAME>", "PB", ) ,
("250880", "MALTA", "PB", ) ,
("250890", "MAMANGUAPE", "PB", ) ,
("250900", "MANAÍRA", "PB", ) ,
("250905", "MARCAÇÃO", "PB", ) ,
("250910", "MARI", "PB", ) ,
("250915", "MARIZÓPOLIS", "PB", ) ,
("250920", "MASSARANDUBA", "PB", ) ,
("250930", "MATARACA", "PB", ) ,
("250933", "MATINHAS", "PB", ) ,
("250937", "<NAME>", "PB", ) ,
("250939", "MATURÉIA", "PB", ) ,
("250940", "MOGEIRO", "PB", ) ,
("250950", "MONTADAS", "PB", ) ,
("250960", "<NAME>", "PB", ) ,
("250970", "MONTEIRO", "PB", ) ,
("250980", "MULUNGU", "PB", ) ,
("250990", "NATUBA", "PB", ) ,
("251000", "NAZAREZINHO", "PB", ) ,
("251010", "<NAME>", "PB", ) ,
("251020", "<NAME>", "PB", ) ,
("251030", "<NAME>", "PB", ) ,
("251040", "<NAME>", "PB", ) ,
("251050", "OLIVEDOS", "PB", ) ,
("251060", "<NAME>", "PB", ) ,
("251065", "PARARI", "PB", ) ,
("251070", "PASSAGEM", "PB", ) ,
("251080", "PATOS", "PB", ) ,
("251090", "PAULISTA", "PB", ) ,
("251100", "<NAME>", "PB", ) ,
("251110", "<NAME>", "PB", ) ,
("251120", "<NAME>", "PB", ) ,
("251130", "PIANCÓ", "PB", ) ,
("251140", "PICUÍ", "PB", ) ,
("251150", "PILAR", "PB", ) ,
("251160", "PILÕES", "PB", ) ,
("251170", "PILÕEZINHOS", "PB", ) ,
("251180", "PIRPIRITUBA", "PB", ) ,
("251190", "PITIMBU", "PB", ) ,
("251200", "POCINHOS", "PB", ) ,
("251203", "<NAME>", "PB", ) ,
("251207", "<NAME>", "PB", ) ,
("251210", "POMBAL", "PB", ) ,
("251220", "PRATA", "PB", ) ,
("251230", "<NAME>", "PB", ) ,
("251240", "PUXINANÃ", "PB", ) ,
("251250", "QUEIMADAS", "PB", ) ,
("251260", "QUIXABÁ", "PB", ) ,
("251270", "REMÍGIO", "PB", ) ,
("251272", "<NAME>", "PB", ) ,
("251274", "RIACHÃO", "PB", ) ,
("251275", "<NAME>", "PB", ) ,
("251276", "<NAME>", "PB", ) ,
("251278", "<NAME>", "PB", ) ,
("251280", "<NAME>", "PB", ) ,
("251290", "<NAME>", "PB", ) ,
("251300", "SALGADINHO", "PB", ) ,
("251310", "<NAME>", "PB", ) ,
("251315", "<NAME>", "PB", ) ,
("251320", "<NAME>", "PB", ) ,
("251330", "<NAME>", "PB", ) ,
("251335", "<NAME>", "PB", ) ,
("251340", "<NAME>", "PB", ) ,
("251350", "<NAME>", "PB", ) ,
("251360", "<NAME>", "PB", ) ,
("251365", "SANTARÉM", "PB", ) ,
("251370", "<NAME>", "PB", ) ,
("251380", "<NAME>", "PB", ) ,
("251385", "<NAME>", "PB", ) ,
("251390", "<NAME>", "PB", ) ,
("251392", "<NAME>", "PB", ) ,
("251394", "SÃO DOMINGOS DO CARIRI", "PB", ) ,
("251396", "SÃO DOMINGOS", "PB", ) ,
("251398", "SÃ<NAME>", "PB", ) ,
("251400", "SÃO JOÃO DO CARIRI", "PB", ) ,
| |
# Apache License Version 2.0
# Copyright 2022 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import allel
import gzip
import os
import numpy as np
import pandas as pd
from multiprocessing import Process, Queue
from sstar.utils import read_data, py2round, read_mapped_region_file, cal_match_pct
#@profile
def cal_pvalue(vcf, ref_ind_file, tgt_ind_file, src_ind_file, anc_allele_file, output, thread, score_file, ref_match_pct_file, mapped_region_file, low_memory, mapped_len_esp, len_esp, var_esp, sfs_esp):
    """
    Description:
        Calculate p-values for S* haplotypes in the target population with source genomes.

    Arguments:
        vcf str: Name of the VCF file containing genotypes.
        ref_ind_file str: Name of the file containing sample information from reference populations.
        tgt_ind_file str: Name of the file containing sample information from target populations.
        src_ind_file str: Name of the file containing sample information from source populations.
        anc_allele_file str: Name of the file containing ancestral allele information.
        output str: Name of the output file.
        thread int: Number of threads; defaults to min(CPU count - 1, sample size) when None.
        score_file str: Name of the file containing S* scores calculated by `sstar score`.
        ref_match_pct_file str: Name of the file containing match percents in reference populations calculated by `sstar rmatch`.
        mapped_region_file str: Name of the BED file containing mapped regions.
        low_memory bool: If True, keep the reference match percents in a pandas DataFrame and query it with the pandas backend; otherwise load them into a nested dict.
        mapped_len_esp float: Increment of the length of the mapped region.
        len_esp float: Increment of the length of the haplotype.
        var_esp float: Increment of the number of derived alleles on the haplotype.
        sfs_esp float: Increment of mean site frequency spectrum.
    """
    ref_data, ref_samples, tgt_data, tgt_samples, src_data, src_samples = read_data(vcf, ref_ind_file, tgt_ind_file, src_ind_file, anc_allele_file)
    res = []
    chr_names = ref_data.keys()
    mapped_intervals = read_mapped_region_file(mapped_region_file)
    data, windows, samples = _read_score_file(score_file, chr_names, tgt_samples)
    sample_size = len(samples)
    header = 'chrom\tstart\tend\tsample\tp-value\t'
    header += 'src_sample\thap_index\tS*_start\tS*_end\tS*_SNP_num\t'
    header += "hap_dSNV_num\thap_len\thap_mapped_len\thap_match_num\thap_tot_num\thap_dSNP_per_site_num\thap_S*_match(%)\thap_num_match_ref"
    # Read match percents in reference populations from a file.
    # Use whole-genome match percents as the null distributions.
    if low_memory:
        try:
            ref_match_pct = pd.read_csv(ref_match_pct_file, compression="gzip", sep="\t")
        except (OSError, ValueError):
            # FIX: narrowed from a bare `except:`.  OSError covers missing
            # or non-gzip input, ValueError covers decode/parser errors;
            # in both cases fall back to reading the file as plain text.
            ref_match_pct = pd.read_csv(ref_match_pct_file, sep="\t")
        query_ref_match_pct = _query_ref_match_pct_pandas
    else:
        ref_match_pct = _read_ref_match_pct_file(ref_match_pct_file)
        query_ref_match_pct = _query_ref_match_pct_naive
    if thread is None: thread = min(os.cpu_count()-1, sample_size)
    res = _cal_tgt_match_pct_manager(data, mapped_intervals, samples, tgt_samples, src_samples, tgt_data, src_data, ref_match_pct, sample_size, query_ref_match_pct, thread, mapped_len_esp, len_esp, var_esp, sfs_esp)
    with open(output, 'w') as o:
        o.write(header+"\n")
        o.write("\n".join(res)+"\n")
#@profile
def _read_score_file(score_file, chr_names, tgt_samples):
"""
Description:
Helper function for reading the file generated by `sstar score`.
Arguments:
score_file str: Name of the file containing S* scores generated by `sstar score`.
chr_names list: List containing names of chromosomes for analysis.
tgt_samples list: List containing names of samples from the target population for analysis.
Returns:
data dict: Dictionary containing S* for analysis.
windows dict: Dictionary containing windows for analysis.
header str: Header from the file generated by `sstar score`.
samples list: List containing names of samples in the target population for analysis.
"""
data = dict()
windows = dict()
for c in chr_names:
windows[c] = []
samples = []
with open(score_file, 'r') as f:
header = f.readline().rstrip()
for line in f.readlines():
line = line.rstrip()
elements = line.split("\t")
chr_name = elements[0]
win_start = elements[1]
win_end = elements[2]
sample = elements[3]
if sample not in tgt_samples: continue
if elements[6] == 'NA': continue
if sample not in data.keys():
data[sample] = []
samples.append(sample)
data[sample].append(line)
star_snps = elements[-1].split(",")
windows[c].append((int(win_start), int(win_end)))
windows[c].append((int(star_snps[0]), int(star_snps[-1])))
return data, windows, samples
#@profile
def _read_ref_match_pct_file(ref_match_pct_file):
"""
Description:
Helper function for reading match percents from the reference population.
Arguments:
ref_match_pct_file str: Name of the file containing match percents from the reference population.
Returns:
ref_match_pct dict: Dictionary containing match percents from the reference population.
"""
f = gzip.open(ref_match_pct_file, 'rt')
try:
f.readline()
except:
f.close()
f = open(ref_match_pct_file, 'r')
f.readline()
ref_match_pct = dict()
for line in f.readlines():
elements = line.rstrip().split("\t")
count = int(elements[0])
mapped_bases_bin = int(elements[1])
hap_len = int(elements[2])
mh_sites = int(elements[3])
tot_sites = int(elements[4])
sfs = float(elements[5])
match = float(elements[6])
if mapped_bases_bin not in ref_match_pct.keys(): ref_match_pct[mapped_bases_bin] = dict()
if hap_len not in ref_match_pct[mapped_bases_bin].keys(): ref_match_pct[mapped_bases_bin][hap_len] = dict()
if mh_sites not in ref_match_pct[mapped_bases_bin][hap_len].keys(): ref_match_pct[mapped_bases_bin][hap_len][mh_sites] = dict()
if sfs not in ref_match_pct[mapped_bases_bin][hap_len][mh_sites].keys():
ref_match_pct[mapped_bases_bin][hap_len][mh_sites][sfs] = dict()
ref_match_pct[mapped_bases_bin][hap_len][mh_sites][sfs]['count'] = []
ref_match_pct[mapped_bases_bin][hap_len][mh_sites][sfs]['match_pct'] = []
ref_match_pct[mapped_bases_bin][hap_len][mh_sites][sfs]['count'].append(count)
ref_match_pct[mapped_bases_bin][hap_len][mh_sites][sfs]['match_pct'].append(match / tot_sites)
f.close()
return ref_match_pct
def _cal_tgt_match_pct_manager(data, mapped_intervals, samples, tgt_samples, src_samples, tgt_data, src_data, ref_match_pct, sample_size, query_ref_match_pct, thread, mapped_len_esp, len_esp, var_esp, sfs_esp):
    """
    Description:
        Manager function to calculate match percents in target populations using multiprocessing.
        Queues one task per sample, spawns `thread` worker processes, collects
        one result per sample, then terminates the (infinitely looping) workers.

    Arguments:
        data dict: Lines from the output file created by `sstar score`.
        mapped_intervals dict: Dictionary of tuples containing mapped regions across the genome.
        samples list: Sample information for individuals needed to be estimated match percents.
        tgt_samples list: Sample information from target populations.
        src_samples list: Sample information from source populations.
        tgt_data dict: Genotype data from target populations.
        src_data dict: Genotype data from source populations.
        ref_match_pct dict: Match percents calculated from reference populations.
        sample_size int: Number of individuals analyzed.
        query_ref_match_pct func: Function used to query match percentage from reference populations.
        thread int: Number of threads.
        mapped_len_esp float: Increment of the length of the mapped region.
        len_esp float: Increment of the length of the haplotype.
        var_esp float: Increment of the number of derived alleles on the haplotype.
        sfs_esp float: Increment of mean site frequency spectrum.

    Returns:
        res list: Match percents for target populations.
    """
    # When running under pytest-cov, flush coverage data from worker
    # processes on SIGTERM; harmless no-op outside of test runs.
    try:
        from pytest_cov.embed import cleanup_on_sigterm
    except ImportError:
        pass
    else:
        cleanup_on_sigterm()
    res = []
    in_queue, out_queue = Queue(), Queue()
    # One long-lived worker per thread; each repeatedly pulls an
    # (individual index, score lines) task and pushes a joined result string.
    workers = [Process(target=_cal_tgt_match_pct_worker, args=(in_queue, out_queue, mapped_intervals, tgt_data, src_data, src_samples, ref_match_pct, len(tgt_samples), query_ref_match_pct, mapped_len_esp, len_esp, var_esp, sfs_esp)) for ii in range(thread)]
    # Queue all tasks before starting any worker.
    for t in samples:
        index = tgt_samples.index(t)
        in_queue.put((index, data[t]))
    try:
        for worker in workers:
            worker.start()
        # Collect exactly sample_size results; empty strings are dropped.
        # NOTE(review): assumes sample_size == len(samples) as passed by
        # cal_pvalue; a larger value would block forever -- confirm.
        for s in range(sample_size):
            item = out_queue.get()
            if item != '': res.append(item)
        # Workers loop forever, so terminate them once all results are in.
        for worker in workers:
            worker.terminate()
    finally:
        for worker in workers:
            worker.join()
    return res
def _cal_tgt_match_pct_worker(in_queue, out_queue, mapped_intervals, tgt_data, src_data, src_samples, ref_match_pct, sample_size, query_ref_match_pct, mapped_len_esp, len_esp, var_esp, sfs_esp):
    """
    Description:
        Worker-process loop: repeatedly pull an (index, data) task from the
        input queue, compute match percents for that individual, and push the
        newline-joined result lines onto the output queue. Runs until the
        manager terminates the process.
    Arguments:
        in_queue multiprocessing.Queue: Queue delivering (index, data) tasks from the manager.
        out_queue multiprocessing.Queue: Queue used to send results back to the manager.
        mapped_intervals dict: Dictionary of tuples containing mapped regions across the genome.
        tgt_data dict: Genotype data from target populations.
        src_data dict: Genotype data from source populations.
        src_samples list: List containing sample information for source populations.
        ref_match_pct dict: Match percents in reference populations as the null distribution.
        sample_size int: Number of individuals analyzed.
        query_ref_match_pct func: Function used to query match percentages from reference populations.
        mapped_len_esp float: Increment of the length of the mapped region.
        len_esp float: Increment of the length of the haplotype.
        var_esp float: Increment of the number of derived alleles on the haplotype.
        sfs_esp float: Increment of mean site frequency spectrum.
    """
    while True:
        ind_index, ind_data = in_queue.get()
        result_lines = _cal_pvalue_ind(
            ind_data, ind_index, mapped_intervals, tgt_data, src_data,
            src_samples, ref_match_pct, sample_size, query_ref_match_pct,
            mapped_len_esp, len_esp, var_esp, sfs_esp)
        out_queue.put("\n".join(result_lines))
#@profile
def _get_ssnps_range(chr_name, data, ind_index, hap_index, win_start, win_end, s_star_snps):
"""
Description:
Helper function to obtain the range of a haplotype containing S* SNPs.
If the haplotype contains less than two S* SNPs, it will be ignored.
Arguments:
chr_name str: Name of the chromosome.
data dict: Dictionary containing genotype data and position information.
ind_index int: Index of the individual carrying S* SNPs.
hap_index int: Index of the haplotype carrying S* SNPs.
win_start int: Start position of the local window containing S* SNPs.
    win_end int: End position of the local window containing S* SNPs.
s_star_snps list: List containing positions of S* SNPs.
Returns:
hap_pos_min int: Start position of the haplotype.
hap_pos_max int: End position of the haplotype.
"""
gt = data[chr_name]['GT']
pos = data[chr_name]['POS']
sub_snps = np.where((pos>=win_start) & (pos<=win_end))[0]
sub_gt = gt[sub_snps][:,ind_index]
sub_pos = pos[sub_snps]
| |
k in range(0,instancesPopulation):
for l in IDList:
if train_dataset.Patient[k] == l:
instanceToTransfer_Index = train_dataset.index[k]
instanceToTrasferList_index = instanceToTrasferList_index + [instanceToTransfer_Index]
train_dataset_instancesToTransfer = train_dataset.take(instanceToTrasferList_index)
train_dataset_instancesToTransfer.index
train_dataset_instancesToTransfer = train_dataset_instancesToTransfer.reset_index()
train_dataset_instancesToTransfer.drop(columns='index')
# Adjust train dataset | Phase 3: Create an adjusted train dataset | c. Update the submission dataset with the transferring data in b.
submission_dataset_range = len(submission_dataset.Patient)
train_dataset_instancesToTransfer_range = len(train_dataset_instancesToTransfer.Patient)
Patient_List = []
Week_List = []
FVC_List = []
Percent_List = []
Age_List = []
Sex_List = []
SmokingStatus_List = []
for m in range (0,submission_dataset_range):
timesCopy = 0
if(submission_dataset.Patient[m] in IDList):
referenceWeek = submission_dataset.Weeks[m]
for n in range (0,train_dataset_instancesToTransfer_range):
if(train_dataset_instancesToTransfer.Patient[n] == submission_dataset.Patient[m] and train_dataset_instancesToTransfer.Weeks[n] == referenceWeek):
if (timesCopy == 0):
submission_dataset.FVC[m] = train_dataset_instancesToTransfer.FVC[n]
submission_dataset.Percent[m] = train_dataset_instancesToTransfer.Percent[n]
submission_dataset.Age[m] = train_dataset_instancesToTransfer.Age[n]
submission_dataset.Sex[m] = train_dataset_instancesToTransfer.Sex[n]
submission_dataset.SmokingStatus[m] = train_dataset_instancesToTransfer.SmokingStatus[n]
timesCopy = timesCopy + 1
else:
# Additional instances to include
Patient_List = Patient_List + [train_dataset_instancesToTransfer.Patient[n]]
Week_List = Week_List + [train_dataset_instancesToTransfer.Weeks[n]]
FVC_List = FVC_List + [train_dataset_instancesToTransfer.FVC[n]]
Percent_List = Percent_List + [train_dataset_instancesToTransfer.Percent[n]]
Age_List = Age_List + [train_dataset_instancesToTransfer.Age[n]]
Sex_List = Sex_List + [train_dataset_instancesToTransfer.Sex[n]]
SmokingStatus_List = SmokingStatus_List + [train_dataset_instancesToTransfer.SmokingStatus[n]]
# Adjust train dataset | Phase 3: Create an adjusted train dataset | d. Add common values to submission dataset given those from the test dataset (Features: Age, Sex, SmokingStatus)
submission_dataset_range = len(submission_dataset.Patient)
for o in range(0,submission_dataset_range):
if(submission_dataset.Patient[o] in IDList):
for p in range(0,train_dataset_instancesToTransfer_range):
if(submission_dataset.Patient[o] == train_dataset_instancesToTransfer.Patient[p]):
submission_dataset.Age[o] = train_dataset_instancesToTransfer.Age[p]
submission_dataset.Sex[o] = train_dataset_instancesToTransfer.Sex[p]
submission_dataset.SmokingStatus[o] = train_dataset_instancesToTransfer.SmokingStatus[p]
# Scenario to replace NaN values: Average FVC for a given Patient
averageFVC = train_dataset_instancesToTransfer.FVC[train_dataset_instancesToTransfer.Patient == train_dataset_instancesToTransfer.Patient[p]].mean()
submission_dataset.FVC[o] = averageFVC
# Adjust train dataset | Phase 4: Create an adjusted train dataset | e. Concatenate the submission dataset (and additional instance) and the adjusted train dataset
additionalDictionary = {submission_dataset.columns[0]:Patient_List,
submission_dataset.columns[1]:Week_List,
submission_dataset.columns[2]:FVC_List,
submission_dataset.columns[3]:Percent_List,
submission_dataset.columns[4]:Age_List,
submission_dataset.columns[5]:Sex_List,
submission_dataset.columns[6]:SmokingStatus_List}
additional_dataset = pd.DataFrame(data=additionalDictionary)
frames = [train_dataset_adjusted,submission_dataset,additional_dataset]
train_dataset_adjusted = pd.concat(frames)
train_dataset_adjusted = train_dataset_adjusted.reset_index()
train_dataset_adjusted = train_dataset_adjusted.drop(columns='index')
# Adjust train dataset with pydicom train dataset) | Phase 1: Get pydicom train dataset
if(PydicomMode == True):
filename_pydicom = 'train_pydicom.csv'
path_ProductType_pydicom = path_ProductType + 'outcome/'
train_dataset_pydicom = pd.read_csv(path_ProductType_pydicom + filename_pydicom)
# Adjust train dataset with pydicom train dataset) | Phase 2: Include values from train_adjusted_pydicom.py into adjusted train dataset
if(PydicomMode == True):
instancesToInclude_List = list(train_dataset_pydicom.Patient)
InstanceToInclude_Patient = i
newIndex = len(train_dataset_adjusted.Patient)
for i in instancesToInclude_List:
# Get instance to transfer
InstanceToInclude_Patient = i
InstanceToInclude_Week = list(train_dataset_pydicom[train_dataset_pydicom.Patient == i].Weeks)[0]
InstanceToInclude_indexType1_Exhalation = list(train_dataset_pydicom[train_dataset_pydicom.Patient == i].indexType1_Exhalation)[0]
InstanceToInclude_indexType1_Inhalation = list(train_dataset_pydicom[train_dataset_pydicom.Patient == i].indexType1_Inhalation)[0]
InstanceToInclude_ImageType = list(train_dataset_pydicom[train_dataset_pydicom.Patient == i].ImageType)[0]
# Put instance into train_dataset_adjusted DataFrame
if (0 in list(train_dataset_adjusted[train_dataset_adjusted.Patient == i].Weeks)):
# Get index
indexToComplete = list(train_dataset_adjusted[train_dataset_adjusted.Weeks == 0].Patient[train_dataset_adjusted.Patient == i].index)
# Complete instance
train_dataset_adjusted.indexType1_Exhalation[indexToComplete] = InstanceToInclude_indexType1_Exhalation
train_dataset_adjusted.indexType1_Inhalation[indexToComplete] = InstanceToInclude_indexType1_Inhalation
train_dataset_adjusted.ImageType[indexToComplete] = str(InstanceToInclude_ImageType)
else:
# Add new instance
## Get repeatable instances
repeatableInstance1 = list(train_dataset_adjusted[train_dataset_adjusted.Patient == i].FVC)[0]
repeatableInstance2 = list(train_dataset_adjusted[train_dataset_adjusted.Patient == i].Percent)[0]
repeatableInstance3 = list(train_dataset_adjusted[train_dataset_adjusted.Patient == i].Age)[0]
repeatableInstance4 = list(train_dataset_adjusted[train_dataset_adjusted.Patient == i].Sex)[0]
repeatableInstance5 = list(train_dataset_adjusted[train_dataset_adjusted.Patient == i].SmokingStatus)[0]
## Get Dictionary
DictionaryToInclude = {}
DictionaryToInclude['Patient'] = InstanceToInclude_Patient
DictionaryToInclude['Weeks'] = InstanceToInclude_Week
DictionaryToInclude['FVC'] = repeatableInstance1
DictionaryToInclude['Percent'] = repeatableInstance2
DictionaryToInclude['Age'] = repeatableInstance3
DictionaryToInclude['Sex'] = repeatableInstance4
DictionaryToInclude['SmokingStatus'] = repeatableInstance5
DictionaryToInclude['indexType1_Exhalation'] = InstanceToInclude_indexType1_Exhalation
DictionaryToInclude['indexType1_Inhalation'] = InstanceToInclude_indexType1_Inhalation
DictionaryToInclude['ImageType'] = str(InstanceToInclude_ImageType)
## Get DataFrame
DataFrameToInclude = pd.DataFrame(data = DictionaryToInclude, index=[newIndex])
newIndex = newIndex + 1
## Concatenate DataFrame
train_dataset_adjusted = pd.concat([train_dataset_adjusted, DataFrameToInclude])
# nan filling
train_dataset_adjusted = train_dataset_adjusted.replace('iNaN',np.nan)
# Specifying dtype
train_dataset_adjusted.astype({'Patient': 'O'}).dtypes
train_dataset_adjusted.astype({'Weeks': 'float64'}).dtypes
train_dataset_adjusted.astype({'Percent': 'float64'}).dtypes
train_dataset_adjusted.astype({'Age': 'float64'}).dtypes
train_dataset_adjusted.astype({'Sex': 'O'}).dtypes
train_dataset_adjusted.astype({'SmokingStatus': 'O'}).dtypes
train_dataset_adjusted.astype({'FVC': 'float64'}).dtypes
if(PydicomMode == True):
train_dataset_adjusted.astype({'indexType1_Exhalation': 'float64'}).dtypes
train_dataset_adjusted.astype({'indexType1_Inhalation': 'float64'}).dtypes
train_dataset_adjusted.astype({'ImageType': 'O'}).dtypes
# Get CSV file
path_output = path_ProductType +'outcome/'
if(PydicomMode == False):
filename_output = 'train_adjusted.csv'
else:
filename_output = 'train_adjusted_pydicom.csv'
train_dataset_adjusted.to_csv(path_output+filename_output)
# Function Result
resultFunction = train_dataset_adjusted,path_output,filename_output
# Report Mode
if reportMode == True:
print("=========================================")
print("Function Report")
print("=========================================")
print("DataFrame")
print("=========================================")
print(resultFunction[0])
print("=========================================")
print("Product Type: ", ProductType)
print("=========================================")
print("Pydicom Mode: ", PydicomMode)
print("=========================================")
print("Location of Input File:", resultFunction[1])
print("=========================================")
print("Input File saved as:", resultFunction[2])
print("=========================================")
print("Data type of the dataset")
print("=========================================")
print(resultFunction[0].dtypes)
print("=========================================")
print("Test result Function 3: Success")
print("=========================================")
return resultFunction
if testMode == True:
    # Exercise stacking_Dataset_Builder with the prototype configuration and
    # report what it produced.
    ProductType = 'prototype'
    PydicomMode = True
    reportMode = False
    resultFunction3 = stacking_Dataset_Builder(ProductType, PydicomMode, reportMode, testMode)
    separator_line = "========================================="
    print(separator_line)
    print("Function Report")
    print(separator_line)
    print("DataFrame")
    print(separator_line)
    print(resultFunction3[0])
    print(separator_line)
    print(separator_line)
    print("Product Type: ", ProductType)
    print(separator_line)
    print("Pydicom Mode: ", PydicomMode)
    print(separator_line)
    print("Location of Input File:", resultFunction3[1])
    print(separator_line)
    print("Input File saved as:", resultFunction3[2])
    print(separator_line)
    print("Data type of the dataset")
    print(separator_line)
    print(resultFunction3[0].dtypes)
    print(separator_line)
    print("Test result Function 3: Success")
    print(separator_line)
"""
=========================================
Function 4: Submission dataset builder (Stacking solution case) after ML outcome
=========================================
Purpose: Build a submission CSV file (Stacking solution case)
Raw code reference (see Tester.py): Test 17
About the Shape Parameter: It amounts to c = 0.12607421874999922 for every instance in the object of concern. The c value has been computed
using the following data-fitting scope: (1) Data: FVC predictions; (2) Probability density function as follows (statistical function
in scipy known as scipy.stats.loglaplace): loglaplace.pdf(x, c, loc=0, scale=1).
"""
def Stacking_Submission_Dataset_Builder(ProductType,shapeParameter_DataFrame,pydicomMode,testMode):
# Set Product Type and its corresponding path
if ProductType == 'population':
path_ProductType = 'Y:/Kaggle_OSIC/2-Data/'
if ProductType == 'prototype':
path_ProductType = 'Y:/Kaggle_OSIC/3-Data (Prototype)/'
if ProductType == 'sampling':
path_ProductType = 'Y:/Kaggle_OSIC/4-Data (Sampling)/'
# Set working directory
import os
os.chdir(path_ProductType + 'outcome/')
# Get result data and test dataset
import pandas as pd
if(pydicomMode == True):
filename_resultDataset = 'result_pydicom.csv'
else:
filename_resultDataset = 'result.csv'
result_dataset = pd.read_csv(path_ProductType+'outcome/'+filename_resultDataset)
filename_testDataset = 'test.csv'
test_dataset = pd.read_csv(path_ProductType+filename_testDataset)
# Get submission instances | Phase 1: Index
IDList = list(test_dataset.Patient)
IDList_index_dictionary = {}
for i in IDList:
itemToInclude = result_dataset.Patient[result_dataset.Patient==i].index
IDList_index_dictionary[i] = itemToInclude
# Get submission instances | Phase 2: Extract submission instances from result dataset
IDList_index = []
IDList_columns = ['Patient', 'Weeks', 'Random Forest', 'Lasso', 'Gradient Boosting', 'Stacking Regressor']
for j in IDList: IDList_index = IDList_index + list(IDList_index_dictionary[j])
submission_dataset = result_dataset.loc[IDList_index]
# Get submission instances | Phase 3: Extract duplicated instances
submission_dataset = submission_dataset.drop_duplicates(subset=['Patient','Weeks'])
# Get submission instances | Phase 4: Sort submission instances by Weeks (ascending) and reset index
submission_dataset = submission_dataset.sort_values(by=['Weeks','Patient'])
submission_dataset = submission_dataset.reset_index()
submission_dataset = submission_dataset.drop(columns=['Unnamed: 0','index'])
# Get confidence measure | Phase 1: Get shape Parameter DataFrame by default
## When shapeParameter_DataFrame==[], parameter c = 0.126074 is assigned by default per model and ID
if (shapeParameter_DataFrame == []):
shapeParameter_dictionary = {}
shapeParameter = 0.126074
MLModelList = IDList_columns[2:]
for l in MLModelList:
keyShapeParameter = 'c Parameter_'+l
shapeParameter_dictionary[keyShapeParameter] = [shapeParameter,shapeParameter,shapeParameter,shapeParameter,shapeParameter]
shapeParameter_DataFrame = pd.DataFrame(data = shapeParameter_dictionary, index = IDList)
# Get confidence measure | Phase 2: Get standard-deviation-clipped per instance
## Metric - Part 1: standard_deviation_clipped = max(standard_deviation, 70)
## Build a DataFrame with standard-deviation-clipped values given an ID and a ML Model: standardDeviationClipped_DataFrame
standardDeviationClipped_DataFrame = shapeParameter_DataFrame.copy()
columnLabels = list(standardDeviationClipped_DataFrame.columns)
columnLabels_SDC_dictionary = {}
for i in columnLabels:
columnLabels_item ='SD_Clipped'+i[11:]
columnLabels_SDC_dictionary[i]=columnLabels_item
standardDeviationClipped_DataFrame = standardDeviationClipped_DataFrame.rename(columns=columnLabels_SDC_dictionary)
import numpy as np
standardDeviationClipped_DataFrame = standardDeviationClipped_DataFrame.replace(3,np.nan)
ID_List = list(standardDeviationClipped_DataFrame.index)
SDModel_List = list(standardDeviationClipped_DataFrame.columns)
CParameter_List = list(shapeParameter_DataFrame.columns)
numy = 0
from scipy.stats import loglaplace
for j in ID_List:
for k in SDModel_List:
itemToInclude = CParameter_List[numy]
c = shapeParameter_DataFrame[itemToInclude][j]
sd_LL = loglaplace.std(c, loc=0, scale=100)
standardDeviationClipped_DataFrame[k][j] = max(70,sd_LL) # j: index is ID | k: SD_Clipped_(ML Model)
numy = numy + 1
numy = 0
# Get confidence measure | Phase 3: Get metric axe per model: |FVC_true - FVC_predicted|
## Metric - Part 1: |FVC_true - FVC_pred|
if(pydicomMode == True):
variableNumber = 10
else:
variableNumber = 7
MLModelList = list(submission_dataset.columns[variableNumber:])
metric_dictionary = {}
for j in MLModelList:
metric_differential = abs(submission_dataset.FVC - submission_dataset[j])
metric_differential = list(metric_differential)
keyToInclude = 'metric_'+j
metric_dictionary[keyToInclude] = metric_differential
metric_DataFrame = pd.DataFrame(data=metric_dictionary)
# Get confidence measure | Phase 4: Get metric axe per model: min(|FVC_true - FVC_predicted|, 1000)
## metric per instance
## Metric - Part 2: min(|FVC_true - FVC_pred|,1000)
metricLabels = list(metric_DataFrame.columns)
instancesNumber = len(submission_dataset.index)
for i | |
import copy
import itertools
import logging
import sys
from collections import namedtuple
from .task import NeedInputsException
from .task import NEED_INPUTS, OPEN_STATUS_LIST, BAD_QA_STATUS,\
JOB_PENDING, REPROC, RERUN, FAILED_NEEDS_REPROC, NEEDS_QA
from . import XnatUtils
from . import utilities
# Module-level logger shared by the parser pipeline.
LOGGER = logging.getLogger('dax')
# Valid 'select' keywords for scan/assessor inputs, with the argument
# signature each keyword accepts when parsed from the processor yaml.
select_namespace = {
    'foreach': {'args': [{'optional': True, 'type': str}]},
    'one': {'args': []},
    'some': {'args': [{'optional': False, 'type': int}]},
    'all': {'args': []},
    'from': {'args': [{'optional': False, 'type': str}]}
}
# Valid 'select-session' keywords and their argument signatures.
select_session_namespace = {
    'current': {'args': []},
    'prior': {'args': [{'optional': False, 'type': int}]},
    'prior-with': {'args': [{'optional': False, 'type': int}]},
    'first': {'args': []},
    'first-with': {'args': []}
}
# Message templates used when input artefacts are missing or unusable.
no_scans_error = 'No scan of the required type/s ({}) found for input {}'
no_asrs_error = 'No assessors of the require type/s ({}) found for input {}'
scan_unusable_error = 'Scan {} is unusable for input {}'
asr_unusable_error = 'Assessor {} is unusable for input {}'
# Message templates used when validating the processor yaml structure.
missing_field_unnamed = 'Error: {} at position {} is missing {} field'
missing_field_named = "Error: {} '{}' is missing '{}' field"
bad_mode = ("Error: {} '{}': '{}' has an invalid value '{}'. "
            "It must be one of {}")
missing_resource_field_unnamed = \
    "Error in {} '{}': missing {} from resource at position {}"
missing_resource_field_named = \
    "Error in {} '{}': missing {} from resource field '{}'"
bad_resource_mode = \
    "Error in {} '{}'; resource field '{}' has an invalid value"
# Path templates for locating a resource under an assessor or scan
# ({0} = artefact path, {1} = resource label).
resource_paths = {
    'assessor': '{0}/out/resources/{1}',
    'scan': '{0}/resources/{1}'
}
# URI templates for the same resources when addressed through the REST API.
uri_paths = {
    'assessor': '{0}/data{1}/out/resources/{2}',
    'scan': '{0}/data{1}/resources/{2}'
}
# parser pipeline
# . check whether artefacts of the appropriate type are present for a given
# assessor
# . if they are, map them to inputs with the appropriate iteration
# . if no foreach select statements are present, generate one set of command
# parameters
# . if one or more foreach select statements are present, generate the
# appropriate cartesian product of command parameters
# . for each set of command parameters generated, create an assessor depending
# on the state of the artefacts listed in the command parameters
# . if one or more artefacts are of inappropriate quality
# BDB 6/5/21
# Removed inputs_by_type and related functions b/c it was not actually used.
# Also removed several class objects from ProcessorParser that
# were only used within scope of parse_session function
class ParserArtefact:
    """Lightweight record pairing a scan/assessor path with its resource list
    and its cached entity object, used while mapping session artefacts to
    processor inputs."""

    def __init__(self, path, resources, entity):
        # The artefact name is the last component of its XNAT path.
        self.name = path.split('/')[-1]
        self.path = path
        # BUG FIX: the original line ended with a stray trailing comma
        # (`self.resources = resources,`), which silently wrapped the
        # resources list in a 1-tuple.
        self.resources = resources
        self.entity = entity

    def __repr__(self):
        return '{}(path = {}, resources = {}, entity = {})'.format(
            self.__class__.__name__, self.path, self.resources, self.entity
        )
class SelectSessionParameters:
    """Value object holding a session-select mode and its numeric delta."""

    def __init__(self, mode, delta):
        self.mode = mode
        self.delta = delta

    def __repr__(self):
        return f'{self.__class__.__name__}(mode = {self.mode}, delta = {self.delta})'
# (timestamp, session) pair used when ordering sessions chronologically.
TimestampSession = namedtuple('TimestampSession', 'timestamp, session')
# (path, type, object) triple describing a located artefact.
ArtefactEntry = namedtuple('ArtefactEntry', 'path, type, object')
class ProcessorParser:
    """Parses a processor yaml definition and maps session artefacts to
    processor inputs in order to decide which assessors should exist."""

    # Allowed keys at each level of the v1 processor yaml schema.
    __schema_dict_v1 = {
        'top': set(['schema', 'inputs', 'xnat', 'attrs']),
        'xnat': set(['scans', 'assessors']),
        'scans': set(['select', 'types', 'nargs', 'resources', 'needs_qc']),
        'assessors': set(['select', 'types', 'nargs', 'resources', 'needs_qc']),
        'resources': set(['resource', 'varname', 'required'])
    }
def __init__(self, yaml_source, proctype=None):
(self.inputs, self.iteration_sources,
self.iteration_map, self.prior_session_count) =\
ProcessorParser.parse_inputs(yaml_source)
self.match_filters = ProcessorParser.parse_match_filters(yaml_source)
self.variables_to_inputs = ProcessorParser.parse_variables(self.inputs)
self.xsitype = yaml_source['attrs'].get('xsitype', 'proc:genProcData')
if proctype:
self.proctype = proctype
else:
self.proctype = XnatUtils.get_proctype(
yaml_source['inputs']['default']['spider_path'])[0]
self.is_longitudinal_ = ProcessorParser.is_longitudinal(yaml_source)
def parse_session(self, csess, sessions, pets=[]):
"""
Parse a session to determine whether new assessors should be created.
This call populates assessor_parameter_map.
:param csess: the session in question
:param sessions: the full list of sessions, including csess, for the
subject
:return: None
"""
for i in range(len(sessions) - 1):
if sessions[i].creation_timestamp() <\
sessions[i+1].creation_timestamp():
raise ValueError("session param is not ordered by datetime")
if not self.is_longitudinal_:
relevant_sessions = [csess]
else:
index = sessions.index(csess)
relevant_sessions = sessions[index:]
# BDB 6/5/21
# only include pets if this is the first mr session
if sessions.index(csess) == (len(sessions) - 1):
LOGGER.debug('session is first, including pets')
else:
LOGGER.debug('session is not first, not including pets')
pets = []
artefacts = ProcessorParser.parse_artefacts(relevant_sessions, pets)
# BDB 6/5/21
# The artefacts are a dictionary where the index key is the
# relative path of scan or assessor:
# /projects/PROJ/subjects/SUBJ/experiments/SESS/assessors/ASSR
# for every single assessor or scan. the value in the dictionary
# is a ParserArtefact object the includes a list of the scan/assr's
# resources and a CachedAssessor object. This can be used later
# to quickly access this information
# BDB 6/5/21
# next we will create a dictionary of just the artefacts for each of
# the inputs map the artefacts to the inputs, this is where
# we filter down the whole session to the types of scan/assessors we
# want. Then we decide what to do with the different combinations of
# those scans/assessors if we find multiple per input.
# maybe we should change the names?
# artefacts --> all_artefacts or all_session_arefacts
# artefacts_by_inputs --> input_artefacts_by_input or something
artefacts_by_input = ProcessorParser.map_artefacts_to_inputs(
relevant_sessions, self.inputs, pets)
# BDB 6/5/21
# at this point the pet scan should be just like any other input or
# artefact, it's just a path
# BDB 6/5/21
# artefacts_by_input is a dictionary where the key is the
# input name and the value is a list of artefact paths that match
# the input.
# These artefact paths are keys into the artefacts dictionary.
parameter_matrix = \
ProcessorParser.generate_parameter_matrix(
self.inputs,
self.iteration_sources,
self.iteration_map,
artefacts,
artefacts_by_input)
# BDB 6/5/21
# parameter_matrix is the combinations of inputs from the lists in
# artefacts_by_inputs. I think these are the cartesian product
# of lists in artefacts_by_input.
# BDB 6/5/21
# Next we filter down the combinations by applying
# any filters included in the yaml. currently
# the only filter supported is a match filter
# which help us only include combinations where one of the inputs
# is the same, e.g. the same T1 input
# This functions uses the artefacts dictionary to get the inputs field
# from each artefact for comparison.
parameter_matrix = ProcessorParser.filter_matrix(
parameter_matrix,
self.match_filters,
artefacts)
# BDB 6/5/21
# And now we use the parameter matrix as a list of what set of inputs
# we need assessors for
# by mapping to what assessors already exist by comparing
# the inputs field on existing assessors with our list of inputs
assessor_parameter_map = \
ProcessorParser.compare_to_existing(relevant_sessions,
self.proctype,
parameter_matrix)
# BDB 6/5/21
# assessor_parameter_map is list of tuples
# where each tuple is (inputs, assessor(s)) (if assesors exists already),
# if assessors don't exist assessors will empty list
# BDB 6/5/21
# so what we are returning is a list of tuples
# (set of inputs, existing asessors for these inputs)
return list(assessor_parameter_map)
def get_variable_set(self, assr):
assr_inputs = XnatUtils.get_assessor_inputs(assr)
# map from parameters to input resources
command_set = dict()
for k, v in list(self.variables_to_inputs.items()):
inp = self.inputs[v['input']]
artefact_type = inp['artefact_type']
resource = v['resource']
path_elements = [assr_inputs[v['input']], resource]
command_set[k] =\
resource_paths[artefact_type].format(*path_elements)
return command_set
def find_inputs(self, assr, sessions, assr_inputs):
variable_set = {}
input_list = []
# Check artefact status
LOGGER.debug('checking status of each artefact')
for artk, artv in list(assr_inputs.items()):
LOGGER.debug('checking status:' + artk)
inp = self.inputs[artk]
art_type = inp['artefact_type']
if art_type == 'scan' and not inp['needs_qc']:
continue
if art_type == 'scan':
# Check status of each input scan
for vinput in artv:
qstatus = XnatUtils.get_scan_status(sessions, vinput)
if qstatus.lower() == 'unusable':
raise NeedInputsException(artk + ': Not Usable')
else:
# Check status of each input assr
for vinput in artv:
pstatus, qstatus = XnatUtils.get_assr_status(sessions, vinput)
if pstatus in OPEN_STATUS_LIST + [NEED_INPUTS]:
raise NeedInputsException(artk + ': Not Ready')
if qstatus in [JOB_PENDING, REPROC, RERUN]:
raise NeedInputsException(artk + ': Not Ready')
if not inp['needs_qc']:
continue
if (qstatus in [FAILED_NEEDS_REPROC, NEEDS_QA]):
raise NeedInputsException(artk + ': Needs QC')
for badstatus in BAD_QA_STATUS:
if badstatus.lower() in qstatus.split(' ')[0].lower():
raise NeedInputsException(artk + ': Bad QC')
# Map from parameters to input resources
LOGGER.debug('mapping params to artefact resources')
for k, v in list(self.variables_to_inputs.items()):
LOGGER.debug('mapping:' + k)
inp = self.inputs[v['input']]
artefact_type = inp['artefact_type']
resource = v['resource']
# Find the resource
cur_res = None
for inp_res in inp['resources']:
if inp_res['varname'] == k:
cur_res = inp_res
break
# TODO: optimize this to get resource list only once
for vnum, vinput in enumerate(assr_inputs[v['input']]):
robj = assr._intf.select(
resource_paths[artefact_type].format(
vinput, resource))
# Get list of all files in the resource, relative paths
file_list = [x._urn for x in robj.files().get('path')]
if len(file_list) == 0:
LOGGER.debug('empty | |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import datetime
from typing import Any, Dict, List, Optional, Union
import msrest.serialization
from ._policy_client_enums import *
class Alias(msrest.serialization.Model):
    """The alias type.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param name: The alias name.
    :type name: str
    :param paths: The paths for an alias.
    :type paths: list[~azure.mgmt.resource.policy.v2021_06_01.models.AliasPath]
    :param type: The type of the alias. Possible values include: "NotSpecified", "PlainText",
     "Mask".
    :type type: str or ~azure.mgmt.resource.policy.v2021_06_01.models.AliasType
    :param default_path: The default path for an alias.
    :type default_path: str
    :param default_pattern: The default pattern for an alias.
    :type default_pattern: ~azure.mgmt.resource.policy.v2021_06_01.models.AliasPattern
    :ivar default_metadata: The default alias path metadata. Applies to the default path and to any
     alias path that doesn't have metadata.
    :vartype default_metadata: ~azure.mgmt.resource.policy.v2021_06_01.models.AliasPathMetadata
    """

    # Server-populated fields are read-only.
    _validation = {
        'default_metadata': {'readonly': True},
    }

    # Maps python attribute names to their JSON keys and serialization types.
    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'paths': {'key': 'paths', 'type': '[AliasPath]'},
        'type': {'key': 'type', 'type': 'str'},
        'default_path': {'key': 'defaultPath', 'type': 'str'},
        'default_pattern': {'key': 'defaultPattern', 'type': 'AliasPattern'},
        'default_metadata': {'key': 'defaultMetadata', 'type': 'AliasPathMetadata'},
    }

    def __init__(
        self,
        *,
        name: Optional[str] = None,
        paths: Optional[List["AliasPath"]] = None,
        type: Optional[Union[str, "AliasType"]] = None,
        default_path: Optional[str] = None,
        default_pattern: Optional["AliasPattern"] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.name = name
        self.paths = paths
        self.type = type
        self.default_path = default_path
        self.default_pattern = default_pattern
        # Read-only; populated by the service on responses.
        self.default_metadata = None
class AliasPath(msrest.serialization.Model):
    """The type of the paths for alias.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param path: The path of an alias.
    :type path: str
    :param api_versions: The API versions.
    :type api_versions: list[str]
    :param pattern: The pattern for an alias path.
    :type pattern: ~azure.mgmt.resource.policy.v2021_06_01.models.AliasPattern
    :ivar metadata: The metadata of the alias path. If missing, fall back to the default metadata
     of the alias.
    :vartype metadata: ~azure.mgmt.resource.policy.v2021_06_01.models.AliasPathMetadata
    """

    # Server-populated fields are read-only.
    _validation = {
        'metadata': {'readonly': True},
    }

    # Maps python attribute names to their JSON keys and serialization types.
    _attribute_map = {
        'path': {'key': 'path', 'type': 'str'},
        'api_versions': {'key': 'apiVersions', 'type': '[str]'},
        'pattern': {'key': 'pattern', 'type': 'AliasPattern'},
        'metadata': {'key': 'metadata', 'type': 'AliasPathMetadata'},
    }

    def __init__(
        self,
        *,
        path: Optional[str] = None,
        api_versions: Optional[List[str]] = None,
        pattern: Optional["AliasPattern"] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.path = path
        self.api_versions = api_versions
        self.pattern = pattern
        # Read-only; populated by the service on responses.
        self.metadata = None
class AliasPathMetadata(msrest.serialization.Model):
    """Metadata describing the token that an alias path refers to.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar type: The type of the token that the alias path is referring to. Possible values include:
     "NotSpecified", "Any", "String", "Object", "Array", "Integer", "Number", "Boolean".
    :vartype type: str or ~azure.mgmt.resource.policy.v2021_06_01.models.AliasPathTokenType
    :ivar attributes: The attributes of the token that the alias path is referring to. Possible
     values include: "None", "Modifiable".
    :vartype attributes: str or ~azure.mgmt.resource.policy.v2021_06_01.models.AliasPathAttributes
    """

    # Every field on this model is server-populated.
    _validation = {
        'type': {'readonly': True},
        'attributes': {'readonly': True},
    }

    _attribute_map = {
        'type': {'key': 'type', 'type': 'str'},
        'attributes': {'key': 'attributes', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Both attributes are filled in only during deserialization of a response.
        self.type = None
        self.attributes = None
class AliasPattern(msrest.serialization.Model):
    """Describes the pattern used by an alias path.

    :param phrase: The alias pattern phrase.
    :type phrase: str
    :param variable: The alias pattern variable.
    :type variable: str
    :param type: The type of alias pattern. Possible values include: "NotSpecified", "Extract".
    :type type: str or ~azure.mgmt.resource.policy.v2021_06_01.models.AliasPatternType
    """

    # msrest serialization map: attribute -> wire key and type token.
    _attribute_map = {
        'phrase': {'key': 'phrase', 'type': 'str'},
        'variable': {'key': 'variable', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        phrase: Optional[str] = None,
        variable: Optional[str] = None,
        type: Optional[Union[str, "AliasPatternType"]] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.phrase, self.variable, self.type = phrase, variable, type
class DataEffect(msrest.serialization.Model):
    """The data effect definition.

    :param name: The data effect name.
    :type name: str
    :param details_schema: The data effect details schema.
    :type details_schema: any
    """

    # msrest serialization map: attribute -> wire key and type token.
    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'details_schema': {'key': 'detailsSchema', 'type': 'object'},
    }

    def __init__(
        self,
        *,
        name: Optional[str] = None,
        details_schema: Optional[Any] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.name, self.details_schema = name, details_schema
class DataManifestCustomResourceFunctionDefinition(msrest.serialization.Model):
    """Defines a custom resource function usable inside policy rules.

    :param name: The function name as it will appear in the policy rule. eg - 'vault'.
    :type name: str
    :param fully_qualified_resource_type: The fully qualified control plane resource type that this
     function represents. eg - 'Microsoft.KeyVault/vaults'.
    :type fully_qualified_resource_type: str
    :param default_properties: The top-level properties that can be selected on the function's
     output. eg - [ "name", "location" ] if vault().name and vault().location are supported.
    :type default_properties: list[str]
    :param allow_custom_properties: A value indicating whether the custom properties within the
     property bag are allowed. Needs api-version to be specified in the policy rule eg -
     vault('2019-06-01').
    :type allow_custom_properties: bool
    """

    # msrest serialization map: attribute -> wire key and type token.
    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'fully_qualified_resource_type': {'key': 'fullyQualifiedResourceType', 'type': 'str'},
        'default_properties': {'key': 'defaultProperties', 'type': '[str]'},
        'allow_custom_properties': {'key': 'allowCustomProperties', 'type': 'bool'},
    }

    def __init__(
        self,
        *,
        name: Optional[str] = None,
        fully_qualified_resource_type: Optional[str] = None,
        default_properties: Optional[List[str]] = None,
        allow_custom_properties: Optional[bool] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.name = name
        self.fully_qualified_resource_type = fully_qualified_resource_type
        self.default_properties = default_properties
        self.allow_custom_properties = allow_custom_properties
class DataPolicyManifest(msrest.serialization.Model):
    """The data policy manifest.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: The ID of the data policy manifest.
    :vartype id: str
    :ivar name: The name of the data policy manifest (it's the same as the Policy Mode).
    :vartype name: str
    :ivar type: The type of the resource (Microsoft.Authorization/dataPolicyManifests).
    :vartype type: str
    :param namespaces: The list of namespaces for the data policy manifest.
    :type namespaces: list[str]
    :param policy_mode: The policy mode of the data policy manifest.
    :type policy_mode: str
    :param is_built_in_only: A value indicating whether policy mode is allowed only in built-in
     definitions.
    :type is_built_in_only: bool
    :param resource_type_aliases: An array of resource type aliases.
    :type resource_type_aliases:
     list[~azure.mgmt.resource.policy.v2021_06_01.models.ResourceTypeAliases]
    :param effects: The effect definition.
    :type effects: list[~azure.mgmt.resource.policy.v2021_06_01.models.DataEffect]
    :param field_values: The non-alias field accessor values that can be used in the policy rule.
    :type field_values: list[str]
    :param standard: The standard resource functions (subscription and/or resourceGroup).
    :type standard: list[str]
    :param custom: An array of data manifest custom resource definition.
    :type custom:
     list[~azure.mgmt.resource.policy.v2021_06_01.models.DataManifestCustomResourceFunctionDefinition]
    """

    # ARM resource identity fields are assigned by the service.
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
    }

    # msrest serialization map; dotted keys flatten into the 'properties' envelope.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'namespaces': {'key': 'properties.namespaces', 'type': '[str]'},
        'policy_mode': {'key': 'properties.policyMode', 'type': 'str'},
        'is_built_in_only': {'key': 'properties.isBuiltInOnly', 'type': 'bool'},
        'resource_type_aliases': {'key': 'properties.resourceTypeAliases', 'type': '[ResourceTypeAliases]'},
        'effects': {'key': 'properties.effects', 'type': '[DataEffect]'},
        'field_values': {'key': 'properties.fieldValues', 'type': '[str]'},
        'standard': {'key': 'properties.resourceFunctions.standard', 'type': '[str]'},
        'custom': {'key': 'properties.resourceFunctions.custom', 'type': '[DataManifestCustomResourceFunctionDefinition]'},
    }

    def __init__(
        self,
        *,
        namespaces: Optional[List[str]] = None,
        policy_mode: Optional[str] = None,
        is_built_in_only: Optional[bool] = None,
        resource_type_aliases: Optional[List["ResourceTypeAliases"]] = None,
        effects: Optional[List["DataEffect"]] = None,
        field_values: Optional[List[str]] = None,
        standard: Optional[List[str]] = None,
        custom: Optional[List["DataManifestCustomResourceFunctionDefinition"]] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        # Read-only identity fields; the service sets these on responses.
        self.id = self.name = self.type = None
        self.namespaces = namespaces
        self.policy_mode = policy_mode
        self.is_built_in_only = is_built_in_only
        self.resource_type_aliases = resource_type_aliases
        self.effects = effects
        self.field_values = field_values
        self.standard = standard
        self.custom = custom
class DataPolicyManifestListResult(msrest.serialization.Model):
    """A page of data policy manifests.

    :param value: An array of data policy manifests.
    :type value: list[~azure.mgmt.resource.policy.v2021_06_01.models.DataPolicyManifest]
    :param next_link: The URL to use for getting the next set of results.
    :type next_link: str
    """

    # msrest serialization map: attribute -> wire key and type token.
    _attribute_map = {
        'value': {'key': 'value', 'type': '[DataPolicyManifest]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        value: Optional[List["DataPolicyManifest"]] = None,
        next_link: Optional[str] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.value, self.next_link = value, next_link
class ErrorAdditionalInfo(msrest.serialization.Model):
    """The resource management error additional info.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar type: The additional info type.
    :vartype type: str
    :ivar info: The additional info.
    :vartype info: any
    """

    # Every field on this model is server-populated.
    _validation = {
        'type': {'readonly': True},
        'info': {'readonly': True},
    }

    _attribute_map = {
        'type': {'key': 'type', 'type': 'str'},
        'info': {'key': 'info', 'type': 'object'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Filled in only during deserialization of an error response.
        self.type = None
        self.info = None
class ErrorResponse(msrest.serialization.Model):
"""Common error response |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.